bootc_lib/
deploy.rs

1//! # Write deployments merging image with configmap
2//!
3//! Create a merged filesystem tree with the image and mounted configmaps.
4
5use std::collections::HashSet;
6use std::io::{BufRead, Write};
7use std::os::fd::AsFd;
8use std::process::Command;
9
10use anyhow::{Context, Result, anyhow};
11use bootc_kernel_cmdline::utf8::CmdlineOwned;
12use cap_std::fs::{Dir, MetadataExt};
13use cap_std_ext::cap_std;
14use cap_std_ext::dirext::CapStdExtDirExt;
15use fn_error_context::context;
16use ostree::{gio, glib};
17use ostree_container::OstreeImageReference;
18use ostree_ext::container as ostree_container;
19use ostree_ext::container::store::{ImageImporter, ImportProgress, PrepareResult, PreparedImport};
20use ostree_ext::oci_spec::image::{Descriptor, Digest};
21use ostree_ext::ostree::Deployment;
22use ostree_ext::ostree::{self, Sysroot};
23use ostree_ext::sysroot::SysrootLock;
24use ostree_ext::tokio_util::spawn_blocking_cancellable_flatten;
25
26use crate::progress_jsonl::{Event, ProgressWriter, SubTaskBytes, SubTaskStep};
27use crate::spec::ImageReference;
28use crate::spec::{BootOrder, HostSpec};
29use crate::status::labels_of_config;
30use crate::store::Storage;
31use crate::utils::async_task_with_spinner;
32
// TODO use https://github.com/ostreedev/ostree-rs-ext/pull/493/commits/afc1837ff383681b947de30c0cefc70080a4f87a
/// Prefix for ostree refs used to track bootc base container images.
/// NOTE(review): use sites are outside this chunk — confirm against the rest of the file.
const BASE_IMAGE_PREFIX: &str = "ostree/container/baseimage/bootc";
35
36/// Create an ImageProxyConfig with bootc's user agent prefix set.
37///
38/// This allows registries to distinguish "image pulls for bootc client runs"
39/// from other skopeo/containers-image users.
40pub(crate) fn new_proxy_config() -> ostree_ext::containers_image_proxy::ImageProxyConfig {
41    ostree_ext::containers_image_proxy::ImageProxyConfig {
42        user_agent_prefix: Some(format!("bootc/{}", env!("CARGO_PKG_VERSION"))),
43        ..Default::default()
44    }
45}
46
/// Set on an ostree commit if this is a derived commit
/// (NOTE(review): the readers/writers of this key are outside this chunk;
/// presumably it marks commits bootc layered on top of a base image — confirm at use sites).
const BOOTC_DERIVED_KEY: &str = "bootc.derived";
49
/// Variant of HostSpec but required to be filled out
pub(crate) struct RequiredHostSpec<'a> {
    // The base container image; required here, unlike the optional `HostSpec::image`.
    pub(crate) image: &'a ImageReference,
}
54
/// State of a locally fetched image
pub(crate) struct ImageState {
    /// Digest of the image manifest that was imported.
    pub(crate) manifest_digest: Digest,
    /// Image version string, if the image declared one.
    pub(crate) version: Option<String>,
    /// The ostree commit holding the imported image content.
    pub(crate) ostree_commit: String,
}
61
62impl<'a> RequiredHostSpec<'a> {
63    /// Given a (borrowed) host specification, "unwrap" its internal
64    /// options, giving a spec that is required to have a base container image.
65    pub(crate) fn from_spec(spec: &'a HostSpec) -> Result<Self> {
66        let image = spec
67            .image
68            .as_ref()
69            .ok_or_else(|| anyhow::anyhow!("Missing image in specification"))?;
70        Ok(Self { image })
71    }
72}
73
74impl From<ostree_container::store::LayeredImageState> for ImageState {
75    fn from(value: ostree_container::store::LayeredImageState) -> Self {
76        let version = value.version().map(|v| v.to_owned());
77        let ostree_commit = value.get_commit().to_owned();
78        Self {
79            manifest_digest: value.manifest_digest,
80            version,
81            ostree_commit,
82        }
83    }
84}
85
86impl ImageState {
87    /// Fetch the manifest corresponding to this image.  May not be available in all backends.
88    pub(crate) fn get_manifest(
89        &self,
90        repo: &ostree::Repo,
91    ) -> Result<Option<ostree_ext::oci_spec::image::ImageManifest>> {
92        ostree_container::store::query_image_commit(repo, &self.ostree_commit)
93            .map(|v| Some(v.manifest))
94    }
95}
96
97/// Wrapper for pulling a container image, wiring up status output.
98pub(crate) async fn new_importer(
99    repo: &ostree::Repo,
100    imgref: &ostree_container::OstreeImageReference,
101    booted_deployment: Option<&ostree::Deployment>,
102) -> Result<ostree_container::store::ImageImporter> {
103    let config = new_proxy_config();
104    let mut imp = ostree_container::store::ImageImporter::new(repo, imgref, config).await?;
105    imp.require_bootable();
106    // We do our own GC/prune in deploy::prune(), so skip the importer's internal one.
107    imp.disable_gc();
108    if let Some(deployment) = booted_deployment {
109        imp.set_sepolicy_commit(deployment.csum().to_string());
110    }
111    Ok(imp)
112}
113
/// Wrapper for pulling a container image with a custom proxy config (e.g. for unified storage).
///
/// Creates an [`ImageImporter`] that:
/// - requires the image to be marked bootable,
/// - skips the importer's internal GC (bootc prunes on its own), and
/// - when booted, passes the booted deployment's commit for SELinux policy
///   (per `set_sepolicy_commit`; exact labeling semantics live in ostree-ext).
pub(crate) async fn new_importer_with_config(
    repo: &ostree::Repo,
    imgref: &ostree_container::OstreeImageReference,
    config: ostree_ext::containers_image_proxy::ImageProxyConfig,
    booted_deployment: Option<&ostree::Deployment>,
) -> Result<ostree_container::store::ImageImporter> {
    let mut imp = ostree_container::store::ImageImporter::new(repo, imgref, config).await?;
    imp.require_bootable();
    // We do our own GC/prune in deploy::prune(), so skip the importer's internal one.
    imp.disable_gc();
    if let Some(deployment) = booted_deployment {
        imp.set_sepolicy_commit(deployment.csum().to_string());
    }
    Ok(imp)
}
130
131pub(crate) fn check_bootc_label(config: &ostree_ext::oci_spec::image::ImageConfiguration) {
132    if let Some(label) =
133        labels_of_config(config).and_then(|labels| labels.get(crate::metadata::BOOTC_COMPAT_LABEL))
134    {
135        match label.as_str() {
136            crate::metadata::COMPAT_LABEL_V1 => {}
137            o => crate::journal::journal_print(
138                libsystemd::logging::Priority::Warning,
139                &format!(
140                    "notice: Unknown {} value {}",
141                    crate::metadata::BOOTC_COMPAT_LABEL,
142                    o
143                ),
144            ),
145        }
146    } else {
147        crate::journal::journal_print(
148            libsystemd::logging::Priority::Warning,
149            &format!(
150                "notice: Image is missing label: {}",
151                crate::metadata::BOOTC_COMPAT_LABEL
152            ),
153        )
154    }
155}
156
157fn descriptor_of_progress(p: &ImportProgress) -> &Descriptor {
158    match p {
159        ImportProgress::OstreeChunkStarted(l) => l,
160        ImportProgress::OstreeChunkCompleted(l) => l,
161        ImportProgress::DerivedLayerStarted(l) => l,
162        ImportProgress::DerivedLayerCompleted(l) => l,
163    }
164}
165
166fn prefix_of_progress(p: &ImportProgress) -> &'static str {
167    match p {
168        ImportProgress::OstreeChunkStarted(_) | ImportProgress::OstreeChunkCompleted(_) => {
169            "ostree chunk"
170        }
171        ImportProgress::DerivedLayerStarted(_) | ImportProgress::DerivedLayerCompleted(_) => {
172            "layer"
173        }
174    }
175}
176
/// Configuration for layer progress printing
struct LayerProgressConfig {
    // Receives a start/complete event per layer.
    layers: tokio::sync::mpsc::Receiver<ostree_container::store::ImportProgress>,
    // Watch channel with byte-level progress for the layer currently downloading.
    layer_bytes: tokio::sync::watch::Receiver<Option<ostree_container::store::LayerProgress>>,
    // Manifest digest of the image being pulled; used as the progress event id.
    digest: Box<str>,
    // Number of layers that actually need to be fetched.
    n_layers_to_fetch: usize,
    // Total number of layers in the image (fetched + already cached).
    layers_total: usize,
    // Bytes that must actually be downloaded.
    bytes_to_download: u64,
    // Total size of all layers in bytes (downloaded + cached).
    bytes_total: u64,
    // Sink for machine-readable JSONL progress events.
    prog: ProgressWriter,
    // When set, hide the interactive progress bars.
    quiet: bool,
}
189
/// Write container fetch progress to standard output.
///
/// Drives two interactive progress bars (layers fetched, bytes within the
/// current layer) and mirrors the same information as machine-readable JSONL
/// events on `config.prog`. Runs until both progress channels close, then
/// prints a summary line and emits an initial "importing" step event.
///
/// Returns the `ProgressWriter` so the caller can emit the final
/// import-complete event on the same stream.
async fn handle_layer_progress_print(mut config: LayerProgressConfig) -> ProgressWriter {
    let start = std::time::Instant::now();
    // Total bytes accounted for by *completed* layers.
    let mut total_read = 0u64;
    let bar = indicatif::MultiProgress::new();
    if config.quiet {
        bar.set_draw_target(indicatif::ProgressDrawTarget::hidden());
    }
    // Outer bar: one tick per fetched layer.
    let layers_bar = bar.add(indicatif::ProgressBar::new(
        config.n_layers_to_fetch.try_into().unwrap(),
    ));
    // Inner bar: byte progress within the layer currently being fetched.
    let byte_bar = bar.add(indicatif::ProgressBar::new(0));
    layers_bar.set_style(
        indicatif::ProgressStyle::default_bar()
            .template("{prefix} {bar} {pos}/{len} {wide_msg}")
            .unwrap(),
    );
    let taskname = "Fetching layers";
    layers_bar.set_prefix(taskname);
    layers_bar.set_message("");
    byte_bar.set_prefix("Fetching");
    byte_bar.set_style(
        indicatif::ProgressStyle::default_bar()
                .template(
                    " └ {prefix} {bar} {binary_bytes}/{binary_total_bytes} ({binary_bytes_per_sec}) {wide_msg}",
                )
                .unwrap()
        );

    // Completed-layer subtasks accumulated so far, plus the in-flight one.
    let mut subtasks = vec![];
    let mut subtask: SubTaskBytes = Default::default();
    loop {
        tokio::select! {
            // Always handle layer changes first.
            biased;
            layer = config.layers.recv() => {
                if let Some(l) = layer {
                    let layer = descriptor_of_progress(&l);
                    let layer_type = prefix_of_progress(&l);
                    let short_digest = &layer.digest().digest()[0..21];
                    let layer_size = layer.size();
                    if l.is_starting() {
                        // Reset the progress bar for the newly started layer.
                        byte_bar.reset_elapsed();
                        byte_bar.reset_eta();
                        byte_bar.set_length(layer_size);
                        byte_bar.set_message(format!("{layer_type} {short_digest}"));

                        subtask = SubTaskBytes {
                            subtask: layer_type.into(),
                            description: format!("{layer_type}: {short_digest}").clone().into(),
                            id: short_digest.to_string().clone().into(),
                            bytes_cached: 0,
                            bytes: 0,
                            bytes_total: layer_size,
                        };
                    } else {
                        // Use the bar's length (actual blob size) rather than
                        // the manifest descriptor size for completion accounting.
                        let actual_size = byte_bar.length().unwrap_or(layer_size);
                        byte_bar.set_position(actual_size);
                        layers_bar.inc(1);
                        total_read = total_read.saturating_add(actual_size);
                        // Emit an event where bytes == total to signal completion.
                        subtask.bytes_total = actual_size;
                        subtask.bytes = actual_size;
                        subtasks.push(subtask.clone());
                        // Layer completion must not be dropped, so use the
                        // non-lossy send here.
                        config.prog.send(Event::ProgressBytes {
                            task: "pulling".into(),
                            description: format!("Pulling Image: {}", config.digest).into(),
                            id: (*config.digest).into(),
                            bytes_cached: config.bytes_total - config.bytes_to_download,
                            bytes: total_read,
                            bytes_total: config.bytes_to_download,
                            steps_cached: (config.layers_total - config.n_layers_to_fetch) as u64,
                            steps: layers_bar.position(),
                            steps_total: config.n_layers_to_fetch as u64,
                            subtasks: subtasks.clone(),
                        }).await;
                    }
                } else {
                    // If the receiver is disconnected, then we're done
                    break
                };
            },
            r = config.layer_bytes.changed() => {
                if r.is_err() {
                    // If the receiver is disconnected, then we're done
                    break
                }
                // Clone the watched value out so the borrow guard is dropped
                // before we await below.
                let bytes = {
                    let bytes = config.layer_bytes.borrow_and_update();
                    bytes.as_ref().cloned()
                };
                if let Some(bytes) = bytes {
                    // Update the bar length from the actual blob size, which
                    // may differ from the manifest descriptor size (e.g.
                    // containers-storage stores layers uncompressed).
                    byte_bar.set_length(bytes.total);
                    byte_bar.set_position(bytes.fetched);
                    subtask.bytes_total = bytes.total;
                    subtask.bytes = byte_bar.position();
                    // Intermediate byte updates are high-frequency and may be
                    // dropped, so use the lossy send.
                    config.prog.send_lossy(Event::ProgressBytes {
                        task: "pulling".into(),
                        description: format!("Pulling Image: {}", config.digest).into(),
                        id: (*config.digest).into(),
                        bytes_cached: config.bytes_total - config.bytes_to_download,
                        bytes: total_read + byte_bar.position(),
                        bytes_total: config.bytes_to_download,
                        steps_cached: (config.layers_total - config.n_layers_to_fetch) as u64,
                        steps: layers_bar.position(),
                        steps_total: config.n_layers_to_fetch as u64,
                        subtasks: subtasks.clone().into_iter().chain([subtask.clone()]).collect(),
                    }).await;
                }
            }
        }
    }
    byte_bar.finish_and_clear();
    layers_bar.finish_and_clear();
    if let Err(e) = bar.clear() {
        tracing::warn!("clearing bar: {e}");
    }
    // Print a human-readable throughput summary.
    let end = std::time::Instant::now();
    let elapsed = end.duration_since(start);
    let persec = total_read as f64 / elapsed.as_secs_f64();
    let persec = indicatif::HumanBytes(persec as u64);
    if let Err(e) = bar.println(&format!(
        "Fetched layers: {} in {} ({}/s)",
        indicatif::HumanBytes(total_read),
        indicatif::HumanDuration(elapsed),
        persec,
    )) {
        tracing::warn!("writing to stdout: {e}");
    }

    // Since the progress notifier closed, we know import has started
    // use as a heuristic to begin import progress
    // Cannot be lossy or it is dropped
    config
        .prog
        .send(Event::ProgressSteps {
            task: "importing".into(),
            description: "Importing Image".into(),
            id: (*config.digest).into(),
            steps_cached: 0,
            steps: 0,
            steps_total: 1,
            subtasks: [SubTaskStep {
                subtask: "importing".into(),
                description: "Importing Image".into(),
                id: "importing".into(),
                completed: false,
            }]
            .into(),
        })
        .await;

    // Return the writer
    config.prog
}
353
/// Gather all bound images in all deployments, then prune the image store,
/// using the gathered images as the roots (that will not be GC'd).
///
/// Returns Ok even when nothing was pruned; the pruned count is only logged
/// at debug level.
pub(crate) async fn prune_container_store(sysroot: &Storage) -> Result<()> {
    let ostree = sysroot.get_ostree()?;
    let deployments = ostree.deployments();
    let mut all_bound_images = Vec::new();
    for deployment in deployments {
        // Bound images explicitly attached to this deployment.
        let bound = crate::boundimage::query_bound_images_for_deployment(ostree, &deployment)?;
        all_bound_images.extend(bound.into_iter());
        // Also include the host image itself
        // Note: Use just the image name (not the full transport:image format) because
        // podman's image names don't include the transport prefix.
        if let Some(host_image) = crate::status::boot_entry_from_deployment(ostree, &deployment)?
            .image
            .map(|i| i.image)
        {
            all_bound_images.push(crate::boundimage::BoundImage {
                image: host_image.image.clone(),
                auth_file: None,
            });
        }
    }
    // Convert to a hashset of just the image names
    let image_names = HashSet::from_iter(all_bound_images.iter().map(|img| img.image.as_str()));
    // Everything not reachable from these roots is eligible for removal.
    let pruned = sysroot
        .get_ensure_imgstore()?
        .prune_except_roots(&image_names)
        .await?;
    tracing::debug!("Pruned images: {}", pruned.len());
    Ok(())
}
385
386/// Core disk space check: verify that `bytes_to_fetch` fits within available space,
387/// leaving at least `min_free` bytes reserved.
388fn check_disk_space_inner(
389    fd: impl AsFd,
390    bytes_to_fetch: u64,
391    min_free: u64,
392    imgref: &ImageReference,
393) -> Result<()> {
394    let stat = rustix::fs::fstatvfs(fd)?;
395    let bytes_avail = stat.f_bsize.checked_mul(stat.f_bavail).unwrap_or(u64::MAX);
396    let usable = bytes_avail.saturating_sub(min_free);
397    tracing::trace!("bytes_avail: {bytes_avail} min_free: {min_free} usable: {usable}");
398
399    if bytes_to_fetch > usable {
400        anyhow::bail!(
401            "Insufficient free space for {image} (available: {available} required: {required})",
402            available = ostree_ext::glib::format_size(usable),
403            required = ostree_ext::glib::format_size(bytes_to_fetch),
404            image = imgref.image,
405        );
406    }
407    Ok(())
408}
409
410/// Verify there is sufficient disk space to pull an image into the ostree repo.
411/// Respects the repository's configured min-free-space threshold.
412pub(crate) fn check_disk_space_ostree(
413    repo: &ostree::Repo,
414    image_meta: &PreparedImportMeta,
415    imgref: &ImageReference,
416) -> Result<()> {
417    let min_free = repo.min_free_space_bytes().unwrap_or(0);
418    check_disk_space_inner(
419        repo.dfd_borrow(),
420        image_meta.bytes_to_fetch,
421        min_free,
422        imgref,
423    )
424}
425
426/// Verify there is sufficient disk space to pull an image into the composefs store
427/// via the ostree unified-storage path (uses `PreparedImportMeta`).
428pub(crate) fn check_disk_space_unified(
429    cfs: &crate::store::ComposefsRepository,
430    image_meta: &PreparedImportMeta,
431    imgref: &ImageReference,
432) -> Result<()> {
433    check_disk_space_inner(cfs.objects_dir()?, image_meta.bytes_to_fetch, 0, imgref)
434}
435
436/// Verify there is sufficient disk space to pull an image into the composefs store
437/// for the native composefs backend (uses a raw `ImageManifest`).
438pub(crate) fn check_disk_space_composefs(
439    cfs: &crate::store::ComposefsRepository,
440    manifest: &ostree_ext::oci_spec::image::ImageManifest,
441    imgref: &ImageReference,
442) -> Result<()> {
443    let bytes_to_fetch: u64 = manifest
444        .layers()
445        .iter()
446        .map(|l: &ostree_ext::oci_spec::image::Descriptor| l.size())
447        .sum();
448    check_disk_space_inner(cfs.objects_dir()?, bytes_to_fetch, 0, imgref)
449}
450
/// Metadata for an image pull that has been prepared but not yet executed.
pub(crate) struct PreparedImportMeta {
    // The importer that will perform the fetch/import.
    pub imp: ImageImporter,
    // The prepared import plan (layers to fetch, manifest, config).
    pub prep: Box<PreparedImport>,
    // Manifest digest of the target image.
    pub digest: Digest,
    // Number of layers that actually need to be downloaded.
    pub n_layers_to_fetch: usize,
    // Total layer count in the image (including already-cached layers).
    pub layers_total: usize,
    // Bytes that actually need to be downloaded.
    pub bytes_to_fetch: u64,
    // Total size of all layers in bytes.
    pub bytes_total: u64,
}
460
/// Outcome of preparing a pull: either there is work to do, or the image is
/// already fully present locally.
pub(crate) enum PreparedPullResult {
    // Layers must be fetched; carries the prepared plan.
    Ready(Box<PreparedImportMeta>),
    // Nothing to do; carries the existing local image state.
    AlreadyPresent(Box<ImageState>),
}
465
/// Prepare to pull `imgref` into the ostree repo, without fetching any layers.
///
/// Canonicalizes the image reference, sets up an importer, and runs the
/// prepare phase. Returns `AlreadyPresent` (printing a "No changes" notice)
/// when the image is already imported, otherwise a `Ready` plan with layer
/// and byte accounting for the subsequent fetch.
pub(crate) async fn prepare_for_pull(
    repo: &ostree::Repo,
    imgref: &ImageReference,
    target_imgref: Option<&OstreeImageReference>,
    booted_deployment: Option<&ostree::Deployment>,
) -> Result<PreparedPullResult> {
    let imgref_canonicalized = imgref.clone().canonicalize()?;
    tracing::debug!("Canonicalized image reference: {imgref_canonicalized:#}");
    let ostree_imgref = &OstreeImageReference::from(imgref_canonicalized);
    let mut imp = new_importer(repo, ostree_imgref, booted_deployment).await?;
    // Optionally record the import under a different (target) reference.
    if let Some(target) = target_imgref {
        imp.set_target(target);
    }
    let prep = match imp.prepare().await? {
        PrepareResult::AlreadyPresent(c) => {
            println!("No changes in {imgref:#} => {}", c.manifest_digest);
            return Ok(PreparedPullResult::AlreadyPresent(Box::new((*c).into())));
        }
        PrepareResult::Ready(p) => p,
    };
    // Warn if the image lacks (or has an unknown) bootc compat label.
    check_bootc_label(&prep.config);
    if let Some(warning) = prep.deprecated_warning() {
        ostree_ext::cli::print_deprecated_warning(warning).await;
    }
    ostree_ext::cli::print_layer_status(&prep);
    let layers_to_fetch = prep.layers_to_fetch().collect::<Result<Vec<_>>>()?;

    // Precompute fetch accounting for progress reporting and disk-space checks.
    let prepared_image = PreparedImportMeta {
        imp,
        n_layers_to_fetch: layers_to_fetch.len(),
        layers_total: prep.all_layers().count(),
        bytes_to_fetch: layers_to_fetch.iter().map(|(l, _)| l.layer.size()).sum(),
        bytes_total: prep.all_layers().map(|l| l.layer.size()).sum(),
        digest: prep.manifest_digest.clone(),
        prep,
    };

    Ok(PreparedPullResult::Ready(Box::new(prepared_image)))
}
505
506/// Check whether the image exists in bootc's unified container storage.
507///
508/// This is used for auto-detection: if the image already exists in bootc storage
509/// (e.g., from a previous `bootc image set-unified` or LBI pull), we can use
510/// the unified storage path for faster imports.
511///
512/// Returns true if the image exists in bootc storage.
513pub(crate) async fn image_exists_in_unified_storage(
514    store: &Storage,
515    imgref: &ImageReference,
516) -> Result<bool> {
517    let imgstore = store.get_ensure_imgstore()?;
518    let image_ref_str = imgref.to_transport_image()?;
519    imgstore.exists(&image_ref_str).await
520}
521
/// Unified approach: Use bootc's CStorage to pull the image, then prepare from containers-storage.
/// This reuses the same infrastructure as LBIs.
///
/// Flow: (1) podman-pull the image into bootc's container storage, then
/// (2) run the ostree importer's prepare phase reading from that storage via
/// the `containers-storage` transport (skopeo configured with bootc storage as
/// an additional image store). Returns the same result shape as
/// [`prepare_for_pull`].
pub(crate) async fn prepare_for_pull_unified(
    repo: &ostree::Repo,
    imgref: &ImageReference,
    target_imgref: Option<&OstreeImageReference>,
    store: &Storage,
    booted_deployment: Option<&ostree::Deployment>,
) -> Result<PreparedPullResult> {
    // Get or initialize the bootc container storage (same as used for LBIs)
    let imgstore = store.get_ensure_imgstore()?;

    let image_ref_str = imgref.to_transport_image()?;

    // Always pull to ensure we have the latest image, whether from a remote
    // registry or a locally rebuilt image
    tracing::info!(
        "Unified pull: pulling from transport '{}' to bootc storage",
        &imgref.transport
    );

    // Pull the image to bootc storage using the same method as LBIs
    // Show a spinner since podman pull can take a while and doesn't output progress
    let pull_msg = format!("Pulling {} to bootc storage", &image_ref_str);
    async_task_with_spinner(&pull_msg, async move {
        imgstore
            .pull(&image_ref_str, crate::podstorage::PullMode::Always)
            .await
    })
    .await?;

    // Now create a containers-storage reference to read from bootc storage
    tracing::info!("Unified pull: now importing from containers-storage transport");
    let containers_storage_imgref = ImageReference {
        transport: "containers-storage".to_string(),
        image: imgref.image.clone(),
        signature: imgref.signature.clone(),
    };
    let ostree_imgref = OstreeImageReference::from(containers_storage_imgref);

    // Configure the importer to use bootc storage as an additional image store
    let mut config = new_proxy_config();
    let mut cmd = Command::new("skopeo");
    // Use the physical path to bootc storage from the Storage struct
    let storage_path = format!(
        "{}/{}",
        store.physical_root_path,
        crate::podstorage::CStorage::subpath()
    );
    crate::podstorage::set_additional_image_store(&mut cmd, &storage_path);
    config.skopeo_cmd = Some(cmd);

    // Use the preparation flow with the custom config
    let mut imp = new_importer_with_config(repo, &ostree_imgref, config, booted_deployment).await?;
    if let Some(target) = target_imgref {
        imp.set_target(target);
    }
    let prep = match imp.prepare().await? {
        PrepareResult::AlreadyPresent(c) => {
            println!("No changes in {imgref:#} => {}", c.manifest_digest);
            return Ok(PreparedPullResult::AlreadyPresent(Box::new((*c).into())));
        }
        PrepareResult::Ready(p) => p,
    };
    // Warn if the image lacks (or has an unknown) bootc compat label.
    check_bootc_label(&prep.config);
    if let Some(warning) = prep.deprecated_warning() {
        ostree_ext::cli::print_deprecated_warning(warning).await;
    }
    ostree_ext::cli::print_layer_status(&prep);
    let layers_to_fetch = prep.layers_to_fetch().collect::<Result<Vec<_>>>()?;

    // Log that we're importing a new image from containers-storage
    const PULLING_NEW_IMAGE_ID: &str = "6d5e4f3a2b1c0d9e8f7a6b5c4d3e2f1a0";
    tracing::info!(
        message_id = PULLING_NEW_IMAGE_ID,
        bootc.image.reference = &imgref.image,
        bootc.image.transport = "containers-storage",
        bootc.original_transport = &imgref.transport,
        bootc.status = "importing_from_storage",
        "Importing image from bootc storage: {}",
        ostree_imgref
    );

    // Precompute fetch accounting for progress reporting and disk-space checks.
    let prepared_image = PreparedImportMeta {
        imp,
        n_layers_to_fetch: layers_to_fetch.len(),
        layers_total: prep.all_layers().count(),
        bytes_to_fetch: layers_to_fetch.iter().map(|(l, _)| l.layer.size()).sum(),
        bytes_total: prep.all_layers().map(|l| l.layer.size()).sum(),
        digest: prep.manifest_digest.clone(),
        prep,
    };

    Ok(PreparedPullResult::Ready(Box::new(prepared_image)))
}
617
/// Unified pull: Use podman to pull to containers-storage, then read from there
///
/// Wraps [`prepare_for_pull_unified`]: if the image is already present, just
/// logs and returns it; otherwise checks composefs disk space and executes the
/// import via [`pull_from_prepared`] under a `containers-storage` reference
/// (so the success log isn't duplicated).
pub(crate) async fn pull_unified(
    repo: &ostree::Repo,
    imgref: &ImageReference,
    target_imgref: Option<&OstreeImageReference>,
    quiet: bool,
    prog: ProgressWriter,
    store: &Storage,
    booted_deployment: Option<&ostree::Deployment>,
) -> Result<Box<ImageState>> {
    match prepare_for_pull_unified(repo, imgref, target_imgref, store, booted_deployment).await? {
        PreparedPullResult::AlreadyPresent(existing) => {
            // Log that the image was already present (Debug level since it's not actionable)
            const IMAGE_ALREADY_PRESENT_ID: &str = "5c4d3e2f1a0b9c8d7e6f5a4b3c2d1e0f9";
            tracing::debug!(
                message_id = IMAGE_ALREADY_PRESENT_ID,
                bootc.image.reference = &imgref.image,
                bootc.image.transport = &imgref.transport,
                bootc.status = "already_present",
                "Image already present: {}",
                imgref
            );
            Ok(existing)
        }
        PreparedPullResult::Ready(prepared_image_meta) => {
            // Fail early if the composefs store lacks room for the fetch.
            check_disk_space_unified(
                store.get_ensure_composefs()?.as_ref(),
                &prepared_image_meta,
                imgref,
            )?;
            // To avoid duplicate success logs, pass a containers-storage imgref to the importer
            let cs_imgref = ImageReference {
                transport: "containers-storage".to_string(),
                image: imgref.image.clone(),
                signature: imgref.signature.clone(),
            };
            pull_from_prepared(&cs_imgref, quiet, prog, *prepared_image_meta).await
        }
    }
}
658
/// Execute a prepared image pull: fetch layers (reporting progress on stdout
/// and via JSONL events) and import them into the repo.
///
/// Spawns the progress printer as a task that runs concurrently with the
/// import, then emits a final "importing complete" step event. Returns the
/// resulting local [`ImageState`].
#[context("Pulling")]
pub(crate) async fn pull_from_prepared(
    imgref: &ImageReference,
    quiet: bool,
    prog: ProgressWriter,
    mut prepared_image: PreparedImportMeta,
) -> Result<Box<ImageState>> {
    // Wire up the importer's progress channels before starting the import.
    let layer_progress = prepared_image.imp.request_progress();
    let layer_byte_progress = prepared_image.imp.request_layer_progress();
    let digest = prepared_image.digest.clone();
    let digest_imp = prepared_image.digest.clone();

    // The printer owns the ProgressWriter while the import runs and hands it
    // back when the progress channels close.
    let printer = tokio::task::spawn(async move {
        handle_layer_progress_print(LayerProgressConfig {
            layers: layer_progress,
            layer_bytes: layer_byte_progress,
            digest: digest.as_ref().into(),
            n_layers_to_fetch: prepared_image.n_layers_to_fetch,
            layers_total: prepared_image.layers_total,
            bytes_to_download: prepared_image.bytes_to_fetch,
            bytes_total: prepared_image.bytes_total,
            prog,
            quiet,
        })
        .await
    });
    let import = prepared_image.imp.import(prepared_image.prep).await;
    let prog = printer.await?;
    // Both the progress and the import are done, so import is done as well
    prog.send(Event::ProgressSteps {
        task: "importing".into(),
        description: "Importing Image".into(),
        id: digest_imp.clone().as_ref().into(),
        steps_cached: 0,
        steps: 1,
        steps_total: 1,
        subtasks: [SubTaskStep {
            subtask: "importing".into(),
            description: "Importing Image".into(),
            id: "importing".into(),
            completed: true,
        }]
        .into(),
    })
    .await;
    // Only propagate the import error after the final progress event was sent.
    let import = import?;
    let imgref_canonicalized = imgref.clone().canonicalize()?;
    tracing::debug!("Canonicalized image reference: {imgref_canonicalized:#}");

    // Log successful import completion (skip if using unified storage to avoid double logging)
    let is_unified_path = imgref.transport == "containers-storage";
    if !is_unified_path {
        const IMPORT_COMPLETE_JOURNAL_ID: &str = "4d3e2f1a0b9c8d7e6f5a4b3c2d1e0f9a8";

        tracing::info!(
            message_id = IMPORT_COMPLETE_JOURNAL_ID,
            bootc.image.reference = &imgref.image,
            bootc.image.transport = &imgref.transport,
            bootc.manifest_digest = import.manifest_digest.as_ref(),
            bootc.ostree_commit = &import.merge_commit,
            "Successfully imported image: {}",
            imgref
        );
    }

    // Surface a warning if the import filtered out unsupported content.
    if let Some(msg) =
        ostree_container::store::image_filtered_content_warning(&import.filtered_files)
            .context("Image content warning")?
    {
        tracing::info!("{}", msg);
    }
    Ok(Box::new((*import).into()))
}
732
/// Wrapper for pulling a container image, wiring up status output.
///
/// Standard (non-unified) path: prepares via [`prepare_for_pull`], returns
/// immediately if the image is already present, otherwise checks ostree repo
/// disk space and runs [`pull_from_prepared`].
pub(crate) async fn pull(
    repo: &ostree::Repo,
    imgref: &ImageReference,
    target_imgref: Option<&OstreeImageReference>,
    quiet: bool,
    prog: ProgressWriter,
    booted_deployment: Option<&ostree::Deployment>,
) -> Result<Box<ImageState>> {
    match prepare_for_pull(repo, imgref, target_imgref, booted_deployment).await? {
        PreparedPullResult::AlreadyPresent(existing) => {
            // Log that the image was already present (Debug level since it's not actionable)
            const IMAGE_ALREADY_PRESENT_ID: &str = "5c4d3e2f1a0b9c8d7e6f5a4b3c2d1e0f9";
            tracing::debug!(
                message_id = IMAGE_ALREADY_PRESENT_ID,
                bootc.image.reference = &imgref.image,
                bootc.image.transport = &imgref.transport,
                bootc.status = "already_present",
                "Image already present: {}",
                imgref
            );
            Ok(existing)
        }
        PreparedPullResult::Ready(prepared_image_meta) => {
            // Check disk space before attempting to pull
            check_disk_space_ostree(repo, &prepared_image_meta, imgref)?;
            // Log that we're pulling a new image
            const PULLING_NEW_IMAGE_ID: &str = "6d5e4f3a2b1c0d9e8f7a6b5c4d3e2f1a0";
            tracing::info!(
                message_id = PULLING_NEW_IMAGE_ID,
                bootc.image.reference = &imgref.image,
                bootc.image.transport = &imgref.transport,
                bootc.status = "pulling_new",
                "Pulling new image: {}",
                imgref
            );
            Ok(pull_from_prepared(imgref, quiet, prog, *prepared_image_meta).await?)
        }
    }
}
773
774pub(crate) async fn wipe_ostree(sysroot: Sysroot) -> Result<()> {
775    tokio::task::spawn_blocking(move || {
776        sysroot
777            .write_deployments(&[], gio::Cancellable::NONE)
778            .context("removing deployments")
779    })
780    .await??;
781
782    Ok(())
783}
784
/// Prune unreferenced images and repository objects.
///
/// Regenerates the synthesized base-image refs (so derived deployments keep
/// their base commits alive), prunes container images via ostree-ext, and
/// concurrently prunes the bound container store.
pub(crate) async fn cleanup(sysroot: &Storage) -> Result<()> {
    // Log the cleanup operation to systemd journal
    const CLEANUP_JOURNAL_ID: &str = "2f1a0b9c8d7e6f5a4b3c2d1e0f9a8b7c6";

    tracing::info!(
        message_id = CLEANUP_JOURNAL_ID,
        "Starting cleanup of old images and deployments"
    );

    // Future for pruning the container store; awaited concurrently below.
    let bound_prune = prune_container_store(sysroot);

    // We create clones (just atomic reference bumps) here to move to the thread.
    let ostree = sysroot.get_ostree_cloned()?;
    let repo = ostree.repo();
    let repo_prune =
        ostree_ext::tokio_util::spawn_blocking_cancellable_flatten(move |cancellable| {
            // NOTE(review): assumes the sysroot lock is already held by the caller
            // (hence from_assumed_locked) — confirm against call sites.
            let locked_sysroot = &SysrootLock::from_assumed_locked(&ostree);
            let cancellable = Some(cancellable);
            let repo = &repo;
            // All ref changes below happen within a single transaction.
            let txn = repo.auto_transaction(cancellable)?;
            let repo = txn.repo();

            // Regenerate our base references.  First, we delete the ones that exist
            for ref_entry in repo
                .list_refs_ext(
                    Some(BASE_IMAGE_PREFIX),
                    ostree::RepoListRefsExtFlags::NONE,
                    cancellable,
                )
                .context("Listing refs")?
                .keys()
            {
                // Passing None as the target unsets the refspec.
                repo.transaction_set_refspec(ref_entry, None);
            }

            // Then, for each deployment which is derived (e.g. has configmaps) we synthesize
            // a base ref to ensure that it's not GC'd.
            for (i, deployment) in ostree.deployments().into_iter().enumerate() {
                let commit = deployment.csum();
                if let Some(base) = get_base_commit(repo, &commit)? {
                    repo.transaction_set_refspec(&format!("{BASE_IMAGE_PREFIX}/{i}"), Some(&base));
                }
            }

            let pruned =
                ostree_container::deploy::prune(locked_sysroot).context("Pruning images")?;
            if !pruned.is_empty() {
                let size = glib::format_size(pruned.objsize);
                println!(
                    "Pruned images: {} (layers: {}, objsize: {})",
                    pruned.n_images, pruned.n_layers, size
                );
            } else {
                tracing::debug!("Nothing to prune");
            }

            Ok(())
        });

    // We run these in parallel mostly because we can.
    tokio::try_join!(repo_prune, bound_prune)?;
    Ok(())
}
848
849/// If commit is a bootc-derived commit (e.g. has configmaps), return its base.
850#[context("Finding base commit")]
851pub(crate) fn get_base_commit(repo: &ostree::Repo, commit: &str) -> Result<Option<String>> {
852    let commitv = repo.load_commit(commit)?.0;
853    let commitmeta = commitv.child_value(0);
854    let commitmeta = &glib::VariantDict::new(Some(&commitmeta));
855    let r = commitmeta.lookup::<String>(BOOTC_DERIVED_KEY)?;
856    Ok(r)
857}
858
/// Stage a new deployment of `image` into the stateroot described by `from`,
/// returning the staged ostree deployment.
///
/// The heavy lifting runs on a blocking worker thread; because several ostree
/// types involved (`Deployment`, `GKeyFile`) are not `Send`, they are passed
/// across the thread boundary as indices / serialized data respectively.
#[context("Writing deployment")]
async fn deploy(
    sysroot: &Storage,
    from: MergeState,
    image: &ImageState,
    origin: &glib::KeyFile,
    lock_finalization: bool,
) -> Result<Deployment> {
    // Compute the kernel argument overrides. In practice today this API is always expecting
    // a merge deployment. The kargs code also always looks at the booted root (which
    // is a distinct minor issue, but not super important as right now the install path
    // doesn't use this API).
    let (stateroot, override_kargs) = match &from {
        MergeState::MergeDeployment(deployment) => {
            let kargs = crate::bootc_kargs::get_kargs(sysroot, &deployment, image)?;
            (deployment.stateroot().into(), Some(kargs))
        }
        MergeState::Reset { stateroot, kargs } => (stateroot.clone(), Some(kargs.clone())),
    };
    // Clone all the things to move to worker thread
    let ostree = sysroot.get_ostree_cloned()?;
    // ostree::Deployment is incorrectly !Send 😢 so convert it to an integer
    let merge_deployment = from.as_merge_deployment();
    let merge_deployment = merge_deployment.map(|d| d.index() as usize);
    let ostree_commit = image.ostree_commit.to_string();
    // GKeyFile also isn't Send! So we serialize that as a string...
    let origin_data = origin.to_data();
    let r = async_task_with_spinner(
        "Deploying",
        spawn_blocking_cancellable_flatten(move |cancellable| -> Result<_> {
            let ostree = ostree;
            let stateroot = Some(stateroot);
            let mut opts = ostree::SysrootDeployTreeOpts::default();

            // Set finalization lock if requested
            opts.locked = lock_finalization;

            // Because the C API expects a Vec<&str>, convert the Cmdline to string slices.
            // The references borrow from the Cmdline, which outlives this usage.
            let override_kargs_refs = override_kargs
                .as_ref()
                .map(|kargs| kargs.iter_str().collect::<Vec<_>>());
            if let Some(kargs) = override_kargs_refs.as_ref() {
                opts.override_kernel_argv = Some(kargs);
            }

            // Re-resolve the merge deployment by index on this thread
            // (only the index was captured, see !Send note above).
            let deployments = ostree.deployments();
            let merge_deployment = merge_deployment.map(|m| &deployments[m]);
            // Rehydrate the origin keyfile from its serialized form.
            let origin = glib::KeyFile::new();
            origin.load_from_data(&origin_data, glib::KeyFileFlags::NONE)?;
            let d = ostree.stage_tree_with_options(
                stateroot.as_deref(),
                &ostree_commit,
                Some(&origin),
                merge_deployment,
                &opts,
                Some(cancellable),
            )?;
            // Return just the index; the Deployment itself is !Send.
            Ok(d.index())
        }),
    )
    .await?;
    // SAFETY: We must have a staged deployment
    let ostree = sysroot.get_ostree()?;
    let staged = ostree.staged_deployment().unwrap();
    // Sanity check that the staged deployment is the one we just created.
    assert_eq!(staged.index(), r);
    Ok(staged)
}
927
928#[context("Generating origin")]
929fn origin_from_imageref(imgref: &ImageReference) -> Result<glib::KeyFile> {
930    let origin = glib::KeyFile::new();
931    let imgref = OstreeImageReference::from(imgref.clone());
932    origin.set_string(
933        "origin",
934        ostree_container::deploy::ORIGIN_CONTAINER,
935        imgref.to_string().as_str(),
936    );
937    Ok(origin)
938}
939
/// The source of data for staging a new deployment
#[derive(Debug)]
pub(crate) enum MergeState {
    /// Use the provided merge deployment
    MergeDeployment(Deployment),
    /// Don't use a merge deployment, but only this
    /// provided initial state.
    Reset {
        /// The stateroot (OS name) to deploy into.
        stateroot: String,
        /// The kernel arguments to use for the new deployment.
        kargs: CmdlineOwned,
    },
}
952impl MergeState {
953    /// Initialize using the default merge deployment for the given stateroot.
954    pub(crate) fn from_stateroot(sysroot: &Storage, stateroot: &str) -> Result<Self> {
955        let ostree = sysroot.get_ostree()?;
956        let merge_deployment = ostree.merge_deployment(Some(stateroot)).ok_or_else(|| {
957            anyhow::anyhow!("No merge deployment found for stateroot {stateroot}")
958        })?;
959        Ok(Self::MergeDeployment(merge_deployment))
960    }
961
962    /// Cast this to a merge deployment case.
963    pub(crate) fn as_merge_deployment(&self) -> Option<&Deployment> {
964        match self {
965            Self::MergeDeployment(d) => Some(d),
966            Self::Reset { .. } => None,
967        }
968    }
969}
970
971/// Stage (queue deployment of) a fetched container image.
972#[context("Staging")]
973pub(crate) async fn stage(
974    sysroot: &Storage,
975    from: MergeState,
976    image: &ImageState,
977    spec: &RequiredHostSpec<'_>,
978    prog: ProgressWriter,
979    lock_finalization: bool,
980) -> Result<()> {
981    // Log the staging operation to systemd journal with comprehensive upgrade information
982    const STAGE_JOURNAL_ID: &str = "8f7a2b1c3d4e5f6a7b8c9d0e1f2a3b4c";
983
984    tracing::info!(
985        message_id = STAGE_JOURNAL_ID,
986        bootc.image.reference = &spec.image.image,
987        bootc.image.transport = &spec.image.transport,
988        bootc.manifest_digest = image.manifest_digest.as_ref(),
989        "Staging image for deployment: {} (digest: {})",
990        spec.image,
991        image.manifest_digest
992    );
993
994    let mut subtask = SubTaskStep {
995        subtask: "merging".into(),
996        description: "Merging Image".into(),
997        id: "fetching".into(),
998        completed: false,
999    };
1000    let mut subtasks = vec![];
1001    prog.send(Event::ProgressSteps {
1002        task: "staging".into(),
1003        description: "Deploying Image".into(),
1004        id: image.manifest_digest.clone().as_ref().into(),
1005        steps_cached: 0,
1006        steps: 0,
1007        steps_total: 3,
1008        subtasks: subtasks
1009            .clone()
1010            .into_iter()
1011            .chain([subtask.clone()])
1012            .collect(),
1013    })
1014    .await;
1015
1016    subtask.completed = true;
1017    subtasks.push(subtask.clone());
1018    subtask.subtask = "deploying".into();
1019    subtask.id = "deploying".into();
1020    subtask.description = "Deploying Image".into();
1021    subtask.completed = false;
1022    prog.send(Event::ProgressSteps {
1023        task: "staging".into(),
1024        description: "Deploying Image".into(),
1025        id: image.manifest_digest.clone().as_ref().into(),
1026        steps_cached: 0,
1027        steps: 1,
1028        steps_total: 3,
1029        subtasks: subtasks
1030            .clone()
1031            .into_iter()
1032            .chain([subtask.clone()])
1033            .collect(),
1034    })
1035    .await;
1036    let origin = origin_from_imageref(spec.image)?;
1037    let deployment =
1038        crate::deploy::deploy(sysroot, from, image, &origin, lock_finalization).await?;
1039
1040    subtask.completed = true;
1041    subtasks.push(subtask.clone());
1042    subtask.subtask = "bound_images".into();
1043    subtask.id = "bound_images".into();
1044    subtask.description = "Pulling Bound Images".into();
1045    subtask.completed = false;
1046    prog.send(Event::ProgressSteps {
1047        task: "staging".into(),
1048        description: "Deploying Image".into(),
1049        id: image.manifest_digest.clone().as_ref().into(),
1050        steps_cached: 0,
1051        steps: 1,
1052        steps_total: 3,
1053        subtasks: subtasks
1054            .clone()
1055            .into_iter()
1056            .chain([subtask.clone()])
1057            .collect(),
1058    })
1059    .await;
1060    crate::boundimage::pull_bound_images(sysroot, &deployment).await?;
1061
1062    subtask.completed = true;
1063    subtasks.push(subtask.clone());
1064    subtask.subtask = "cleanup".into();
1065    subtask.id = "cleanup".into();
1066    subtask.description = "Removing old images".into();
1067    subtask.completed = false;
1068    prog.send(Event::ProgressSteps {
1069        task: "staging".into(),
1070        description: "Deploying Image".into(),
1071        id: image.manifest_digest.clone().as_ref().into(),
1072        steps_cached: 0,
1073        steps: 2,
1074        steps_total: 3,
1075        subtasks: subtasks
1076            .clone()
1077            .into_iter()
1078            .chain([subtask.clone()])
1079            .collect(),
1080    })
1081    .await;
1082    crate::deploy::cleanup(sysroot).await?;
1083    println!("Queued for next boot: {:#}", spec.image);
1084    if let Some(version) = image.version.as_deref() {
1085        println!("  Version: {version}");
1086    }
1087    println!("  Digest: {}", image.manifest_digest);
1088
1089    subtask.completed = true;
1090    subtasks.push(subtask.clone());
1091    prog.send(Event::ProgressSteps {
1092        task: "staging".into(),
1093        description: "Deploying Image".into(),
1094        id: image.manifest_digest.clone().as_ref().into(),
1095        steps_cached: 0,
1096        steps: 3,
1097        steps_total: 3,
1098        subtasks: subtasks
1099            .clone()
1100            .into_iter()
1101            .chain([subtask.clone()])
1102            .collect(),
1103    })
1104    .await;
1105
1106    // Unconditionally create or update /run/reboot-required to signal a reboot is needed.
1107    // This is monitored by kured (Kubernetes Reboot Daemon).
1108    write_reboot_required(&image.manifest_digest.as_ref())?;
1109
1110    Ok(())
1111}
1112
1113/// Update the /run/reboot-required file with the image that will be active after a reboot.
1114fn write_reboot_required(image: &str) -> Result<()> {
1115    let reboot_message = format!("bootc: Reboot required for image: {}", image);
1116    let run_dir = Dir::open_ambient_dir("/run", cap_std::ambient_authority())?;
1117    run_dir
1118        .atomic_write("reboot-required", reboot_message.as_bytes())
1119        .context("Creating /run/reboot-required")?;
1120
1121    Ok(())
1122}
1123
/// Implementation of rollback functionality
///
/// Swaps the boot order so the rollback deployment becomes the next boot
/// default — or, if a rollback was already queued, reverts that queuing —
/// then rewrites the deployment list to match.
pub(crate) async fn rollback(sysroot: &Storage) -> Result<()> {
    const ROLLBACK_JOURNAL_ID: &str = "26f3b1eb24464d12aa5e7b544a6b5468";
    let ostree = sysroot.get_ostree()?;
    let (booted_ostree, deployments, host) = crate::status::get_status_require_booted(ostree)?;

    // The target spec is the current one with the boot order swapped.
    let new_spec = {
        let mut new_spec = host.spec.clone();
        new_spec.boot_order = new_spec.boot_order.swap();
        new_spec
    };

    let repo = &booted_ostree.repo();

    // Just to be sure
    host.spec.verify_transition(&new_spec)?;

    // If the swapped order is back to the default, we are undoing a
    // previously queued rollback rather than initiating one.
    let reverting = new_spec.boot_order == BootOrder::Default;
    if reverting {
        println!("notice: Reverting queued rollback state");
    }
    let rollback_status = host
        .status
        .rollback
        .ok_or_else(|| anyhow!("No rollback available"))?;
    let rollback_image = rollback_status
        .query_image(repo)?
        .ok_or_else(|| anyhow!("Rollback is not container image based"))?;

    // Get current booted image for comparison
    // (query_image returns Result<Option<_>>; `.ok()?` flattens to Option)
    let current_image = host
        .status
        .booted
        .as_ref()
        .and_then(|b| b.query_image(repo).ok()?);

    tracing::info!(
        message_id = ROLLBACK_JOURNAL_ID,
        bootc.manifest_digest = rollback_image.manifest_digest.as_ref(),
        bootc.ostree_commit = &rollback_image.merge_commit,
        bootc.rollback_type = if reverting { "revert" } else { "rollback" },
        bootc.current_manifest_digest = current_image
            .as_ref()
            .map(|i| i.manifest_digest.as_ref())
            .unwrap_or("none"),
        "Rolling back to image: {}",
        rollback_image.manifest_digest
    );
    // SAFETY: If there's a rollback status, then there's a deployment
    let rollback_deployment = deployments.rollback.expect("rollback deployment");
    // Order the pair so the intended next-boot deployment comes first.
    let new_deployments = if reverting {
        [booted_ostree.deployment, rollback_deployment]
    } else {
        [rollback_deployment, booted_ostree.deployment]
    };
    let new_deployments = new_deployments
        .into_iter()
        .chain(deployments.other)
        .collect::<Vec<_>>();
    tracing::debug!("Writing new deployments: {new_deployments:?}");
    booted_ostree
        .sysroot
        .write_deployments(&new_deployments, gio::Cancellable::NONE)?;
    if reverting {
        println!("Next boot: current deployment");
    } else {
        println!("Next boot: rollback deployment");
    }

    write_reboot_required(rollback_image.manifest_digest.as_ref())?;

    // Bump the stored mtime; presumably so cached status is refreshed — confirm.
    sysroot.update_mtime()?;

    Ok(())
}
1199
1200fn find_newest_deployment_name(deploysdir: &Dir) -> Result<String> {
1201    let mut dirs = Vec::new();
1202    for ent in deploysdir.entries()? {
1203        let ent = ent?;
1204        if !ent.file_type()?.is_dir() {
1205            continue;
1206        }
1207        let name = ent.file_name();
1208        let Some(name) = name.to_str() else {
1209            continue;
1210        };
1211        dirs.push((name.to_owned(), ent.metadata()?.mtime()));
1212    }
1213    dirs.sort_unstable_by(|a, b| a.1.cmp(&b.1));
1214    if let Some((name, _ts)) = dirs.pop() {
1215        Ok(name)
1216    } else {
1217        anyhow::bail!("No deployment directory found")
1218    }
1219}
1220
1221// Implementation of `bootc switch --in-place`
1222pub(crate) fn switch_origin_inplace(root: &Dir, imgref: &ImageReference) -> Result<String> {
1223    // Log the in-place switch operation to systemd journal
1224    const SWITCH_INPLACE_JOURNAL_ID: &str = "3e2f1a0b9c8d7e6f5a4b3c2d1e0f9a8b7";
1225
1226    tracing::info!(
1227        message_id = SWITCH_INPLACE_JOURNAL_ID,
1228        bootc.image.reference = &imgref.image,
1229        bootc.image.transport = &imgref.transport,
1230        bootc.switch_type = "in_place",
1231        "Performing in-place switch to image: {}",
1232        imgref
1233    );
1234
1235    // First, just create the new origin file
1236    let origin = origin_from_imageref(imgref)?;
1237    let serialized_origin = origin.to_data();
1238
1239    // Now, we can't rely on being officially booted (e.g. with the `ostree=` karg)
1240    // in a scenario like running in the anaconda %post.
1241    // Eventually, we should support a setup here where ostree-prepare-root
1242    // can officially be run to "enter" an ostree root in a supportable way.
1243    // Anyways for now, the brutal hack is to just scrape through the deployments
1244    // and find the newest one, which we will mutate.  If there's more than one,
1245    // ultimately the calling tooling should be fixed to set things up correctly.
1246
1247    let mut ostree_deploys = root.open_dir("sysroot/ostree/deploy")?.entries()?;
1248    let deploydir = loop {
1249        if let Some(ent) = ostree_deploys.next() {
1250            let ent = ent?;
1251            if !ent.file_type()?.is_dir() {
1252                continue;
1253            }
1254            tracing::debug!("Checking {:?}", ent.file_name());
1255            let child_dir = ent
1256                .open_dir()
1257                .with_context(|| format!("Opening dir {:?}", ent.file_name()))?;
1258            if let Some(d) = child_dir.open_dir_optional("deploy")? {
1259                break d;
1260            }
1261        } else {
1262            anyhow::bail!("Failed to find a deployment");
1263        }
1264    };
1265    let newest_deployment = find_newest_deployment_name(&deploydir)?;
1266    let origin_path = format!("{newest_deployment}.origin");
1267    if !deploydir.try_exists(&origin_path)? {
1268        tracing::warn!("No extant origin for {newest_deployment}");
1269    }
1270    deploydir
1271        .atomic_write(&origin_path, serialized_origin.as_bytes())
1272        .context("Writing origin")?;
1273    Ok(newest_deployment)
1274}
1275
/// A workaround for <https://github.com/ostreedev/ostree/issues/3193>
/// as generated by anaconda.
///
/// Rewrites the root (`/`) entry of `etc/fstab` to include the `ro` mount
/// option if it does not already have it, prefixing the rewritten line with
/// a stamp comment. All other lines pass through unchanged.
#[context("Updating /etc/fstab for anaconda+composefs")]
pub(crate) fn fixup_etc_fstab(root: &Dir) -> Result<()> {
    let fstab_path = "etc/fstab";
    // Read the old file
    let fd = root
        .open(fstab_path)
        .with_context(|| format!("Opening {fstab_path}"))
        .map(std::io::BufReader::new)?;

    // Helper function to possibly change a line from /etc/fstab.
    // Returns Ok(true) if we made a change (and we wrote the modified line)
    // otherwise returns Ok(false) and the caller should write the original line.
    fn edit_fstab_line(line: &str, mut w: impl Write) -> Result<bool> {
        // Comment lines pass through untouched.
        if line.starts_with('#') {
            return Ok(false);
        }
        let parts = line.split_ascii_whitespace().collect::<Vec<_>>();

        // fstab fields: device, mountpoint (index 1), fstype, options (index 3), dump, pass
        let path_idx = 1;
        let options_idx = 3;
        let (&path, &options) = match (parts.get(path_idx), parts.get(options_idx)) {
            (None, _) => {
                tracing::debug!("No path in entry: {line}");
                return Ok(false);
            }
            (_, None) => {
                tracing::debug!("No options in entry: {line}");
                return Ok(false);
            }
            (Some(p), Some(o)) => (p, o),
        };
        // If this is not the root, we're not matching on it
        if path != "/" {
            return Ok(false);
        }
        // If options already contains `ro`, nothing to do
        if options.split(',').any(|s| s == "ro") {
            return Ok(false);
        }

        // Stamp comment marking the line below as edited by bootc.
        writeln!(w, "# {}", crate::generator::BOOTC_EDITED_STAMP)?;

        // SAFETY: we unpacked the options before.
        // This adds `ro` to the option list
        assert!(!options.is_empty()); // Split wouldn't have turned this up if it was empty
        let options = format!("{options},ro");
        for (i, part) in parts.into_iter().enumerate() {
            // TODO: would obviously be nicer to preserve whitespace...but...eh.
            if i > 0 {
                write!(w, " ")?;
            }
            if i == options_idx {
                write!(w, "{options}")?;
            } else {
                write!(w, "{part}")?
            }
        }
        // And add the trailing newline
        writeln!(w)?;
        Ok(true)
    }

    // Read the input, and atomically write a modified version
    root.atomic_replace_with(fstab_path, move |mut w| -> Result<()> {
        for line in fd.lines() {
            let line = line?;
            if !edit_fstab_line(&line, &mut w)? {
                writeln!(w, "{line}")?;
            }
        }
        Ok(())
    })
    .context("Replacing /etc/fstab")?;

    println!("Updated /etc/fstab to add `ro` for `/`");
    Ok(())
}
1355
#[cfg(test)]
mod tests {
    use super::*;

    // The proxy config must carry a bootc-identifying user agent.
    #[test]
    fn test_new_proxy_config_user_agent() {
        let config = new_proxy_config();
        let prefix = config
            .user_agent_prefix
            .expect("user_agent_prefix should be set");
        assert!(
            prefix.starts_with("bootc/"),
            "User agent should start with bootc/"
        );
        // Verify the version is present (not just "bootc/")
        assert!(
            prefix.len() > "bootc/".len(),
            "Version should be present after bootc/"
        );
    }

    #[test]
    fn test_switch_inplace() -> Result<()> {
        use cap_std::fs::DirBuilderExt;

        // Build a minimal fake sysroot layout with a single deployment dir.
        let td = cap_std_ext::cap_tempfile::TempDir::new(cap_std::ambient_authority())?;
        let mut builder = cap_std::fs::DirBuilder::new();
        let builder = builder.recursive(true).mode(0o755);
        let deploydir = "sysroot/ostree/deploy/default/deploy";
        let target_deployment =
            "af36eb0086bb55ac601600478c6168f834288013d60f8870b7851f44bf86c3c5.0";
        td.ensure_dir_with(
            format!("sysroot/ostree/deploy/default/deploy/{target_deployment}"),
            builder,
        )?;
        let deploydir = &td.open_dir(deploydir)?;
        let orig_imgref = ImageReference {
            image: "quay.io/exampleos/original:sometag".into(),
            transport: "registry".into(),
            signature: None,
        };
        // Seed the deployment with an origin pointing at the original image.
        {
            let origin = origin_from_imageref(&orig_imgref)?;
            deploydir.atomic_write(
                format!("{target_deployment}.origin"),
                origin.to_data().as_bytes(),
            )?;
        }

        let target_imgref = ImageReference {
            image: "quay.io/someother/otherimage:latest".into(),
            transport: "registry".into(),
            signature: None,
        };

        // The in-place switch should find and rewrite that deployment.
        let replaced = switch_origin_inplace(&td, &target_imgref).unwrap();
        assert_eq!(replaced, target_deployment);
        Ok(())
    }

    // A non-root entry is passed through unchanged.
    #[test]
    fn test_fixup_etc_fstab_default() -> Result<()> {
        let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
        let default = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n";
        tempdir.create_dir_all("etc")?;
        tempdir.atomic_write("etc/fstab", default)?;
        fixup_etc_fstab(&tempdir).unwrap();
        assert_eq!(tempdir.read_to_string("etc/fstab")?, default);
        Ok(())
    }

    // Multiple non-root entries are all passed through unchanged.
    #[test]
    fn test_fixup_etc_fstab_multi() -> Result<()> {
        let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
        let default = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n\
UUID=6907-17CA          /boot/efi               vfat    umask=0077,shortname=winnt 0 2\n";
        tempdir.create_dir_all("etc")?;
        tempdir.atomic_write("etc/fstab", default)?;
        fixup_etc_fstab(&tempdir).unwrap();
        assert_eq!(tempdir.read_to_string("etc/fstab")?, default);
        Ok(())
    }

    // A root entry already containing `ro` is left unchanged.
    #[test]
    fn test_fixup_etc_fstab_ro() -> Result<()> {
        let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
        let default = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n\
UUID=1eef9f42-40e3-4bd8-ae20-e9f2325f8b52 /                     xfs   ro 0 0\n\
UUID=6907-17CA          /boot/efi               vfat    umask=0077,shortname=winnt 0 2\n";
        tempdir.create_dir_all("etc")?;
        tempdir.atomic_write("etc/fstab", default)?;
        fixup_etc_fstab(&tempdir).unwrap();
        assert_eq!(tempdir.read_to_string("etc/fstab")?, default);
        Ok(())
    }

    // A writable root entry gains `,ro` plus the stamp comment line.
    #[test]
    fn test_fixup_etc_fstab_rw() -> Result<()> {
        let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
        // This case uses `defaults`
        let default = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n\
UUID=1eef9f42-40e3-4bd8-ae20-e9f2325f8b52 /                     xfs   defaults 0 0\n\
UUID=6907-17CA          /boot/efi               vfat    umask=0077,shortname=winnt 0 2\n";
        let modified = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n\
# Updated by bootc-fstab-edit.service\n\
UUID=1eef9f42-40e3-4bd8-ae20-e9f2325f8b52 / xfs defaults,ro 0 0\n\
UUID=6907-17CA          /boot/efi               vfat    umask=0077,shortname=winnt 0 2\n";
        tempdir.create_dir_all("etc")?;
        tempdir.atomic_write("etc/fstab", default)?;
        fixup_etc_fstab(&tempdir).unwrap();
        assert_eq!(tempdir.read_to_string("etc/fstab")?, modified);
        Ok(())
    }
    // Boundary conditions for the disk-space precheck.
    #[test]
    fn test_check_disk_space_inner() -> Result<()> {
        let td = cap_std_ext::cap_tempfile::TempDir::new(cap_std::ambient_authority())?;
        let imgref = ImageReference {
            image: "quay.io/exampleos/exampleos:latest".into(),
            transport: "registry".into(),
            signature: None,
        };

        // 0 bytes needed always passes
        check_disk_space_inner(&*td, 0, 0, &imgref)?;

        // u64::MAX bytes needed always fails
        assert!(check_disk_space_inner(&*td, u64::MAX, 0, &imgref).is_err());

        // With min_free consuming all usable space, even a tiny fetch fails
        assert!(check_disk_space_inner(&*td, 1, u64::MAX, &imgref).is_err());

        Ok(())
    }
}