bootc_lib/deploy.rs

//! # Write deployments merging image with configmap
//!
//! Create a merged filesystem tree with the image and mounted configmaps.

use std::collections::HashSet;
use std::io::{BufRead, Write};
use std::process::Command;

use anyhow::{Context, Result, anyhow};
use bootc_kernel_cmdline::utf8::CmdlineOwned;
use cap_std::fs::{Dir, MetadataExt};
use cap_std_ext::cap_std;
use cap_std_ext::dirext::CapStdExtDirExt;
use fn_error_context::context;
use ostree::{gio, glib};
use ostree_container::OstreeImageReference;
use ostree_ext::container as ostree_container;
use ostree_ext::container::store::{ImageImporter, ImportProgress, PrepareResult, PreparedImport};
use ostree_ext::oci_spec::image::{Descriptor, Digest};
use ostree_ext::ostree::Deployment;
use ostree_ext::ostree::{self, Sysroot};
use ostree_ext::sysroot::SysrootLock;
use ostree_ext::tokio_util::spawn_blocking_cancellable_flatten;

use crate::progress_jsonl::{Event, ProgressWriter, SubTaskBytes, SubTaskStep};
use crate::spec::ImageReference;
use crate::spec::{BootOrder, HostSpec};
use crate::status::labels_of_config;
use crate::store::Storage;
use crate::utils::async_task_with_spinner;

// TODO use https://github.com/ostreedev/ostree-rs-ext/pull/493/commits/afc1837ff383681b947de30c0cefc70080a4f87a
const BASE_IMAGE_PREFIX: &str = "ostree/container/baseimage/bootc";

/// Create an ImageProxyConfig with bootc's user agent prefix set.
///
/// This allows registries to distinguish "image pulls for bootc client runs"
/// from other skopeo/containers-image users.
pub(crate) fn new_proxy_config() -> ostree_ext::containers_image_proxy::ImageProxyConfig {
    ostree_ext::containers_image_proxy::ImageProxyConfig {
        user_agent_prefix: Some(format!("bootc/{}", env!("CARGO_PKG_VERSION"))),
        ..Default::default()
    }
}

/// Set on an ostree commit if this is a derived commit
const BOOTC_DERIVED_KEY: &str = "bootc.derived";

/// Variant of HostSpec but required to be filled out
pub(crate) struct RequiredHostSpec<'a> {
    pub(crate) image: &'a ImageReference,
}

/// State of a locally fetched image
pub(crate) struct ImageState {
    pub(crate) manifest_digest: Digest,
    pub(crate) version: Option<String>,
    pub(crate) ostree_commit: String,
}

impl<'a> RequiredHostSpec<'a> {
    /// Given a (borrowed) host specification, "unwrap" its internal
    /// options, giving a spec that is required to have a base container image.
    pub(crate) fn from_spec(spec: &'a HostSpec) -> Result<Self> {
        let image = spec
            .image
            .as_ref()
            .ok_or_else(|| anyhow::anyhow!("Missing image in specification"))?;
        Ok(Self { image })
    }
}

impl From<ostree_container::store::LayeredImageState> for ImageState {
    fn from(value: ostree_container::store::LayeredImageState) -> Self {
        let version = value.version().map(|v| v.to_owned());
        let ostree_commit = value.get_commit().to_owned();
        Self {
            manifest_digest: value.manifest_digest,
            version,
            ostree_commit,
        }
    }
}

impl ImageState {
    /// Fetch the manifest corresponding to this image.  May not be available in all backends.
    pub(crate) fn get_manifest(
        &self,
        repo: &ostree::Repo,
    ) -> Result<Option<ostree_ext::oci_spec::image::ImageManifest>> {
        ostree_container::store::query_image_commit(repo, &self.ostree_commit)
            .map(|v| Some(v.manifest))
    }
}

/// Wrapper for pulling a container image, wiring up status output.
pub(crate) async fn new_importer(
    repo: &ostree::Repo,
    imgref: &ostree_container::OstreeImageReference,
) -> Result<ostree_container::store::ImageImporter> {
    let config = new_proxy_config();
    let mut imp = ostree_container::store::ImageImporter::new(repo, imgref, config).await?;
    imp.require_bootable();
    Ok(imp)
}

/// Wrapper for pulling a container image with a custom proxy config (e.g. for unified storage).
pub(crate) async fn new_importer_with_config(
    repo: &ostree::Repo,
    imgref: &ostree_container::OstreeImageReference,
    config: ostree_ext::containers_image_proxy::ImageProxyConfig,
) -> Result<ostree_container::store::ImageImporter> {
    let mut imp = ostree_container::store::ImageImporter::new(repo, imgref, config).await?;
    imp.require_bootable();
    Ok(imp)
}

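/// Warn via the systemd journal if the image configuration is missing the bootc
/// compatibility label, or declares an unrecognized value for it.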
pub(crate) fn check_bootc_label(config: &ostree_ext::oci_spec::image::ImageConfiguration) {
    if let Some(label) =
        labels_of_config(config).and_then(|labels| labels.get(crate::metadata::BOOTC_COMPAT_LABEL))
    {
        match label.as_str() {
            crate::metadata::COMPAT_LABEL_V1 => {}
            o => crate::journal::journal_print(
                libsystemd::logging::Priority::Warning,
                &format!(
                    "notice: Unknown {} value {}",
                    crate::metadata::BOOTC_COMPAT_LABEL,
                    o
                ),
            ),
        }
    } else {
        crate::journal::journal_print(
            libsystemd::logging::Priority::Warning,
            &format!(
                "notice: Image is missing label: {}",
                crate::metadata::BOOTC_COMPAT_LABEL
            ),
        )
    }
}

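/// Return the OCI descriptor referenced by a layer progress event.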
fn descriptor_of_progress(p: &ImportProgress) -> &Descriptor {
    match p {
        ImportProgress::OstreeChunkStarted(l) => l,
        ImportProgress::OstreeChunkCompleted(l) => l,
        ImportProgress::DerivedLayerStarted(l) => l,
        ImportProgress::DerivedLayerCompleted(l) => l,
    }
}

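/// Return a human-readable prefix ("ostree chunk" or "layer") for a progress event.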
fn prefix_of_progress(p: &ImportProgress) -> &'static str {
    match p {
        ImportProgress::OstreeChunkStarted(_) | ImportProgress::OstreeChunkCompleted(_) => {
            "ostree chunk"
        }
        ImportProgress::DerivedLayerStarted(_) | ImportProgress::DerivedLayerCompleted(_) => {
            "layer"
        }
    }
}

/// Configuration for layer progress printing
struct LayerProgressConfig {
    layers: tokio::sync::mpsc::Receiver<ostree_container::store::ImportProgress>,
    layer_bytes: tokio::sync::watch::Receiver<Option<ostree_container::store::LayerProgress>>,
    digest: Box<str>,
    n_layers_to_fetch: usize,
    layers_total: usize,
    bytes_to_download: u64,
    bytes_total: u64,
    prog: ProgressWriter,
    quiet: bool,
}

/// Write container fetch progress to standard output.
async fn handle_layer_progress_print(mut config: LayerProgressConfig) -> ProgressWriter {
    let start = std::time::Instant::now();
    let mut total_read = 0u64;
    let bar = indicatif::MultiProgress::new();
    if config.quiet {
        bar.set_draw_target(indicatif::ProgressDrawTarget::hidden());
    }
    let layers_bar = bar.add(indicatif::ProgressBar::new(
        config.n_layers_to_fetch.try_into().unwrap(),
    ));
    let byte_bar = bar.add(indicatif::ProgressBar::new(0));
    // let byte_bar = indicatif::ProgressBar::new(0);
    // byte_bar.set_draw_target(indicatif::ProgressDrawTarget::hidden());
    layers_bar.set_style(
        indicatif::ProgressStyle::default_bar()
            .template("{prefix} {bar} {pos}/{len} {wide_msg}")
            .unwrap(),
    );
    let taskname = "Fetching layers";
    layers_bar.set_prefix(taskname);
    layers_bar.set_message("");
    byte_bar.set_prefix("Fetching");
    byte_bar.set_style(
        indicatif::ProgressStyle::default_bar()
                .template(
                    " └ {prefix} {bar} {binary_bytes}/{binary_total_bytes} ({binary_bytes_per_sec}) {wide_msg}",
                )
                .unwrap()
        );

    let mut subtasks = vec![];
    let mut subtask: SubTaskBytes = Default::default();
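    // Drive two channels until both close: discrete layer started/completed events,
    // and a watch channel carrying byte-level progress for the layer currently being fetched.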
    loop {
        tokio::select! {
            // Always handle layer changes first.
            biased;
            layer = config.layers.recv() => {
                if let Some(l) = layer {
                    let layer = descriptor_of_progress(&l);
                    let layer_type = prefix_of_progress(&l);
                    let short_digest = &layer.digest().digest()[0..21];
                    let layer_size = layer.size();
                    if l.is_starting() {
                        // Reset the progress bar
                        byte_bar.reset_elapsed();
                        byte_bar.reset_eta();
                        byte_bar.set_length(layer_size);
                        byte_bar.set_message(format!("{layer_type} {short_digest}"));

                        subtask = SubTaskBytes {
                            subtask: layer_type.into(),
                            description: format!("{layer_type}: {short_digest}").clone().into(),
                            id: short_digest.to_string().clone().into(),
                            bytes_cached: 0,
                            bytes: 0,
                            bytes_total: layer_size,
                        };
                    } else {
                        byte_bar.set_position(layer_size);
                        layers_bar.inc(1);
                        total_read = total_read.saturating_add(layer_size);
                        // Emit an event where bytes == total to signal completion.
                        subtask.bytes = layer_size;
                        subtasks.push(subtask.clone());
                        config.prog.send(Event::ProgressBytes {
                            task: "pulling".into(),
                            description: format!("Pulling Image: {}", config.digest).into(),
                            id: (*config.digest).into(),
                            bytes_cached: config.bytes_total - config.bytes_to_download,
                            bytes: total_read,
                            bytes_total: config.bytes_to_download,
                            steps_cached: (config.layers_total - config.n_layers_to_fetch) as u64,
                            steps: layers_bar.position(),
                            steps_total: config.n_layers_to_fetch as u64,
                            subtasks: subtasks.clone(),
                        }).await;
                    }
                } else {
                    // If the receiver is disconnected, then we're done
                    break
                };
            },
            r = config.layer_bytes.changed() => {
                if r.is_err() {
                    // If the receiver is disconnected, then we're done
                    break
                }
                let bytes = {
                    let bytes = config.layer_bytes.borrow_and_update();
                    bytes.as_ref().cloned()
                };
                if let Some(bytes) = bytes {
                    byte_bar.set_position(bytes.fetched);
                    subtask.bytes = byte_bar.position();
                    config.prog.send_lossy(Event::ProgressBytes {
                        task: "pulling".into(),
                        description: format!("Pulling Image: {}", config.digest).into(),
                        id: (*config.digest).into(),
                        bytes_cached: config.bytes_total - config.bytes_to_download,
                        bytes: total_read + byte_bar.position(),
                        bytes_total: config.bytes_to_download,
                        steps_cached: (config.layers_total - config.n_layers_to_fetch) as u64,
                        steps: layers_bar.position(),
                        steps_total: config.n_layers_to_fetch as u64,
                        subtasks: subtasks.clone().into_iter().chain([subtask.clone()]).collect(),
                    }).await;
                }
            }
        }
    }
    byte_bar.finish_and_clear();
    layers_bar.finish_and_clear();
    if let Err(e) = bar.clear() {
        tracing::warn!("clearing bar: {e}");
    }
    let end = std::time::Instant::now();
    let elapsed = end.duration_since(start);
    let persec = total_read as f64 / elapsed.as_secs_f64();
    let persec = indicatif::HumanBytes(persec as u64);
    if let Err(e) = bar.println(&format!(
        "Fetched layers: {} in {} ({}/s)",
        indicatif::HumanBytes(total_read),
        indicatif::HumanDuration(elapsed),
        persec,
    )) {
        tracing::warn!("writing to stdout: {e}");
    }

    // Since the progress notifier closed, we know the fetch has finished and the
    // import has started; use that as a heuristic to begin reporting import progress.
    // This event must not be sent lossily, or it may be dropped.
    config
        .prog
        .send(Event::ProgressSteps {
            task: "importing".into(),
            description: "Importing Image".into(),
            id: (*config.digest).into(),
            steps_cached: 0,
            steps: 0,
            steps_total: 1,
            subtasks: [SubTaskStep {
                subtask: "importing".into(),
                description: "Importing Image".into(),
                id: "importing".into(),
                completed: false,
            }]
            .into(),
        })
        .await;

    // Return the writer
    config.prog
}

/// Gather all bound images in all deployments, then prune the image store,
/// using the gathered images as the roots (that will not be GC'd).
pub(crate) async fn prune_container_store(sysroot: &Storage) -> Result<()> {
    let ostree = sysroot.get_ostree()?;
    let deployments = ostree.deployments();
    let mut all_bound_images = Vec::new();
    for deployment in deployments {
        let bound = crate::boundimage::query_bound_images_for_deployment(ostree, &deployment)?;
        all_bound_images.extend(bound.into_iter());
        // Also include the host image itself
        // Note: Use just the image name (not the full transport:image format) because
        // podman's image names don't include the transport prefix.
        if let Some(host_image) = crate::status::boot_entry_from_deployment(ostree, &deployment)?
            .image
            .map(|i| i.image)
        {
            all_bound_images.push(crate::boundimage::BoundImage {
                image: host_image.image.clone(),
                auth_file: None,
            });
        }
    }
    // Convert to a hashset of just the image names
    let image_names = HashSet::from_iter(all_bound_images.iter().map(|img| img.image.as_str()));
    let pruned = sysroot
        .get_ensure_imgstore()?
        .prune_except_roots(&image_names)
        .await?;
    tracing::debug!("Pruned images: {}", pruned.len());
    Ok(())
}

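/// Metadata for an image pull that has been prepared (manifest and layer set
/// resolved) but not yet imported.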
pub(crate) struct PreparedImportMeta {
    pub imp: ImageImporter,
    pub prep: Box<PreparedImport>,
    pub digest: Digest,
    pub n_layers_to_fetch: usize,
    pub layers_total: usize,
    pub bytes_to_fetch: u64,
    pub bytes_total: u64,
}

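/// Outcome of preparing a pull: either an import ready to run, or the image is
/// already present in the repository.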
pub(crate) enum PreparedPullResult {
    Ready(Box<PreparedImportMeta>),
    AlreadyPresent(Box<ImageState>),
}

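/// Prepare to pull the given image into the ostree repo, returning either a
/// ready-to-run import or a note that the image is already present.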
pub(crate) async fn prepare_for_pull(
    repo: &ostree::Repo,
    imgref: &ImageReference,
    target_imgref: Option<&OstreeImageReference>,
) -> Result<PreparedPullResult> {
    let imgref_canonicalized = imgref.clone().canonicalize()?;
    tracing::debug!("Canonicalized image reference: {imgref_canonicalized:#}");
    let ostree_imgref = &OstreeImageReference::from(imgref_canonicalized);
    let mut imp = new_importer(repo, ostree_imgref).await?;
    if let Some(target) = target_imgref {
        imp.set_target(target);
    }
    let prep = match imp.prepare().await? {
        PrepareResult::AlreadyPresent(c) => {
            println!("No changes in {imgref:#} => {}", c.manifest_digest);
            return Ok(PreparedPullResult::AlreadyPresent(Box::new((*c).into())));
        }
        PrepareResult::Ready(p) => p,
    };
    check_bootc_label(&prep.config);
    if let Some(warning) = prep.deprecated_warning() {
        ostree_ext::cli::print_deprecated_warning(warning).await;
    }
    ostree_ext::cli::print_layer_status(&prep);
    let layers_to_fetch = prep.layers_to_fetch().collect::<Result<Vec<_>>>()?;

    let prepared_image = PreparedImportMeta {
        imp,
        n_layers_to_fetch: layers_to_fetch.len(),
        layers_total: prep.all_layers().count(),
        bytes_to_fetch: layers_to_fetch.iter().map(|(l, _)| l.layer.size()).sum(),
        bytes_total: prep.all_layers().map(|l| l.layer.size()).sum(),
        digest: prep.manifest_digest.clone(),
        prep,
    };

    Ok(PreparedPullResult::Ready(Box::new(prepared_image)))
}

/// Check whether the image exists in bootc's unified container storage.
///
/// This is used for auto-detection: if the image already exists in bootc storage
/// (e.g., from a previous `bootc image set-unified` or LBI pull), we can use
/// the unified storage path for faster imports.
///
/// Returns true if the image exists in bootc storage.
pub(crate) async fn image_exists_in_unified_storage(
    store: &Storage,
    imgref: &ImageReference,
) -> Result<bool> {
    let imgstore = store.get_ensure_imgstore()?;
    let image_ref_str = imgref.to_transport_image()?;
    imgstore.exists(&image_ref_str).await
}

/// Unified approach: Use bootc's CStorage to pull the image, then prepare from containers-storage.
/// This reuses the same infrastructure as LBIs.
pub(crate) async fn prepare_for_pull_unified(
    repo: &ostree::Repo,
    imgref: &ImageReference,
    target_imgref: Option<&OstreeImageReference>,
    store: &Storage,
) -> Result<PreparedPullResult> {
    // Get or initialize the bootc container storage (same as used for LBIs)
    let imgstore = store.get_ensure_imgstore()?;

    let image_ref_str = imgref.to_transport_image()?;

    // Always pull to ensure we have the latest image, whether from a remote
    // registry or a locally rebuilt image
    tracing::info!(
        "Unified pull: pulling from transport '{}' to bootc storage",
        &imgref.transport
    );

    // Pull the image to bootc storage using the same method as LBIs
    // Show a spinner since podman pull can take a while and doesn't output progress
    let pull_msg = format!("Pulling {} to bootc storage", &image_ref_str);
    async_task_with_spinner(&pull_msg, async move {
        imgstore
            .pull(&image_ref_str, crate::podstorage::PullMode::Always)
            .await
    })
    .await?;

    // Now create a containers-storage reference to read from bootc storage
    tracing::info!("Unified pull: now importing from containers-storage transport");
    let containers_storage_imgref = ImageReference {
        transport: "containers-storage".to_string(),
        image: imgref.image.clone(),
        signature: imgref.signature.clone(),
    };
    let ostree_imgref = OstreeImageReference::from(containers_storage_imgref);

    // Configure the importer to use bootc storage as an additional image store
    let mut config = new_proxy_config();
    let mut cmd = Command::new("skopeo");
    // Use the physical path to bootc storage from the Storage struct
    let storage_path = format!(
        "{}/{}",
        store.physical_root_path,
        crate::podstorage::CStorage::subpath()
    );
    crate::podstorage::set_additional_image_store(&mut cmd, &storage_path);
    config.skopeo_cmd = Some(cmd);

    // Use the preparation flow with the custom config
    let mut imp = new_importer_with_config(repo, &ostree_imgref, config).await?;
    if let Some(target) = target_imgref {
        imp.set_target(target);
    }
    let prep = match imp.prepare().await? {
        PrepareResult::AlreadyPresent(c) => {
            println!("No changes in {imgref:#} => {}", c.manifest_digest);
            return Ok(PreparedPullResult::AlreadyPresent(Box::new((*c).into())));
        }
        PrepareResult::Ready(p) => p,
    };
    check_bootc_label(&prep.config);
    if let Some(warning) = prep.deprecated_warning() {
        ostree_ext::cli::print_deprecated_warning(warning).await;
    }
    ostree_ext::cli::print_layer_status(&prep);
    let layers_to_fetch = prep.layers_to_fetch().collect::<Result<Vec<_>>>()?;

    // Log that we're importing a new image from containers-storage
    const PULLING_NEW_IMAGE_ID: &str = "6d5e4f3a2b1c0d9e8f7a6b5c4d3e2f1a0";
    tracing::info!(
        message_id = PULLING_NEW_IMAGE_ID,
        bootc.image.reference = &imgref.image,
        bootc.image.transport = "containers-storage",
        bootc.original_transport = &imgref.transport,
        bootc.status = "importing_from_storage",
        "Importing image from bootc storage: {}",
        ostree_imgref
    );

    let prepared_image = PreparedImportMeta {
        imp,
        n_layers_to_fetch: layers_to_fetch.len(),
        layers_total: prep.all_layers().count(),
        bytes_to_fetch: layers_to_fetch.iter().map(|(l, _)| l.layer.size()).sum(),
        bytes_total: prep.all_layers().map(|l| l.layer.size()).sum(),
        digest: prep.manifest_digest.clone(),
        prep,
    };

    Ok(PreparedPullResult::Ready(Box::new(prepared_image)))
}

/// Unified pull: Use podman to pull to containers-storage, then read from there
pub(crate) async fn pull_unified(
    repo: &ostree::Repo,
    imgref: &ImageReference,
    target_imgref: Option<&OstreeImageReference>,
    quiet: bool,
    prog: ProgressWriter,
    store: &Storage,
) -> Result<Box<ImageState>> {
    match prepare_for_pull_unified(repo, imgref, target_imgref, store).await? {
        PreparedPullResult::AlreadyPresent(existing) => {
            // Log that the image was already present (Debug level since it's not actionable)
            const IMAGE_ALREADY_PRESENT_ID: &str = "5c4d3e2f1a0b9c8d7e6f5a4b3c2d1e0f9";
            tracing::debug!(
                message_id = IMAGE_ALREADY_PRESENT_ID,
                bootc.image.reference = &imgref.image,
                bootc.image.transport = &imgref.transport,
                bootc.status = "already_present",
                "Image already present: {}",
                imgref
            );
            Ok(existing)
        }
        PreparedPullResult::Ready(prepared_image_meta) => {
            // To avoid duplicate success logs, pass a containers-storage imgref to the importer
            let cs_imgref = ImageReference {
                transport: "containers-storage".to_string(),
                image: imgref.image.clone(),
                signature: imgref.signature.clone(),
            };
            pull_from_prepared(&cs_imgref, quiet, prog, *prepared_image_meta).await
        }
    }
}

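/// Execute a prepared pull: run the layer progress printer alongside the import,
/// then emit the final import progress event and log completion.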
#[context("Pulling")]
pub(crate) async fn pull_from_prepared(
    imgref: &ImageReference,
    quiet: bool,
    prog: ProgressWriter,
    mut prepared_image: PreparedImportMeta,
) -> Result<Box<ImageState>> {
    let layer_progress = prepared_image.imp.request_progress();
    let layer_byte_progress = prepared_image.imp.request_layer_progress();
    let digest = prepared_image.digest.clone();
    let digest_imp = prepared_image.digest.clone();

    let printer = tokio::task::spawn(async move {
        handle_layer_progress_print(LayerProgressConfig {
            layers: layer_progress,
            layer_bytes: layer_byte_progress,
            digest: digest.as_ref().into(),
            n_layers_to_fetch: prepared_image.n_layers_to_fetch,
            layers_total: prepared_image.layers_total,
            bytes_to_download: prepared_image.bytes_to_fetch,
            bytes_total: prepared_image.bytes_total,
            prog,
            quiet,
        })
        .await
    });
    let import = prepared_image.imp.import(prepared_image.prep).await;
    let prog = printer.await?;
    // The progress task has finished and the import future has resolved, so the
    // import as a whole is complete; emit the final progress event.
    prog.send(Event::ProgressSteps {
        task: "importing".into(),
        description: "Importing Image".into(),
        id: digest_imp.clone().as_ref().into(),
        steps_cached: 0,
        steps: 1,
        steps_total: 1,
        subtasks: [SubTaskStep {
            subtask: "importing".into(),
            description: "Importing Image".into(),
            id: "importing".into(),
            completed: true,
        }]
        .into(),
    })
    .await;
    let import = import?;
    let imgref_canonicalized = imgref.clone().canonicalize()?;
    tracing::debug!("Canonicalized image reference: {imgref_canonicalized:#}");

    // Log successful import completion (skip if using unified storage to avoid double logging)
    let is_unified_path = imgref.transport == "containers-storage";
    if !is_unified_path {
        const IMPORT_COMPLETE_JOURNAL_ID: &str = "4d3e2f1a0b9c8d7e6f5a4b3c2d1e0f9a8";

        tracing::info!(
            message_id = IMPORT_COMPLETE_JOURNAL_ID,
            bootc.image.reference = &imgref.image,
            bootc.image.transport = &imgref.transport,
            bootc.manifest_digest = import.manifest_digest.as_ref(),
            bootc.ostree_commit = &import.merge_commit,
            "Successfully imported image: {}",
            imgref
        );
    }

    if let Some(msg) =
        ostree_container::store::image_filtered_content_warning(&import.filtered_files)
            .context("Image content warning")?
    {
        tracing::info!("{}", msg);
    }
    Ok(Box::new((*import).into()))
}

/// Wrapper for pulling a container image, wiring up status output.
pub(crate) async fn pull(
    repo: &ostree::Repo,
    imgref: &ImageReference,
    target_imgref: Option<&OstreeImageReference>,
    quiet: bool,
    prog: ProgressWriter,
) -> Result<Box<ImageState>> {
    match prepare_for_pull(repo, imgref, target_imgref).await? {
        PreparedPullResult::AlreadyPresent(existing) => {
            // Log that the image was already present (Debug level since it's not actionable)
            const IMAGE_ALREADY_PRESENT_ID: &str = "5c4d3e2f1a0b9c8d7e6f5a4b3c2d1e0f9";
            tracing::debug!(
                message_id = IMAGE_ALREADY_PRESENT_ID,
                bootc.image.reference = &imgref.image,
                bootc.image.transport = &imgref.transport,
                bootc.status = "already_present",
                "Image already present: {}",
                imgref
            );
            Ok(existing)
        }
        PreparedPullResult::Ready(prepared_image_meta) => {
            // Log that we're pulling a new image
            const PULLING_NEW_IMAGE_ID: &str = "6d5e4f3a2b1c0d9e8f7a6b5c4d3e2f1a0";
            tracing::info!(
                message_id = PULLING_NEW_IMAGE_ID,
                bootc.image.reference = &imgref.image,
                bootc.image.transport = &imgref.transport,
                bootc.status = "pulling_new",
                "Pulling new image: {}",
                imgref
            );
            Ok(pull_from_prepared(imgref, quiet, prog, *prepared_image_meta).await?)
        }
    }
}

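/// Remove all deployments from the given sysroot.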
pub(crate) async fn wipe_ostree(sysroot: Sysroot) -> Result<()> {
    tokio::task::spawn_blocking(move || {
        sysroot
            .write_deployments(&[], gio::Cancellable::NONE)
            .context("removing deployments")
    })
    .await??;

    Ok(())
}

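/// Prune the ostree repo and the bootc container store, regenerating base-image
/// refs so that the base commits of derived deployments are not garbage collected.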
pub(crate) async fn cleanup(sysroot: &Storage) -> Result<()> {
    // Log the cleanup operation to systemd journal
    const CLEANUP_JOURNAL_ID: &str = "2f1a0b9c8d7e6f5a4b3c2d1e0f9a8b7c6";

    tracing::info!(
        message_id = CLEANUP_JOURNAL_ID,
        "Starting cleanup of old images and deployments"
    );

    let bound_prune = prune_container_store(sysroot);

    // We create clones (just atomic reference bumps) here to move to the thread.
    let ostree = sysroot.get_ostree_cloned()?;
    let repo = ostree.repo();
    let repo_prune =
        ostree_ext::tokio_util::spawn_blocking_cancellable_flatten(move |cancellable| {
            let locked_sysroot = &SysrootLock::from_assumed_locked(&ostree);
            let cancellable = Some(cancellable);
            let repo = &repo;
            let txn = repo.auto_transaction(cancellable)?;
            let repo = txn.repo();

            // Regenerate our base references.  First, we delete the ones that exist
            for ref_entry in repo
                .list_refs_ext(
                    Some(BASE_IMAGE_PREFIX),
                    ostree::RepoListRefsExtFlags::NONE,
                    cancellable,
                )
                .context("Listing refs")?
                .keys()
            {
                repo.transaction_set_refspec(ref_entry, None);
            }

            // Then, for each deployment which is derived (e.g. has configmaps) we synthesize
            // a base ref to ensure that it's not GC'd.
            for (i, deployment) in ostree.deployments().into_iter().enumerate() {
                let commit = deployment.csum();
                if let Some(base) = get_base_commit(repo, &commit)? {
                    repo.transaction_set_refspec(&format!("{BASE_IMAGE_PREFIX}/{i}"), Some(&base));
                }
            }

            let pruned =
                ostree_container::deploy::prune(locked_sysroot).context("Pruning images")?;
            if !pruned.is_empty() {
                let size = glib::format_size(pruned.objsize);
                println!(
                    "Pruned images: {} (layers: {}, objsize: {})",
                    pruned.n_images, pruned.n_layers, size
                );
            } else {
                tracing::debug!("Nothing to prune");
            }

            Ok(())
        });

    // We run these in parallel mostly because we can.
    tokio::try_join!(repo_prune, bound_prune)?;
    Ok(())
}

/// If commit is a bootc-derived commit (e.g. has configmaps), return its base.
#[context("Finding base commit")]
pub(crate) fn get_base_commit(repo: &ostree::Repo, commit: &str) -> Result<Option<String>> {
    let commitv = repo.load_commit(commit)?.0;
    let commitmeta = commitv.child_value(0);
    let commitmeta = &glib::VariantDict::new(Some(&commitmeta));
    let r = commitmeta.lookup::<String>(BOOTC_DERIVED_KEY)?;
    Ok(r)
}

#[context("Writing deployment")]
async fn deploy(
    sysroot: &Storage,
    from: MergeState,
    image: &ImageState,
    origin: &glib::KeyFile,
    lock_finalization: bool,
) -> Result<Deployment> {
    // Compute the kernel argument overrides. In practice today this API is always expecting
    // a merge deployment. The kargs code also always looks at the booted root (which
    // is a distinct minor issue, but not super important as right now the install path
    // doesn't use this API).
    let (stateroot, override_kargs) = match &from {
        MergeState::MergeDeployment(deployment) => {
            let kargs = crate::bootc_kargs::get_kargs(sysroot, &deployment, image)?;
            (deployment.stateroot().into(), Some(kargs))
        }
        MergeState::Reset { stateroot, kargs } => (stateroot.clone(), Some(kargs.clone())),
    };
    // Clone all the things to move to worker thread
    let ostree = sysroot.get_ostree_cloned()?;
    // ostree::Deployment is incorrectly !Send 😢 so convert it to an integer
    let merge_deployment = from.as_merge_deployment();
    let merge_deployment = merge_deployment.map(|d| d.index() as usize);
    let ostree_commit = image.ostree_commit.to_string();
    // GKeyFile also isn't Send! So we serialize that as a string...
    let origin_data = origin.to_data();
    let r = async_task_with_spinner(
        "Deploying",
        spawn_blocking_cancellable_flatten(move |cancellable| -> Result<_> {
            let ostree = ostree;
            let stateroot = Some(stateroot);
            let mut opts = ostree::SysrootDeployTreeOpts::default();

            // Set finalization lock if requested
            opts.locked = lock_finalization;

            // Because the C API expects a Vec<&str>, convert the Cmdline to string slices.
            // The references borrow from the Cmdline, which outlives this usage.
            let override_kargs_refs = override_kargs
                .as_ref()
                .map(|kargs| kargs.iter_str().collect::<Vec<_>>());
            if let Some(kargs) = override_kargs_refs.as_ref() {
                opts.override_kernel_argv = Some(kargs);
            }

            let deployments = ostree.deployments();
            let merge_deployment = merge_deployment.map(|m| &deployments[m]);
            let origin = glib::KeyFile::new();
            origin.load_from_data(&origin_data, glib::KeyFileFlags::NONE)?;
            let d = ostree.stage_tree_with_options(
                stateroot.as_deref(),
                &ostree_commit,
                Some(&origin),
                merge_deployment,
                &opts,
                Some(cancellable),
            )?;
            Ok(d.index())
        }),
    )
    .await?;
    // SAFETY: We must have a staged deployment
    let ostree = sysroot.get_ostree()?;
    let staged = ostree.staged_deployment().unwrap();
    assert_eq!(staged.index(), r);
    Ok(staged)
}

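/// Build an ostree origin keyfile for the given container image; the result holds
/// the serialized image reference under the `ORIGIN_CONTAINER` key in the `origin` group.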
#[context("Generating origin")]
fn origin_from_imageref(imgref: &ImageReference) -> Result<glib::KeyFile> {
    let origin = glib::KeyFile::new();
    let imgref = OstreeImageReference::from(imgref.clone());
    origin.set_string(
        "origin",
        ostree_container::deploy::ORIGIN_CONTAINER,
        imgref.to_string().as_str(),
    );
    Ok(origin)
}

/// The source of data for staging a new deployment
#[derive(Debug)]
pub(crate) enum MergeState {
    /// Use the provided merge deployment
    MergeDeployment(Deployment),
    /// Don't use a merge deployment, but only this
    /// provided initial state.
    Reset {
        stateroot: String,
        kargs: CmdlineOwned,
    },
}

impl MergeState {
    /// Initialize using the default merge deployment for the given stateroot.
    pub(crate) fn from_stateroot(sysroot: &Storage, stateroot: &str) -> Result<Self> {
        let ostree = sysroot.get_ostree()?;
        let merge_deployment = ostree.merge_deployment(Some(stateroot)).ok_or_else(|| {
            anyhow::anyhow!("No merge deployment found for stateroot {stateroot}")
        })?;
        Ok(Self::MergeDeployment(merge_deployment))
    }

    /// Cast this to a merge deployment case.
    pub(crate) fn as_merge_deployment(&self) -> Option<&Deployment> {
        match self {
            Self::MergeDeployment(d) => Some(d),
            Self::Reset { .. } => None,
        }
    }
}

/// Stage (queue deployment of) a fetched container image.
#[context("Staging")]
pub(crate) async fn stage(
    sysroot: &Storage,
    from: MergeState,
    image: &ImageState,
    spec: &RequiredHostSpec<'_>,
    prog: ProgressWriter,
    lock_finalization: bool,
) -> Result<()> {
    // Log the staging operation to systemd journal with comprehensive upgrade information
    const STAGE_JOURNAL_ID: &str = "8f7a2b1c3d4e5f6a7b8c9d0e1f2a3b4c";

    tracing::info!(
        message_id = STAGE_JOURNAL_ID,
        bootc.image.reference = &spec.image.image,
        bootc.image.transport = &spec.image.transport,
        bootc.manifest_digest = image.manifest_digest.as_ref(),
        "Staging image for deployment: {} (digest: {})",
        spec.image,
        image.manifest_digest
    );

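    // Progress for staging is reported as three steps, with subtasks for merging,
    // deploying, pulling bound images, and cleanup.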
    let mut subtask = SubTaskStep {
        subtask: "merging".into(),
        description: "Merging Image".into(),
        id: "fetching".into(),
        completed: false,
    };
    let mut subtasks = vec![];
    prog.send(Event::ProgressSteps {
        task: "staging".into(),
        description: "Deploying Image".into(),
        id: image.manifest_digest.clone().as_ref().into(),
        steps_cached: 0,
        steps: 0,
        steps_total: 3,
        subtasks: subtasks
            .clone()
            .into_iter()
            .chain([subtask.clone()])
            .collect(),
    })
    .await;

    subtask.completed = true;
    subtasks.push(subtask.clone());
    subtask.subtask = "deploying".into();
    subtask.id = "deploying".into();
    subtask.description = "Deploying Image".into();
    subtask.completed = false;
    prog.send(Event::ProgressSteps {
        task: "staging".into(),
        description: "Deploying Image".into(),
        id: image.manifest_digest.clone().as_ref().into(),
        steps_cached: 0,
        steps: 1,
        steps_total: 3,
        subtasks: subtasks
            .clone()
            .into_iter()
            .chain([subtask.clone()])
            .collect(),
    })
    .await;
    let origin = origin_from_imageref(spec.image)?;
    let deployment =
        crate::deploy::deploy(sysroot, from, image, &origin, lock_finalization).await?;

    subtask.completed = true;
    subtasks.push(subtask.clone());
    subtask.subtask = "bound_images".into();
    subtask.id = "bound_images".into();
    subtask.description = "Pulling Bound Images".into();
    subtask.completed = false;
    prog.send(Event::ProgressSteps {
        task: "staging".into(),
        description: "Deploying Image".into(),
        id: image.manifest_digest.clone().as_ref().into(),
        steps_cached: 0,
        steps: 1,
        steps_total: 3,
        subtasks: subtasks
            .clone()
            .into_iter()
            .chain([subtask.clone()])
            .collect(),
    })
    .await;
    crate::boundimage::pull_bound_images(sysroot, &deployment).await?;

    subtask.completed = true;
    subtasks.push(subtask.clone());
    subtask.subtask = "cleanup".into();
    subtask.id = "cleanup".into();
    subtask.description = "Removing old images".into();
    subtask.completed = false;
    prog.send(Event::ProgressSteps {
        task: "staging".into(),
        description: "Deploying Image".into(),
        id: image.manifest_digest.clone().as_ref().into(),
        steps_cached: 0,
        steps: 2,
        steps_total: 3,
        subtasks: subtasks
            .clone()
            .into_iter()
            .chain([subtask.clone()])
            .collect(),
    })
    .await;
    crate::deploy::cleanup(sysroot).await?;
    println!("Queued for next boot: {:#}", spec.image);
    if let Some(version) = image.version.as_deref() {
        println!("  Version: {version}");
    }
    println!("  Digest: {}", image.manifest_digest);

    subtask.completed = true;
    subtasks.push(subtask.clone());
    prog.send(Event::ProgressSteps {
        task: "staging".into(),
        description: "Deploying Image".into(),
        id: image.manifest_digest.clone().as_ref().into(),
        steps_cached: 0,
        steps: 3,
        steps_total: 3,
        subtasks: subtasks
            .clone()
            .into_iter()
            .chain([subtask.clone()])
            .collect(),
    })
    .await;

    // Unconditionally create or update /run/reboot-required to signal a reboot is needed.
    // This is monitored by kured (Kubernetes Reboot Daemon).
    write_reboot_required(&image.manifest_digest.as_ref())?;

    Ok(())
}

/// Update the /run/reboot-required file with the image that will be active after a reboot.
fn write_reboot_required(image: &str) -> Result<()> {
    let reboot_message = format!("bootc: Reboot required for image: {}", image);
    let run_dir = Dir::open_ambient_dir("/run", cap_std::ambient_authority())?;
    run_dir
        .atomic_write("reboot-required", reboot_message.as_bytes())
        .context("Creating /run/reboot-required")?;

    Ok(())
}

/// Implementation of rollback functionality
pub(crate) async fn rollback(sysroot: &Storage) -> Result<()> {
    const ROLLBACK_JOURNAL_ID: &str = "26f3b1eb24464d12aa5e7b544a6b5468";
    let ostree = sysroot.get_ostree()?;
    let (booted_ostree, deployments, host) = crate::status::get_status_require_booted(ostree)?;

    let new_spec = {
        let mut new_spec = host.spec.clone();
        new_spec.boot_order = new_spec.boot_order.swap();
        new_spec
    };

    let repo = &booted_ostree.repo();

    // Just to be sure
    host.spec.verify_transition(&new_spec)?;

    let reverting = new_spec.boot_order == BootOrder::Default;
    if reverting {
        println!("notice: Reverting queued rollback state");
    }
    let rollback_status = host
        .status
        .rollback
        .ok_or_else(|| anyhow!("No rollback available"))?;
    let rollback_image = rollback_status
        .query_image(repo)?
        .ok_or_else(|| anyhow!("Rollback is not container image based"))?;

    // Get current booted image for comparison
    let current_image = host
        .status
        .booted
        .as_ref()
        .and_then(|b| b.query_image(repo).ok()?);

    tracing::info!(
        message_id = ROLLBACK_JOURNAL_ID,
        bootc.manifest_digest = rollback_image.manifest_digest.as_ref(),
        bootc.ostree_commit = &rollback_image.merge_commit,
        bootc.rollback_type = if reverting { "revert" } else { "rollback" },
        bootc.current_manifest_digest = current_image
            .as_ref()
            .map(|i| i.manifest_digest.as_ref())
            .unwrap_or("none"),
        "Rolling back to image: {}",
        rollback_image.manifest_digest
    );
    // SAFETY: If there's a rollback status, then there's a deployment
    let rollback_deployment = deployments.rollback.expect("rollback deployment");
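    // When reverting a queued rollback, keep the booted deployment first so it stays
    // the default; otherwise promote the rollback deployment to the front so it boots next.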
    let new_deployments = if reverting {
        [booted_ostree.deployment, rollback_deployment]
    } else {
        [rollback_deployment, booted_ostree.deployment]
    };
    let new_deployments = new_deployments
        .into_iter()
        .chain(deployments.other)
        .collect::<Vec<_>>();
    tracing::debug!("Writing new deployments: {new_deployments:?}");
    booted_ostree
        .sysroot
        .write_deployments(&new_deployments, gio::Cancellable::NONE)?;
    if reverting {
        println!("Next boot: current deployment");
    } else {
        println!("Next boot: rollback deployment");
    }

    write_reboot_required(rollback_image.manifest_digest.as_ref())?;

    sysroot.update_mtime()?;

    Ok(())
}

fn find_newest_deployment_name(deploysdir: &Dir) -> Result<String> {
    let mut dirs = Vec::new();
    for ent in deploysdir.entries()? {
        let ent = ent?;
        if !ent.file_type()?.is_dir() {
            continue;
        }
        let name = ent.file_name();
        let Some(name) = name.to_str() else {
            continue;
        };
        dirs.push((name.to_owned(), ent.metadata()?.mtime()));
    }
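    // Sort ascending by mtime; the most recently modified deployment ends up last.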
    dirs.sort_unstable_by(|a, b| a.1.cmp(&b.1));
    if let Some((name, _ts)) = dirs.pop() {
        Ok(name)
    } else {
        anyhow::bail!("No deployment directory found")
    }
}

// Implementation of `bootc switch --in-place`
pub(crate) fn switch_origin_inplace(root: &Dir, imgref: &ImageReference) -> Result<String> {
    // Log the in-place switch operation to systemd journal
    const SWITCH_INPLACE_JOURNAL_ID: &str = "3e2f1a0b9c8d7e6f5a4b3c2d1e0f9a8b7";

    tracing::info!(
        message_id = SWITCH_INPLACE_JOURNAL_ID,
        bootc.image.reference = &imgref.image,
        bootc.image.transport = &imgref.transport,
        bootc.switch_type = "in_place",
        "Performing in-place switch to image: {}",
        imgref
    );

    // First, just create the new origin file
    let origin = origin_from_imageref(imgref)?;
    let serialized_origin = origin.to_data();

    // Now, we can't rely on being officially booted (e.g. with the `ostree=` karg)
    // in a scenario like running in the anaconda %post.
    // Eventually, we should support a setup here where ostree-prepare-root
    // can officially be run to "enter" an ostree root in a supportable way.
    // Anyway, for now the brutal hack is to just scrape through the deployments
    // and find the newest one, which we will mutate.  If there's more than one,
    // ultimately the calling tooling should be fixed to set things up correctly.

    let mut ostree_deploys = root.open_dir("sysroot/ostree/deploy")?.entries()?;
    let deploydir = loop {
        if let Some(ent) = ostree_deploys.next() {
            let ent = ent?;
            if !ent.file_type()?.is_dir() {
                continue;
            }
            tracing::debug!("Checking {:?}", ent.file_name());
            let child_dir = ent
                .open_dir()
                .with_context(|| format!("Opening dir {:?}", ent.file_name()))?;
            if let Some(d) = child_dir.open_dir_optional("deploy")? {
                break d;
            }
        } else {
            anyhow::bail!("Failed to find a deployment");
        }
    };
    let newest_deployment = find_newest_deployment_name(&deploydir)?;
    let origin_path = format!("{newest_deployment}.origin");
    if !deploydir.try_exists(&origin_path)? {
        tracing::warn!("No extant origin for {newest_deployment}");
    }
    deploydir
        .atomic_write(&origin_path, serialized_origin.as_bytes())
        .context("Writing origin")?;
    Ok(newest_deployment)
}

/// A workaround for <https://github.com/ostreedev/ostree/issues/3193>
/// as generated by anaconda.
#[context("Updating /etc/fstab for anaconda+composefs")]
pub(crate) fn fixup_etc_fstab(root: &Dir) -> Result<()> {
    let fstab_path = "etc/fstab";
    // Read the old file
    let fd = root
        .open(fstab_path)
        .with_context(|| format!("Opening {fstab_path}"))
        .map(std::io::BufReader::new)?;

    // Helper function to possibly change a line from /etc/fstab.
    // Returns Ok(true) if we made a change (and we wrote the modified line);
    // otherwise returns Ok(false) and the caller should write the original line.
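    // For example, "UUID=... / xfs defaults 0 0" becomes a stamp comment line
    // followed by "UUID=... / xfs defaults,ro 0 0".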
    fn edit_fstab_line(line: &str, mut w: impl Write) -> Result<bool> {
        if line.starts_with('#') {
            return Ok(false);
        }
        let parts = line.split_ascii_whitespace().collect::<Vec<_>>();

        let path_idx = 1;
        let options_idx = 3;
        let (&path, &options) = match (parts.get(path_idx), parts.get(options_idx)) {
            (None, _) => {
                tracing::debug!("No path in entry: {line}");
                return Ok(false);
            }
            (_, None) => {
                tracing::debug!("No options in entry: {line}");
                return Ok(false);
            }
            (Some(p), Some(o)) => (p, o),
        };
        // If this is not the root, we're not matching on it
        if path != "/" {
            return Ok(false);
        }
        // If options already contains `ro`, nothing to do
        if options.split(',').any(|s| s == "ro") {
            return Ok(false);
        }

        writeln!(w, "# {}", crate::generator::BOOTC_EDITED_STAMP)?;

        // SAFETY: we unpacked the options before.
        // This adds `ro` to the option list
        assert!(!options.is_empty()); // Split wouldn't have turned this up if it was empty
        let options = format!("{options},ro");
        for (i, part) in parts.into_iter().enumerate() {
            // TODO: would obviously be nicer to preserve whitespace...but...eh.
            if i > 0 {
                write!(w, " ")?;
            }
            if i == options_idx {
                write!(w, "{options}")?;
            } else {
                write!(w, "{part}")?
            }
        }
        // And add the trailing newline
        writeln!(w)?;
        Ok(true)
    }

    // Read the input, and atomically write a modified version
    root.atomic_replace_with(fstab_path, move |mut w| -> Result<()> {
        for line in fd.lines() {
            let line = line?;
            if !edit_fstab_line(&line, &mut w)? {
                writeln!(w, "{line}")?;
            }
        }
        Ok(())
    })
    .context("Replacing /etc/fstab")?;

    println!("Updated /etc/fstab to add `ro` for `/`");
    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_new_proxy_config_user_agent() {
        let config = new_proxy_config();
        let prefix = config
            .user_agent_prefix
            .expect("user_agent_prefix should be set");
        assert!(
            prefix.starts_with("bootc/"),
            "User agent should start with bootc/"
        );
        // Verify the version is present (not just "bootc/")
        assert!(
            prefix.len() > "bootc/".len(),
            "Version should be present after bootc/"
        );
    }

    #[test]
    fn test_switch_inplace() -> Result<()> {
        use cap_std::fs::DirBuilderExt;

        let td = cap_std_ext::cap_tempfile::TempDir::new(cap_std::ambient_authority())?;
        let mut builder = cap_std::fs::DirBuilder::new();
        let builder = builder.recursive(true).mode(0o755);
        let deploydir = "sysroot/ostree/deploy/default/deploy";
        let target_deployment =
            "af36eb0086bb55ac601600478c6168f834288013d60f8870b7851f44bf86c3c5.0";
        td.ensure_dir_with(
            format!("sysroot/ostree/deploy/default/deploy/{target_deployment}"),
            builder,
        )?;
        let deploydir = &td.open_dir(deploydir)?;
        let orig_imgref = ImageReference {
            image: "quay.io/exampleos/original:sometag".into(),
            transport: "registry".into(),
            signature: None,
        };
        {
            let origin = origin_from_imageref(&orig_imgref)?;
            deploydir.atomic_write(
                format!("{target_deployment}.origin"),
                origin.to_data().as_bytes(),
            )?;
        }

        let target_imgref = ImageReference {
            image: "quay.io/someother/otherimage:latest".into(),
            transport: "registry".into(),
            signature: None,
        };

        let replaced = switch_origin_inplace(&td, &target_imgref).unwrap();
        assert_eq!(replaced, target_deployment);
        Ok(())
    }

    #[test]
    fn test_fixup_etc_fstab_default() -> Result<()> {
        let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
        let default = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n";
        tempdir.create_dir_all("etc")?;
        tempdir.atomic_write("etc/fstab", default)?;
        fixup_etc_fstab(&tempdir).unwrap();
        assert_eq!(tempdir.read_to_string("etc/fstab")?, default);
        Ok(())
    }

    #[test]
    fn test_fixup_etc_fstab_multi() -> Result<()> {
        let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
        let default = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n\
UUID=6907-17CA          /boot/efi               vfat    umask=0077,shortname=winnt 0 2\n";
        tempdir.create_dir_all("etc")?;
        tempdir.atomic_write("etc/fstab", default)?;
        fixup_etc_fstab(&tempdir).unwrap();
        assert_eq!(tempdir.read_to_string("etc/fstab")?, default);
        Ok(())
    }

    #[test]
    fn test_fixup_etc_fstab_ro() -> Result<()> {
        let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
        let default = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n\
UUID=1eef9f42-40e3-4bd8-ae20-e9f2325f8b52 /                     xfs   ro 0 0\n\
UUID=6907-17CA          /boot/efi               vfat    umask=0077,shortname=winnt 0 2\n";
        tempdir.create_dir_all("etc")?;
        tempdir.atomic_write("etc/fstab", default)?;
        fixup_etc_fstab(&tempdir).unwrap();
        assert_eq!(tempdir.read_to_string("etc/fstab")?, default);
        Ok(())
    }

    #[test]
    fn test_fixup_etc_fstab_rw() -> Result<()> {
        let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
        // This case uses `defaults`
        let default = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n\
UUID=1eef9f42-40e3-4bd8-ae20-e9f2325f8b52 /                     xfs   defaults 0 0\n\
UUID=6907-17CA          /boot/efi               vfat    umask=0077,shortname=winnt 0 2\n";
        let modified = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n\
# Updated by bootc-fstab-edit.service\n\
UUID=1eef9f42-40e3-4bd8-ae20-e9f2325f8b52 / xfs defaults,ro 0 0\n\
UUID=6907-17CA          /boot/efi               vfat    umask=0077,shortname=winnt 0 2\n";
        tempdir.create_dir_all("etc")?;
        tempdir.atomic_write("etc/fstab", default)?;
        fixup_etc_fstab(&tempdir).unwrap();
        assert_eq!(tempdir.read_to_string("etc/fstab")?, modified);
        Ok(())
    }
}
1371}