1use std::collections::HashSet;
6use std::io::{BufRead, Write};
7use std::os::fd::AsFd;
8use std::process::Command;
9
10use anyhow::{Context, Result, anyhow};
11use bootc_kernel_cmdline::utf8::CmdlineOwned;
12use cap_std::fs::{Dir, MetadataExt};
13use cap_std_ext::cap_std;
14use cap_std_ext::dirext::CapStdExtDirExt;
15use fn_error_context::context;
16use ostree::{gio, glib};
17use ostree_container::OstreeImageReference;
18use ostree_ext::container as ostree_container;
19use ostree_ext::container::store::{ImageImporter, ImportProgress, PrepareResult, PreparedImport};
20use ostree_ext::oci_spec::image::{Descriptor, Digest};
21use ostree_ext::ostree::Deployment;
22use ostree_ext::ostree::{self, Sysroot};
23use ostree_ext::sysroot::SysrootLock;
24use ostree_ext::tokio_util::spawn_blocking_cancellable_flatten;
25
26use crate::progress_jsonl::{Event, ProgressWriter, SubTaskBytes, SubTaskStep};
27use crate::spec::ImageReference;
28use crate::spec::{BootOrder, HostSpec};
29use crate::status::labels_of_config;
30use crate::store::Storage;
31use crate::utils::async_task_with_spinner;
32
/// Ref prefix under which per-deployment base image commits are pinned;
/// `cleanup` rewrites `{BASE_IMAGE_PREFIX}/{i}` refs for each deployment.
const BASE_IMAGE_PREFIX: &str = "ostree/container/baseimage/bootc";
35
36pub(crate) fn new_proxy_config() -> ostree_ext::containers_image_proxy::ImageProxyConfig {
41 ostree_ext::containers_image_proxy::ImageProxyConfig {
42 user_agent_prefix: Some(format!("bootc/{}", env!("CARGO_PKG_VERSION"))),
43 ..Default::default()
44 }
45}
46
/// Commit metadata key recording the base commit a derived commit was built
/// from; read back in `get_base_commit`.
const BOOTC_DERIVED_KEY: &str = "bootc.derived";
49
/// A view of a `HostSpec` whose optional fields have been validated present.
pub(crate) struct RequiredHostSpec<'a> {
    /// The container image to deploy (guaranteed non-missing).
    pub(crate) image: &'a ImageReference,
}
54
/// The subset of imported-image state bootc tracks and reports.
pub(crate) struct ImageState {
    /// Digest of the image manifest.
    pub(crate) manifest_digest: Digest,
    /// Human-readable version label, if the image carries one.
    pub(crate) version: Option<String>,
    /// The ostree commit the image content was imported as.
    pub(crate) ostree_commit: String,
}
61
62impl<'a> RequiredHostSpec<'a> {
63 pub(crate) fn from_spec(spec: &'a HostSpec) -> Result<Self> {
66 let image = spec
67 .image
68 .as_ref()
69 .ok_or_else(|| anyhow::anyhow!("Missing image in specification"))?;
70 Ok(Self { image })
71 }
72}
73
74impl From<ostree_container::store::LayeredImageState> for ImageState {
75 fn from(value: ostree_container::store::LayeredImageState) -> Self {
76 let version = value.version().map(|v| v.to_owned());
77 let ostree_commit = value.get_commit().to_owned();
78 Self {
79 manifest_digest: value.manifest_digest,
80 version,
81 ostree_commit,
82 }
83 }
84}
85
impl ImageState {
    /// Look up the OCI manifest recorded for this image's ostree commit in
    /// `repo`.
    pub(crate) fn get_manifest(
        &self,
        repo: &ostree::Repo,
    ) -> Result<Option<ostree_ext::oci_spec::image::ImageManifest>> {
        ostree_container::store::query_image_commit(repo, &self.ostree_commit)
            .map(|v| Some(v.manifest))
    }
}
96
97pub(crate) async fn new_importer(
99 repo: &ostree::Repo,
100 imgref: &ostree_container::OstreeImageReference,
101 booted_deployment: Option<&ostree::Deployment>,
102) -> Result<ostree_container::store::ImageImporter> {
103 let config = new_proxy_config();
104 let mut imp = ostree_container::store::ImageImporter::new(repo, imgref, config).await?;
105 imp.require_bootable();
106 imp.disable_gc();
108 if let Some(deployment) = booted_deployment {
109 imp.set_sepolicy_commit(deployment.csum().to_string());
110 }
111 Ok(imp)
112}
113
/// Create a new image importer for `imgref` using an explicit proxy
/// configuration (e.g. one carrying a custom skopeo command).
///
/// The importer requires bootable images, disables concurrent GC, and — when
/// a booted deployment is provided — pins the SELinux policy to that
/// deployment's commit.
pub(crate) async fn new_importer_with_config(
    repo: &ostree::Repo,
    imgref: &ostree_container::OstreeImageReference,
    config: ostree_ext::containers_image_proxy::ImageProxyConfig,
    booted_deployment: Option<&ostree::Deployment>,
) -> Result<ostree_container::store::ImageImporter> {
    let mut imp = ostree_container::store::ImageImporter::new(repo, imgref, config).await?;
    imp.require_bootable();
    // We manage pruning ourselves (see `cleanup`).
    imp.disable_gc();
    if let Some(deployment) = booted_deployment {
        imp.set_sepolicy_commit(deployment.csum().to_string());
    }
    Ok(imp)
}
130
131pub(crate) fn check_bootc_label(config: &ostree_ext::oci_spec::image::ImageConfiguration) {
132 if let Some(label) =
133 labels_of_config(config).and_then(|labels| labels.get(crate::metadata::BOOTC_COMPAT_LABEL))
134 {
135 match label.as_str() {
136 crate::metadata::COMPAT_LABEL_V1 => {}
137 o => crate::journal::journal_print(
138 libsystemd::logging::Priority::Warning,
139 &format!(
140 "notice: Unknown {} value {}",
141 crate::metadata::BOOTC_COMPAT_LABEL,
142 o
143 ),
144 ),
145 }
146 } else {
147 crate::journal::journal_print(
148 libsystemd::logging::Priority::Warning,
149 &format!(
150 "notice: Image is missing label: {}",
151 crate::metadata::BOOTC_COMPAT_LABEL
152 ),
153 )
154 }
155}
156
157fn descriptor_of_progress(p: &ImportProgress) -> &Descriptor {
158 match p {
159 ImportProgress::OstreeChunkStarted(l) => l,
160 ImportProgress::OstreeChunkCompleted(l) => l,
161 ImportProgress::DerivedLayerStarted(l) => l,
162 ImportProgress::DerivedLayerCompleted(l) => l,
163 }
164}
165
166fn prefix_of_progress(p: &ImportProgress) -> &'static str {
167 match p {
168 ImportProgress::OstreeChunkStarted(_) | ImportProgress::OstreeChunkCompleted(_) => {
169 "ostree chunk"
170 }
171 ImportProgress::DerivedLayerStarted(_) | ImportProgress::DerivedLayerCompleted(_) => {
172 "layer"
173 }
174 }
175}
176
/// Inputs for [`handle_layer_progress_print`]: the two progress channels plus
/// precomputed totals used to render and report fetch progress.
struct LayerProgressConfig {
    /// Per-layer start/complete events.
    layers: tokio::sync::mpsc::Receiver<ostree_container::store::ImportProgress>,
    /// Byte-level progress within the layer currently being fetched.
    layer_bytes: tokio::sync::watch::Receiver<Option<ostree_container::store::LayerProgress>>,
    /// Manifest digest of the image being pulled; used as the progress event id.
    digest: Box<str>,
    /// Number of layers that actually need downloading.
    n_layers_to_fetch: usize,
    /// Total layer count in the image (downloaded + cached).
    layers_total: usize,
    /// Bytes that need downloading.
    bytes_to_download: u64,
    /// Total bytes across all layers (downloaded + cached).
    bytes_total: u64,
    /// Sink for JSONL progress events.
    prog: ProgressWriter,
    /// When set, hide the interactive progress bars.
    quiet: bool,
}
189
/// Render layer-fetch progress both interactively (indicatif bars) and as
/// JSONL events on `config.prog`.
///
/// Loops over two channels — per-layer start/complete events and byte-level
/// progress for the current layer — until they close, then prints a fetch
/// summary and emits the initial (not-yet-completed) "importing" step event.
/// Returns the `ProgressWriter` so the caller can keep reporting.
async fn handle_layer_progress_print(mut config: LayerProgressConfig) -> ProgressWriter {
    let start = std::time::Instant::now();
    let mut total_read = 0u64;
    let bar = indicatif::MultiProgress::new();
    if config.quiet {
        bar.set_draw_target(indicatif::ProgressDrawTarget::hidden());
    }
    // One bar counting layers, one counting bytes within the current layer.
    let layers_bar = bar.add(indicatif::ProgressBar::new(
        config.n_layers_to_fetch.try_into().unwrap(),
    ));
    let byte_bar = bar.add(indicatif::ProgressBar::new(0));
    layers_bar.set_style(
        indicatif::ProgressStyle::default_bar()
            .template("{prefix} {bar} {pos}/{len} {wide_msg}")
            .unwrap(),
    );
    let taskname = "Fetching layers";
    layers_bar.set_prefix(taskname);
    layers_bar.set_message("");
    byte_bar.set_prefix("Fetching");
    byte_bar.set_style(
        indicatif::ProgressStyle::default_bar()
            .template(
                " └ {prefix} {bar} {binary_bytes}/{binary_total_bytes} ({binary_bytes_per_sec}) {wide_msg}",
            )
            .unwrap()
    );

    // `subtask` tracks the layer in flight; completed layers accumulate in
    // `subtasks`.
    let mut subtasks = vec![];
    let mut subtask: SubTaskBytes = Default::default();
    loop {
        tokio::select! {
            // `biased`: prefer layer start/complete events over byte updates.
            biased;
            layer = config.layers.recv() => {
                if let Some(l) = layer {
                    let layer = descriptor_of_progress(&l);
                    let layer_type = prefix_of_progress(&l);
                    let short_digest = &layer.digest().digest()[0..21];
                    let layer_size = layer.size();
                    if l.is_starting() {
                        // New layer: reset the byte bar and open a fresh subtask.
                        byte_bar.reset_elapsed();
                        byte_bar.reset_eta();
                        byte_bar.set_length(layer_size);
                        byte_bar.set_message(format!("{layer_type} {short_digest}"));

                        subtask = SubTaskBytes {
                            subtask: layer_type.into(),
                            description: format!("{layer_type}: {short_digest}").clone().into(),
                            id: short_digest.to_string().clone().into(),
                            bytes_cached: 0,
                            bytes: 0,
                            bytes_total: layer_size,
                        };
                    } else {
                        // Layer complete: use the observed length (may differ
                        // from the descriptor's size) and record the subtask.
                        let actual_size = byte_bar.length().unwrap_or(layer_size);
                        byte_bar.set_position(actual_size);
                        layers_bar.inc(1);
                        total_read = total_read.saturating_add(actual_size);
                        subtask.bytes_total = actual_size;
                        subtask.bytes = actual_size;
                        subtasks.push(subtask.clone());
                        config.prog.send(Event::ProgressBytes {
                            task: "pulling".into(),
                            description: format!("Pulling Image: {}", config.digest).into(),
                            id: (*config.digest).into(),
                            bytes_cached: config.bytes_total - config.bytes_to_download,
                            bytes: total_read,
                            bytes_total: config.bytes_to_download,
                            steps_cached: (config.layers_total - config.n_layers_to_fetch) as u64,
                            steps: layers_bar.position(),
                            steps_total: config.n_layers_to_fetch as u64,
                            subtasks: subtasks.clone(),
                        }).await;
                    }
                } else {
                    // Channel closed: all layers done.
                    break
                };
            },
            r = config.layer_bytes.changed() => {
                if r.is_err() {
                    // Sender dropped; stop rendering.
                    break
                }
                let bytes = {
                    let bytes = config.layer_bytes.borrow_and_update();
                    bytes.as_ref().cloned()
                };
                if let Some(bytes) = bytes {
                    byte_bar.set_length(bytes.total);
                    byte_bar.set_position(bytes.fetched);
                    subtask.bytes_total = bytes.total;
                    subtask.bytes = byte_bar.position();
                    // Byte updates are frequent, so send lossily; include the
                    // in-flight subtask alongside the completed ones.
                    config.prog.send_lossy(Event::ProgressBytes {
                        task: "pulling".into(),
                        description: format!("Pulling Image: {}", config.digest).into(),
                        id: (*config.digest).into(),
                        bytes_cached: config.bytes_total - config.bytes_to_download,
                        bytes: total_read + byte_bar.position(),
                        bytes_total: config.bytes_to_download,
                        steps_cached: (config.layers_total - config.n_layers_to_fetch) as u64,
                        steps: layers_bar.position(),
                        steps_total: config.n_layers_to_fetch as u64,
                        subtasks: subtasks.clone().into_iter().chain([subtask.clone()]).collect(),
                    }).await;
                }
            }
        }
    }
    byte_bar.finish_and_clear();
    layers_bar.finish_and_clear();
    if let Err(e) = bar.clear() {
        tracing::warn!("clearing bar: {e}");
    }
    // Summarize overall throughput for the fetch.
    let end = std::time::Instant::now();
    let elapsed = end.duration_since(start);
    let persec = total_read as f64 / elapsed.as_secs_f64();
    let persec = indicatif::HumanBytes(persec as u64);
    if let Err(e) = bar.println(&format!(
        "Fetched layers: {} in {} ({}/s)",
        indicatif::HumanBytes(total_read),
        indicatif::HumanDuration(elapsed),
        persec,
    )) {
        tracing::warn!("writing to stdout: {e}");
    }

    // Announce the import step as started but not completed; the caller
    // emits the completion event once the import finishes.
    config
        .prog
        .send(Event::ProgressSteps {
            task: "importing".into(),
            description: "Importing Image".into(),
            id: (*config.digest).into(),
            steps_cached: 0,
            steps: 0,
            steps_total: 1,
            subtasks: [SubTaskStep {
                subtask: "importing".into(),
                description: "Importing Image".into(),
                id: "importing".into(),
                completed: false,
            }]
            .into(),
        })
        .await;

    config.prog
}
353
/// Prune images from the bootc container store that are no longer referenced
/// by any deployment, either as a bound image or as a deployment's own host
/// image.
pub(crate) async fn prune_container_store(sysroot: &Storage) -> Result<()> {
    let ostree = sysroot.get_ostree()?;
    let deployments = ostree.deployments();
    // Gather every image that must be kept alive as a GC root.
    let mut all_bound_images = Vec::new();
    for deployment in deployments {
        let bound = crate::boundimage::query_bound_images_for_deployment(ostree, &deployment)?;
        all_bound_images.extend(bound.into_iter());
        // Also keep the deployment's base host image, if container based.
        if let Some(host_image) = crate::status::boot_entry_from_deployment(ostree, &deployment)?
            .image
            .map(|i| i.image)
        {
            all_bound_images.push(crate::boundimage::BoundImage {
                image: host_image.image.clone(),
                auth_file: None,
            });
        }
    }
    // Convert to a name set and prune everything outside it.
    let image_names = HashSet::from_iter(all_bound_images.iter().map(|img| img.image.as_str()));
    let pruned = sysroot
        .get_ensure_imgstore()?
        .prune_except_roots(&image_names)
        .await?;
    tracing::debug!("Pruned images: {}", pruned.len());
    Ok(())
}
385
386fn check_disk_space_inner(
389 fd: impl AsFd,
390 bytes_to_fetch: u64,
391 min_free: u64,
392 imgref: &ImageReference,
393) -> Result<()> {
394 let stat = rustix::fs::fstatvfs(fd)?;
395 let bytes_avail = stat.f_bsize.checked_mul(stat.f_bavail).unwrap_or(u64::MAX);
396 let usable = bytes_avail.saturating_sub(min_free);
397 tracing::trace!("bytes_avail: {bytes_avail} min_free: {min_free} usable: {usable}");
398
399 if bytes_to_fetch > usable {
400 anyhow::bail!(
401 "Insufficient free space for {image} (available: {available} required: {required})",
402 available = ostree_ext::glib::format_size(usable),
403 required = ostree_ext::glib::format_size(bytes_to_fetch),
404 image = imgref.image,
405 );
406 }
407 Ok(())
408}
409
/// Check that the ostree repo filesystem can hold the pending fetch,
/// honoring the repo's configured min-free-space reserve.
pub(crate) fn check_disk_space_ostree(
    repo: &ostree::Repo,
    image_meta: &PreparedImportMeta,
    imgref: &ImageReference,
) -> Result<()> {
    let min_free = repo.min_free_space_bytes().unwrap_or(0);
    check_disk_space_inner(
        repo.dfd_borrow(),
        image_meta.bytes_to_fetch,
        min_free,
        imgref,
    )
}
425
/// Check that the composefs objects directory can hold the pending fetch
/// (no extra free-space reserve is applied on this path).
pub(crate) fn check_disk_space_unified(
    cfs: &crate::store::ComposefsRepository,
    image_meta: &PreparedImportMeta,
    imgref: &ImageReference,
) -> Result<()> {
    check_disk_space_inner(cfs.objects_dir()?, image_meta.bytes_to_fetch, 0, imgref)
}
435
436pub(crate) fn check_disk_space_composefs(
439 cfs: &crate::store::ComposefsRepository,
440 manifest: &ostree_ext::oci_spec::image::ImageManifest,
441 imgref: &ImageReference,
442) -> Result<()> {
443 let bytes_to_fetch: u64 = manifest
444 .layers()
445 .iter()
446 .map(|l: &ostree_ext::oci_spec::image::Descriptor| l.size())
447 .sum();
448 check_disk_space_inner(cfs.objects_dir()?, bytes_to_fetch, 0, imgref)
449}
450
/// Everything needed to execute a prepared (but not yet downloaded) import.
pub(crate) struct PreparedImportMeta {
    /// The importer, already configured and prepared.
    pub imp: ImageImporter,
    /// Prepared-import details returned by `ImageImporter::prepare`.
    pub prep: Box<PreparedImport>,
    /// Manifest digest of the target image.
    pub digest: Digest,
    /// Number of layers still to be fetched.
    pub n_layers_to_fetch: usize,
    /// Total layer count in the image.
    pub layers_total: usize,
    /// Bytes still to be fetched.
    pub bytes_to_fetch: u64,
    /// Total bytes across all layers.
    pub bytes_total: u64,
}
460
/// Outcome of preparing a pull: either work remains, or nothing changed.
pub(crate) enum PreparedPullResult {
    /// Layers remain to fetch; carries the prepared import metadata.
    Ready(Box<PreparedImportMeta>),
    /// The image is already fully present at this digest.
    AlreadyPresent(Box<ImageState>),
}
465
/// Resolve and prepare an image pull into the ostree repo.
///
/// Canonicalizes `imgref`, creates an importer (optionally retargeted to
/// `target_imgref`), and asks it to prepare the fetch. Returns either the
/// already-present image state (no changes) or the metadata needed to
/// download the remaining layers.
pub(crate) async fn prepare_for_pull(
    repo: &ostree::Repo,
    imgref: &ImageReference,
    target_imgref: Option<&OstreeImageReference>,
    booted_deployment: Option<&ostree::Deployment>,
) -> Result<PreparedPullResult> {
    let imgref_canonicalized = imgref.clone().canonicalize()?;
    tracing::debug!("Canonicalized image reference: {imgref_canonicalized:#}");
    let ostree_imgref = &OstreeImageReference::from(imgref_canonicalized);
    let mut imp = new_importer(repo, ostree_imgref, booted_deployment).await?;
    if let Some(target) = target_imgref {
        imp.set_target(target);
    }
    let prep = match imp.prepare().await? {
        PrepareResult::AlreadyPresent(c) => {
            // Everything already imported; nothing to fetch.
            println!("No changes in {imgref:#} => {}", c.manifest_digest);
            return Ok(PreparedPullResult::AlreadyPresent(Box::new((*c).into())));
        }
        PrepareResult::Ready(p) => p,
    };
    // Warn about missing/unknown bootc compatibility labels.
    check_bootc_label(&prep.config);
    if let Some(warning) = prep.deprecated_warning() {
        ostree_ext::cli::print_deprecated_warning(warning).await;
    }
    ostree_ext::cli::print_layer_status(&prep);
    let layers_to_fetch = prep.layers_to_fetch().collect::<Result<Vec<_>>>()?;

    // Precompute layer/byte totals for progress reporting.
    let prepared_image = PreparedImportMeta {
        imp,
        n_layers_to_fetch: layers_to_fetch.len(),
        layers_total: prep.all_layers().count(),
        bytes_to_fetch: layers_to_fetch.iter().map(|(l, _)| l.layer.size()).sum(),
        bytes_total: prep.all_layers().map(|l| l.layer.size()).sum(),
        digest: prep.manifest_digest.clone(),
        prep,
    };

    Ok(PreparedPullResult::Ready(Box::new(prepared_image)))
}
505
506pub(crate) async fn image_exists_in_unified_storage(
514 store: &Storage,
515 imgref: &ImageReference,
516) -> Result<bool> {
517 let imgstore = store.get_ensure_imgstore()?;
518 let image_ref_str = imgref.to_transport_image()?;
519 imgstore.exists(&image_ref_str).await
520}
521
/// Unified-storage variant of [`prepare_for_pull`].
///
/// Stage 1 pulls the image into the bootc containers-storage store; stage 2
/// prepares an ostree import reading from that local store via a skopeo
/// command configured with the store as an additional image store.
pub(crate) async fn prepare_for_pull_unified(
    repo: &ostree::Repo,
    imgref: &ImageReference,
    target_imgref: Option<&OstreeImageReference>,
    store: &Storage,
    booted_deployment: Option<&ostree::Deployment>,
) -> Result<PreparedPullResult> {
    let imgstore = store.get_ensure_imgstore()?;

    let image_ref_str = imgref.to_transport_image()?;

    tracing::info!(
        "Unified pull: pulling from transport '{}' to bootc storage",
        &imgref.transport
    );

    // Stage 1: fetch into the bootc container storage.
    let pull_msg = format!("Pulling {} to bootc storage", &image_ref_str);
    async_task_with_spinner(&pull_msg, async move {
        imgstore
            .pull(&image_ref_str, crate::podstorage::PullMode::Always)
            .await
    })
    .await?;

    // Stage 2: import into ostree via the containers-storage transport.
    tracing::info!("Unified pull: now importing from containers-storage transport");
    let containers_storage_imgref = ImageReference {
        transport: "containers-storage".to_string(),
        image: imgref.image.clone(),
        signature: imgref.signature.clone(),
    };
    let ostree_imgref = OstreeImageReference::from(containers_storage_imgref);

    // Point skopeo at the bootc storage as an additional image store.
    let mut config = new_proxy_config();
    let mut cmd = Command::new("skopeo");
    let storage_path = format!(
        "{}/{}",
        store.physical_root_path,
        crate::podstorage::CStorage::subpath()
    );
    crate::podstorage::set_additional_image_store(&mut cmd, &storage_path);
    config.skopeo_cmd = Some(cmd);

    let mut imp = new_importer_with_config(repo, &ostree_imgref, config, booted_deployment).await?;
    if let Some(target) = target_imgref {
        imp.set_target(target);
    }
    let prep = match imp.prepare().await? {
        PrepareResult::AlreadyPresent(c) => {
            println!("No changes in {imgref:#} => {}", c.manifest_digest);
            return Ok(PreparedPullResult::AlreadyPresent(Box::new((*c).into())));
        }
        PrepareResult::Ready(p) => p,
    };
    check_bootc_label(&prep.config);
    if let Some(warning) = prep.deprecated_warning() {
        ostree_ext::cli::print_deprecated_warning(warning).await;
    }
    ostree_ext::cli::print_layer_status(&prep);
    let layers_to_fetch = prep.layers_to_fetch().collect::<Result<Vec<_>>>()?;

    // Journal message id shared with the non-unified `pull` path.
    const PULLING_NEW_IMAGE_ID: &str = "6d5e4f3a2b1c0d9e8f7a6b5c4d3e2f1a0";
    tracing::info!(
        message_id = PULLING_NEW_IMAGE_ID,
        bootc.image.reference = &imgref.image,
        bootc.image.transport = "containers-storage",
        bootc.original_transport = &imgref.transport,
        bootc.status = "importing_from_storage",
        "Importing image from bootc storage: {}",
        ostree_imgref
    );

    // Precompute layer/byte totals for progress reporting.
    let prepared_image = PreparedImportMeta {
        imp,
        n_layers_to_fetch: layers_to_fetch.len(),
        layers_total: prep.all_layers().count(),
        bytes_to_fetch: layers_to_fetch.iter().map(|(l, _)| l.layer.size()).sum(),
        bytes_total: prep.all_layers().map(|l| l.layer.size()).sum(),
        digest: prep.manifest_digest.clone(),
        prep,
    };

    Ok(PreparedPullResult::Ready(Box::new(prepared_image)))
}
617
/// Pull `imgref` via the unified-storage flow and import it, returning the
/// resulting image state. Disk space is checked against the composefs
/// objects directory before the download proceeds.
pub(crate) async fn pull_unified(
    repo: &ostree::Repo,
    imgref: &ImageReference,
    target_imgref: Option<&OstreeImageReference>,
    quiet: bool,
    prog: ProgressWriter,
    store: &Storage,
    booted_deployment: Option<&ostree::Deployment>,
) -> Result<Box<ImageState>> {
    match prepare_for_pull_unified(repo, imgref, target_imgref, store, booted_deployment).await? {
        PreparedPullResult::AlreadyPresent(existing) => {
            // Journal message id shared with the non-unified `pull` path.
            const IMAGE_ALREADY_PRESENT_ID: &str = "5c4d3e2f1a0b9c8d7e6f5a4b3c2d1e0f9";
            tracing::debug!(
                message_id = IMAGE_ALREADY_PRESENT_ID,
                bootc.image.reference = &imgref.image,
                bootc.image.transport = &imgref.transport,
                bootc.status = "already_present",
                "Image already present: {}",
                imgref
            );
            Ok(existing)
        }
        PreparedPullResult::Ready(prepared_image_meta) => {
            check_disk_space_unified(
                store.get_ensure_composefs()?.as_ref(),
                &prepared_image_meta,
                imgref,
            )?;
            // The import reads from local containers-storage, so report that
            // transport rather than the original one.
            let cs_imgref = ImageReference {
                transport: "containers-storage".to_string(),
                image: imgref.image.clone(),
                signature: imgref.signature.clone(),
            };
            pull_from_prepared(&cs_imgref, quiet, prog, *prepared_image_meta).await
        }
    }
}
658
/// Execute a previously prepared import: spawn the progress-rendering task,
/// run the layer import, emit the final "importing" completion event, and
/// surface any filtered-content warnings.
#[context("Pulling")]
pub(crate) async fn pull_from_prepared(
    imgref: &ImageReference,
    quiet: bool,
    prog: ProgressWriter,
    mut prepared_image: PreparedImportMeta,
) -> Result<Box<ImageState>> {
    let layer_progress = prepared_image.imp.request_progress();
    let layer_byte_progress = prepared_image.imp.request_layer_progress();
    let digest = prepared_image.digest.clone();
    let digest_imp = prepared_image.digest.clone();

    // Render progress on a separate task while the import runs below.
    let printer = tokio::task::spawn(async move {
        handle_layer_progress_print(LayerProgressConfig {
            layers: layer_progress,
            layer_bytes: layer_byte_progress,
            digest: digest.as_ref().into(),
            n_layers_to_fetch: prepared_image.n_layers_to_fetch,
            layers_total: prepared_image.layers_total,
            bytes_to_download: prepared_image.bytes_to_fetch,
            bytes_total: prepared_image.bytes_total,
            prog,
            quiet,
        })
        .await
    });
    let import = prepared_image.imp.import(prepared_image.prep).await;
    // Join the printer first to reclaim the ProgressWriter before reporting.
    let prog = printer.await?;
    // Mark the single import step as completed.
    prog.send(Event::ProgressSteps {
        task: "importing".into(),
        description: "Importing Image".into(),
        id: digest_imp.clone().as_ref().into(),
        steps_cached: 0,
        steps: 1,
        steps_total: 1,
        subtasks: [SubTaskStep {
            subtask: "importing".into(),
            description: "Importing Image".into(),
            id: "importing".into(),
            completed: true,
        }]
        .into(),
    })
    .await;
    let import = import?;
    let imgref_canonicalized = imgref.clone().canonicalize()?;
    tracing::debug!("Canonicalized image reference: {imgref_canonicalized:#}");

    // The unified path logs its own import message upstream; avoid duplicates.
    let is_unified_path = imgref.transport == "containers-storage";
    if !is_unified_path {
        const IMPORT_COMPLETE_JOURNAL_ID: &str = "4d3e2f1a0b9c8d7e6f5a4b3c2d1e0f9a8";

        tracing::info!(
            message_id = IMPORT_COMPLETE_JOURNAL_ID,
            bootc.image.reference = &imgref.image,
            bootc.image.transport = &imgref.transport,
            bootc.manifest_digest = import.manifest_digest.as_ref(),
            bootc.ostree_commit = &import.merge_commit,
            "Successfully imported image: {}",
            imgref
        );
    }

    if let Some(msg) =
        ostree_container::store::image_filtered_content_warning(&import.filtered_files)
            .context("Image content warning")?
    {
        tracing::info!("{}", msg);
    }
    Ok(Box::new((*import).into()))
}
732
/// Pull `imgref` directly into the ostree repo (the non-unified path),
/// checking repo disk space first, and return the resulting image state.
pub(crate) async fn pull(
    repo: &ostree::Repo,
    imgref: &ImageReference,
    target_imgref: Option<&OstreeImageReference>,
    quiet: bool,
    prog: ProgressWriter,
    booted_deployment: Option<&ostree::Deployment>,
) -> Result<Box<ImageState>> {
    match prepare_for_pull(repo, imgref, target_imgref, booted_deployment).await? {
        PreparedPullResult::AlreadyPresent(existing) => {
            // Journal message id shared with `pull_unified`.
            const IMAGE_ALREADY_PRESENT_ID: &str = "5c4d3e2f1a0b9c8d7e6f5a4b3c2d1e0f9";
            tracing::debug!(
                message_id = IMAGE_ALREADY_PRESENT_ID,
                bootc.image.reference = &imgref.image,
                bootc.image.transport = &imgref.transport,
                bootc.status = "already_present",
                "Image already present: {}",
                imgref
            );
            Ok(existing)
        }
        PreparedPullResult::Ready(prepared_image_meta) => {
            check_disk_space_ostree(repo, &prepared_image_meta, imgref)?;
            // Journal message id shared with `prepare_for_pull_unified`.
            const PULLING_NEW_IMAGE_ID: &str = "6d5e4f3a2b1c0d9e8f7a6b5c4d3e2f1a0";
            tracing::info!(
                message_id = PULLING_NEW_IMAGE_ID,
                bootc.image.reference = &imgref.image,
                bootc.image.transport = &imgref.transport,
                bootc.status = "pulling_new",
                "Pulling new image: {}",
                imgref
            );
            Ok(pull_from_prepared(imgref, quiet, prog, *prepared_image_meta).await?)
        }
    }
}
773
774pub(crate) async fn wipe_ostree(sysroot: Sysroot) -> Result<()> {
775 tokio::task::spawn_blocking(move || {
776 sysroot
777 .write_deployments(&[], gio::Cancellable::NONE)
778 .context("removing deployments")
779 })
780 .await??;
781
782 Ok(())
783}
784
/// Garbage-collect old state: prunes the container (bound-image) store in
/// parallel with a repo-side prune that, inside a transaction, rewrites the
/// per-deployment base-image refs and then prunes unreferenced image layers.
pub(crate) async fn cleanup(sysroot: &Storage) -> Result<()> {
    const CLEANUP_JOURNAL_ID: &str = "2f1a0b9c8d7e6f5a4b3c2d1e0f9a8b7c6";

    tracing::info!(
        message_id = CLEANUP_JOURNAL_ID,
        "Starting cleanup of old images and deployments"
    );

    // Runs concurrently with the repo prune below (joined at the end).
    let bound_prune = prune_container_store(sysroot);

    let ostree = sysroot.get_ostree_cloned()?;
    let repo = ostree.repo();
    let repo_prune =
        ostree_ext::tokio_util::spawn_blocking_cancellable_flatten(move |cancellable| {
            let locked_sysroot = &SysrootLock::from_assumed_locked(&ostree);
            let cancellable = Some(cancellable);
            let repo = &repo;
            let txn = repo.auto_transaction(cancellable)?;
            let repo = txn.repo();

            // Drop all existing base-image refs; they are re-created just
            // below for the deployments that still exist.
            for ref_entry in repo
                .list_refs_ext(
                    Some(BASE_IMAGE_PREFIX),
                    ostree::RepoListRefsExtFlags::NONE,
                    cancellable,
                )
                .context("Listing refs")?
                .keys()
            {
                repo.transaction_set_refspec(ref_entry, None);
            }

            // Pin the base commit of every remaining deployment so the prune
            // below does not collect it.
            for (i, deployment) in ostree.deployments().into_iter().enumerate() {
                let commit = deployment.csum();
                if let Some(base) = get_base_commit(repo, &commit)? {
                    repo.transaction_set_refspec(&format!("{BASE_IMAGE_PREFIX}/{i}"), Some(&base));
                }
            }

            let pruned =
                ostree_container::deploy::prune(locked_sysroot).context("Pruning images")?;
            if !pruned.is_empty() {
                let size = glib::format_size(pruned.objsize);
                println!(
                    "Pruned images: {} (layers: {}, objsize: {})",
                    pruned.n_images, pruned.n_layers, size
                );
            } else {
                tracing::debug!("Nothing to prune");
            }

            Ok(())
        });

    tokio::try_join!(repo_prune, bound_prune)?;
    Ok(())
}
848
849#[context("Finding base commit")]
851pub(crate) fn get_base_commit(repo: &ostree::Repo, commit: &str) -> Result<Option<String>> {
852 let commitv = repo.load_commit(commit)?.0;
853 let commitmeta = commitv.child_value(0);
854 let commitmeta = &glib::VariantDict::new(Some(&commitmeta));
855 let r = commitmeta.lookup::<String>(BOOTC_DERIVED_KEY)?;
856 Ok(r)
857}
858
/// Stage a new deployment of `image` into the sysroot.
///
/// Derives the stateroot and kernel-argument overrides from `from` (either a
/// merge deployment or a reset request), then runs the blocking ostree
/// staging call on a cancellable worker thread. Returns the staged deployment.
#[context("Writing deployment")]
async fn deploy(
    sysroot: &Storage,
    from: MergeState,
    image: &ImageState,
    origin: &glib::KeyFile,
    lock_finalization: bool,
) -> Result<Deployment> {
    let (stateroot, override_kargs) = match &from {
        MergeState::MergeDeployment(deployment) => {
            let kargs = crate::bootc_kargs::get_kargs(sysroot, &deployment, image)?;
            (deployment.stateroot().into(), Some(kargs))
        }
        MergeState::Reset { stateroot, kargs } => (stateroot.clone(), Some(kargs.clone())),
    };
    let ostree = sysroot.get_ostree_cloned()?;
    // Only the deployment's index crosses the thread boundary; it is
    // re-resolved against `ostree.deployments()` inside the blocking task
    // (presumably because the Deployment object cannot be sent — verify).
    let merge_deployment = from.as_merge_deployment();
    let merge_deployment = merge_deployment.map(|d| d.index() as usize);
    let ostree_commit = image.ostree_commit.to_string();
    // Serialize the origin keyfile so it can be rebuilt on the worker thread.
    let origin_data = origin.to_data();
    let r = async_task_with_spinner(
        "Deploying",
        spawn_blocking_cancellable_flatten(move |cancellable| -> Result<_> {
            let ostree = ostree;
            let stateroot = Some(stateroot);
            let mut opts = ostree::SysrootDeployTreeOpts::default();

            opts.locked = lock_finalization;

            // Borrow the kargs as a Vec of &str for the options struct; the
            // owned `override_kargs` must outlive this borrow.
            let override_kargs_refs = override_kargs
                .as_ref()
                .map(|kargs| kargs.iter_str().collect::<Vec<_>>());
            if let Some(kargs) = override_kargs_refs.as_ref() {
                opts.override_kernel_argv = Some(kargs);
            }

            let deployments = ostree.deployments();
            let merge_deployment = merge_deployment.map(|m| &deployments[m]);
            let origin = glib::KeyFile::new();
            origin.load_from_data(&origin_data, glib::KeyFileFlags::NONE)?;
            let d = ostree.stage_tree_with_options(
                stateroot.as_deref(),
                &ostree_commit,
                Some(&origin),
                merge_deployment,
                &opts,
                Some(cancellable),
            )?;
            Ok(d.index())
        }),
    )
    .await?;
    let ostree = sysroot.get_ostree()?;
    // The freshly staged deployment must match the index returned above.
    let staged = ostree.staged_deployment().unwrap();
    assert_eq!(staged.index(), r);
    Ok(staged)
}
927
928#[context("Generating origin")]
929fn origin_from_imageref(imgref: &ImageReference) -> Result<glib::KeyFile> {
930 let origin = glib::KeyFile::new();
931 let imgref = OstreeImageReference::from(imgref.clone());
932 origin.set_string(
933 "origin",
934 ostree_container::deploy::ORIGIN_CONTAINER,
935 imgref.to_string().as_str(),
936 );
937 Ok(origin)
938}
939
/// Source state for a new deployment.
#[derive(Debug)]
pub(crate) enum MergeState {
    /// Merge with an existing deployment (stateroot and kargs derive from it).
    MergeDeployment(Deployment),
    /// Start fresh with an explicit stateroot and kernel arguments.
    Reset {
        /// Target stateroot name.
        stateroot: String,
        /// Kernel arguments to apply.
        kargs: CmdlineOwned,
    },
}
952impl MergeState {
953 pub(crate) fn from_stateroot(sysroot: &Storage, stateroot: &str) -> Result<Self> {
955 let ostree = sysroot.get_ostree()?;
956 let merge_deployment = ostree.merge_deployment(Some(stateroot)).ok_or_else(|| {
957 anyhow::anyhow!("No merge deployment found for stateroot {stateroot}")
958 })?;
959 Ok(Self::MergeDeployment(merge_deployment))
960 }
961
962 pub(crate) fn as_merge_deployment(&self) -> Option<&Deployment> {
964 match self {
965 Self::MergeDeployment(d) => Some(d),
966 Self::Reset { .. } => None,
967 }
968 }
969}
970
/// Stage `image` as the next boot target, emitting step-by-step JSONL
/// progress events through four phases: merge, deploy, bound images, cleanup.
/// On success, prints a summary and writes /run/reboot-required.
#[context("Staging")]
pub(crate) async fn stage(
    sysroot: &Storage,
    from: MergeState,
    image: &ImageState,
    spec: &RequiredHostSpec<'_>,
    prog: ProgressWriter,
    lock_finalization: bool,
) -> Result<()> {
    const STAGE_JOURNAL_ID: &str = "8f7a2b1c3d4e5f6a7b8c9d0e1f2a3b4c";

    tracing::info!(
        message_id = STAGE_JOURNAL_ID,
        bootc.image.reference = &spec.image.image,
        bootc.image.transport = &spec.image.transport,
        bootc.manifest_digest = image.manifest_digest.as_ref(),
        "Staging image for deployment: {} (digest: {})",
        spec.image,
        image.manifest_digest
    );

    // `subtask` is the in-flight phase; completed phases accumulate in
    // `subtasks`, and each event reports both.
    let mut subtask = SubTaskStep {
        subtask: "merging".into(),
        description: "Merging Image".into(),
        id: "fetching".into(),
        completed: false,
    };
    let mut subtasks = vec![];
    prog.send(Event::ProgressSteps {
        task: "staging".into(),
        description: "Deploying Image".into(),
        id: image.manifest_digest.clone().as_ref().into(),
        steps_cached: 0,
        steps: 0,
        steps_total: 3,
        subtasks: subtasks
            .clone()
            .into_iter()
            .chain([subtask.clone()])
            .collect(),
    })
    .await;

    // Phase 2: deploy.
    subtask.completed = true;
    subtasks.push(subtask.clone());
    subtask.subtask = "deploying".into();
    subtask.id = "deploying".into();
    subtask.description = "Deploying Image".into();
    subtask.completed = false;
    prog.send(Event::ProgressSteps {
        task: "staging".into(),
        description: "Deploying Image".into(),
        id: image.manifest_digest.clone().as_ref().into(),
        steps_cached: 0,
        steps: 1,
        steps_total: 3,
        subtasks: subtasks
            .clone()
            .into_iter()
            .chain([subtask.clone()])
            .collect(),
    })
    .await;
    let origin = origin_from_imageref(spec.image)?;
    let deployment =
        crate::deploy::deploy(sysroot, from, image, &origin, lock_finalization).await?;

    // Phase 3: pull bound images.
    subtask.completed = true;
    subtasks.push(subtask.clone());
    subtask.subtask = "bound_images".into();
    subtask.id = "bound_images".into();
    subtask.description = "Pulling Bound Images".into();
    subtask.completed = false;
    prog.send(Event::ProgressSteps {
        task: "staging".into(),
        description: "Deploying Image".into(),
        id: image.manifest_digest.clone().as_ref().into(),
        steps_cached: 0,
        steps: 1,
        steps_total: 3,
        subtasks: subtasks
            .clone()
            .into_iter()
            .chain([subtask.clone()])
            .collect(),
    })
    .await;
    crate::boundimage::pull_bound_images(sysroot, &deployment).await?;

    // Phase 4: clean up old images/deployments.
    subtask.completed = true;
    subtasks.push(subtask.clone());
    subtask.subtask = "cleanup".into();
    subtask.id = "cleanup".into();
    subtask.description = "Removing old images".into();
    subtask.completed = false;
    prog.send(Event::ProgressSteps {
        task: "staging".into(),
        description: "Deploying Image".into(),
        id: image.manifest_digest.clone().as_ref().into(),
        steps_cached: 0,
        steps: 2,
        steps_total: 3,
        subtasks: subtasks
            .clone()
            .into_iter()
            .chain([subtask.clone()])
            .collect(),
    })
    .await;
    crate::deploy::cleanup(sysroot).await?;
    println!("Queued for next boot: {:#}", spec.image);
    if let Some(version) = image.version.as_deref() {
        println!(" Version: {version}");
    }
    println!(" Digest: {}", image.manifest_digest);

    // Final event: all 3 steps done.
    subtask.completed = true;
    subtasks.push(subtask.clone());
    prog.send(Event::ProgressSteps {
        task: "staging".into(),
        description: "Deploying Image".into(),
        id: image.manifest_digest.clone().as_ref().into(),
        steps_cached: 0,
        steps: 3,
        steps_total: 3,
        subtasks: subtasks
            .clone()
            .into_iter()
            .chain([subtask.clone()])
            .collect(),
    })
    .await;

    write_reboot_required(&image.manifest_digest.as_ref())?;

    Ok(())
}
1112
1113fn write_reboot_required(image: &str) -> Result<()> {
1115 let reboot_message = format!("bootc: Reboot required for image: {}", image);
1116 let run_dir = Dir::open_ambient_dir("/run", cap_std::ambient_authority())?;
1117 run_dir
1118 .atomic_write("reboot-required", reboot_message.as_bytes())
1119 .context("Creating /run/reboot-required")?;
1120
1121 Ok(())
1122}
1123
/// Implementation of `bootc rollback`: make the rollback deployment the
/// default for the next boot by swapping the host's boot order and
/// rewriting the sysroot deployment list.
///
/// If a rollback was already queued (i.e. the swapped boot order lands back
/// on `Default`), this reverts that queued state instead. Errors if there is
/// no rollback deployment, or if it is not container-image based.
pub(crate) async fn rollback(sysroot: &Storage) -> Result<()> {
    // Stable journal MESSAGE_ID (128-bit, 32 hex digits) used to tag
    // rollback events so they can be queried in the systemd journal.
    const ROLLBACK_JOURNAL_ID: &str = "26f3b1eb24464d12aa5e7b544a6b5468";
    let ostree = sysroot.get_ostree()?;
    let (booted_ostree, deployments, host) = crate::status::get_status_require_booted(ostree)?;

    // The new spec is the current one with the boot order flipped.
    let new_spec = {
        let mut new_spec = host.spec.clone();
        new_spec.boot_order = new_spec.boot_order.swap();
        new_spec
    };

    let repo = &booted_ostree.repo();

    host.spec.verify_transition(&new_spec)?;

    // If flipping lands us back on Default, we're undoing a previously
    // queued rollback rather than initiating a new one.
    let reverting = new_spec.boot_order == BootOrder::Default;
    if reverting {
        println!("notice: Reverting queued rollback state");
    }
    let rollback_status = host
        .status
        .rollback
        .ok_or_else(|| anyhow!("No rollback available"))?;
    let rollback_image = rollback_status
        .query_image(repo)?
        .ok_or_else(|| anyhow!("Rollback is not container image based"))?;

    // Best-effort lookup of the currently booted image; errors and
    // non-container deployments both collapse to None here.
    let current_image = host
        .status
        .booted
        .as_ref()
        .and_then(|b| b.query_image(repo).ok()?);

    tracing::info!(
        message_id = ROLLBACK_JOURNAL_ID,
        bootc.manifest_digest = rollback_image.manifest_digest.as_ref(),
        bootc.ostree_commit = &rollback_image.merge_commit,
        bootc.rollback_type = if reverting { "revert" } else { "rollback" },
        bootc.current_manifest_digest = current_image
            .as_ref()
            .map(|i| i.manifest_digest.as_ref())
            .unwrap_or("none"),
        "Rolling back to image: {}",
        rollback_image.manifest_digest
    );
    // A rollback status was found above, so a rollback deployment is
    // presumably guaranteed to exist here — invariant of get_status.
    let rollback_deployment = deployments.rollback.expect("rollback deployment");
    // Deployment list order determines boot order: first entry boots next.
    // Reverting puts the booted deployment back in front; rolling back
    // promotes the rollback deployment.
    let new_deployments = if reverting {
        [booted_ostree.deployment, rollback_deployment]
    } else {
        [rollback_deployment, booted_ostree.deployment]
    };
    let new_deployments = new_deployments
        .into_iter()
        .chain(deployments.other)
        .collect::<Vec<_>>();
    tracing::debug!("Writing new deployments: {new_deployments:?}");
    booted_ostree
        .sysroot
        .write_deployments(&new_deployments, gio::Cancellable::NONE)?;
    if reverting {
        println!("Next boot: current deployment");
    } else {
        println!("Next boot: rollback deployment");
    }

    write_reboot_required(rollback_image.manifest_digest.as_ref())?;

    // Bump the sysroot mtime so external watchers notice the change.
    sysroot.update_mtime()?;

    Ok(())
}
1199
1200fn find_newest_deployment_name(deploysdir: &Dir) -> Result<String> {
1201 let mut dirs = Vec::new();
1202 for ent in deploysdir.entries()? {
1203 let ent = ent?;
1204 if !ent.file_type()?.is_dir() {
1205 continue;
1206 }
1207 let name = ent.file_name();
1208 let Some(name) = name.to_str() else {
1209 continue;
1210 };
1211 dirs.push((name.to_owned(), ent.metadata()?.mtime()));
1212 }
1213 dirs.sort_unstable_by(|a, b| a.1.cmp(&b.1));
1214 if let Some((name, _ts)) = dirs.pop() {
1215 Ok(name)
1216 } else {
1217 anyhow::bail!("No deployment directory found")
1218 }
1219}
1220
1221pub(crate) fn switch_origin_inplace(root: &Dir, imgref: &ImageReference) -> Result<String> {
1223 const SWITCH_INPLACE_JOURNAL_ID: &str = "3e2f1a0b9c8d7e6f5a4b3c2d1e0f9a8b7";
1225
1226 tracing::info!(
1227 message_id = SWITCH_INPLACE_JOURNAL_ID,
1228 bootc.image.reference = &imgref.image,
1229 bootc.image.transport = &imgref.transport,
1230 bootc.switch_type = "in_place",
1231 "Performing in-place switch to image: {}",
1232 imgref
1233 );
1234
1235 let origin = origin_from_imageref(imgref)?;
1237 let serialized_origin = origin.to_data();
1238
1239 let mut ostree_deploys = root.open_dir("sysroot/ostree/deploy")?.entries()?;
1248 let deploydir = loop {
1249 if let Some(ent) = ostree_deploys.next() {
1250 let ent = ent?;
1251 if !ent.file_type()?.is_dir() {
1252 continue;
1253 }
1254 tracing::debug!("Checking {:?}", ent.file_name());
1255 let child_dir = ent
1256 .open_dir()
1257 .with_context(|| format!("Opening dir {:?}", ent.file_name()))?;
1258 if let Some(d) = child_dir.open_dir_optional("deploy")? {
1259 break d;
1260 }
1261 } else {
1262 anyhow::bail!("Failed to find a deployment");
1263 }
1264 };
1265 let newest_deployment = find_newest_deployment_name(&deploydir)?;
1266 let origin_path = format!("{newest_deployment}.origin");
1267 if !deploydir.try_exists(&origin_path)? {
1268 tracing::warn!("No extant origin for {newest_deployment}");
1269 }
1270 deploydir
1271 .atomic_write(&origin_path, serialized_origin.as_bytes())
1272 .context("Writing origin")?;
1273 Ok(newest_deployment)
1274}
1275
1276#[context("Updating /etc/fstab for anaconda+composefs")]
1279pub(crate) fn fixup_etc_fstab(root: &Dir) -> Result<()> {
1280 let fstab_path = "etc/fstab";
1281 let fd = root
1283 .open(fstab_path)
1284 .with_context(|| format!("Opening {fstab_path}"))
1285 .map(std::io::BufReader::new)?;
1286
1287 fn edit_fstab_line(line: &str, mut w: impl Write) -> Result<bool> {
1291 if line.starts_with('#') {
1292 return Ok(false);
1293 }
1294 let parts = line.split_ascii_whitespace().collect::<Vec<_>>();
1295
1296 let path_idx = 1;
1297 let options_idx = 3;
1298 let (&path, &options) = match (parts.get(path_idx), parts.get(options_idx)) {
1299 (None, _) => {
1300 tracing::debug!("No path in entry: {line}");
1301 return Ok(false);
1302 }
1303 (_, None) => {
1304 tracing::debug!("No options in entry: {line}");
1305 return Ok(false);
1306 }
1307 (Some(p), Some(o)) => (p, o),
1308 };
1309 if path != "/" {
1311 return Ok(false);
1312 }
1313 if options.split(',').any(|s| s == "ro") {
1315 return Ok(false);
1316 }
1317
1318 writeln!(w, "# {}", crate::generator::BOOTC_EDITED_STAMP)?;
1319
1320 assert!(!options.is_empty()); let options = format!("{options},ro");
1324 for (i, part) in parts.into_iter().enumerate() {
1325 if i > 0 {
1327 write!(w, " ")?;
1328 }
1329 if i == options_idx {
1330 write!(w, "{options}")?;
1331 } else {
1332 write!(w, "{part}")?
1333 }
1334 }
1335 writeln!(w)?;
1337 Ok(true)
1338 }
1339
1340 root.atomic_replace_with(fstab_path, move |mut w| -> Result<()> {
1342 for line in fd.lines() {
1343 let line = line?;
1344 if !edit_fstab_line(&line, &mut w)? {
1345 writeln!(w, "{line}")?;
1346 }
1347 }
1348 Ok(())
1349 })
1350 .context("Replacing /etc/fstab")?;
1351
1352 println!("Updated /etc/fstab to add `ro` for `/`");
1353 Ok(())
1354}
1355
#[cfg(test)]
mod tests {
    use super::*;

    // The containers-image proxy must identify itself with a
    // "bootc/<version>" user agent baked in at compile time.
    #[test]
    fn test_new_proxy_config_user_agent() {
        let config = new_proxy_config();
        let prefix = config
            .user_agent_prefix
            .expect("user_agent_prefix should be set");
        assert!(
            prefix.starts_with("bootc/"),
            "User agent should start with bootc/"
        );
        assert!(
            prefix.len() > "bootc/".len(),
            "Version should be present after bootc/"
        );
    }

    // Build a minimal fake sysroot layout with one deployment and verify
    // that switch_origin_inplace() rewrites its .origin file and reports
    // the deployment's name.
    #[test]
    fn test_switch_inplace() -> Result<()> {
        use cap_std::fs::DirBuilderExt;

        let td = cap_std_ext::cap_tempfile::TempDir::new(cap_std::ambient_authority())?;
        let mut builder = cap_std::fs::DirBuilder::new();
        let builder = builder.recursive(true).mode(0o755);
        let deploydir = "sysroot/ostree/deploy/default/deploy";
        let target_deployment =
            "af36eb0086bb55ac601600478c6168f834288013d60f8870b7851f44bf86c3c5.0";
        td.ensure_dir_with(
            format!("sysroot/ostree/deploy/default/deploy/{target_deployment}"),
            builder,
        )?;
        let deploydir = &td.open_dir(deploydir)?;
        let orig_imgref = ImageReference {
            image: "quay.io/exampleos/original:sometag".into(),
            transport: "registry".into(),
            signature: None,
        };
        // Seed an existing .origin file for the deployment.
        {
            let origin = origin_from_imageref(&orig_imgref)?;
            deploydir.atomic_write(
                format!("{target_deployment}.origin"),
                origin.to_data().as_bytes(),
            )?;
        }

        let target_imgref = ImageReference {
            image: "quay.io/someother/otherimage:latest".into(),
            transport: "registry".into(),
            signature: None,
        };

        let replaced = switch_origin_inplace(&td, &target_imgref).unwrap();
        assert_eq!(replaced, target_deployment);
        Ok(())
    }

    // A read-only /boot entry (and no entry for `/`) must pass through
    // fixup_etc_fstab() unmodified.
    #[test]
    fn test_fixup_etc_fstab_default() -> Result<()> {
        let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
        let default = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n";
        tempdir.create_dir_all("etc")?;
        tempdir.atomic_write("etc/fstab", default)?;
        fixup_etc_fstab(&tempdir).unwrap();
        assert_eq!(tempdir.read_to_string("etc/fstab")?, default);
        Ok(())
    }

    // Multiple non-root entries: nothing is the `/` mount, so the file
    // must be byte-identical after the fixup.
    #[test]
    fn test_fixup_etc_fstab_multi() -> Result<()> {
        let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
        let default = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n\
UUID=6907-17CA /boot/efi vfat umask=0077,shortname=winnt 0 2\n";
        tempdir.create_dir_all("etc")?;
        tempdir.atomic_write("etc/fstab", default)?;
        fixup_etc_fstab(&tempdir).unwrap();
        assert_eq!(tempdir.read_to_string("etc/fstab")?, default);
        Ok(())
    }

    // The `/` entry already carries `ro`: no rewrite should occur.
    #[test]
    fn test_fixup_etc_fstab_ro() -> Result<()> {
        let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
        let default = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n\
UUID=1eef9f42-40e3-4bd8-ae20-e9f2325f8b52 / xfs ro 0 0\n\
UUID=6907-17CA /boot/efi vfat umask=0077,shortname=winnt 0 2\n";
        tempdir.create_dir_all("etc")?;
        tempdir.atomic_write("etc/fstab", default)?;
        fixup_etc_fstab(&tempdir).unwrap();
        assert_eq!(tempdir.read_to_string("etc/fstab")?, default);
        Ok(())
    }

    // The `/` entry is read-write ("defaults"): the stamp comment must be
    // inserted above it and ",ro" appended to its options; other lines are
    // untouched.
    #[test]
    fn test_fixup_etc_fstab_rw() -> Result<()> {
        let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
        let default = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n\
UUID=1eef9f42-40e3-4bd8-ae20-e9f2325f8b52 / xfs defaults 0 0\n\
UUID=6907-17CA /boot/efi vfat umask=0077,shortname=winnt 0 2\n";
        let modified = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n\
# Updated by bootc-fstab-edit.service\n\
UUID=1eef9f42-40e3-4bd8-ae20-e9f2325f8b52 / xfs defaults,ro 0 0\n\
UUID=6907-17CA /boot/efi vfat umask=0077,shortname=winnt 0 2\n";
        tempdir.create_dir_all("etc")?;
        tempdir.atomic_write("etc/fstab", default)?;
        fixup_etc_fstab(&tempdir).unwrap();
        assert_eq!(tempdir.read_to_string("etc/fstab")?, modified);
        Ok(())
    }
    // Boundary checks for check_disk_space_inner (defined elsewhere in this
    // file): zero requirements succeed, while u64::MAX in either of the two
    // numeric positions must produce an error.
    #[test]
    fn test_check_disk_space_inner() -> Result<()> {
        let td = cap_std_ext::cap_tempfile::TempDir::new(cap_std::ambient_authority())?;
        let imgref = ImageReference {
            image: "quay.io/exampleos/exampleos:latest".into(),
            transport: "registry".into(),
            signature: None,
        };

        check_disk_space_inner(&*td, 0, 0, &imgref)?;

        assert!(check_disk_space_inner(&*td, u64::MAX, 0, &imgref).is_err());

        assert!(check_disk_space_inner(&*td, 1, u64::MAX, &imgref).is_err());

        Ok(())
    }
}