bootc_lib/cli.rs

//! # Bootable container image CLI
//!
//! Command line tool to manage bootable ostree-based containers.

5use std::ffi::{CString, OsStr, OsString};
6use std::fs::File;
7use std::io::{BufWriter, Seek};
8use std::os::unix::process::CommandExt;
9use std::process::Command;
10
11use anyhow::{Context, Result, anyhow, ensure};
12use camino::{Utf8Path, Utf8PathBuf};
13use cap_std_ext::cap_std;
14use cap_std_ext::cap_std::fs::Dir;
15use clap::CommandFactory;
16use clap::Parser;
17use clap::ValueEnum;
18use composefs::dumpfile;
19use composefs_boot::BootOps as _;
20use etc_merge::{compute_diff, print_diff};
21use fn_error_context::context;
22use indoc::indoc;
23use ostree::gio;
24use ostree_container::store::PrepareResult;
25use ostree_ext::composefs::fsverity;
26use ostree_ext::composefs::fsverity::FsVerityHashValue;
27use ostree_ext::composefs::splitstream::SplitStreamWriter;
28use ostree_ext::container as ostree_container;
29
30use ostree_ext::keyfileext::KeyFileExt;
31use ostree_ext::ostree;
32use ostree_ext::sysroot::SysrootLock;
33use schemars::schema_for;
34use serde::{Deserialize, Serialize};
35
36use crate::bootc_composefs::delete::delete_composefs_deployment;
37use crate::bootc_composefs::soft_reboot::{prepare_soft_reboot_composefs, reset_soft_reboot};
38use crate::bootc_composefs::{
39    digest::{compute_composefs_digest, new_temp_composefs_repo},
40    finalize::{composefs_backend_finalize, get_etc_diff},
41    rollback::composefs_rollback,
42    state::composefs_usr_overlay,
43    switch::switch_composefs,
44    update::upgrade_composefs,
45};
46use crate::deploy::{MergeState, RequiredHostSpec};
47use crate::podstorage::set_additional_image_store;
48use crate::progress_jsonl::{ProgressWriter, RawProgressFd};
49use crate::spec::Host;
50use crate::spec::ImageReference;
51use crate::status::get_host;
52use crate::store::{BootedOstree, Storage};
53use crate::store::{BootedStorage, BootedStorageKind};
54use crate::utils::sigpolicy_from_opt;
55use crate::{bootc_composefs, lints};
56
57/// Shared progress options
58#[derive(Debug, Parser, PartialEq, Eq)]
59pub(crate) struct ProgressOptions {
60    /// File descriptor number which must refer to an open pipe.
61    ///
62    /// Progress is written as JSON lines to this file descriptor.
63    #[clap(long, hide = true)]
64    pub(crate) progress_fd: Option<RawProgressFd>,
65}
66
67impl TryFrom<ProgressOptions> for ProgressWriter {
68    type Error = anyhow::Error;
69
70    fn try_from(value: ProgressOptions) -> Result<Self> {
71        let r = value
72            .progress_fd
73            .map(TryInto::try_into)
74            .transpose()?
75            .unwrap_or_default();
76        Ok(r)
77    }
78}
79
80/// Perform an upgrade operation
81#[derive(Debug, Parser, PartialEq, Eq)]
82pub(crate) struct UpgradeOpts {
83    /// Don't display progress
84    #[clap(long)]
85    pub(crate) quiet: bool,
86
87    /// Check if an update is available without applying it.
88    ///
89    /// This only downloads updated metadata, not the full image layers.
90    #[clap(long, conflicts_with = "apply")]
91    pub(crate) check: bool,
92
93    /// Restart or reboot into the new target image.
94    ///
95    /// Currently, this always reboots. Future versions may support userspace-only restart.
96    #[clap(long, conflicts_with = "check")]
97    pub(crate) apply: bool,
98
99    /// Configure soft reboot behavior.
100    ///
101    /// 'required' fails if soft reboot unavailable, 'auto' falls back to regular reboot.
102    #[clap(long = "soft-reboot", conflicts_with = "check")]
103    pub(crate) soft_reboot: Option<SoftRebootMode>,
104
105    /// Download and stage the update without applying it.
106    ///
107    /// Download the update and ensure it's retained on disk for the lifetime of this system boot,
108    /// but it will not be applied on reboot. If the system is rebooted without applying the update,
109    /// the image will be eligible for garbage collection again.
110    #[clap(long, conflicts_with_all = ["check", "apply"])]
111    pub(crate) download_only: bool,
112
113    /// Apply a staged deployment that was previously downloaded with --download-only.
114    ///
115    /// This unlocks the staged deployment without fetching updates from the container image source.
116    /// The deployment will be applied on the next shutdown or reboot. Use with --apply to
117    /// reboot immediately.
118    #[clap(long, conflicts_with_all = ["check", "download_only"])]
119    pub(crate) from_downloaded: bool,
120
121    #[clap(flatten)]
122    pub(crate) progress: ProgressOptions,
123}
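
// Illustrative invocations of the flags above (a sketch, assuming a booted bootc host):
//
//   bootc upgrade --check                     # fetch metadata only and report whether an update exists
//   bootc upgrade --download-only             # stage the update but keep it finalization-locked
//   bootc upgrade --from-downloaded --apply   # unlock a previously staged update and reboot into it
//   bootc upgrade --apply --soft-reboot=auto  # apply, soft-rebooting when the deployment supports it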
124
/// Perform a switch operation
126#[derive(Debug, Parser, PartialEq, Eq)]
127pub(crate) struct SwitchOpts {
128    /// Don't display progress
129    #[clap(long)]
130    pub(crate) quiet: bool,
131
132    /// Restart or reboot into the new target image.
133    ///
134    /// Currently, this always reboots. Future versions may support userspace-only restart.
135    #[clap(long)]
136    pub(crate) apply: bool,
137
138    /// Configure soft reboot behavior.
139    ///
140    /// 'required' fails if soft reboot unavailable, 'auto' falls back to regular reboot.
141    #[clap(long = "soft-reboot")]
142    pub(crate) soft_reboot: Option<SoftRebootMode>,
143
144    /// The transport; e.g. registry, oci, oci-archive, docker-daemon, containers-storage.  Defaults to `registry`.
145    #[clap(long, default_value = "registry")]
146    pub(crate) transport: String,
147
148    /// This argument is deprecated and does nothing.
149    #[clap(long, hide = true)]
150    pub(crate) no_signature_verification: bool,
151
152    /// This is the inverse of the previous `--target-no-signature-verification` (which is now
153    /// a no-op).
154    ///
155    /// Enabling this option enforces that `/etc/containers/policy.json` includes a
156    /// default policy which requires signatures.
157    #[clap(long)]
158    pub(crate) enforce_container_sigpolicy: bool,
159
    /// Don't create a new deployment, but directly mutate the booted state.
    /// This is hidden because it's not something we generally expect to be done,
    /// but it can be used in e.g. Anaconda %post to fix up the deployment origin in place.
163    #[clap(long, hide = true)]
164    pub(crate) mutate_in_place: bool,
165
166    /// Retain reference to currently booted image
167    #[clap(long)]
168    pub(crate) retain: bool,
169
170    /// Use unified storage path to pull images (experimental)
171    ///
172    /// When enabled, this uses bootc's container storage (/usr/lib/bootc/storage) to pull
173    /// the image first, then imports it from there. This is the same approach used for
174    /// logically bound images.
175    #[clap(long = "experimental-unified-storage", hide = true)]
176    pub(crate) unified_storage_exp: bool,
177
178    /// Target image to use for the next boot.
179    pub(crate) target: String,
180
181    #[clap(flatten)]
182    pub(crate) progress: ProgressOptions,
183}
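
// Illustrative invocations (a sketch; the image names and paths are hypothetical):
//
//   bootc switch quay.io/exampleos/someos:v1.1
//   bootc switch --transport oci-archive /path/to/os.ociarchive
//   bootc switch --apply --soft-reboot=auto quay.io/exampleos/someos:v1.1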
184
185/// Options controlling rollback
186#[derive(Debug, Parser, PartialEq, Eq)]
187pub(crate) struct RollbackOpts {
188    /// Restart or reboot into the rollback image.
189    ///
190    /// Currently, this option always reboots.  In the future this command
191    /// will detect the case where no kernel changes are queued, and perform
192    /// a userspace-only restart.
193    #[clap(long)]
194    pub(crate) apply: bool,
195
196    /// Configure soft reboot behavior.
197    ///
198    /// 'required' fails if soft reboot unavailable, 'auto' falls back to regular reboot.
199    #[clap(long = "soft-reboot")]
200    pub(crate) soft_reboot: Option<SoftRebootMode>,
201}
202
203/// Perform an edit operation
204#[derive(Debug, Parser, PartialEq, Eq)]
205pub(crate) struct EditOpts {
206    /// Use filename to edit system specification
207    #[clap(long, short = 'f')]
208    pub(crate) filename: Option<String>,
209
210    /// Don't display progress
211    #[clap(long)]
212    pub(crate) quiet: bool,
213}
214
215#[derive(Debug, Clone, ValueEnum, PartialEq, Eq)]
216#[clap(rename_all = "lowercase")]
217pub(crate) enum OutputFormat {
218    /// Output in Human Readable format.
219    HumanReadable,
220    /// Output in YAML format.
221    Yaml,
222    /// Output in JSON format.
223    Json,
224}
225
226#[derive(Debug, Clone, Copy, ValueEnum, PartialEq, Eq)]
227#[clap(rename_all = "lowercase")]
228pub(crate) enum SoftRebootMode {
229    /// Require a soft reboot; fail if not possible
230    Required,
231    /// Automatically use soft reboot if possible, otherwise use regular reboot
232    Auto,
233}
234
/// Perform a status operation
236#[derive(Debug, Parser, PartialEq, Eq)]
237pub(crate) struct StatusOpts {
238    /// Output in JSON format.
239    ///
    /// Superseded by the `format` option.
241    #[clap(long, hide = true)]
242    pub(crate) json: bool,
243
244    /// The output format.
245    #[clap(long)]
246    pub(crate) format: Option<OutputFormat>,
247
    /// The desired format version. There is currently one supported
    /// version, which is exposed as both `0` and `1`. Pass this
    /// option to explicitly request it; a newer version may be
    /// supported in the future.
252    #[clap(long)]
253    pub(crate) format_version: Option<u32>,
254
255    /// Only display status for the booted deployment.
256    #[clap(long)]
257    pub(crate) booted: bool,
258
259    /// Include additional fields in human readable format.
260    #[clap(long, short = 'v')]
261    pub(crate) verbose: bool,
262}
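
// Illustrative invocations (a sketch):
//
//   bootc status                          # human readable on a terminal, YAML otherwise
//   bootc status --format json --booted   # JSON output restricted to the booted deployment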
263
264#[derive(Debug, clap::Subcommand, PartialEq, Eq)]
265pub(crate) enum InstallOpts {
266    /// Install to the target block device.
267    ///
268    /// This command must be invoked inside of the container, which will be
269    /// installed. The container must be run in `--privileged` mode, and hence
270    /// will be able to see all block devices on the system.
271    ///
272    /// The default storage layout uses the root filesystem type configured
273    /// in the container image, alongside any required system partitions such as
274    /// the EFI system partition. Use `install to-filesystem` for anything more
275    /// complex such as RAID, LVM, LUKS etc.
276    #[cfg(feature = "install-to-disk")]
277    ToDisk(crate::install::InstallToDiskOpts),
278    /// Install to an externally created filesystem structure.
279    ///
280    /// In this variant of installation, the root filesystem alongside any necessary
281    /// platform partitions (such as the EFI system partition) are prepared and mounted by an
282    /// external tool or script. The root filesystem is currently expected to be empty
283    /// by default.
284    ToFilesystem(crate::install::InstallToFilesystemOpts),
285    /// Install to the host root filesystem.
286    ///
287    /// This is a variant of `install to-filesystem` that is designed to install "alongside"
288    /// the running host root filesystem. Currently, the host root filesystem's `/boot` partition
289    /// will be wiped, but the content of the existing root will otherwise be retained, and will
290    /// need to be cleaned up if desired when rebooted into the new root.
291    ToExistingRoot(crate::install::InstallToExistingRootOpts),
292    /// Nondestructively create a fresh installation state inside an existing bootc system.
293    ///
294    /// This is a nondestructive variant of `install to-existing-root` that works only inside
295    /// an existing bootc system.
296    #[clap(hide = true)]
297    Reset(crate::install::InstallResetOpts),
298    /// Execute this as the penultimate step of an installation using `install to-filesystem`.
299    ///
300    Finalize {
301        /// Path to the mounted root filesystem.
302        root_path: Utf8PathBuf,
303    },
304    /// Intended for use in environments that are performing an ostree-based installation, not bootc.
305    ///
306    /// In this scenario the installation may be missing bootc specific features such as
307    /// kernel arguments, logically bound images and more. This command can be used to attempt
308    /// to reconcile. At the current time, the only tested environment is Anaconda using `ostreecontainer`
309    /// and it is recommended to avoid usage outside of that environment. Instead, ensure your
310    /// code is using `bootc install to-filesystem` from the start.
311    EnsureCompletion {},
312    /// Output JSON to stdout that contains the merged installation configuration
313    /// as it may be relevant to calling processes using `install to-filesystem`
314    /// that in particular want to discover the desired root filesystem type from the container image.
315    ///
316    /// At the current time, the only output key is `root-fs-type` which is a string-valued
317    /// filesystem name suitable for passing to `mkfs.$type`.
318    PrintConfiguration(crate::install::InstallPrintConfigurationOpts),
319}
320
321/// Subcommands which can be executed as part of a container build.
322#[derive(Debug, clap::Subcommand, PartialEq, Eq)]
323pub(crate) enum ContainerOpts {
324    /// Output information about the container image.
325    ///
326    /// By default, a human-readable summary is output. Use --json or --format
327    /// to change the output format.
328    Inspect {
329        /// Operate on the provided rootfs.
330        #[clap(long, default_value = "/")]
331        rootfs: Utf8PathBuf,
332
333        /// Output in JSON format.
334        #[clap(long)]
335        json: bool,
336
337        /// The output format.
338        #[clap(long, conflicts_with = "json")]
339        format: Option<OutputFormat>,
340    },
341    /// Perform relatively inexpensive static analysis checks as part of a container
342    /// build.
343    ///
344    /// This is intended to be invoked via e.g. `RUN bootc container lint` as part
345    /// of a build process; it will error if any problems are detected.
346    Lint {
347        /// Operate on the provided rootfs.
348        #[clap(long, default_value = "/")]
349        rootfs: Utf8PathBuf,
350
351        /// Make warnings fatal.
352        #[clap(long)]
353        fatal_warnings: bool,
354
355        /// Instead of executing the lints, just print all available lints.
356        /// At the current time, this will output in YAML format because it's
357        /// reasonably human friendly. However, there is no commitment to
358        /// maintaining this exact format; do not parse it via code or scripts.
359        #[clap(long)]
360        list: bool,
361
362        /// Skip checking the targeted lints, by name. Use `--list` to discover the set
363        /// of available lints.
364        ///
365        /// Example: --skip nonempty-boot --skip baseimage-root
366        #[clap(long)]
367        skip: Vec<String>,
368
369        /// Don't truncate the output. By default, only a limited number of entries are
370        /// shown for each lint, followed by a count of remaining entries.
371        #[clap(long)]
372        no_truncate: bool,
373    },
374    /// Output the bootable composefs digest for a directory.
375    #[clap(hide = true)]
376    ComputeComposefsDigest {
377        /// Path to the filesystem root
378        #[clap(default_value = "/target")]
379        path: Utf8PathBuf,
380
381        /// Additionally generate a dumpfile written to the target path
382        #[clap(long)]
383        write_dumpfile_to: Option<Utf8PathBuf>,
384    },
385    /// Output the bootable composefs digest from container storage.
386    #[clap(hide = true)]
387    ComputeComposefsDigestFromStorage {
388        /// Additionally generate a dumpfile written to the target path
389        #[clap(long)]
390        write_dumpfile_to: Option<Utf8PathBuf>,
391
392        /// Identifier for image; if not provided, the running image will be used.
393        image: Option<String>,
394    },
395    /// Build a Unified Kernel Image (UKI) using ukify.
396    ///
397    /// This command computes the necessary arguments from the container image
398    /// (kernel, initrd, cmdline, os-release) and invokes ukify with them.
399    /// Any additional arguments after `--` are passed through to ukify unchanged.
400    ///
401    /// Example:
402    ///   bootc container ukify --rootfs /target -- --output /output/uki.efi
403    Ukify {
404        /// Operate on the provided rootfs.
405        #[clap(long, default_value = "/")]
406        rootfs: Utf8PathBuf,
407
408        /// Additional kernel arguments to append to the cmdline.
409        /// Can be specified multiple times.
410        /// This is a temporary workaround and will be removed.
411        #[clap(long = "karg", hide = true)]
412        kargs: Vec<String>,
413
414        /// Additional arguments to pass to ukify (after `--`).
415        #[clap(last = true)]
416        args: Vec<OsString>,
417    },
418}
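
// Illustrative container-build usage (a sketch; paths are hypothetical):
//
//   RUN bootc container lint --fatal-warnings   # e.g. as a Containerfile build step
//   bootc container lint --list                 # print the available lints
//   bootc container inspect --format yaml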
419
420/// Subcommands which operate on images.
421#[derive(Debug, clap::Subcommand, PartialEq, Eq)]
422pub(crate) enum ImageCmdOpts {
423    /// Wrapper for `podman image list` in bootc storage.
424    List {
425        #[clap(allow_hyphen_values = true)]
426        args: Vec<OsString>,
427    },
428    /// Wrapper for `podman image build` in bootc storage.
429    Build {
430        #[clap(allow_hyphen_values = true)]
431        args: Vec<OsString>,
432    },
433    /// Wrapper for `podman image pull` in bootc storage.
434    Pull {
435        #[clap(allow_hyphen_values = true)]
436        args: Vec<OsString>,
437    },
438    /// Wrapper for `podman image push` in bootc storage.
439    Push {
440        #[clap(allow_hyphen_values = true)]
441        args: Vec<OsString>,
442    },
443}
444
445#[derive(ValueEnum, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
446#[serde(rename_all = "kebab-case")]
447pub(crate) enum ImageListType {
448    /// List all images
449    #[default]
450    All,
451    /// List only logically bound images
452    Logical,
453    /// List only host images
454    Host,
455}
456
457impl std::fmt::Display for ImageListType {
458    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
459        self.to_possible_value().unwrap().get_name().fmt(f)
460    }
461}
462
463#[derive(ValueEnum, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
464#[serde(rename_all = "kebab-case")]
465pub(crate) enum ImageListFormat {
466    /// Human readable table format
467    #[default]
468    Table,
469    /// JSON format
470    Json,
471}
472impl std::fmt::Display for ImageListFormat {
473    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
474        self.to_possible_value().unwrap().get_name().fmt(f)
475    }
476}
477
478/// Subcommands which operate on images.
479#[derive(Debug, clap::Subcommand, PartialEq, Eq)]
480pub(crate) enum ImageOpts {
481    /// List fetched images stored in the bootc storage.
482    ///
483    /// Note that these are distinct from images stored via e.g. `podman`.
484    List {
485        /// Type of image to list
486        #[clap(long = "type")]
487        #[arg(default_value_t)]
488        list_type: ImageListType,
489        #[clap(long = "format")]
490        #[arg(default_value_t)]
491        list_format: ImageListFormat,
492    },
493    /// Copy a container image from the bootc storage to `containers-storage:`.
494    ///
    /// The source and target are both optional; if both are left unspecified
    /// (a plain invocation of `bootc image copy-to-storage`), the default is to
    /// push the currently booted image to `containers-storage` (as used by podman, etc.)
    /// and tag it with the image name `localhost/bootc`.
499    ///
500    /// ## Copying a non-default container image
501    ///
502    /// It is also possible to copy an image other than the currently booted one by
503    /// specifying `--source`.
504    ///
505    /// ## Pulling images
506    ///
507    /// At the current time there is no explicit support for pulling images other than indirectly
508    /// via e.g. `bootc switch` or `bootc upgrade`.
509    CopyToStorage {
510        #[clap(long)]
511        /// The source image; if not specified, the booted image will be used.
512        source: Option<String>,
513
514        #[clap(long)]
515        /// The destination; if not specified, then the default is to push to `containers-storage:localhost/bootc`;
516        /// this will make the image accessible via e.g. `podman run localhost/bootc` and for builds.
517        target: Option<String>,
518    },
519    /// Re-pull the currently booted image into the bootc-owned container storage.
520    ///
521    /// This onboards the system to the unified storage path so that future
522    /// upgrade/switch operations can read from the bootc storage directly.
523    SetUnified,
524    /// Copy a container image from the default `containers-storage:` to the bootc-owned container storage.
525    PullFromDefaultStorage {
526        /// The image to pull
527        image: String,
528    },
529    /// Wrapper for selected `podman image` subcommands in bootc storage.
530    #[clap(subcommand)]
531    Cmd(ImageCmdOpts),
532}
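
// Illustrative invocations (a sketch; the image name is hypothetical):
//
//   bootc image list --type logical --format json
//   bootc image copy-to-storage           # push the booted image to containers-storage:localhost/bootc
//   bootc image cmd pull quay.io/exampleos/app:latest   # wraps `podman image pull` in bootc storage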
533
534#[derive(Debug, Clone, clap::ValueEnum, PartialEq, Eq)]
535pub(crate) enum SchemaType {
536    Host,
537    Progress,
538}
539
540/// Options for consistency checking
541#[derive(Debug, clap::Subcommand, PartialEq, Eq)]
542pub(crate) enum FsverityOpts {
543    /// Measure the fsverity digest of the target file.
544    Measure {
545        /// Path to file
546        path: Utf8PathBuf,
547    },
548    /// Enable fsverity on the target file.
549    Enable {
        /// Path to file
551        path: Utf8PathBuf,
552    },
553}
554
555/// Hidden, internal only options
556#[derive(Debug, clap::Subcommand, PartialEq, Eq)]
557pub(crate) enum InternalsOpts {
558    SystemdGenerator {
559        normal_dir: Utf8PathBuf,
560        #[allow(dead_code)]
561        early_dir: Option<Utf8PathBuf>,
562        #[allow(dead_code)]
563        late_dir: Option<Utf8PathBuf>,
564    },
565    FixupEtcFstab,
566    /// Should only be used by `make update-generated`
567    PrintJsonSchema {
568        #[clap(long)]
569        of: SchemaType,
570    },
571    #[clap(subcommand)]
572    Fsverity(FsverityOpts),
573    /// Perform consistency checking.
574    Fsck,
575    /// Perform cleanup actions
576    Cleanup,
577    Relabel {
578        #[clap(long)]
579        /// Relabel using this path as root
580        as_path: Option<Utf8PathBuf>,
581
582        /// Relabel this path
583        path: Utf8PathBuf,
584    },
585    /// Proxy frontend for the `ostree-ext` CLI.
586    OstreeExt {
587        #[clap(allow_hyphen_values = true)]
588        args: Vec<OsString>,
589    },
590    /// Proxy frontend for the `cfsctl` CLI
591    Cfs {
592        #[clap(allow_hyphen_values = true)]
593        args: Vec<OsString>,
594    },
595    /// Proxy frontend for the legacy `ostree container` CLI.
596    OstreeContainer {
597        #[clap(allow_hyphen_values = true)]
598        args: Vec<OsString>,
599    },
600    /// Ensure that a composefs repository is initialized
601    TestComposefs,
602    /// Loopback device cleanup helper (internal use only)
603    LoopbackCleanupHelper {
604        /// Device path to clean up
605        #[clap(long)]
606        device: String,
607    },
608    /// Test loopback device allocation and cleanup (internal use only)
609    AllocateCleanupLoopback {
610        /// File path to create loopback device for
611        #[clap(long)]
612        file_path: Utf8PathBuf,
613    },
614    /// Invoked from ostree-ext to complete an installation.
615    BootcInstallCompletion {
616        /// Path to the sysroot
617        sysroot: Utf8PathBuf,
618
        /// The stateroot
620        stateroot: String,
621    },
622    /// Initiate a reboot the same way we would after --apply; intended
623    /// primarily for testing.
624    Reboot,
625    #[cfg(feature = "rhsm")]
626    /// Publish subscription-manager facts to /etc/rhsm/facts/bootc.facts
627    PublishRhsmFacts,
628    /// Internal command for testing etc-diff/etc-merge
629    DirDiff {
630        /// Directory path to the pristine_etc
631        pristine_etc: Utf8PathBuf,
632        /// Directory path to the current_etc
633        current_etc: Utf8PathBuf,
634        /// Directory path to the new_etc
635        new_etc: Utf8PathBuf,
636        /// Whether to perform the three way merge or not
637        #[clap(long)]
638        merge: bool,
639    },
640    #[cfg(feature = "docgen")]
641    /// Dump CLI structure as JSON for documentation generation
642    DumpCliJson,
643    PrepSoftReboot {
644        #[clap(required_unless_present = "reset")]
645        deployment: Option<String>,
646        #[clap(long, conflicts_with = "reset")]
647        reboot: bool,
648        #[clap(long, conflicts_with = "reboot")]
649        reset: bool,
650    },
651}
652
653#[derive(Debug, clap::Subcommand, PartialEq, Eq)]
654pub(crate) enum StateOpts {
655    /// Remove all ostree deployments from this system
656    WipeOstree,
657}
658
659impl InternalsOpts {
660    /// The name of the binary we inject into /usr/lib/systemd/system-generators
661    const GENERATOR_BIN: &'static str = "bootc-systemd-generator";
662}
663
/// Deploy and update transactionally in-place with bootable container images.
///
/// The `bootc` project currently uses ostree-containers as a backend
/// to support a model of bootable container images.  Once installed,
/// whether directly via `bootc install` (executed as part of a container)
/// or via another mechanism such as an OS installer tool, further
/// updates can be pulled and applied via `bootc upgrade`.
671#[derive(Debug, Parser, PartialEq, Eq)]
672#[clap(name = "bootc")]
673#[clap(rename_all = "kebab-case")]
674#[clap(version,long_version=clap::crate_version!())]
675#[allow(clippy::large_enum_variant)]
676pub(crate) enum Opt {
677    /// Download and queue an updated container image to apply.
678    ///
679    /// This does not affect the running system; updates operate in an "A/B" style by default.
680    ///
681    /// A queued update is visible as `staged` in `bootc status`.
682    ///
683    /// Currently by default, the update will be applied at shutdown time via `ostree-finalize-staged.service`.
684    /// There is also an explicit `bootc upgrade --apply` verb which will automatically take action (rebooting)
685    /// if the system has changed.
686    ///
687    /// However, in the future this is likely to change such that reboots outside of a `bootc upgrade --apply`
688    /// do *not* automatically apply the update in addition.
689    #[clap(alias = "update")]
690    Upgrade(UpgradeOpts),
691    /// Target a new container image reference to boot.
692    ///
693    /// This is almost exactly the same operation as `upgrade`, but additionally changes the container image reference
694    /// instead.
695    ///
696    /// ## Usage
697    ///
698    /// A common pattern is to have a management agent control operating system updates via container image tags;
699    /// for example, `quay.io/exampleos/someuser:v1.0` and `quay.io/exampleos/someuser:v1.1` where some machines
    /// are tracking `:v1.0`, and as a rollout progresses, machines can be switched to `:v1.1`.
701    Switch(SwitchOpts),
702    /// Change the bootloader entry ordering; the deployment under `rollback` will be queued for the next boot,
703    /// and the current will become rollback.  If there is a `staged` entry (an unapplied, queued upgrade)
704    /// then it will be discarded.
705    ///
706    /// Note that absent any additional control logic, if there is an active agent doing automated upgrades
707    /// (such as the default `bootc-fetch-apply-updates.timer` and associated `.service`) the
708    /// change here may be reverted.  It's recommended to only use this in concert with an agent that
709    /// is in active control.
710    ///
711    /// A systemd journal message will be logged with `MESSAGE_ID=26f3b1eb24464d12aa5e7b544a6b5468` in
712    /// order to detect a rollback invocation.
713    #[command(after_help = indoc! {r#"
714        Note on Rollbacks and the `/etc` Directory:
715
716        When you perform a rollback (e.g., with `bootc rollback`), any
717        changes made to files in the `/etc` directory won't carry over
718        to the rolled-back deployment.  The `/etc` files will revert
719        to their state from that previous deployment instead.
720
721        This is because `bootc rollback` just reorders the existing
722        deployments. It doesn't create new deployments. The `/etc`
723        merges happen when new deployments are created.
724    "#})]
725    Rollback(RollbackOpts),
726    /// Apply full changes to the host specification.
727    ///
728    /// This command operates very similarly to `kubectl apply`; if invoked interactively,
729    /// then the current host specification will be presented in the system default `$EDITOR`
730    /// for interactive changes.
731    ///
732    /// It is also possible to directly provide new contents via `bootc edit --filename`.
733    ///
734    /// Only changes to the `spec` section are honored.
735    Edit(EditOpts),
736    /// Display status.
737    ///
738    /// Shows bootc system state. Outputs YAML by default, human-readable if terminal detected.
739    Status(StatusOpts),
740    /// Add a transient writable overlayfs on `/usr`.
741    ///
742    /// Allows temporary package installation that will be discarded on reboot.
743    #[clap(alias = "usroverlay")]
744    UsrOverlay,
745    /// Install the running container to a target.
746    ///
747    /// Takes a container image and installs it to disk in a bootable format.
748    #[clap(subcommand)]
749    Install(InstallOpts),
750    /// Operations which can be executed as part of a container build.
751    #[clap(subcommand)]
752    Container(ContainerOpts),
753    /// Operations on container images.
754    ///
755    /// Stability: This interface may change in the future.
756    #[clap(subcommand, hide = true)]
757    Image(ImageOpts),
758    /// Execute the given command in the host mount namespace
759    #[clap(hide = true)]
760    ExecInHostMountNamespace {
761        #[clap(trailing_var_arg = true, allow_hyphen_values = true)]
762        args: Vec<OsString>,
763    },
764    /// Modify the state of the system
765    #[clap(hide = true)]
766    #[clap(subcommand)]
767    State(StateOpts),
768    #[clap(subcommand)]
769    #[clap(hide = true)]
770    Internals(InternalsOpts),
771    ComposefsFinalizeStaged,
772    /// Diff current /etc configuration versus default
773    #[clap(hide = true)]
774    ConfigDiff,
775    /// Generate shell completion script for supported shells.
776    ///
777    /// Example: `bootc completion bash` prints a bash completion script to stdout.
778    #[clap(hide = true)]
779    Completion {
780        /// Shell type to generate (bash, zsh, fish)
781        #[clap(value_enum)]
782        shell: clap_complete::aot::Shell,
783    },
784    #[clap(hide = true)]
785    DeleteDeployment {
786        depl_id: String,
787    },
788}
789
790/// Ensure we've entered a mount namespace, so that we can remount
791/// `/sysroot` read-write
792/// TODO use <https://github.com/ostreedev/ostree/pull/2779> once
793/// we can depend on a new enough ostree
794#[context("Ensuring mountns")]
795pub(crate) fn ensure_self_unshared_mount_namespace() -> Result<()> {
796    let uid = rustix::process::getuid();
797    if !uid.is_root() {
798        tracing::debug!("Not root, assuming no need to unshare");
799        return Ok(());
800    }
801    let recurse_env = "_ostree_unshared";
802    let ns_pid1 = std::fs::read_link("/proc/1/ns/mnt").context("Reading /proc/1/ns/mnt")?;
803    let ns_self = std::fs::read_link("/proc/self/ns/mnt").context("Reading /proc/self/ns/mnt")?;
    // If our mount namespace already differs from pid 1's, we're done
805    if ns_pid1 != ns_self {
806        tracing::debug!("Already in a mount namespace");
807        return Ok(());
808    }
809    if std::env::var_os(recurse_env).is_some() {
810        let am_pid1 = rustix::process::getpid().is_init();
811        if am_pid1 {
812            tracing::debug!("We are pid 1");
813            return Ok(());
814        } else {
815            anyhow::bail!("Failed to unshare mount namespace");
816        }
817    }
818    bootc_utils::reexec::reexec_with_guardenv(recurse_env, &["unshare", "-m", "--"])
819}
820
821/// Load global storage state, expecting that we're booted into a bootc system.
822/// This prepares the process for write operations (re-exec, mount namespace, etc).
823#[context("Initializing storage")]
824pub(crate) async fn get_storage() -> Result<crate::store::BootedStorage> {
825    let env = crate::store::Environment::detect()?;
826    // Always call prepare_for_write() for write operations - it checks
827    // for container, root privileges, mount namespace setup, etc.
828    prepare_for_write()?;
829    let r = BootedStorage::new(env)
830        .await?
831        .ok_or_else(|| anyhow!("System not booted via bootc"))?;
832    Ok(r)
833}
834
835#[context("Querying root privilege")]
836pub(crate) fn require_root(is_container: bool) -> Result<()> {
837    ensure!(
838        rustix::process::getuid().is_root(),
839        if is_container {
840            "The user inside the container from which you are running this command must be root"
841        } else {
842            "This command must be executed as the root user"
843        }
844    );
845
846    ensure!(
847        rustix::thread::capability_is_in_bounding_set(rustix::thread::CapabilitySet::SYS_ADMIN)?,
848        if is_container {
849            "The container must be executed with full privileges (e.g. --privileged flag)"
850        } else {
851            "This command requires full root privileges (CAP_SYS_ADMIN)"
852        }
853    );
854
855    tracing::trace!("Verified uid 0 with CAP_SYS_ADMIN");
856
857    Ok(())
858}
859
860/// Check if a deployment has soft reboot capability
861fn has_soft_reboot_capability(deployment: Option<&crate::spec::BootEntry>) -> bool {
862    deployment.map(|d| d.soft_reboot_capable).unwrap_or(false)
863}
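
// A minimal unit-test sketch for the helper above; it only exercises the `None`
// case so it does not need to construct a `BootEntry`.
#[cfg(test)]
mod soft_reboot_capability_tests {
    use super::*;

    #[test]
    fn absent_deployment_is_not_capable() {
        // With no deployment entry present, we conservatively report "not capable".
        assert!(!has_soft_reboot_capability(None));
    }
}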
864
865/// Prepare a soft reboot for the given deployment
866#[context("Preparing soft reboot")]
867fn prepare_soft_reboot(sysroot: &SysrootLock, deployment: &ostree::Deployment) -> Result<()> {
868    let cancellable = ostree::gio::Cancellable::NONE;
869    sysroot
870        .deployment_set_soft_reboot(deployment, false, cancellable)
871        .context("Failed to prepare soft-reboot")?;
872    Ok(())
873}
874
875/// Handle soft reboot based on the configured mode
876#[context("Handling soft reboot")]
877fn handle_soft_reboot<F>(
878    soft_reboot_mode: Option<SoftRebootMode>,
879    entry: Option<&crate::spec::BootEntry>,
880    deployment_type: &str,
881    execute_soft_reboot: F,
882) -> Result<()>
883where
884    F: FnOnce() -> Result<()>,
885{
886    let Some(mode) = soft_reboot_mode else {
887        return Ok(());
888    };
889
890    let can_soft_reboot = has_soft_reboot_capability(entry);
891    match mode {
892        SoftRebootMode::Required => {
893            if can_soft_reboot {
894                execute_soft_reboot()?;
895            } else {
896                anyhow::bail!(
897                    "Soft reboot was required but {} deployment is not soft-reboot capable",
898                    deployment_type
899                );
900            }
901        }
902        SoftRebootMode::Auto => {
903            if can_soft_reboot {
904                execute_soft_reboot()?;
905            }
906        }
907    }
908    Ok(())
909}
910
911/// Handle soft reboot for staged deployments (used by upgrade and switch)
912#[context("Handling staged soft reboot")]
913fn handle_staged_soft_reboot(
914    booted_ostree: &BootedOstree<'_>,
915    soft_reboot_mode: Option<SoftRebootMode>,
916    host: &crate::spec::Host,
917) -> Result<()> {
918    handle_soft_reboot(
919        soft_reboot_mode,
920        host.status.staged.as_ref(),
921        "staged",
922        || soft_reboot_staged(booted_ostree.sysroot),
923    )
924}
925
926/// Perform a soft reboot for a staged deployment
927#[context("Soft reboot staged deployment")]
928fn soft_reboot_staged(sysroot: &SysrootLock) -> Result<()> {
929    println!("Staged deployment is soft-reboot capable, preparing for soft-reboot...");
930
931    let deployments_list = sysroot.deployments();
932    let staged_deployment = deployments_list
933        .iter()
934        .find(|d| d.is_staged())
935        .ok_or_else(|| anyhow::anyhow!("Failed to find staged deployment"))?;
936
937    prepare_soft_reboot(sysroot, staged_deployment)?;
938    Ok(())
939}
940
941/// Perform a soft reboot for a rollback deployment
942#[context("Soft reboot rollback deployment")]
943fn soft_reboot_rollback(booted_ostree: &BootedOstree<'_>) -> Result<()> {
944    println!("Rollback deployment is soft-reboot capable, preparing for soft-reboot...");
945
946    let deployments_list = booted_ostree.sysroot.deployments();
947    let target_deployment = deployments_list
948        .first()
949        .ok_or_else(|| anyhow::anyhow!("No rollback deployment found!"))?;
950
951    prepare_soft_reboot(booted_ostree.sysroot, target_deployment)
952}
953
954/// A few process changes that need to be made for writing.
955/// IMPORTANT: This may end up re-executing the current process,
956/// so anything that happens before this should be idempotent.
957#[context("Preparing for write")]
958pub(crate) fn prepare_for_write() -> Result<()> {
959    use std::sync::atomic::{AtomicBool, Ordering};
960
961    // This is intending to give "at most once" semantics to this
962    // function. We should never invoke this from multiple threads
963    // at the same time, but verifying "on main thread" is messy.
964    // Yes, using SeqCst is likely overkill, but there is nothing perf
965    // sensitive about this.
966    static ENTERED: AtomicBool = AtomicBool::new(false);
967    if ENTERED.load(Ordering::SeqCst) {
968        return Ok(());
969    }
970    if ostree_ext::container_utils::running_in_container() {
971        anyhow::bail!("Detected container; this command requires a booted host system.");
972    }
973    crate::cli::require_root(false)?;
974    ensure_self_unshared_mount_namespace()?;
975    if crate::lsm::selinux_enabled()? && !crate::lsm::selinux_ensure_install()? {
976        tracing::debug!("Do not have install_t capabilities");
977    }
978    ENTERED.store(true, Ordering::SeqCst);
979    Ok(())
980}
981
982/// Implementation of the `bootc upgrade` CLI command.
983#[context("Upgrading")]
984async fn upgrade(
985    opts: UpgradeOpts,
986    storage: &Storage,
987    booted_ostree: &BootedOstree<'_>,
988) -> Result<()> {
989    let repo = &booted_ostree.repo();
990
991    let host = crate::status::get_status(booted_ostree)?.1;
992    let imgref = host.spec.image.as_ref();
993    let prog: ProgressWriter = opts.progress.try_into()?;
994
995    // If there's no specified image, let's be nice and check if the booted system is using rpm-ostree
996    if imgref.is_none() {
997        let booted_incompatible = host.status.booted.as_ref().is_some_and(|b| b.incompatible);
998
999        let staged_incompatible = host.status.staged.as_ref().is_some_and(|b| b.incompatible);
1000
1001        if booted_incompatible || staged_incompatible {
1002            return Err(anyhow::anyhow!(
1003                "Deployment contains local rpm-ostree modifications; cannot upgrade via bootc. You can run `rpm-ostree reset` to undo the modifications."
1004            ));
1005        }
1006    }
1007
1008    let spec = RequiredHostSpec::from_spec(&host.spec)?;
1009    let booted_image = host
1010        .status
1011        .booted
1012        .as_ref()
1013        .map(|b| b.query_image(repo))
1014        .transpose()?
1015        .flatten();
1016    let imgref = imgref.ok_or_else(|| anyhow::anyhow!("No image source specified"))?;
1017    // Find the currently queued digest, if any before we pull
1018    let staged = host.status.staged.as_ref();
1019    let staged_image = staged.as_ref().and_then(|s| s.image.as_ref());
1020    let mut changed = false;
1021
1022    // Handle --from-downloaded: unlock existing staged deployment without fetching from image source
1023    if opts.from_downloaded {
1024        let ostree = storage.get_ostree()?;
1025        let staged_deployment = ostree
1026            .staged_deployment()
1027            .ok_or_else(|| anyhow::anyhow!("No staged deployment found"))?;
1028
1029        if staged_deployment.is_finalization_locked() {
1030            ostree.change_finalization(&staged_deployment)?;
1031            println!("Staged deployment will now be applied on reboot");
1032        } else {
1033            println!("Staged deployment is already set to apply on reboot");
1034        }
1035
1036        handle_staged_soft_reboot(booted_ostree, opts.soft_reboot, &host)?;
1037        if opts.apply {
1038            crate::reboot::reboot()?;
1039        }
1040        return Ok(());
1041    }
1042
1043    if opts.check {
1044        let imgref = imgref.clone().into();
1045        let mut imp = crate::deploy::new_importer(repo, &imgref).await?;
1046        match imp.prepare().await? {
1047            PrepareResult::AlreadyPresent(_) => {
1048                println!("No changes in: {imgref:#}");
1049            }
1050            PrepareResult::Ready(r) => {
1051                crate::deploy::check_bootc_label(&r.config);
1052                println!("Update available for: {imgref:#}");
1053                if let Some(version) = r.version() {
1054                    println!("  Version: {version}");
1055                }
1056                println!("  Digest: {}", r.manifest_digest);
1057                changed = true;
1058                if let Some(previous_image) = booted_image.as_ref() {
1059                    let diff =
1060                        ostree_container::ManifestDiff::new(&previous_image.manifest, &r.manifest);
1061                    diff.print();
1062                }
1063            }
1064        }
1065    } else {
1066        // Auto-detect whether to use unified storage based on image presence in bootc storage
1067        let use_unified = crate::deploy::image_exists_in_unified_storage(storage, imgref).await?;
1068
1069        let fetched = if use_unified {
1070            crate::deploy::pull_unified(repo, imgref, None, opts.quiet, prog.clone(), storage)
1071                .await?
1072        } else {
1073            crate::deploy::pull(repo, imgref, None, opts.quiet, prog.clone()).await?
1074        };
1075        let staged_digest = staged_image.map(|s| s.digest().expect("valid digest in status"));
1076        let fetched_digest = &fetched.manifest_digest;
1077        tracing::debug!("staged: {staged_digest:?}");
1078        tracing::debug!("fetched: {fetched_digest}");
1079        let staged_unchanged = staged_digest
1080            .as_ref()
1081            .map(|d| d == fetched_digest)
1082            .unwrap_or_default();
1083        let booted_unchanged = booted_image
1084            .as_ref()
1085            .map(|img| &img.manifest_digest == fetched_digest)
1086            .unwrap_or_default();
1087        if staged_unchanged {
1088            let staged_deployment = storage.get_ostree()?.staged_deployment();
1089            let mut download_only_changed = false;
1090
1091            if let Some(staged) = staged_deployment {
1092                // Handle download-only mode based on flags
1093                if opts.download_only {
1094                    // --download-only: set download-only mode
1095                    if !staged.is_finalization_locked() {
1096                        storage.get_ostree()?.change_finalization(&staged)?;
1097                        println!("Image downloaded, but will not be applied on reboot");
1098                        download_only_changed = true;
1099                    }
1100                } else if !opts.check {
1101                    // --apply or no flags: clear download-only mode
1102                    // (skip if --check, which is read-only)
1103                    if staged.is_finalization_locked() {
1104                        storage.get_ostree()?.change_finalization(&staged)?;
1105                        println!("Staged deployment will now be applied on reboot");
1106                        download_only_changed = true;
1107                    }
1108                }
1109            } else if opts.download_only || opts.apply {
1110                anyhow::bail!("No staged deployment found");
1111            }
1112
1113            if !download_only_changed {
1114                println!("Staged update present, not changed");
1115            }
1116
1117            handle_staged_soft_reboot(booted_ostree, opts.soft_reboot, &host)?;
1118            if opts.apply {
1119                crate::reboot::reboot()?;
1120            }
1121        } else if booted_unchanged {
1122            println!("No update available.")
1123        } else {
1124            let stateroot = booted_ostree.stateroot();
1125            let from = MergeState::from_stateroot(storage, &stateroot)?;
1126            crate::deploy::stage(
1127                storage,
1128                from,
1129                &fetched,
1130                &spec,
1131                prog.clone(),
1132                opts.download_only,
1133            )
1134            .await?;
1135            changed = true;
1136            if let Some(prev) = booted_image.as_ref() {
1137                if let Some(fetched_manifest) = fetched.get_manifest(repo)? {
1138                    let diff =
1139                        ostree_container::ManifestDiff::new(&prev.manifest, &fetched_manifest);
1140                    diff.print();
1141                }
1142            }
1143        }
1144    }
1145    if changed {
1146        storage.update_mtime()?;
1147
1148        if opts.soft_reboot.is_some() {
            // At this point we have a new staged deployment and the host definition has changed.
1150            // We need the updated host status before we check if we can prepare the soft-reboot.
1151            let updated_host = crate::status::get_status(booted_ostree)?.1;
1152            handle_staged_soft_reboot(booted_ostree, opts.soft_reboot, &updated_host)?;
1153        }
1154
1155        if opts.apply {
1156            crate::reboot::reboot()?;
1157        }
1158    } else {
1159        tracing::debug!("No changes");
1160    }
1161
1162    Ok(())
1163}
1164
1165pub(crate) fn imgref_for_switch(opts: &SwitchOpts) -> Result<ImageReference> {
1166    let transport = ostree_container::Transport::try_from(opts.transport.as_str())?;
1167    let imgref = ostree_container::ImageReference {
1168        transport,
1169        name: opts.target.to_string(),
1170    };
1171    let sigverify = sigpolicy_from_opt(opts.enforce_container_sigpolicy);
1172    let target = ostree_container::OstreeImageReference { sigverify, imgref };
1173    let target = ImageReference::from(target);
1174
    Ok(target)
1176}
1177
1178/// Implementation of the `bootc switch` CLI command for ostree backend.
1179#[context("Switching (ostree)")]
1180async fn switch_ostree(
1181    opts: SwitchOpts,
1182    storage: &Storage,
1183    booted_ostree: &BootedOstree<'_>,
1184) -> Result<()> {
1185    let target = imgref_for_switch(&opts)?;
1186    let prog: ProgressWriter = opts.progress.try_into()?;
1187    let cancellable = gio::Cancellable::NONE;
1188
1189    let repo = &booted_ostree.repo();
1190    let (_, host) = crate::status::get_status(booted_ostree)?;
1191
1192    let new_spec = {
1193        let mut new_spec = host.spec.clone();
1194        new_spec.image = Some(target.clone());
1195        new_spec
1196    };
1197
1198    if new_spec == host.spec {
1199        println!("Image specification is unchanged.");
1200        return Ok(());
1201    }
1202
1203    // Log the switch operation to systemd journal
    const SWITCH_JOURNAL_ID: &str = "7a6b5c4d3e2f1a0b9c8d7e6f5a4b3c2d";
1205    let old_image = host
1206        .spec
1207        .image
1208        .as_ref()
1209        .map(|i| i.image.as_str())
1210        .unwrap_or("none");
1211
1212    tracing::info!(
1213        message_id = SWITCH_JOURNAL_ID,
1214        bootc.old_image_reference = old_image,
1215        bootc.new_image_reference = &target.image,
1216        bootc.new_image_transport = &target.transport,
1217        "Switching from image {} to {}",
1218        old_image,
1219        target.image
1220    );
1221
1222    let new_spec = RequiredHostSpec::from_spec(&new_spec)?;
1223
1224    // Determine whether to use unified storage path.
1225    // If explicitly requested via flag, use unified storage directly.
1226    // Otherwise, auto-detect based on whether the image exists in bootc storage.
1227    let use_unified = if opts.unified_storage_exp {
1228        true
1229    } else {
1230        crate::deploy::image_exists_in_unified_storage(storage, &target).await?
1231    };
1232
1233    let fetched = if use_unified {
1234        crate::deploy::pull_unified(repo, &target, None, opts.quiet, prog.clone(), storage).await?
1235    } else {
1236        crate::deploy::pull(repo, &target, None, opts.quiet, prog.clone()).await?
1237    };
1238
1239    if !opts.retain {
1240        // By default, we prune the previous ostree ref so it will go away after later upgrades
1241        if let Some(booted_origin) = booted_ostree.deployment.origin() {
1242            if let Some(ostree_ref) = booted_origin.optional_string("origin", "refspec")? {
1243                let (remote, ostree_ref) =
1244                    ostree::parse_refspec(&ostree_ref).context("Failed to parse ostree ref")?;
1245                repo.set_ref_immediate(remote.as_deref(), &ostree_ref, None, cancellable)?;
1246            }
1247        }
1248    }
1249
1250    let stateroot = booted_ostree.stateroot();
1251    let from = MergeState::from_stateroot(storage, &stateroot)?;
1252    crate::deploy::stage(storage, from, &fetched, &new_spec, prog.clone(), false).await?;
1253
1254    storage.update_mtime()?;
1255
1256    if opts.soft_reboot.is_some() {
1257        // At this point we have staged the deployment and the host definition has changed.
1258        // We need the updated host status before we check if we can prepare the soft-reboot.
1259        let updated_host = crate::status::get_status(booted_ostree)?.1;
1260        handle_staged_soft_reboot(booted_ostree, opts.soft_reboot, &updated_host)?;
1261    }
1262
1263    if opts.apply {
1264        crate::reboot::reboot()?;
1265    }
1266
1267    Ok(())
1268}
1269
1270/// Implementation of the `bootc switch` CLI command.
1271#[context("Switching")]
1272async fn switch(opts: SwitchOpts) -> Result<()> {
1273    // If we're doing an in-place mutation, we shortcut most of the rest of the work here
1274    // TODO: what we really want here is Storage::detect_from_root() that also handles
1275    // composefs. But for now this just assumes ostree.
1276    if opts.mutate_in_place {
1277        let target = imgref_for_switch(&opts)?;
1278        let deployid = {
1279            // Clone to pass into helper thread
1280            let target = target.clone();
1281            let root = cap_std::fs::Dir::open_ambient_dir("/", cap_std::ambient_authority())?;
1282            tokio::task::spawn_blocking(move || {
1283                crate::deploy::switch_origin_inplace(&root, &target)
1284            })
1285            .await??
1286        };
1287        println!("Updated {deployid} to pull from {target}");
1288        return Ok(());
1289    }
1290    let storage = &get_storage().await?;
1291    match storage.kind()? {
1292        BootedStorageKind::Ostree(booted_ostree) => {
1293            switch_ostree(opts, storage, &booted_ostree).await
1294        }
1295        BootedStorageKind::Composefs(booted_cfs) => {
1296            switch_composefs(opts, storage, &booted_cfs).await
1297        }
1298    }
1299}
1300
1301/// Implementation of the `bootc rollback` CLI command for ostree backend.
1302#[context("Rollback (ostree)")]
1303async fn rollback_ostree(
1304    opts: &RollbackOpts,
1305    storage: &Storage,
1306    booted_ostree: &BootedOstree<'_>,
1307) -> Result<()> {
1308    crate::deploy::rollback(storage).await?;
1309
1310    if opts.soft_reboot.is_some() {
1311        // Get status of rollback deployment to check soft-reboot capability
1312        let host = crate::status::get_status(booted_ostree)?.1;
1313
1314        handle_soft_reboot(
1315            opts.soft_reboot,
1316            host.status.rollback.as_ref(),
1317            "rollback",
1318            || soft_reboot_rollback(booted_ostree),
1319        )?;
1320    }
1321
1322    Ok(())
1323}
1324
1325/// Implementation of the `bootc rollback` CLI command.
1326#[context("Rollback")]
1327async fn rollback(opts: &RollbackOpts) -> Result<()> {
1328    let storage = &get_storage().await?;
1329    match storage.kind()? {
1330        BootedStorageKind::Ostree(booted_ostree) => {
1331            rollback_ostree(opts, storage, &booted_ostree).await
1332        }
1333        BootedStorageKind::Composefs(booted_cfs) => composefs_rollback(storage, &booted_cfs).await,
1334    }
1335}
1336
1337/// Implementation of the `bootc edit` CLI command for ostree backend.
1338#[context("Editing spec (ostree)")]
1339async fn edit_ostree(
1340    opts: EditOpts,
1341    storage: &Storage,
1342    booted_ostree: &BootedOstree<'_>,
1343) -> Result<()> {
1344    let repo = &booted_ostree.repo();
1345    let (_, host) = crate::status::get_status(booted_ostree)?;
1346
1347    let new_host: Host = if let Some(filename) = opts.filename {
1348        let mut r = std::io::BufReader::new(std::fs::File::open(filename)?);
1349        serde_yaml::from_reader(&mut r)?
1350    } else {
1351        let tmpf = tempfile::NamedTempFile::with_suffix(".yaml")?;
1352        serde_yaml::to_writer(std::io::BufWriter::new(tmpf.as_file()), &host)?;
1353        crate::utils::spawn_editor(&tmpf)?;
1354        tmpf.as_file().seek(std::io::SeekFrom::Start(0))?;
1355        serde_yaml::from_reader(&mut tmpf.as_file())?
1356    };
1357
1358    if new_host.spec == host.spec {
1359        println!("Edit cancelled, no changes made.");
1360        return Ok(());
1361    }
1362    host.spec.verify_transition(&new_host.spec)?;
1363    let new_spec = RequiredHostSpec::from_spec(&new_host.spec)?;
1364
1365    let prog = ProgressWriter::default();
1366
1367    // We only support two state transitions right now; switching the image,
1368    // or flipping the bootloader ordering.
1369    if host.spec.boot_order != new_host.spec.boot_order {
1370        return crate::deploy::rollback(storage).await;
1371    }
1372
1373    let fetched = crate::deploy::pull(repo, new_spec.image, None, opts.quiet, prog.clone()).await?;
1374
1375    // TODO gc old layers here
1376
1377    let stateroot = booted_ostree.stateroot();
1378    let from = MergeState::from_stateroot(storage, &stateroot)?;
1379    crate::deploy::stage(storage, from, &fetched, &new_spec, prog.clone(), false).await?;
1380
1381    storage.update_mtime()?;
1382
1383    Ok(())
1384}
1385
1386/// Implementation of the `bootc edit` CLI command.
1387#[context("Editing spec")]
1388async fn edit(opts: EditOpts) -> Result<()> {
1389    let storage = &get_storage().await?;
1390    match storage.kind()? {
1391        BootedStorageKind::Ostree(booted_ostree) => {
1392            edit_ostree(opts, storage, &booted_ostree).await
1393        }
1394        BootedStorageKind::Composefs(_) => {
1395            anyhow::bail!("Edit is not yet supported for composefs backend")
1396        }
1397    }
1398}
1399
1400/// Implementation of `bootc usroverlay`
1401async fn usroverlay() -> Result<()> {
1402    // This is just a pass-through today.  At some point we may make this a libostree API
1403    // or even oxidize it.
1404    Err(Command::new("ostree")
1405        .args(["admin", "unlock"])
1406        .exec()
1407        .into())
1408}
1409
1410/// Perform process global initialization. This should be called as early as possible
1411/// in the standard `main` function.
1412#[allow(unsafe_code)]
1413pub fn global_init() -> Result<()> {
1414    // In some cases we re-exec with a temporary binary,
1415    // so ensure that the syslog identifier is set.
1416    ostree::glib::set_prgname(bootc_utils::NAME.into());
1417    if let Err(e) = rustix::thread::set_name(&CString::new(bootc_utils::NAME).unwrap()) {
1418        // This shouldn't ever happen
1419        eprintln!("failed to set name: {e}");
1420    }
1421    // Silence SELinux log warnings
1422    ostree::SePolicy::set_null_log();
1423    let am_root = rustix::process::getuid().is_root();
1424    // Work around bootc-image-builder not setting HOME, in combination with podman (really c/common)
1425    // bombing out if it is unset.
1426    if std::env::var_os("HOME").is_none() && am_root {
1427        // Setting the environment is thread-unsafe, but we ask calling code
1428        // to invoke this as early as possible. (In practice, that's just the cli's `main.rs`)
1429        // xref https://internals.rust-lang.org/t/synchronized-ffi-access-to-posix-environment-variable-functions/15475
1430        // SAFETY: Called early in main() before any threads are spawned.
1431        unsafe {
1432            std::env::set_var("HOME", "/root");
1433        }
1434    }
1435    Ok(())
1436}
1437
1438/// Parse the provided arguments and execute.
1439/// Calls [`clap::Error::exit`] on failure, printing the error message and exiting the process.
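///
/// # Example
///
/// A hedged sketch of typical usage from an async entrypoint; note that the iterator
/// must include argv0, since subcommand dispatch can key off it:
///
/// ```ignore
/// // e.g. inside a #[tokio::main] async fn main() -> anyhow::Result<()>
/// run_from_iter(std::env::args_os()).await?;
/// ```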
1440pub async fn run_from_iter<I>(args: I) -> Result<()>
1441where
1442    I: IntoIterator,
1443    I::Item: Into<OsString> + Clone,
1444{
1445    run_from_opt(Opt::parse_including_static(args)).await
1446}
1447
1448/// Find the base binary name from argv0 (without any leading path components).
1449/// The empty string is never returned; if the file name is empty or the input is
1450/// not valid UTF-8, the default name `bootc` is used instead.
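///
/// # Examples
///
/// Consistent with the unit tests below:
///
/// ```ignore
/// assert_eq!(callname_from_argv0(OsStr::new("/usr/bin/bootc")), "bootc");
/// assert_eq!(callname_from_argv0(OsStr::new("")), "bootc");
/// ```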
1451fn callname_from_argv0(argv0: &OsStr) -> &str {
1452    let default = "bootc";
1453    std::path::Path::new(argv0)
1454        .file_name()
1455        .and_then(|s| s.to_str())
1456        .filter(|s| !s.is_empty())
1457        .unwrap_or(default)
1458}
1459
1460impl Opt {
1461    /// In some cases (e.g. systemd generator) we dispatch specifically on argv0.  This
1462    /// requires some special handling in clap.
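    ///
    /// For example (consistent with `test_parse_ostree_ext` below), an argv0 of
    /// `ostree-ima-sign` is remapped onto `bootc internals ostree-ext`:
    ///
    /// ```ignore
    /// let opt = Opt::parse_including_static([
    ///     "/usr/libexec/libostree/ext/ostree-ima-sign",
    ///     "ima-sign",
    ///     "--repo=foo",
    /// ]);
    /// // opt is now Opt::Internals(InternalsOpts::OstreeExt { args })
    /// // with args == ["ima-sign", "--repo=foo"]
    /// ```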
1463    fn parse_including_static<I>(args: I) -> Self
1464    where
1465        I: IntoIterator,
1466        I::Item: Into<OsString> + Clone,
1467    {
1468        let mut args = args.into_iter();
1469        let first = if let Some(first) = args.next() {
1470            let first: OsString = first.into();
1471            let argv0 = callname_from_argv0(&first);
1472            tracing::debug!("argv0={argv0:?}");
1473            let mapped = match argv0 {
1474                InternalsOpts::GENERATOR_BIN => {
1475                    Some(["bootc", "internals", "systemd-generator"].as_slice())
1476                }
1477                "ostree-container" | "ostree-ima-sign" | "ostree-provisional-repair" => {
1478                    Some(["bootc", "internals", "ostree-ext"].as_slice())
1479                }
1480                _ => None,
1481            };
1482            if let Some(base_args) = mapped {
1483                let base_args = base_args.iter().map(OsString::from);
1484                return Opt::parse_from(base_args.chain(args.map(|i| i.into())));
1485            }
1486            Some(first)
1487        } else {
1488            None
1489        };
1490        Opt::parse_from(first.into_iter().chain(args.map(|i| i.into())))
1491    }
1492}
1493
1494/// Internal (non-generic/monomorphized) primary CLI entrypoint
1495async fn run_from_opt(opt: Opt) -> Result<()> {
1496    let root = &Dir::open_ambient_dir("/", cap_std::ambient_authority())?;
1497    match opt {
1498        Opt::Upgrade(opts) => {
1499            let storage = &get_storage().await?;
1500            match storage.kind()? {
1501                BootedStorageKind::Ostree(booted_ostree) => {
1502                    upgrade(opts, storage, &booted_ostree).await
1503                }
1504                BootedStorageKind::Composefs(booted_cfs) => {
1505                    upgrade_composefs(opts, storage, &booted_cfs).await
1506                }
1507            }
1508        }
1509        Opt::Switch(opts) => switch(opts).await,
1510        Opt::Rollback(opts) => {
1511            rollback(&opts).await?;
1512            if opts.apply {
1513                crate::reboot::reboot()?;
1514            }
1515            Ok(())
1516        }
1517        Opt::Edit(opts) => edit(opts).await,
1518        Opt::UsrOverlay => {
1519            use crate::store::Environment;
1520            let env = Environment::detect()?;
1521            match env {
1522                Environment::OstreeBooted => usroverlay().await,
1523                Environment::ComposefsBooted(_) => composefs_usr_overlay(),
1524                _ => anyhow::bail!("usroverlay only applies on booted hosts"),
1525            }
1526        }
1527        Opt::Container(opts) => match opts {
1528            ContainerOpts::Inspect {
1529                rootfs,
1530                json,
1531                format,
1532            } => crate::status::container_inspect(&rootfs, json, format),
1533            ContainerOpts::Lint {
1534                rootfs,
1535                fatal_warnings,
1536                list,
1537                skip,
1538                no_truncate,
1539            } => {
1540                if list {
1541                    return lints::lint_list(std::io::stdout().lock());
1542                }
1543                let warnings = if fatal_warnings {
1544                    lints::WarningDisposition::FatalWarnings
1545                } else {
1546                    lints::WarningDisposition::AllowWarnings
1547                };
1548                let root_type = if rootfs == "/" {
1549                    lints::RootType::Running
1550                } else {
1551                    lints::RootType::Alternative
1552                };
1553
1554                let root = &Dir::open_ambient_dir(rootfs, cap_std::ambient_authority())?;
1555                let skip = skip.iter().map(|s| s.as_str());
1556                lints::lint(
1557                    root,
1558                    warnings,
1559                    root_type,
1560                    skip,
1561                    std::io::stdout().lock(),
1562                    no_truncate,
1563                )?;
1564                Ok(())
1565            }
1566            ContainerOpts::ComputeComposefsDigest {
1567                path,
1568                write_dumpfile_to,
1569            } => {
1570                let digest = compute_composefs_digest(&path, write_dumpfile_to.as_deref())?;
1571                println!("{digest}");
1572                Ok(())
1573            }
1574            ContainerOpts::ComputeComposefsDigestFromStorage {
1575                write_dumpfile_to,
1576                image,
1577            } => {
1578                let (_td_guard, repo) = new_temp_composefs_repo()?;
1579
1580                let mut proxycfg = crate::deploy::new_proxy_config();
1581
1582                let image = if let Some(image) = image {
1583                    image
1584                } else {
1585                    let host_container_store = Utf8Path::new("/run/host-container-storage");
1586                    // If no image is provided, assume that we're running in a container in privileged mode
1587                    // with the host's container storage mounted at that path.
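                    // A hypothetical way to satisfy that assumption (the mount source and
                    // subcommand spelling below are illustrative, not a documented interface):
                    //   podman run --privileged \
                    //     -v /var/lib/containers/storage:/run/host-container-storage:ro \
                    //     <image> bootc container compute-composefs-digest-from-storage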
1588                    let container_info = crate::containerenv::get_container_execution_info(&root)?;
1589                    let iid = container_info.imageid;
1590                    tracing::debug!("Computing digest of {iid}");
1591
1592                    if !host_container_store.try_exists()? {
1593                        anyhow::bail!(
1594                            "Expected a read-only mount of the host container store at {host_container_store}"
1595                        );
1596                    }
1597                    // And ensure we're finding the image in the host storage
1598                    let mut cmd = Command::new("skopeo");
1599                    set_additional_image_store(&mut cmd, "/run/host-container-storage");
1600                    proxycfg.skopeo_cmd = Some(cmd);
1601                    iid
1602                };
1603
1604                let imgref = format!("containers-storage:{image}");
1605                let (imgid, verity) = composefs_oci::pull(&repo, &imgref, None, Some(proxycfg))
1606                    .await
1607                    .context("Pulling image")?;
1608                let imgid = hex::encode(imgid);
1609                let mut fs = composefs_oci::image::create_filesystem(&repo, &imgid, Some(&verity))
1610                    .context("Populating fs")?;
1611                fs.transform_for_boot(&repo).context("Preparing for boot")?;
1612                let id = fs.compute_image_id();
1613                println!("{}", id.to_hex());
1614
1615                if let Some(path) = write_dumpfile_to.as_deref() {
1616                    let mut w = File::create(path)
1617                        .with_context(|| format!("Opening {path}"))
1618                        .map(BufWriter::new)?;
1619                    dumpfile::write_dumpfile(&mut w, &fs).context("Writing dumpfile")?;
1620                }
1621
1622                Ok(())
1623            }
1624            ContainerOpts::Ukify {
1625                rootfs,
1626                kargs,
1627                args,
1628            } => crate::ukify::build_ukify(&rootfs, &kargs, &args),
1629        },
1630        Opt::Completion { shell } => {
1631            use clap_complete::aot::generate;
1632
1633            let mut cmd = Opt::command();
1634            let mut stdout = std::io::stdout();
1635            let bin_name = "bootc";
1636            generate(shell, &mut cmd, bin_name, &mut stdout);
1637            Ok(())
1638        }
1639        Opt::Image(opts) => match opts {
1640            ImageOpts::List {
1641                list_type,
1642                list_format,
1643            } => crate::image::list_entrypoint(list_type, list_format).await,
1644
1645            ImageOpts::CopyToStorage { source, target } => {
1646                // We fetch the host status up front to avoid a deadlock in the ostree path below
1647                let host = get_host().await?;
1648
1649                let storage = get_storage().await?;
1650
1651                match storage.kind()? {
1652                    BootedStorageKind::Ostree(..) => {
1653                        crate::image::push_entrypoint(
1654                            &storage,
1655                            &host,
1656                            source.as_deref(),
1657                            target.as_deref(),
1658                        )
1659                        .await
1660                    }
1661                    BootedStorageKind::Composefs(booted) => {
1662                        bootc_composefs::export::export_repo_to_image(
1663                            &storage,
1664                            &booted,
1665                            source.as_deref(),
1666                            target.as_deref(),
1667                        )
1668                        .await
1669                    }
1670                }
1671            }
1672            ImageOpts::SetUnified => crate::image::set_unified_entrypoint().await,
1673            ImageOpts::PullFromDefaultStorage { image } => {
1674                let storage = get_storage().await?;
1675                storage
1676                    .get_ensure_imgstore()?
1677                    .pull_from_host_storage(&image)
1678                    .await
1679            }
1680            ImageOpts::Cmd(opt) => {
1681                let storage = get_storage().await?;
1682                let imgstore = storage.get_ensure_imgstore()?;
1683                match opt {
1684                    ImageCmdOpts::List { args } => {
1685                        crate::image::imgcmd_entrypoint(imgstore, "list", &args).await
1686                    }
1687                    ImageCmdOpts::Build { args } => {
1688                        crate::image::imgcmd_entrypoint(imgstore, "build", &args).await
1689                    }
1690                    ImageCmdOpts::Pull { args } => {
1691                        crate::image::imgcmd_entrypoint(imgstore, "pull", &args).await
1692                    }
1693                    ImageCmdOpts::Push { args } => {
1694                        crate::image::imgcmd_entrypoint(imgstore, "push", &args).await
1695                    }
1696                }
1697            }
1698        },
1699        Opt::Install(opts) => match opts {
1700            #[cfg(feature = "install-to-disk")]
1701            InstallOpts::ToDisk(opts) => crate::install::install_to_disk(opts).await,
1702            InstallOpts::ToFilesystem(opts) => {
1703                crate::install::install_to_filesystem(opts, false, crate::install::Cleanup::Skip)
1704                    .await
1705            }
1706            InstallOpts::ToExistingRoot(opts) => {
1707                crate::install::install_to_existing_root(opts).await
1708            }
1709            InstallOpts::Reset(opts) => crate::install::install_reset(opts).await,
1710            InstallOpts::PrintConfiguration(opts) => crate::install::print_configuration(opts),
1711            InstallOpts::EnsureCompletion {} => {
1712                let rootfs = &Dir::open_ambient_dir("/", cap_std::ambient_authority())?;
1713                crate::install::completion::run_from_anaconda(rootfs).await
1714            }
1715            InstallOpts::Finalize { root_path } => {
1716                crate::install::install_finalize(&root_path).await
1717            }
1718        },
1719        Opt::ExecInHostMountNamespace { args } => {
1720            crate::install::exec_in_host_mountns(args.as_slice())
1721        }
1722        Opt::Status(opts) => super::status::status(opts).await,
1723        Opt::Internals(opts) => match opts {
1724            InternalsOpts::SystemdGenerator {
1725                normal_dir,
1726                early_dir: _,
1727                late_dir: _,
1728            } => {
1729                let unit_dir = &Dir::open_ambient_dir(normal_dir, cap_std::ambient_authority())?;
1730                crate::generator::generator(root, unit_dir)
1731            }
1732            InternalsOpts::OstreeExt { args } => {
1733                ostree_ext::cli::run_from_iter(["ostree-ext".into()].into_iter().chain(args)).await
1734            }
1735            InternalsOpts::OstreeContainer { args } => {
1736                ostree_ext::cli::run_from_iter(
1737                    ["ostree-ext".into(), "container".into()]
1738                        .into_iter()
1739                        .chain(args),
1740                )
1741                .await
1742            }
1743            InternalsOpts::TestComposefs => {
1744                // This is a stub to be replaced
1745                let storage = get_storage().await?;
1746                let cfs = storage.get_ensure_composefs()?;
1747                let testdata = b"some test data";
1748                let testdata_digest = hex::encode(openssl::sha::sha256(testdata));
1749                let mut w = SplitStreamWriter::new(&cfs, 0);
1750                w.write_inline(testdata);
1751                let object = cfs
1752                    .write_stream(w, &testdata_digest, Some("testobject"))?
1753                    .to_hex();
1754                assert_eq!(
1755                    object,
1756                    "dc31ae5d2f637e98d2171821d60d2fcafb8084d6a4bb3bd9cdc7ad41decce6e48f85d5413d22371d36b223945042f53a2a6ab449b8e45d8896ba7d8694a16681"
1757                );
1758                Ok(())
1759            }
1760            // We don't depend on fsverity-utils today, so we re-expose some helpful CLI tools here.
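            // Assuming clap's default kebab-case naming, these surface as e.g.
            // `bootc internals fsverity measure <PATH>` and `bootc internals fsverity enable <PATH>`.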
1761            InternalsOpts::Fsverity(args) => match args {
1762                FsverityOpts::Measure { path } => {
1763                    let fd =
1764                        std::fs::File::open(&path).with_context(|| format!("Reading {path}"))?;
1765                    let digest: fsverity::Sha256HashValue = fsverity::measure_verity(&fd)?;
1766                    let digest = digest.to_hex();
1767                    println!("{digest}");
1768                    Ok(())
1769                }
1770                FsverityOpts::Enable { path } => {
1771                    let fd =
1772                        std::fs::File::open(&path).with_context(|| format!("Reading {path}"))?;
1773                    fsverity::enable_verity_raw::<fsverity::Sha256HashValue>(&fd)?;
1774                    Ok(())
1775                }
1776            },
1777            InternalsOpts::Cfs { args } => crate::cfsctl::run_from_iter(args.iter()).await,
1778            InternalsOpts::Reboot => crate::reboot::reboot(),
1779            InternalsOpts::Fsck => {
1780                let storage = &get_storage().await?;
1781                crate::fsck::fsck(&storage, std::io::stdout().lock()).await?;
1782                Ok(())
1783            }
1784            InternalsOpts::FixupEtcFstab => crate::deploy::fixup_etc_fstab(&root),
1785            InternalsOpts::PrintJsonSchema { of } => {
1786                let schema = match of {
1787                    SchemaType::Host => schema_for!(crate::spec::Host),
1788                    SchemaType::Progress => schema_for!(crate::progress_jsonl::Event),
1789                };
1790                let mut stdout = std::io::stdout().lock();
1791                serde_json::to_writer_pretty(&mut stdout, &schema)?;
1792                Ok(())
1793            }
1794            InternalsOpts::Cleanup => {
1795                let storage = get_storage().await?;
1796                crate::deploy::cleanup(&storage).await
1797            }
1798            InternalsOpts::Relabel { as_path, path } => {
1799                let root = &Dir::open_ambient_dir("/", cap_std::ambient_authority())?;
1800                let path = path.strip_prefix("/")?;
1801                let sepolicy =
1802                    &ostree::SePolicy::new(&gio::File::for_path("/"), gio::Cancellable::NONE)?;
1803                crate::lsm::relabel_recurse(root, path, as_path.as_deref(), sepolicy)?;
1804                Ok(())
1805            }
1806            InternalsOpts::BootcInstallCompletion { sysroot, stateroot } => {
1807                let rootfs = &Dir::open_ambient_dir("/", cap_std::ambient_authority())?;
1808                crate::install::completion::run_from_ostree(rootfs, &sysroot, &stateroot).await
1809            }
1810            InternalsOpts::LoopbackCleanupHelper { device } => {
1811                crate::blockdev::run_loopback_cleanup_helper(&device).await
1812            }
1813            InternalsOpts::AllocateCleanupLoopback { file_path: _ } => {
1814                // The provided path is ignored here; create a temporary file for testing
1815                let temp_file =
1816                    tempfile::NamedTempFile::new().context("Failed to create temporary file")?;
1817                let temp_path = temp_file.path();
1818
1819                // Create a loopback device
1820                let loopback = crate::blockdev::LoopbackDevice::new(temp_path)
1821                    .context("Failed to create loopback device")?;
1822
1823                println!("Created loopback device: {}", loopback.path());
1824
1825                // Close the device to test cleanup
1826                loopback
1827                    .close()
1828                    .context("Failed to close loopback device")?;
1829
1830                println!("Successfully closed loopback device");
1831                Ok(())
1832            }
1833            #[cfg(feature = "rhsm")]
1834            InternalsOpts::PublishRhsmFacts => crate::rhsm::publish_facts(&root).await,
1835            #[cfg(feature = "docgen")]
1836            InternalsOpts::DumpCliJson => {
1837                use clap::CommandFactory;
1838                let cmd = Opt::command();
1839                let json = crate::cli_json::dump_cli_json(&cmd)?;
1840                println!("{}", json);
1841                Ok(())
1842            }
1843            InternalsOpts::DirDiff {
1844                pristine_etc,
1845                current_etc,
1846                new_etc,
1847                merge,
1848            } => {
1849                let pristine_etc =
1850                    Dir::open_ambient_dir(pristine_etc, cap_std::ambient_authority())?;
1851                let current_etc = Dir::open_ambient_dir(current_etc, cap_std::ambient_authority())?;
1852                let new_etc = Dir::open_ambient_dir(new_etc, cap_std::ambient_authority())?;
1853
1854                let (p, c, n) =
1855                    etc_merge::traverse_etc(&pristine_etc, &current_etc, Some(&new_etc))?;
1856
1857                let n = n
1858                    .as_ref()
1859                    .ok_or_else(|| anyhow::anyhow!("Failed to get new directory tree"))?;
1860
1861                let diff = compute_diff(&p, &c, &n)?;
1862                print_diff(&diff, &mut std::io::stdout());
1863
1864                if merge {
1865                    etc_merge::merge(&current_etc, &c, &new_etc, &n, &diff)?;
1866                }
1867
1868                Ok(())
1869            }
1870            InternalsOpts::PrepSoftReboot {
1871                deployment,
1872                reboot,
1873                reset,
1874            } => {
1875                let storage = &get_storage().await?;
1876
1877                match storage.kind()? {
1878                    BootedStorageKind::Ostree(..) => {
1879                        // TODO: Call ostree implementation?
1880                        anyhow::bail!("soft-reboot only implemented for composefs")
1881                    }
1882
1883                    BootedStorageKind::Composefs(booted_cfs) => {
1884                        if reset {
1885                            return reset_soft_reboot();
1886                        }
1887
1888                        prepare_soft_reboot_composefs(
1889                            &storage,
1890                            &booted_cfs,
1891                            deployment.as_deref(),
1892                            SoftRebootMode::Required,
1893                            reboot,
1894                        )
1895                        .await
1896                    }
1897                }
1898            }
1899        },
1900        Opt::State(opts) => match opts {
1901            StateOpts::WipeOstree => {
1902                let sysroot = ostree::Sysroot::new_default();
1903                sysroot.load(gio::Cancellable::NONE)?;
1904                crate::deploy::wipe_ostree(sysroot).await?;
1905                Ok(())
1906            }
1907        },
1908
1909        Opt::ComposefsFinalizeStaged => {
1910            let storage = &get_storage().await?;
1911            match storage.kind()? {
1912                BootedStorageKind::Ostree(_) => {
1913                    anyhow::bail!("ComposefsFinalizeStaged is only supported for composefs backend")
1914                }
1915                BootedStorageKind::Composefs(booted_cfs) => {
1916                    composefs_backend_finalize(storage, &booted_cfs).await
1917                }
1918            }
1919        }
1920
1921        Opt::ConfigDiff => {
1922            let storage = &get_storage().await?;
1923            match storage.kind()? {
1924                BootedStorageKind::Ostree(_) => {
1925                    anyhow::bail!("ConfigDiff is only supported for composefs backend")
1926                }
1927                BootedStorageKind::Composefs(booted_cfs) => {
1928                    get_etc_diff(storage, &booted_cfs).await
1929                }
1930            }
1931        }
1932
1933        Opt::DeleteDeployment { depl_id } => {
1934            let storage = &get_storage().await?;
1935            match storage.kind()? {
1936                BootedStorageKind::Ostree(_) => {
1937                    anyhow::bail!("DeleteDeployment is only supported for composefs backend")
1938                }
1939                BootedStorageKind::Composefs(booted_cfs) => {
1940                    delete_composefs_deployment(&depl_id, storage, &booted_cfs).await
1941                }
1942            }
1943        }
1944    }
1945}
1946
1947#[cfg(test)]
1948mod tests {
1949    use super::*;
1950
1951    #[test]
1952    fn test_callname() {
1953        use std::os::unix::ffi::OsStrExt;
1954
1955        // Cases that change
1956        let mapped_cases = [
1957            ("", "bootc"),
1958            ("/foo/bar", "bar"),
1959            ("/foo/bar/", "bar"),
1960            ("foo/bar", "bar"),
1961            ("../foo/bar", "bar"),
1962            ("usr/bin/ostree-container", "ostree-container"),
1963        ];
1964        for (input, output) in mapped_cases {
1965            assert_eq!(
1966                output,
1967                callname_from_argv0(OsStr::new(input)),
1968                "Handling mapped case {input}"
1969            );
1970        }
1971
1972        // Invalid UTF-8
1973        assert_eq!("bootc", callname_from_argv0(OsStr::from_bytes(b"foo\x80")));
1974
1975        // Cases that are identical
1976        let ident_cases = ["foo", "bootc"];
1977        for case in ident_cases {
1978            assert_eq!(
1979                case,
1980                callname_from_argv0(OsStr::new(case)),
1981                "Handling ident case {case}"
1982            );
1983        }
1984    }
1985
1986    #[test]
1987    fn test_parse_install_args() {
1988        // Verify we still process the legacy --target-no-signature-verification
1989        let o = Opt::try_parse_from([
1990            "bootc",
1991            "install",
1992            "to-filesystem",
1993            "--target-no-signature-verification",
1994            "/target",
1995        ])
1996        .unwrap();
1997        let o = match o {
1998            Opt::Install(InstallOpts::ToFilesystem(fsopts)) => fsopts,
1999            o => panic!("Expected filesystem opts, not {o:?}"),
2000        };
2001        assert!(o.target_opts.target_no_signature_verification);
2002        assert_eq!(o.filesystem_opts.root_path.as_str(), "/target");
2003        // Ensure we default to the old bound-images behavior
2004        assert_eq!(
2005            o.config_opts.bound_images,
2006            crate::install::BoundImagesOpt::Stored
2007        );
2008    }
2009
2010    #[test]
2011    fn test_parse_opts() {
2012        assert!(matches!(
2013            Opt::parse_including_static(["bootc", "status"]),
2014            Opt::Status(StatusOpts {
2015                json: false,
2016                format: None,
2017                format_version: None,
2018                booted: false,
2019                verbose: false
2020            })
2021        ));
2022        assert!(matches!(
2023            Opt::parse_including_static(["bootc", "status", "--format-version=0"]),
2024            Opt::Status(StatusOpts {
2025                format_version: Some(0),
2026                ..
2027            })
2028        ));
2029
2030        // Test verbose long form
2031        assert!(matches!(
2032            Opt::parse_including_static(["bootc", "status", "--verbose"]),
2033            Opt::Status(StatusOpts { verbose: true, .. })
2034        ));
2035
2036        // Test verbose short form
2037        assert!(matches!(
2038            Opt::parse_including_static(["bootc", "status", "-v"]),
2039            Opt::Status(StatusOpts { verbose: true, .. })
2040        ));
2041    }
2042
2043    #[test]
2044    fn test_parse_generator() {
2045        assert!(matches!(
2046            Opt::parse_including_static([
2047                "/usr/lib/systemd/system/bootc-systemd-generator",
2048                "/run/systemd/system"
2049            ]),
2050            Opt::Internals(InternalsOpts::SystemdGenerator { normal_dir, .. }) if normal_dir == "/run/systemd/system"
2051        ));
2052    }
2053
2054    #[test]
2055    fn test_parse_ostree_ext() {
2056        assert!(matches!(
2057            Opt::parse_including_static(["bootc", "internals", "ostree-container"]),
2058            Opt::Internals(InternalsOpts::OstreeContainer { .. })
2059        ));
2060
2061        fn peel(o: Opt) -> Vec<OsString> {
2062            match o {
2063                Opt::Internals(InternalsOpts::OstreeExt { args }) => args,
2064                o => panic!("unexpected {o:?}"),
2065            }
2066        }
2067        let args = peel(Opt::parse_including_static([
2068            "/usr/libexec/libostree/ext/ostree-ima-sign",
2069            "ima-sign",
2070            "--repo=foo",
2071            "foo",
2072            "bar",
2073            "baz",
2074        ]));
2075        assert_eq!(
2076            args.as_slice(),
2077            ["ima-sign", "--repo=foo", "foo", "bar", "baz"]
2078        );
2079
2080        let args = peel(Opt::parse_including_static([
2081            "/usr/libexec/libostree/ext/ostree-container",
2082            "container",
2083            "image",
2084            "pull",
2085        ]));
2086        assert_eq!(args.as_slice(), ["container", "image", "pull"]);
2087    }
2088
2089    #[test]
2090    fn test_generate_completion_scripts_contain_commands() {
2091        use clap_complete::aot::{Shell, generate};
2092
2093        // For each supported shell, generate the completion script and
2094        // ensure obvious subcommands appear in the output. This mirrors
2095        // the style of completion checks used in other projects (e.g.
2096        // podman) where the generated script is examined for expected
2097        // tokens.
2098
2099        // `completion` is intentionally hidden from --help / suggestions;
2100        // ensure other visible subcommands are present instead.
2101        let want = ["install", "upgrade"];
2102
2103        for shell in [Shell::Bash, Shell::Zsh, Shell::Fish] {
2104            let mut cmd = Opt::command();
2105            let mut buf = Vec::new();
2106            generate(shell, &mut cmd, "bootc", &mut buf);
2107            let s = String::from_utf8(buf).expect("completion should be utf8");
2108            for w in &want {
2109                assert!(s.contains(w), "{shell:?} completion missing {w}");
2110            }
2111        }
2112    }
2113}