diff --git a/crates/lib/src/cli.rs b/crates/lib/src/cli.rs index 3412a6b03..1b23e93ff 100644 --- a/crates/lib/src/cli.rs +++ b/crates/lib/src/cli.rs @@ -416,6 +416,11 @@ pub(crate) enum ImageOpts { /// this will make the image accessible via e.g. `podman run localhost/bootc` and for builds. target: Option<String>, }, + /// Re-pull the currently booted image into the bootc-owned container storage. + /// + /// This onboards the system to the unified storage path so that future + /// upgrade/switch operations can read from the bootc storage directly. + SetUnified, /// Copy a container image from the default `containers-storage:` to the bootc-owned container storage. PullFromDefaultStorage { /// The image to pull @@ -928,7 +933,22 @@ async fn upgrade(opts: UpgradeOpts) -> Result<()> { } } } else { - let fetched = crate::deploy::pull(repo, imgref, None, opts.quiet, prog.clone()).await?; + // Check if image exists in bootc storage (/usr/lib/bootc/storage) + let imgstore = sysroot.get_ensure_imgstore()?; + let use_unified = match imgstore.exists(&format!("{imgref:#}")).await { + Ok(v) => v, + Err(e) => { + tracing::warn!("Failed to check bootc storage for image: {e}; falling back to standard pull"); + false + } + }; + + let fetched = if use_unified { + crate::deploy::pull_unified(repo, imgref, None, opts.quiet, prog.clone(), sysroot) + .await? + } else { + crate::deploy::pull(repo, imgref, None, opts.quiet, prog.clone()).await? 
+ }; let staged_digest = staged_image.map(|s| s.digest().expect("valid digest in status")); let fetched_digest = &fetched.manifest_digest; tracing::debug!("staged: {staged_digest:?}"); @@ -1056,7 +1076,21 @@ async fn switch(opts: SwitchOpts) -> Result<()> { let new_spec = RequiredHostSpec::from_spec(&new_spec)?; - let fetched = crate::deploy::pull(repo, &target, None, opts.quiet, prog.clone()).await?; + // Check if image exists in bootc storage (/usr/lib/bootc/storage) + let imgstore = sysroot.get_ensure_imgstore()?; + let use_unified = match imgstore.exists(&format!("{target:#}")).await { + Ok(v) => v, + Err(e) => { + tracing::warn!("Failed to check bootc storage for image: {e}; falling back to standard pull"); + false + } + }; + + let fetched = if use_unified { + crate::deploy::pull_unified(repo, &target, None, opts.quiet, prog.clone(), sysroot).await? + } else { + crate::deploy::pull(repo, &target, None, opts.quiet, prog.clone()).await? + }; if !opts.retain { // By default, we prune the previous ostree ref so it will go away after later upgrades @@ -1344,6 +1378,9 @@ async fn run_from_opt(opt: Opt) -> Result<()> { ImageOpts::CopyToStorage { source, target } => { crate::image::push_entrypoint(source.as_deref(), target.as_deref()).await } + ImageOpts::SetUnified => { + crate::image::set_unified_entrypoint().await + } ImageOpts::PullFromDefaultStorage { image } => { let sysroot = get_storage().await?; sysroot @@ -1422,7 +1459,10 @@ async fn run_from_opt(opt: Opt) -> Result<()> { let mut w = SplitStreamWriter::new(&cfs, None, Some(testdata_digest)); w.write_inline(testdata); let object = cfs.write_stream(w, Some("testobject"))?.to_hex(); - assert_eq!(object, "5d94ceb0b2bb3a78237e0a74bc030a262239ab5f47754a5eb2e42941056b64cb21035d64a8f7c2f156e34b820802fa51884de2b1f7dc3a41b9878fc543cd9b07"); + assert_eq!( + object, + "5d94ceb0b2bb3a78237e0a74bc030a262239ab5f47754a5eb2e42941056b64cb21035d64a8f7c2f156e34b820802fa51884de2b1f7dc3a41b9878fc543cd9b07" + ); Ok(()) } // We 
don't depend on fsverity-utils today, so re-expose some helpful CLI tools. diff --git a/crates/lib/src/deploy.rs b/crates/lib/src/deploy.rs index 7b7ff59e1..6552a69af 100644 --- a/crates/lib/src/deploy.rs +++ b/crates/lib/src/deploy.rs @@ -380,6 +380,118 @@ pub(crate) async fn prepare_for_pull( Ok(PreparedPullResult::Ready(Box::new(prepared_image))) } +/// Unified approach: Use bootc's CStorage to pull the image, then prepare from containers-storage. +/// This reuses the same infrastructure as LBIs. +pub(crate) async fn prepare_for_pull_unified( + repo: &ostree::Repo, + imgref: &ImageReference, + target_imgref: Option<&OstreeImageReference>, + store: &Storage, +) -> Result<PreparedPullResult> { + // Get or initialize the bootc container storage (same as used for LBIs) + let imgstore = store.get_ensure_imgstore()?; + + let image_ref_str = format!("{imgref:#}"); + + // Log the original transport being used for the pull + tracing::info!( + "Unified pull: pulling from transport '{}' to bootc storage", + &imgref.transport + ); + + // Pull the image to bootc storage using the same method as LBIs + imgstore + .pull(&image_ref_str, crate::podstorage::PullMode::Always) + .await?; + + // Now create a containers-storage reference to read from bootc storage + tracing::info!("Unified pull: now importing from containers-storage transport"); + let containers_storage_imgref = ImageReference { + transport: "containers-storage".to_string(), + image: imgref.image.clone(), + signature: imgref.signature.clone(), + }; + let ostree_imgref = OstreeImageReference::from(containers_storage_imgref); + + // Use the standard preparation flow but reading from containers-storage + let mut imp = new_importer(repo, &ostree_imgref).await?; + if let Some(target) = target_imgref { + imp.set_target(target); + } + let prep = match imp.prepare().await? 
{ + PrepareResult::AlreadyPresent(c) => { + println!("No changes in {imgref:#} => {}", c.manifest_digest); + return Ok(PreparedPullResult::AlreadyPresent(Box::new((*c).into()))); + } + PrepareResult::Ready(p) => p, + }; + check_bootc_label(&prep.config); + if let Some(warning) = prep.deprecated_warning() { + ostree_ext::cli::print_deprecated_warning(warning).await; + } + ostree_ext::cli::print_layer_status(&prep); + let layers_to_fetch = prep.layers_to_fetch().collect::<Result<Vec<_>>>()?; + + // Log that we're importing a new image from containers-storage + const PULLING_NEW_IMAGE_ID: &str = "6d5e4f3a2b1c0d9e8f7a6b5c4d3e2f1a0"; + tracing::info!( + message_id = PULLING_NEW_IMAGE_ID, + bootc.image.reference = &imgref.image, + bootc.image.transport = "containers-storage", + bootc.original_transport = &imgref.transport, + bootc.status = "importing_from_storage", + "Importing image from bootc storage: {}", + ostree_imgref + ); + + let prepared_image = PreparedImportMeta { + imp, + n_layers_to_fetch: layers_to_fetch.len(), + layers_total: prep.all_layers().count(), + bytes_to_fetch: layers_to_fetch.iter().map(|(l, _)| l.layer.size()).sum(), + bytes_total: prep.all_layers().map(|l| l.layer.size()).sum(), + digest: prep.manifest_digest.clone(), + prep, + }; + + Ok(PreparedPullResult::Ready(Box::new(prepared_image))) +} + +/// Unified pull: Use podman to pull to containers-storage, then read from there +pub(crate) async fn pull_unified( + repo: &ostree::Repo, + imgref: &ImageReference, + target_imgref: Option<&OstreeImageReference>, + quiet: bool, + prog: ProgressWriter, + store: &Storage, +) -> Result<Box<ImageState>> { + match prepare_for_pull_unified(repo, imgref, target_imgref, store).await? 
{ + PreparedPullResult::AlreadyPresent(existing) => { + // Log that the image was already present (Debug level since it's not actionable) + const IMAGE_ALREADY_PRESENT_ID: &str = "5c4d3e2f1a0b9c8d7e6f5a4b3c2d1e0f9"; + tracing::debug!( + message_id = IMAGE_ALREADY_PRESENT_ID, + bootc.image.reference = &imgref.image, + bootc.image.transport = &imgref.transport, + bootc.status = "already_present", + "Image already present: {}", + imgref + ); + Ok(existing) + } + PreparedPullResult::Ready(prepared_image_meta) => { + // To avoid duplicate success logs, pass a containers-storage imgref to the importer + let cs_imgref = ImageReference { + transport: "containers-storage".to_string(), + image: imgref.image.clone(), + signature: imgref.signature.clone(), + }; + pull_from_prepared(&cs_imgref, quiet, prog, *prepared_image_meta).await + } + } +} + #[context("Pulling")] pub(crate) async fn pull_from_prepared( imgref: &ImageReference, @@ -429,18 +541,21 @@ pub(crate) async fn pull_from_prepared( let imgref_canonicalized = imgref.clone().canonicalize()?; tracing::debug!("Canonicalized image reference: {imgref_canonicalized:#}"); - // Log successful import completion - const IMPORT_COMPLETE_JOURNAL_ID: &str = "4d3e2f1a0b9c8d7e6f5a4b3c2d1e0f9a8"; - - tracing::info!( - message_id = IMPORT_COMPLETE_JOURNAL_ID, - bootc.image.reference = &imgref.image, - bootc.image.transport = &imgref.transport, - bootc.manifest_digest = import.manifest_digest.as_ref(), - bootc.ostree_commit = &import.merge_commit, - "Successfully imported image: {}", - imgref - ); + // Log successful import completion (skip if using unified storage to avoid double logging) + let is_unified_path = imgref.transport == "containers-storage"; + if !is_unified_path { + const IMPORT_COMPLETE_JOURNAL_ID: &str = "4d3e2f1a0b9c8d7e6f5a4b3c2d1e0f9a8"; + + tracing::info!( + message_id = IMPORT_COMPLETE_JOURNAL_ID, + bootc.image.reference = &imgref.image, + bootc.image.transport = &imgref.transport, + bootc.manifest_digest = 
import.manifest_digest.as_ref(), + bootc.ostree_commit = &import.merge_commit, + "Successfully imported image: {}", + imgref + ); + } if let Some(msg) = ostree_container::store::image_filtered_content_warning(&import.filtered_files) @@ -489,6 +604,9 @@ pub(crate) async fn pull( } } +/// Pull selecting unified vs standard path based on persistent storage config. +// pull_auto was reverted per request; keep explicit callers branching. + pub(crate) async fn wipe_ostree(sysroot: Sysroot) -> Result<()> { tokio::task::spawn_blocking(move || { sysroot diff --git a/crates/lib/src/image.rs b/crates/lib/src/image.rs index ad984ed6f..d472a25a4 100644 --- a/crates/lib/src/image.rs +++ b/crates/lib/src/image.rs @@ -181,3 +181,59 @@ pub(crate) async fn imgcmd_entrypoint( cmd.args(args); cmd.run_capture_stderr() } + +/// Re-pull the currently booted image into the bootc-owned container storage. +/// +/// This onboards the system to unified storage for host images so that +/// upgrade/switch can use the unified path automatically when the image is present. 
+#[context("Setting unified storage for booted image")] +pub(crate) async fn set_unified_entrypoint() -> Result<()> { + let sysroot = crate::cli::get_storage().await?; + let ostree = sysroot.get_ostree()?; + let repo = &ostree.repo(); + + // Discover the currently booted image reference + let (_booted_deployment, _deployments, host) = + crate::status::get_status_require_booted(ostree)?; + let imgref = host + .spec + .image + .as_ref() + .ok_or_else(|| anyhow::anyhow!("No image source specified in host spec"))?; + + // Canonicalize for pull display only, but we want to preserve original pullspec + let imgref_display = imgref.clone().canonicalize()?; + + // Pull the image from its original source into bootc storage using LBI machinery + let imgstore = sysroot.get_ensure_imgstore()?; + let img_string = format!("{:#}", imgref); + const SET_UNIFIED_JOURNAL_ID: &str = "1a0b9c8d7e6f5a4b3c2d1e0f9a8b7c6d"; + tracing::info!( + message_id = SET_UNIFIED_JOURNAL_ID, + bootc.image.reference = &imgref_display.image, + bootc.image.transport = &imgref_display.transport, + "Re-pulling booted image into bootc storage via unified path: {}", + imgref_display + ); + imgstore + .pull(&img_string, crate::podstorage::PullMode::Always) + .await?; + + // Optionally verify we can import from containers-storage by preparing in a temp importer + // without actually importing into the main repo; this is a lightweight validation. + let containers_storage_imgref = crate::spec::ImageReference { + transport: "containers-storage".to_string(), + image: imgref.image.clone(), + signature: imgref.signature.clone(), + }; + let ostree_imgref = ostree_ext::container::OstreeImageReference::from(containers_storage_imgref); + let _ = ostree_ext::container::store::ImageImporter::new(repo, &ostree_imgref, Default::default()) + .await?; + + tracing::info!( + message_id = SET_UNIFIED_JOURNAL_ID, + bootc.status = "set_unified_complete", + "Unified storage set for current image. 
Future upgrade/switch will use it automatically." + ); + Ok(()) +} diff --git a/crates/lib/src/install.rs b/crates/lib/src/install.rs index defa9d19c..44dd49195 100644 --- a/crates/lib/src/install.rs +++ b/crates/lib/src/install.rs @@ -166,6 +166,15 @@ pub(crate) struct InstallTargetOpts { #[clap(long)] #[serde(default)] pub(crate) skip_fetch_check: bool, + + /// Use unified storage path to pull images (experimental) + /// + /// When enabled, this uses bootc's container storage (/usr/lib/bootc/storage) to pull + /// the image first, then imports it from there. This is the same approach used for + /// logically bound images. + #[clap(long = "experimental-unified-storage")] + #[serde(default)] + pub(crate) unified_storage_exp: bool, } #[derive(clap::Args, Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] @@ -426,6 +435,7 @@ pub(crate) struct State { pub(crate) selinux_state: SELinuxFinalState, #[allow(dead_code)] pub(crate) config_opts: InstallConfigOpts, + pub(crate) target_opts: InstallTargetOpts, pub(crate) target_imgref: ostree_container::OstreeImageReference, #[allow(dead_code)] pub(crate) prepareroot_config: HashMap<String, String>, @@ -787,6 +797,7 @@ async fn install_container( state: &State, root_setup: &RootSetup, sysroot: &ostree::Sysroot, + storage: &Storage, has_ostree: bool, ) -> Result<(ostree::Deployment, InstallAleph)> { let sepolicy = state.load_policy()?; @@ -826,9 +837,38 @@ async fn install_container( let repo = &sysroot.repo(); repo.set_disable_fsync(true); - let pulled_image = match prepare_for_pull(repo, &spec_imgref, Some(&state.target_imgref)) + // Determine whether to use unified storage path + let use_unified = if state.target_opts.unified_storage_exp { + // Explicit flag always uses unified path + true + } else { + // Auto-detect: check if image exists in bootc storage (same as upgrade/switch) + let imgstore = storage.get_ensure_imgstore()?; + imgstore + .exists(&format!("{spec_imgref:#}")) + .await + .unwrap_or_else(|e| { + tracing::warn!( + "Failed 
to check bootc storage for image: {e}; falling back to standard pull" + ); + false + }) + }; + + let prepared = if use_unified { + tracing::info!("Using unified storage path for installation"); + crate::deploy::prepare_for_pull_unified( + repo, + &spec_imgref, + Some(&state.target_imgref), + storage, + ) .await? - { + } else { + prepare_for_pull(repo, &spec_imgref, Some(&state.target_imgref)).await? + }; + + let pulled_image = match prepared { PreparedPullResult::AlreadyPresent(existing) => existing, PreparedPullResult::Ready(image_meta) => { check_disk_space(root_setup.physical_root.as_fd(), &image_meta, &spec_imgref)?; @@ -1364,6 +1404,7 @@ async fn prepare_install( selinux_state, source, config_opts, + target_opts, target_imgref, install_config, prepareroot_config, @@ -1395,7 +1436,7 @@ async fn install_with_sysroot( // And actually set up the container in that root, returning a deployment and // the aleph state (see below). - let (deployment, aleph) = install_container(state, rootfs, ostree, has_ostree).await?; + let (deployment, aleph) = install_container(state, rootfs, ostree, storage, has_ostree).await?; // Write the aleph data that captures the system state at the time of provisioning for aid in future debugging. 
aleph.write_to(&rootfs.physical_root)?; diff --git a/tmt/plans/integration.fmf b/tmt/plans/integration.fmf index a423bd2aa..6a3e5bb38 100644 --- a/tmt/plans/integration.fmf +++ b/tmt/plans/integration.fmf @@ -85,3 +85,17 @@ execute: - when: running_env == packit enabled: false because: tmt-reboot does not work with systemd reboot in testing farm environment + +/test-28-install-unified-flag: + summary: Verify install help exposes experimental unified storage flag + discover: + how: fmf + test: + - /tmt/tests/test-28-install-unified-flag + +/test-29-switch-to-unified: + summary: Onboard to unified storage and verify subsequent operations use it + discover: + how: fmf + test: + - /tmt/tests/test-29-switch-to-unified diff --git a/tmt/tests/booted/test-install-unified-flag.nu b/tmt/tests/booted/test-install-unified-flag.nu new file mode 100644 index 000000000..52d0e0df9 --- /dev/null +++ b/tmt/tests/booted/test-install-unified-flag.nu @@ -0,0 +1,14 @@ +use tap.nu + +def main [] { + tap begin "install help shows experimental unified flag" + let help = (bootc install --help) + # Grep-like check in nushell + let has = ($help | lines | any { |l| $l | str contains "--experimental-unified-storage" }) + if (not $has) { + error make { msg: "missing --experimental-unified-storage in help" } + } + tap ok +} + + diff --git a/tmt/tests/booted/test-switch-to-unified.nu b/tmt/tests/booted/test-switch-to-unified.nu new file mode 100644 index 000000000..f9862a347 --- /dev/null +++ b/tmt/tests/booted/test-switch-to-unified.nu @@ -0,0 +1,36 @@ +use std assert +use tap.nu + +# Multi-boot test: boot 0 onboards to unified storage; boot 1 verifies we use containers-storage + +def main [] { + match $env.TMT_REBOOT_COUNT? 
{ + null | "0" => first_boot, + "1" => second_boot, + $o => { error make { msg: $"Invalid TMT_REBOOT_COUNT ($o)" } }, + } +} + +def first_boot [] { + tap begin "onboard to unified storage" + # Sanity: booted status + let st = (bootc status --json | from json) + # Run the onboarding command + bootc image set-unified + # Verify bootc-owned store is usable + podman --storage-opt=additionalimagestore=/usr/lib/bootc/storage images + # Stage a no-op upgrade to exercise the unified path; tolerate no-update + bootc upgrade || true + tmt-reboot +} + +def second_boot [] { + tap begin "verify unified usage after onboarding" + let st = (bootc status --json | from json) + let booted = $st.status.booted.image + # After onboarding, future pulls may use containers-storage; assert transport is either registry or containers-storage + assert ($booted.transport in [registry containers-storage]) + tap ok +} + + diff --git a/tmt/tests/test-28-install-unified-flag.fmf b/tmt/tests/test-28-install-unified-flag.fmf new file mode 100644 index 000000000..37bb27c3e --- /dev/null +++ b/tmt/tests/test-28-install-unified-flag.fmf @@ -0,0 +1,3 @@ +summary: Verify install help exposes experimental unified storage flag +test: nu booted/test-install-unified-flag.nu +duration: 5m diff --git a/tmt/tests/test-29-switch-to-unified.fmf b/tmt/tests/test-29-switch-to-unified.fmf new file mode 100644 index 000000000..4ef6f85a0 --- /dev/null +++ b/tmt/tests/test-29-switch-to-unified.fmf @@ -0,0 +1,3 @@ +summary: Onboard to unified storage and verify subsequent operations use it +test: nu booted/test-switch-to-unified.nu +duration: 30m