diff --git a/crates/lib/src/install/completion.rs b/crates/lib/src/install/completion.rs
index 32362e354..68af138ac 100644
--- a/crates/lib/src/install/completion.rs
+++ b/crates/lib/src/install/completion.rs
@@ -315,7 +315,9 @@ pub(crate) async fn impl_completion(
// When we're run through ostree, we only lazily initialize the podman storage to avoid
// having a hard dependency on it.
- let imgstorage = &CStorage::create(&sysroot_dir, &rundir, sepolicy.as_ref())?;
+ // Note: We pass None for booted_root since during install completion there's no
+ // booted deployment to fall back to for auth file lookup.
+ let imgstorage = &CStorage::create(&sysroot_dir, None, &rundir, sepolicy.as_ref())?;
crate::boundimage::pull_images_impl(imgstorage, bound_images)
.await
.context("pulling bound images")?;
diff --git a/crates/lib/src/podstorage.rs b/crates/lib/src/podstorage.rs
index 74880a7c5..b7dece885 100644
--- a/crates/lib/src/podstorage.rs
+++ b/crates/lib/src/podstorage.rs
@@ -48,13 +48,41 @@ pub(crate) const SUBPATH: &str = "storage";
const RUNROOT: &str = "bootc/storage";
/// A bootc-owned instance of `containers-storage:`.
+///
+/// This struct manages bootc's container image storage, used for:
+/// - Logically bound images (LBIs)
+/// - Unified image pulls (pulling the host image into bootc storage)
+/// - Other container image operations
+///
+/// ## Auth file lookup
+///
+/// When pulling images that require authentication, we need to locate auth.json.
+/// This struct maintains two root directories to handle auth lookup correctly:
+///
+/// - `sysroot`: The ostree sysroot directory. This is checked first for auth.json.
+/// Depending on the operation, this may be the staged deployment's sysroot (during
+/// LBI pulls for an upgrade) or the current sysroot.
+///
+/// - `booted_root`: The currently running deployment's root filesystem, obtained via
+/// `deployment_fd()`. This is used as a fallback when auth.json is not found in
+/// the sysroot. This handles the upgrade scenario where the user has auth.json on
+/// their running system but is upgrading to an image that doesn't have it baked in.
+///
+/// This fallback is essential for LBI pulls during upgrades: the LBIs are defined
+/// in the *new* image, but we may need to authenticate using credentials from the
+/// *running* system.
pub(crate) struct CStorage {
- /// The root directory
+ /// The ostree sysroot directory. This is also checked first for auth.json.
sysroot: Dir,
- /// The location of container storage
+ /// The booted (currently running) deployment's root directory, obtained via
+ /// `deployment_fd()`. Used as a fallback for auth file lookup when the sysroot
+ /// doesn't contain auth.json. This is `None` during fresh installs where there
+ /// is no booted deployment.
+    booted_root: Option<Dir>,
+ /// The location of container storage, relative to the sysroot.
storage_root: Dir,
#[allow(dead_code)]
- /// Our runtime state
+ /// Our runtime state directory.
run: Dir,
/// Disallow using this across multiple threads concurrently; while we
/// have internal locking in podman, in the future we may change how
@@ -119,10 +147,50 @@ fn bind_storage_roots(cmd: &mut Command, storage_root: &Dir, run_root: &Dir) ->
Ok(())
}
-// Initialize a `podman` subprocess with:
-// - storage overridden to point to to storage_root
-// - Authentication (auth.json) using the bootc/ostree owned auth
-fn new_podman_cmd_in(sysroot: &Dir, storage_root: &Dir, run_root: &Dir) -> Result<Command> {
+/// Get the global authfile from the booted deployment's root filesystem.
+///
+/// This is used as a fallback when the authfile is not found in the sysroot.
+/// The booted deployment's root is obtained via `deployment_fd()`, which gives us
+/// a Dir handle to the on-disk deployment directory.
+///
+/// This fallback handles the upgrade scenario where:
+/// 1. The user's running system has auth.json (manually added or from the current image)
+/// 2. They upgrade to a new image that does NOT have auth.json baked in
+/// 3. The new image has LBIs that require authentication
+/// 4. We need to use the running system's auth.json to pull those LBIs
+fn get_booted_authfile(
+ booted_root: Option<&Dir>,
+) -> Result<Option<(Utf8PathBuf, File)>> {
+ let Some(booted_root) = booted_root else {
+ return Ok(None);
+ };
+ ostree_ext::globals::get_global_authfile(booted_root)
+}
+
+/// Initialize a `podman` subprocess configured for bootc's container storage.
+///
+/// This sets up podman with:
+/// - `--root` pointing to bootc's container storage
+/// - `--runroot` pointing to runtime state
+/// - `REGISTRY_AUTH_FILE` set to an auth.json for authenticated registry access
+///
+/// # Auth file lookup order
+///
+/// The auth.json is resolved with the following priority:
+/// 1. **Sysroot** (`sysroot` param): Check the ostree sysroot for auth.json.
+/// This finds credentials in the sysroot, which depending on the operation
+/// may be the staged deployment or the current deployment.
+/// 2. **Booted deployment** (`booted_root` param): Fall back to the currently running
+/// deployment's root. This finds credentials from the user's running system,
+/// which is essential during upgrades where the new image lacks auth.json.
+/// 3. **Empty auth**: If neither has auth.json, use an empty `{}` to prevent podman
+/// from searching user-owned paths.
+fn new_podman_cmd_in(
+ sysroot: &Dir,
+ booted_root: Option<&Dir>,
+ storage_root: &Dir,
+ run_root: &Dir,
+) -> Result<Command> {
let mut cmd = Command::new("podman");
bind_storage_roots(&mut cmd, storage_root, run_root)?;
let run_root = format!("/proc/self/fd/{STORAGE_RUN_FD}");
@@ -132,11 +200,21 @@ fn new_podman_cmd_in(sysroot: &Dir, storage_root: &Dir, run_root: &Dir) -> Resul
let mut tempfile = cap_tempfile::TempFile::new_anonymous(tmpd).map(std::io::BufWriter::new)?;
// Keep this in sync with https://github.com/bootc-dev/containers-image-proxy-rs/blob/b5e0861ad5065f47eaf9cda0d48da3529cc1bc43/src/imageproxy.rs#L310
- // We always override the auth to match the bootc setup.
- let authfile_fd = ostree_ext::globals::get_global_authfile(sysroot)?.map(|v| v.1);
- if let Some(mut fd) = authfile_fd {
+ // We always override the auth to match the bootc setup. See the function doc comment
+ // for the full auth lookup order explanation.
+ let authfile = if let Some((path, file)) = ostree_ext::globals::get_global_authfile(sysroot)? {
+ tracing::debug!("Using authfile from staged sysroot: {path}");
+ Some(file)
+ } else if let Some((path, file)) = get_booted_authfile(booted_root)? {
+ tracing::debug!("Using authfile from booted deployment: {path}");
+ Some(file)
+ } else {
+ None
+ };
+ if let Some(mut fd) = authfile {
std::io::copy(&mut fd, &mut tempfile)?;
} else {
+ tracing::debug!("No authfile found, using empty auth");
// Note that if there's no bootc-owned auth, then we force an empty authfile to ensure
// that podman doesn't fall back to searching the user-owned paths.
tempfile.write_all(b"{}")?;
@@ -194,7 +272,12 @@ impl CStorage {
/// Create a `podman image` Command instance prepared to operate on our alternative
/// root.
    pub(crate) fn new_image_cmd(&self) -> Result<Command> {
- let mut r = new_podman_cmd_in(&self.sysroot, &self.storage_root, &self.run)?;
+ let mut r = new_podman_cmd_in(
+ &self.sysroot,
+ self.booted_root.as_ref(),
+ &self.storage_root,
+ &self.run,
+ )?;
// We want to limit things to only manipulating images by default.
r.arg("image");
Ok(r)
@@ -237,6 +320,7 @@ impl CStorage {
#[context("Creating imgstorage")]
pub(crate) fn create(
sysroot: &Dir,
+ booted_root: Option<&Dir>,
run: &Dir,
sepolicy: Option<&ostree::SePolicy>,
    ) -> Result<Self> {
@@ -260,7 +344,7 @@ impl CStorage {
// There's no explicit API to initialize a containers-storage:
// root, simply passing a path will attempt to auto-create it.
// We run "podman images" in the new root.
- new_podman_cmd_in(&sysroot, &storage_root, &run)?
+ new_podman_cmd_in(&sysroot, booted_root, &storage_root, &run)?
.stdout(Stdio::null())
.arg("images")
.run_capture_stderr()
@@ -277,11 +361,11 @@ impl CStorage {
Self::ensure_labeled(&storage_root, sepolicy)?;
}
- Self::open(sysroot, run)
+ Self::open(sysroot, booted_root, run)
}
#[context("Opening imgstorage")]
- pub(crate) fn open(sysroot: &Dir, run: &Dir) -> Result {
+    pub(crate) fn open(sysroot: &Dir, booted_root: Option<&Dir>, run: &Dir) -> Result<Self> {
tracing::trace!("Opening container image store");
Self::init_globals()?;
let subpath = &Self::subpath();
@@ -294,6 +378,7 @@ impl CStorage {
let run = run.open_dir(RUNROOT)?;
Ok(Self {
sysroot: sysroot.try_clone()?,
+ booted_root: booted_root.map(|d| d.try_clone()).transpose()?,
storage_root,
run,
_unsync: Default::default(),
diff --git a/crates/lib/src/store/mod.rs b/crates/lib/src/store/mod.rs
index b001e7abb..3b509c103 100644
--- a/crates/lib/src/store/mod.rs
+++ b/crates/lib/src/store/mod.rs
@@ -369,7 +369,15 @@ impl Storage {
let ostree = self.get_ostree()?;
let sysroot_dir = crate::utils::sysroot_dir(ostree)?;
- let sepolicy = if ostree.booted_deployment().is_none() {
+ // Get the booted deployment's root filesystem if available.
+ // This is used for auth file lookup during upgrades.
+ let booted_root = if let Some(dep) = ostree.booted_deployment() {
+ Some(deployment_fd(ostree, &dep)?)
+ } else {
+ None
+ };
+
+ let sepolicy = if booted_root.is_none() {
// fallback to policy from container root
// this should only happen during cleanup of a broken install
tracing::trace!("falling back to container root's selinux policy");
@@ -379,14 +387,17 @@ impl Storage {
// load the sepolicy from the booted ostree deployment so the imgstorage can be
// properly labeled with /var/lib/container/storage labels
tracing::trace!("loading sepolicy from booted ostree deployment");
- let dep = ostree.booted_deployment().unwrap();
- let dep_fs = deployment_fd(ostree, &dep)?;
- lsm::new_sepolicy_at(&dep_fs)?
+ lsm::new_sepolicy_at(booted_root.as_ref().unwrap())?
};
tracing::trace!("sepolicy in get_ensure_imgstore: {sepolicy:?}");
- let imgstore = CStorage::create(&sysroot_dir, &self.run, sepolicy.as_ref())?;
+ let imgstore = CStorage::create(
+ &sysroot_dir,
+ booted_root.as_ref(),
+ &self.run,
+ sepolicy.as_ref(),
+ )?;
Ok(self.imgstore.get_or_init(|| imgstore))
}
diff --git a/tmt/tests/booted/bootc_testlib.nu b/tmt/tests/booted/bootc_testlib.nu
index f3d1fa013..dc041d6c8 100644
--- a/tmt/tests/booted/bootc_testlib.nu
+++ b/tmt/tests/booted/bootc_testlib.nu
@@ -23,3 +23,88 @@ export def have_hostexports [] {
export def parse_cmdline [] {
open /proc/cmdline | str trim | split row " "
}
+
+# cstor-dist configuration for authenticated registry testing
+# cstor-dist serves images from containers-storage via an authenticated OCI registry endpoint
+# https://github.com/ckyrouac/cstor-dist
+const CSTOR_DIST_IMAGE = "ghcr.io/ckyrouac/cstor-dist:latest"
+const CSTOR_DIST_USER = "testuser"
+const CSTOR_DIST_PASS = "testpass"
+const CSTOR_DIST_PORT = 8000
+
+# The registry address for cstor-dist
+export const CSTOR_DIST_REGISTRY = $"localhost:($CSTOR_DIST_PORT)"
+
+# Start cstor-dist with basic auth on localhost
+# Fails if cstor-dist cannot be started
+export def start_cstor_dist [] {
+ print "Starting cstor-dist with basic auth..."
+
+ # Pull test images that cstor-dist will serve
+ print "Pulling test images for cstor-dist to serve..."
+ podman pull docker.io/library/alpine:latest
+ podman pull docker.io/library/busybox:latest
+
+ # Run cstor-dist container with auth enabled
+ # Mount the local containers storage so cstor-dist can serve images from it
+ let storage_path = if ("/var/lib/containers/storage" | path exists) {
+ "/var/lib/containers/storage"
+ } else {
+ $"($env.HOME)/.local/share/containers/storage"
+ }
+
+ (podman run --privileged --rm -d --name cstor-dist-auth
+ -p $"($CSTOR_DIST_PORT):8000"
+ -v $"($storage_path):/var/lib/containers/storage"
+ $CSTOR_DIST_IMAGE --username $CSTOR_DIST_USER --password $CSTOR_DIST_PASS)
+
+ # Wait for cstor-dist to be ready by testing HTTP connection
+ # Loop for up to 20 seconds
+ print "Waiting for cstor-dist to be ready..."
+ let auth_header = $"($CSTOR_DIST_USER):($CSTOR_DIST_PASS)" | encode base64
+ mut ready = false
+ for i in 1..20 {
+ let result = do { curl -sf -H $"Authorization: Basic ($auth_header)" $"http://($CSTOR_DIST_REGISTRY)/v2/" } | complete
+ if $result.exit_code == 0 {
+ $ready = true
+ break
+ }
+ print $"Attempt ($i)/20: cstor-dist not ready yet..."
+ sleep 1sec
+ }
+
+ if not $ready {
+ # Show container logs for debugging
+ print "cstor-dist failed to start. Container logs:"
+ podman logs cstor-dist-auth
+ error make { msg: "cstor-dist failed to become ready within 20 seconds" }
+ }
+
+ print $"cstor-dist running on ($CSTOR_DIST_REGISTRY)"
+}
+
+# Get cstor-dist auth config
+export def get_cstor_auth [] {
+ # Base64 encode the credentials for auth.json
+ let auth_b64 = $"($CSTOR_DIST_USER):($CSTOR_DIST_PASS)" | encode base64
+ {
+ registry: $CSTOR_DIST_REGISTRY,
+ auth_b64: $auth_b64
+ }
+}
+
+# Configure insecure registry for cstor-dist (no TLS)
+export def setup_insecure_registry [] {
+ mkdir /etc/containers/registries.conf.d
+ (echo $"[[registry]]\nlocation=\"($CSTOR_DIST_REGISTRY)\"\ninsecure=true"
+ | save -f /etc/containers/registries.conf.d/99-cstor-dist.conf)
+}
+
+# Set up auth.json on the running system with cstor-dist credentials
+export def setup_system_auth [] {
+ mkdir /run/ostree
+ let cstor = get_cstor_auth
+ print $"Setting up system auth for cstor-dist at ($cstor.registry)"
+ let auth_json = $'{"auths": {"($cstor.registry)": {"auth": "($cstor.auth_b64)"}}}'
+ echo $auth_json | save -f /run/ostree/auth.json
+}
diff --git a/tmt/tests/booted/test-logically-bound-switch.nu b/tmt/tests/booted/test-logically-bound-switch.nu
index fbe69a9da..feda79c8b 100644
--- a/tmt/tests/booted/test-logically-bound-switch.nu
+++ b/tmt/tests/booted/test-logically-bound-switch.nu
@@ -12,27 +12,31 @@
#
#
#
+#
+# This test also verifies that authenticated LBIs work in two scenarios:
+# 1. Auth credentials stored in the image itself
+# 2. Auth credentials stored on the running system
+#
+# The test uses cstor-dist to serve images from containers-storage via an
+# authenticated OCI registry endpoint.
use std assert
use tap.nu
+use bootc_testlib.nu [CSTOR_DIST_REGISTRY, start_cstor_dist, get_cstor_auth, setup_insecure_registry, setup_system_auth]
# This code runs on *each* boot.
bootc status
let st = bootc status --json | from json
let booted = $st.status.booted.image
-# The tests here aren't fetching from a registry which requires auth by default,
-# but we can replicate the failure in https://github.com/bootc-dev/bootc/pull/1852
-# by just injecting any auth file.
-echo '{}' | save -f /run/ostree/auth.json
-
def initial_setup [] {
bootc image copy-to-storage
podman images
podman image inspect localhost/bootc | from json
}
-def build_image [name images containers] {
+# Build an image with optional auth.json baked in
+def build_image [name images containers --with-auth] {
let td = mktemp -d
cd $td
mkdir usr/share/containers/systemd
@@ -59,6 +63,16 @@ RUN echo sanity check > /usr/share/bound-image-sanity-check.txt
}
}
+ # Optionally bake auth.json into the image
+ if $with_auth {
+ let cstor = get_cstor_auth
+ print "Baking auth.json into the image"
+ mkdir etc/ostree
+ let auth_json = $'{"auths": {"($cstor.registry)": {"auth": "($cstor.auth_b64)"}}}'
+ echo $auth_json | save etc/ostree/auth.json
+ echo "COPY etc/ /etc/\n" | save Dockerfile --append
+ }
+
# Build it
podman build -t $name .
# Just sanity check it
@@ -74,12 +88,13 @@ def verify_images [images containers] {
let image_names = podman --storage-opt=additionalimagestore=/usr/lib/bootc/storage images --format json | from json | select -i Names
for $image in $bound_images {
- let found = $image_names | where Names == [$image.image]
+ # Check if the expected image name is IN the Names array (not exact match)
+ let found = $image_names | where { |row| $image.image in $row.Names }
assert (($found | length) > 0) $"($image.image) not found"
}
for $container in $bound_containers {
- let found = $image_names | where Names == [$container.image]
+ let found = $image_names | where { |row| $container.image in $row.Names }
assert (($found | length) > 0) $"($container.image) not found"
}
}
@@ -89,18 +104,29 @@ def first_boot [] {
initial_setup
- # build a bootc image that includes bound images
+ # Start cstor-dist for authenticated LBI testing
+ start_cstor_dist
+ setup_insecure_registry
+
+ # Set up auth on running system for the switch operation
+ # The image will also have auth baked in - both should work
+ setup_system_auth
+
+ # Build a bootc image that includes bound images
+ # Include an authenticated LBI from cstor-dist with auth baked into the image
let images = [
{ "bound": true, "image": "registry.access.redhat.com/ubi9/ubi-minimal:9.4", "name": "ubi-minimal" },
- { "bound": false, "image": "quay.io/centos-bootc/centos-bootc:stream9", "name": "centos-bootc" }
+ { "bound": false, "image": "quay.io/centos-bootc/centos-bootc:stream9", "name": "centos-bootc" },
+ { "bound": true, "image": $"($CSTOR_DIST_REGISTRY)/docker.io/library/alpine:latest", "name": "cstor-alpine" }
]
let containers = [{
- "bound": true, "image": "docker.io/library/alpine:latest", "name": "alpine"
+ "bound": true, "image": "docker.io/library/alpine:latest", "name": "alpine"
}]
let image_name = "localhost/bootc-bound"
- build_image $image_name $images $containers
+ print "Building image WITH auth.json baked in (tests auth from image)"
+ build_image $image_name $images $containers --with-auth
bootc switch --transport containers-storage $image_name
verify_images $images $containers
tmt-reboot
@@ -111,21 +137,39 @@ def second_boot [] {
assert equal $booted.image.transport containers-storage
assert equal $booted.image.image localhost/bootc-bound
- # verify images are still there after boot
+ # Start cstor-dist again (container doesn't survive reboot)
+ start_cstor_dist
+ setup_insecure_registry
+
+ # Set up auth on the RUNNING SYSTEM for the upgrade
+ # The new image will NOT have auth baked in, so the fallback to system auth is needed
+ setup_system_auth
+
+ # Verify images from first switch are still there
let images = [
{ "bound": true, "image": "registry.access.redhat.com/ubi9/ubi-minimal:9.4", "name": "ubi-minimal" },
- { "bound": false, "image": "quay.io/centos-bootc/centos-bootc:stream9", "name": "centos-bootc" }
+ { "bound": false, "image": "quay.io/centos-bootc/centos-bootc:stream9", "name": "centos-bootc" },
+ { "bound": true, "image": $"($CSTOR_DIST_REGISTRY)/docker.io/library/alpine:latest", "name": "cstor-alpine" }
]
let containers = [{
- "bound": true, "image": "docker.io/library/alpine:latest", "name": "alpine"
+ "bound": true, "image": "docker.io/library/alpine:latest", "name": "alpine"
}]
verify_images $images $containers
- # build a new bootc image with an additional bound image
+ # Build a NEW bootc image WITHOUT auth baked in
+ # Add a DIFFERENT authenticated LBI (busybox instead of alpine)
+ # This tests that auth from the running system works (the fallback fix)
print "bootc upgrade with another bound image"
let image_name = "localhost/bootc-bound"
- let more_images = $images | append [{ "bound": true, "image": "registry.access.redhat.com/ubi9/ubi-minimal:9.3", "name": "ubi-minimal-9-3" }]
+ let more_images = [
+ { "bound": true, "image": "registry.access.redhat.com/ubi9/ubi-minimal:9.4", "name": "ubi-minimal" },
+ { "bound": true, "image": "registry.access.redhat.com/ubi9/ubi-minimal:9.3", "name": "ubi-minimal-9-3" },
+ { "bound": false, "image": "quay.io/centos-bootc/centos-bootc:stream9", "name": "centos-bootc" },
+ { "bound": true, "image": $"($CSTOR_DIST_REGISTRY)/docker.io/library/alpine:latest", "name": "cstor-alpine" },
+ { "bound": true, "image": $"($CSTOR_DIST_REGISTRY)/docker.io/library/busybox:latest", "name": "cstor-busybox" }
+ ]
+ print "Building image WITHOUT auth.json (tests auth fallback from running system)"
build_image $image_name $more_images $containers
bootc upgrade
verify_images $more_images $containers
@@ -137,14 +181,17 @@ def third_boot [] {
assert equal $booted.image.transport containers-storage
assert equal $booted.image.image localhost/bootc-bound
+ # No need to start cstor-dist - we're just verifying the images are in storage
let images = [
{ "bound": true, "image": "registry.access.redhat.com/ubi9/ubi-minimal:9.4", "name": "ubi-minimal" },
{ "bound": true, "image": "registry.access.redhat.com/ubi9/ubi-minimal:9.3", "name": "ubi-minimal-9-3" },
- { "bound": false, "image": "quay.io/centos-bootc/centos-bootc:stream9", "name": "centos-bootc" }
+ { "bound": false, "image": "quay.io/centos-bootc/centos-bootc:stream9", "name": "centos-bootc" },
+ { "bound": true, "image": $"($CSTOR_DIST_REGISTRY)/docker.io/library/alpine:latest", "name": "cstor-alpine" },
+ { "bound": true, "image": $"($CSTOR_DIST_REGISTRY)/docker.io/library/busybox:latest", "name": "cstor-busybox" }
]
let containers = [{
- "bound": true, "image": "docker.io/library/alpine:latest", "name": "alpine"
+ "bound": true, "image": "docker.io/library/alpine:latest", "name": "alpine"
}]
verify_images $images $containers