Skip to content
This repository has been archived by the owner on Nov 7, 2024. It is now read-only.

store: If ostree >= 2024.3, retain content in /var #602

Merged
merged 2 commits into from
Feb 11, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion lib/src/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -849,7 +849,8 @@ async fn testing(opts: &TestingOpts) -> Result<()> {
TestingOpts::Run => crate::integrationtest::run_tests(),
TestingOpts::RunIMA => crate::integrationtest::test_ima(),
TestingOpts::FilterTar => {
crate::tar::filter_tar(std::io::stdin(), std::io::stdout(), false).map(|_| {})
crate::tar::filter_tar(std::io::stdin(), std::io::stdout(), &Default::default())
.map(|_| {})
}
}
}
Expand Down
9 changes: 9 additions & 0 deletions lib/src/container/store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,8 @@ pub struct ImageImporter {
disable_gc: bool, // If true, don't prune unused image layers
/// If true, require the image has the bootable flag
require_bootable: bool,
/// If true, we have ostree v2024.3 or newer.
ostree_v2024_3: bool,
pub(crate) proxy_img: OpenedImage,

layer_progress: Option<Sender<ImportProgress>>,
Expand Down Expand Up @@ -471,6 +473,7 @@ impl ImageImporter {
proxy_img,
target_imgref: None,
no_imgref: false,
ostree_v2024_3: ostree::check_version(2024, 3),
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Now of course, we in theory need to care about the case where a new ostree is deploying an old target. But personally, I don't care too much about this; we'll get the new ostree out pretty quickly. And also the emphasis on the bootc side is on self-installing operating systems, which don't have this problem.

Copy link
Member

@jmarrero jmarrero Feb 10, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Does this mean we need to make sure not to ship ostree 2024.3 in any version of OCP that has 2024.2 or older and commit only to backports?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is a good topic to raise. There's a lot of complexity and nuance here. First, all of this code is really only relevant in the case of deploying a container image with content in /var.

Our stock FCOS/RHCOS images don't have any. Now until the previous change here in #569 we just dropped all container /var content on the floor.

This change takes us from systemd-tmpfiles `C+` semantics (new files are added on each boot) to "at most once" copy semantics.

Now that all said:

Now of course, we in theory need to care about the case where a new ostree is deploying an old target.

For OCP (and really most cases) we are always going from old ➡️ new, as one would expect.

So I don't think this is a problem.

The bigger thing to do here is to verify that these new semantics for /var work as expected before we do another release.

disable_gc: false,
require_bootable: false,
imgref: imgref.clone(),
Expand All @@ -496,6 +499,11 @@ impl ImageImporter {
self.require_bootable = true;
}

/// Override the ostree version being targeted.
///
/// Updates the internal flag recording whether the target ostree is at
/// least v2024.3 — the first release for which /var content is retained
/// in place rather than remapped (per this change's intent).
pub fn set_ostree_version(&mut self, year: u32, v: u32) {
    // Tuple comparison is lexicographic, so this is exactly
    // `year > 2024 || (year == 2024 && v >= 3)`.
    self.ostree_v2024_3 = (year, v) >= (2024, 3);
}

/// Do not prune image layers.
pub fn disable_gc(&mut self) {
self.disable_gc = true;
Expand Down Expand Up @@ -864,6 +872,7 @@ impl ImageImporter {
base: Some(base_commit.clone()),
selinux: true,
allow_nonusr: root_is_transient,
retain_var: self.ostree_v2024_3,
};
let r =
crate::tar::write_tar(&self.repo, blob, layer.ostree_ref.as_str(), Some(opts));
Expand Down
4 changes: 4 additions & 0 deletions lib/src/fixture.rs
Original file line number Diff line number Diff line change
Expand Up @@ -439,6 +439,10 @@ impl Fixture {
/// Reset the destination repo to an empty state: delete every ref, then
/// prune the objects that are no longer reachable.
pub fn clear_destrepo(&self) -> Result<()> {
// Delete the primary test ref. NOTE(review): passing None as the new
// target appears to delete the ref — confirm against the ostree
// set_ref_immediate documentation.
self.destrepo()
.set_ref_immediate(None, self.testref(), None, gio::Cancellable::NONE)?;
// Also delete any remaining refs so the prune below can collect
// everything (e.g. image layer refs left over from a prior import).
for (r, _) in self.destrepo().list_refs(None, gio::Cancellable::NONE)? {
self.destrepo()
.set_ref_immediate(None, &r, None, gio::Cancellable::NONE)?;
}
// Prune with depth 0, removing objects unreachable from any ref.
self.destrepo()
.prune(ostree::RepoPruneFlags::REFS_ONLY, 0, gio::Cancellable::NONE)?;
Ok(())
Expand Down
73 changes: 53 additions & 20 deletions lib/src/tar/write.rs
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,9 @@ pub struct WriteTarOptions {
pub selinux: bool,
/// Allow content not in /usr; this should be paired with ostree rootfs.transient = true
pub allow_nonusr: bool,
/// If true, do not move content in /var to /usr/share/factory/var. This should be used
/// with ostree v2024.3 or newer.
pub retain_var: bool,
}

/// The result of writing a tar stream.
Expand Down Expand Up @@ -109,10 +112,16 @@ enum NormalizedPathResult<'a> {
Normal(Utf8PathBuf),
}

fn normalize_validate_path(
path: &Utf8Path,
/// Options controlling how an incoming tar stream is filtered on import.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub(crate) struct TarImportConfig {
    /// If true, allow top-level content outside /usr; this should be
    /// paired with ostree `rootfs.transient = true`.
    allow_nonusr: bool,
    /// If true, remap content under /var to /usr/share/factory/var so a
    /// systemd tmpfiles.d unit can copy it at boot; if false, retain
    /// /var content as-is (requires ostree >= v2024.3).
    remap_factory_var: bool,
}

fn normalize_validate_path<'a>(
path: &'a Utf8Path,
config: &'_ TarImportConfig,
) -> Result<NormalizedPathResult<'a>> {
// This converts e.g. `foo//bar/./baz` into `foo/bar/baz`.
let mut components = path
.components()
Expand Down Expand Up @@ -145,14 +154,18 @@ fn normalize_validate_path(
"etc" => {
ret.push("usr/etc");
}
// Content in /var will get copied by a systemd tmpfiles.d unit
"var" => {
ret.push("usr/share/factory/var");
// Content in /var will get copied by a systemd tmpfiles.d unit
if config.remap_factory_var {
ret.push("usr/share/factory/var");
} else {
ret.push(part)
}
}
o if EXCLUDED_TOPLEVEL_PATHS.contains(&o) => {
return Ok(NormalizedPathResult::Filtered(part));
}
_ if allow_nonusr => ret.push(part),
_ if config.allow_nonusr => ret.push(part),
_ => {
return Ok(NormalizedPathResult::Filtered(part));
}
Expand Down Expand Up @@ -180,7 +193,7 @@ fn normalize_validate_path(
pub(crate) fn filter_tar(
src: impl std::io::Read,
dest: impl std::io::Write,
allow_nonusr: bool,
config: &TarImportConfig,
) -> Result<BTreeMap<String, u32>> {
let src = std::io::BufReader::new(src);
let mut src = tar::Archive::new(src);
Expand All @@ -190,7 +203,7 @@ pub(crate) fn filter_tar(

let ents = src.entries()?;

tracing::debug!("Filtering tar; allow_nonusr={allow_nonusr}");
tracing::debug!("Filtering tar; config={config:?}");

// Lookaside data for dealing with hardlinked files into /sysroot; see below.
let mut changed_sysroot_objects = HashMap::new();
Expand Down Expand Up @@ -259,7 +272,7 @@ pub(crate) fn filter_tar(
}
}

let normalized = match normalize_validate_path(path, allow_nonusr)? {
let normalized = match normalize_validate_path(path, config)? {
NormalizedPathResult::Filtered(path) => {
tracing::trace!("Filtered: {path}");
if let Some(v) = filtered.get_mut(path) {
Expand All @@ -282,15 +295,16 @@ pub(crate) fn filter_tar(
async fn filter_tar_async(
src: impl AsyncRead + Send + 'static,
mut dest: impl AsyncWrite + Send + Unpin,
allow_nonusr: bool,
config: &TarImportConfig,
) -> Result<BTreeMap<String, u32>> {
let (tx_buf, mut rx_buf) = tokio::io::duplex(8192);
// The source must be moved to the heap so we know it is stable for passing to the worker thread
let src = Box::pin(src);
let config = config.clone();
let tar_transformer = tokio::task::spawn_blocking(move || {
let mut src = tokio_util::io::SyncIoBridge::new(src);
let dest = tokio_util::io::SyncIoBridge::new(tx_buf);
let r = filter_tar(&mut src, dest, allow_nonusr);
let r = filter_tar(&mut src, dest, &config);
// Pass ownership of the input stream back to the caller - see below.
(r, src)
});
Expand Down Expand Up @@ -365,7 +379,10 @@ pub async fn write_tar(
let mut child_stdout = r.stdout.take().unwrap();
let mut child_stderr = r.stderr.take().unwrap();
// Copy the filtered tar stream to child stdin
let filtered_result = filter_tar_async(src, child_stdin, options.allow_nonusr);
let mut import_config = TarImportConfig::default();
import_config.allow_nonusr = options.allow_nonusr;
import_config.remap_factory_var = !options.retain_var;
let filtered_result = filter_tar_async(src, child_stdin, &import_config);
let output_copier = async move {
// Gather stdout/stderr to buffers
let mut child_stdout_buf = String::new();
Expand Down Expand Up @@ -421,6 +438,18 @@ mod tests {

#[test]
fn test_normalize_path() {
let imp_default = &TarImportConfig {
allow_nonusr: false,
remap_factory_var: true,
};
let allow_nonusr = &TarImportConfig {
allow_nonusr: true,
remap_factory_var: true,
};
let composefs_and_new_ostree = &TarImportConfig {
allow_nonusr: true,
remap_factory_var: false,
};
let valid_all = &[
("/usr/bin/blah", "./usr/bin/blah"),
("usr/bin/blah", "./usr/bin/blah"),
Expand All @@ -431,29 +460,29 @@ mod tests {
];
let valid_nonusr = &[("boot", "./boot"), ("opt/puppet/blah", "./opt/puppet/blah")];
for &(k, v) in valid_all {
let r = normalize_validate_path(k.into(), false).unwrap();
let r2 = normalize_validate_path(k.into(), true).unwrap();
let r = normalize_validate_path(k.into(), imp_default).unwrap();
let r2 = normalize_validate_path(k.into(), allow_nonusr).unwrap();
assert_eq!(r, r2);
match r {
NormalizedPathResult::Normal(r) => assert_eq!(r, v),
NormalizedPathResult::Filtered(o) => panic!("Should not have filtered {o}"),
}
}
for &(k, v) in valid_nonusr {
let strict = normalize_validate_path(k.into(), false).unwrap();
let strict = normalize_validate_path(k.into(), imp_default).unwrap();
assert!(
matches!(strict, NormalizedPathResult::Filtered(_)),
"Incorrect filter for {k}"
);
let nonusr = normalize_validate_path(k.into(), true).unwrap();
let nonusr = normalize_validate_path(k.into(), allow_nonusr).unwrap();
match nonusr {
NormalizedPathResult::Normal(r) => assert_eq!(r, v),
NormalizedPathResult::Filtered(o) => panic!("Should not have filtered {o}"),
}
}
let filtered = &["/run/blah", "/sys/foo", "/dev/somedev"];
for &k in filtered {
match normalize_validate_path(k.into(), true).unwrap() {
match normalize_validate_path(k.into(), imp_default).unwrap() {
NormalizedPathResult::Filtered(_) => {}
NormalizedPathResult::Normal(_) => {
panic!("{} should be filtered", k)
Expand All @@ -462,9 +491,13 @@ mod tests {
}
let errs = &["usr/foo/../../bar"];
for &k in errs {
assert!(normalize_validate_path(k.into(), true).is_err());
assert!(normalize_validate_path(k.into(), false).is_err());
assert!(normalize_validate_path(k.into(), allow_nonusr).is_err());
assert!(normalize_validate_path(k.into(), imp_default).is_err());
}
assert!(matches!(
normalize_validate_path("var/lib/foo".into(), composefs_and_new_ostree).unwrap(),
NormalizedPathResult::Normal(_)
));
}

#[tokio::test]
Expand All @@ -481,7 +514,7 @@ mod tests {
let _ = rootfs_tar.into_inner()?;
let mut dest = Vec::new();
let src = tokio::io::BufReader::new(tokio::fs::File::open(rootfs_tar_path).await?);
filter_tar_async(src, &mut dest, false).await?;
filter_tar_async(src, &mut dest, &Default::default()).await?;
let dest = dest.as_slice();
let mut final_tar = tar::Archive::new(Cursor::new(dest));
let destdir = &tempd.path().join("destdir");
Expand Down
24 changes: 24 additions & 0 deletions lib/tests/it/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -935,6 +935,7 @@ async fn test_container_var_content() -> Result<()> {
};
let mut imp =
store::ImageImporter::new(fixture.destrepo(), &derived_imgref, Default::default()).await?;
imp.set_ostree_version(2023, 11);
let prep = match imp.prepare().await.unwrap() {
store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"),
store::PrepareResult::Ready(r) => r,
Expand Down Expand Up @@ -963,6 +964,29 @@ async fn test_container_var_content() -> Result<()> {
.is_none()
);

// Reset things
fixture.clear_destrepo()?;

let mut imp =
store::ImageImporter::new(fixture.destrepo(), &derived_imgref, Default::default()).await?;
imp.set_ostree_version(2024, 3);
let prep = match imp.prepare().await.unwrap() {
store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"),
store::PrepareResult::Ready(r) => r,
};
let import = imp.import(prep).await.unwrap();
let ostree_root = fixture
.destrepo()
.read_commit(&import.merge_commit, gio::Cancellable::NONE)?
.0;
let varfile = ostree_root
.child("usr/share/factory/var/lib/foo")
.downcast::<ostree::RepoFile>()
.unwrap();
assert!(!varfile.query_exists(gio::Cancellable::NONE));
assert!(ostree_root
.child("var/lib/foo")
.query_exists(gio::Cancellable::NONE));
Ok(())
}

Expand Down
Loading