imagebuildah.StageExecutor: clean up volumes/volumeCache
Clean up the distinctions between the volumes slice and the volumeCache
and volumeCacheInfo maps so that --compat-volumes will work correctly
when we're building with multiple layers.
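
A quick way to exercise the path this fixes, assuming a Dockerfile that
declares a volume and then writes into it (illustrative, not taken from the
repository's test data):

    # Dockerfile
    FROM alpine
    VOLUME /vol
    RUN touch /vol/volfile

    # With --layers, every instruction is committed as its own layer, so the
    # volume bookkeeping has to carry over between per-instruction builders:
    buildah build --layers --compat-volumes=true -t volume-image .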

Signed-off-by: Nalin Dahyabhai <[email protected]>
nalind committed Sep 9, 2024
1 parent 4565497 commit 6221d1f
Showing 5 changed files with 82 additions and 83 deletions.
3 changes: 2 additions & 1 deletion cmd/buildah/build.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"fmt"
 	"os"
 
 	"github.com/containers/buildah/imagebuildah"
@@ -73,7 +74,7 @@ func buildCmd(c *cobra.Command, inputArgs []string, iopts buildahcli.BuildOption
 	if c.Flag("logfile").Changed {
 		logfile, err := os.OpenFile(iopts.Logfile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o600)
 		if err != nil {
-			return err
+			return fmt.Errorf("opening log file: %w", err)
 		}
 		iopts.Logwriter = logfile
 		defer iopts.Logwriter.Close()
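
These wrapped messages keep the underlying error inspectable: fmt.Errorf
with %w preserves the error chain, so callers can still match sentinel
errors. A standalone check (my example, not code from this commit):

    package main

    import (
    	"errors"
    	"fmt"
    	"io/fs"
    	"os"
    )

    func main() {
    	_, err := os.Open("/no/such/logfile")
    	wrapped := fmt.Errorf("opening log file: %w", err)
    	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true: %w keeps the chain intact
    }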
2 changes: 1 addition & 1 deletion imagebuildah/build.go
@@ -143,7 +143,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
 		}
 		contents, err = os.Open(dfile)
 		if err != nil {
-			return "", nil, err
+			return "", nil, fmt.Errorf("reading build instructions: %w", err)
 		}
 		dinfo, err = contents.Stat()
 		if err != nil {
2 changes: 1 addition & 1 deletion imagebuildah/executor.go
@@ -223,7 +223,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
 		} else {
 			rusageLogFile, err = os.OpenFile(options.RusageLogFile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0o644)
 			if err != nil {
-				return nil, err
+				return nil, fmt.Errorf("creating file to store rusage logs: %w", err)
 			}
 		}
 	}
110 changes: 52 additions & 58 deletions imagebuildah/stage_executor.go
@@ -67,9 +67,9 @@ type StageExecutor struct {
 	name            string
 	builder         *buildah.Builder
 	preserved       int
-	volumes         imagebuilder.VolumeSet
-	volumeCache     map[string]string
-	volumeCacheInfo map[string]os.FileInfo
+	volumes         imagebuilder.VolumeSet // list of directories which are volumes
+	volumeCache     map[string]string      // mapping from volume directories to cache archives (used by vfs method)
+	volumeCacheInfo map[string]os.FileInfo // mapping from volume directories to perms/datestamps to reset after restoring
 	mountPoint      string
 	output          string
 	containerIDs    []string
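
As the new comments spell out, s.volumes is the authoritative list and the
two maps are keyed by its entries, with volumeCache populated only by the
vfs save/restore method. A toy illustration of that invariant (plain types
standing in for the executor's fields):

    package main

    import (
    	"fmt"
    	"io/fs"
    )

    func main() {
    	volumes := []string{"/vol"}                 // list of volume directories
    	volumeCache := map[string]string{}          // volume dir -> cache archive (vfs method only)
    	volumeCacheInfo := map[string]fs.FileInfo{} // volume dir -> perms/datestamps to restore

    	// Every key of the two maps is expected to appear in volumes.
    	for _, v := range volumes {
    		fmt.Println(v, volumeCache[v], volumeCacheInfo[v])
    	}
    }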
@@ -92,7 +92,7 @@ type StageExecutor struct {
 // writeable while the RUN instruction is being handled, even if any changes
 // made within the directory are ultimately discarded.
 func (s *StageExecutor) Preserve(path string) error {
-	logrus.Debugf("PRESERVE %q in %q", path, s.builder.ContainerID)
+	logrus.Debugf("PRESERVE %q in %q (already preserving %v)", path, s.builder.ContainerID, s.volumes)
 
 	// Try and resolve the symlink (if one exists)
 	// Set archivedPath and path based on whether a symlink is found or not
@@ -111,71 +111,60 @@ func (s *StageExecutor) Preserve(path string) error {
 		return fmt.Errorf("evaluating path %q: %w", path, err)
 	}
 
 	// Whether or not we're caching and restoring the contents of this
 	// directory, we need to ensure it exists now.
 	const createdDirPerms = os.FileMode(0o755)
-	if s.executor.compatVolumes != types.OptionalBoolTrue {
-		logrus.Debugf("ensuring volume path %q exists", path)
+	st, err := os.Stat(archivedPath)
+	if errors.Is(err, os.ErrNotExist) {
+		// Yup, we do have to create it. That means it's not in any
+		// cached copy of the path that covers it, so we have to
+		// invalidate such cached copy.
+		logrus.Debugf("have to create volume %q", path)
 		createdDirPerms := createdDirPerms
 		if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
 			return fmt.Errorf("ensuring volume path exists: %w", err)
 		}
-		logrus.Debugf("not doing volume save-and-restore of %q in %q", path, s.builder.ContainerID)
-		return nil
+		if err := s.volumeCacheInvalidate(path); err != nil {
+			return fmt.Errorf("ensuring volume path %q is preserved: %w", filepath.Join(s.mountPoint, path), err)
+		}
+		if st, err = os.Stat(archivedPath); err != nil {
+			return fmt.Errorf("checking on just-created volume path: %w", err)
+		}
 	}
+	if err != nil {
+		return fmt.Errorf("reading info cache for volume at %q: %w", path, err)
+	}
 
 	if s.volumes.Covers(path) {
 		// This path is a subdirectory of a volume path that we're
-		// already preserving, so there's nothing new to be done except
-		// ensure that it exists.
-		st, err := os.Stat(archivedPath)
-		if errors.Is(err, os.ErrNotExist) {
-			// We do have to create it. That means it's not in any
-			// cached copy of the path that covers it, so we have
-			// to invalidate such cached copy.
-			logrus.Debugf("have to create volume %q", path)
-			createdDirPerms := createdDirPerms
-			if err := copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
-				return fmt.Errorf("ensuring volume path exists: %w", err)
-			}
-			if err := s.volumeCacheInvalidate(path); err != nil {
-				return fmt.Errorf("ensuring volume path %q is preserved: %w", filepath.Join(s.mountPoint, path), err)
-			}
-			if st, err = os.Stat(archivedPath); err != nil {
-				return fmt.Errorf("checking on just-created volume path: %w", err)
-			}
-		}
+		// already preserving, so there's nothing new to be done now
+		// that we've ensured that it exists.
 		s.volumeCacheInfo[path] = st
 		return nil
 	}
 
-	// Figure out where the cache for this volume would be stored.
-	s.preserved++
-	cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
-	if err != nil {
-		return fmt.Errorf("unable to locate temporary directory for container")
-	}
-	cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
-
-	// Save info about the top level of the location that we'll be archiving.
-	st, err := os.Stat(archivedPath)
-	if errors.Is(err, os.ErrNotExist) {
-		logrus.Debugf("have to create volume %q", path)
-		createdDirPerms := os.FileMode(0o755)
-		if err = copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
-			return fmt.Errorf("ensuring volume path exists: %w", err)
-		}
-		st, err = os.Stat(archivedPath)
-	}
-	if err != nil {
-		logrus.Debugf("error reading info about %q: %v", archivedPath, err)
-		return err
-	}
-	s.volumeCacheInfo[path] = st
+	// Add the new volume path to the ones that we're tracking.
 	if !s.volumes.Add(path) {
 		// This path is not a subdirectory of a volume path that we're
 		// already preserving, so adding it to the list should have
 		// worked.
 		return fmt.Errorf("adding %q to the volume cache", path)
 	}
+	s.volumeCacheInfo[path] = st
+
+	// If we're not doing save/restore, we're done, since volumeCache should be empty.
+	if s.executor.compatVolumes != types.OptionalBoolTrue {
+		logrus.Debugf("not doing volume save-and-restore of %q in %q", path, s.builder.ContainerID)
+		return nil
+	}
+
+	// Decide where the cache for this volume will be stored.
+	s.preserved++
+	cacheDir, err := s.executor.store.ContainerDirectory(s.builder.ContainerID)
+	if err != nil {
+		return fmt.Errorf("unable to locate temporary directory for container")
+	}
+	cacheFile := filepath.Join(cacheDir, fmt.Sprintf("volume%d.tar", s.preserved))
 	s.volumeCache[path] = cacheFile
 
 	// Now prune cache files for volumes that are newly supplanted by this one.
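
The reordered logic leans on the imagebuilder.VolumeSet semantics visible
above: Covers reports whether a path already falls inside a tracked volume,
and Add reports whether the path was actually new. A toy stand-in with a
prefix-based Covers (the real type also prunes entries that a new path
supersedes):

    package main

    import (
    	"fmt"
    	"path/filepath"
    	"strings"
    )

    type volumeSet []string

    // Covers: is path equal to, or nested under, a tracked volume directory?
    func (v volumeSet) Covers(path string) bool {
    	for _, dir := range v {
    		if path == dir || strings.HasPrefix(path, dir+string(filepath.Separator)) {
    			return true
    		}
    	}
    	return false
    }

    // Add: track a new volume directory; false if it was already covered.
    func (v *volumeSet) Add(path string) bool {
    	if v.Covers(path) {
    		return false
    	}
    	*v = append(*v, path)
    	return true
    }

    func main() {
    	var volumes volumeSet
    	fmt.Println(volumes.Add("/vol"))        // true: newly tracked
    	fmt.Println(volumes.Covers("/vol/sub")) // true: subdirectory is covered
    	fmt.Println(volumes.Add("/vol/sub"))    // false: nothing new to track
    }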
@@ -206,7 +195,7 @@ func (s *StageExecutor) Preserve(path string) error {
 				if errors.Is(err, os.ErrNotExist) {
 					continue
 				}
-				return err
+				return fmt.Errorf("removing cache of %q: %w", archivedPath, err)
 			}
 			delete(s.volumeCache, cachedPath)
 		}
@@ -238,7 +227,7 @@ func (s *StageExecutor) volumeCacheInvalidate(path string) error {
 // Save the contents of each of the executor's list of volumes for which we
 // don't already have a cache file.
 func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
-	for cachedPath, cacheFile := range s.volumeCache {
+	for _, cachedPath := range s.volumes {
 		archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
 		if err != nil {
 			return nil, fmt.Errorf("evaluating volume path: %w", err)
@@ -250,13 +239,14 @@ func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
 		if strings.HasPrefix(relativePath, ".."+string(os.PathSeparator)) {
 			return nil, fmt.Errorf("converting %q into a path relative to %q", archivedPath, s.mountPoint)
 		}
+		cacheFile := s.volumeCache[cachedPath]
 		_, err = os.Stat(cacheFile)
 		if err == nil {
 			logrus.Debugf("contents of volume %q are already cached in %q", archivedPath, cacheFile)
 			continue
 		}
 		if !errors.Is(err, os.ErrNotExist) {
-			return nil, err
+			return nil, fmt.Errorf("checking for presence of a cached copy of %q at %q: %w", cachedPath, cacheFile, err)
 		}
 		createdDirPerms := os.FileMode(0o755)
 		if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
@@ -265,7 +255,7 @@ func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
 		logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile)
 		cache, err := os.Create(cacheFile)
 		if err != nil {
-			return nil, err
+			return nil, fmt.Errorf("creating cache for volume %q: %w", archivedPath, err)
 		}
 		defer cache.Close()
 		rc, err := chrootarchive.Tar(archivedPath, nil, s.mountPoint)
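
Driving this loop from the volumes slice instead of the volumeCache map
also makes the processing order deterministic, since Go deliberately
randomizes map iteration; the cache file is now looked up per entry. A
standalone contrast (my illustration):

    package main

    import "fmt"

    func main() {
    	cache := map[string]string{"/a": "volume1.tar", "/b": "volume2.tar"}
    	volumes := []string{"/a", "/b"} // tracked list, in insertion order

    	for dir := range cache { // order may differ on every run
    		fmt.Println("map:", dir)
    	}
    	for _, dir := range volumes { // stable order; map used only for lookup
    		fmt.Println("slice:", dir, "->", cache[dir])
    	}
    }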
@@ -298,7 +288,7 @@ func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
 		logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile)
 		cache, err := os.Open(cacheFile)
 		if err != nil {
-			return err
+			return fmt.Errorf("restoring contents of volume %q: %w", archivedPath, err)
 		}
 		defer cache.Close()
 		if err := copier.Remove(s.mountPoint, archivedPath, copier.RemoveOptions{All: true}); err != nil {
@@ -334,10 +324,13 @@ func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
 }
 
 // Save the contents of each of the executor's list of volumes for which we
-// don't already have a cache file.
+// don't already have a cache file. For overlay, we "save" and "restore" by
+// using it as a lower for an overlay mount in the same location, and then
+// discarding the upper.
 func (s *StageExecutor) volumeCacheSaveOverlay() (mounts []specs.Mount, err error) {
-	for cachedPath := range s.volumeCache {
-		err = copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.MkdirOptions{})
+	for _, cachedPath := range s.volumes {
+		createdDirPerms := os.FileMode(0o755)
+		err = copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.MkdirOptions{ChmodNew: &createdDirPerms})
 		if err != nil {
 			return nil, fmt.Errorf("ensuring volume exists: %w", err)
 		}
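
The overlay variant the new comment describes amounts to mounting each
volume directory as the lower layer of an overlay whose upper is scratch
space that gets thrown away. A sketch of such a mount entry (illustrative
paths; not the exact code in this function):

    package main

    import (
    	"fmt"

    	specs "github.com/opencontainers/runtime-spec/specs-go"
    )

    func main() {
    	m := specs.Mount{
    		Destination: "/vol", // the volume directory inside the rootfs
    		Type:        "overlay",
    		Source:      "overlay",
    		Options: []string{
    			"lowerdir=/mountpoint/vol", // existing contents, read-only
    			"upperdir=/scratch/upper",  // captures writes; discarded afterwards
    			"workdir=/scratch/work",
    		},
    	}
    	fmt.Printf("%+v\n", m)
    }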
@@ -1089,6 +1082,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
 	s.mountPoint = mountPoint
 	s.builder = builder
 	// Now that the rootfs is mounted, set up handling of volumes from the base image.
+	s.volumes = make([]string, 0, len(s.volumes))
 	s.volumeCache = make(map[string]string)
 	s.volumeCacheInfo = make(map[string]os.FileInfo)
 	for _, v := range builder.Volumes() {
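
Resetting all three structures in prepare() is what keeps stages
independent: each stage starts from its base image's volume list rather
than inheriting the previous stage's bookkeeping. The make form empties the
slice while keeping a capacity hint (a minor point, my reading; a plain nil
assignment would also work):

    package main

    import "fmt"

    func main() {
    	volumes := []string{"/vol", "/data"}
    	volumes = make([]string, 0, len(volumes)) // empty again, capacity retained
    	fmt.Println(len(volumes), cap(volumes))   // 0 2
    }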
48 changes: 26 additions & 22 deletions tests/bud.bats
@@ -2468,18 +2468,20 @@ _EOF
   skip_if_no_runtime
 
   _prefetch alpine
-  target=volume-image
-  run_buildah build $WITH_POLICY_JSON -t ${target} --compat-volumes $BUDFILES/preserve-volumes
-  run_buildah from --quiet ${target}
-  cid=$output
-  run_buildah mount ${cid}
-  root=$output
-  test -s $root/vol/subvol/subsubvol/subsubvolfile
-  test ! -s $root/vol/subvol/subvolfile
-  test -s $root/vol/volfile
-  test -s $root/vol/Dockerfile
-  test -s $root/vol/Dockerfile2
-  test ! -s $root/vol/anothervolfile
+  for layers in "" --layers ; do
+    target=volume-image$layers
+    run_buildah build $WITH_POLICY_JSON -t ${target} ${layers} --compat-volumes $BUDFILES/preserve-volumes
+    run_buildah from --quiet ${target}
+    cid=$output
+    run_buildah mount ${cid}
+    root=$output
+    test -s $root/vol/subvol/subsubvol/subsubvolfile
+    test ! -s $root/vol/subvol/subvolfile
+    test -s $root/vol/volfile
+    test -s $root/vol/Dockerfile
+    test -s $root/vol/Dockerfile2
+    test ! -s $root/vol/anothervolfile
+  done
 }
 
 # Helper function for several of the tests which pull from http.
@@ -2701,16 +2703,18 @@ function validate_instance_compression {
   skip_if_no_runtime
 
   _prefetch alpine
-  target=volume-image
-  run_buildah build $WITH_POLICY_JSON -t ${target} --compat-volumes=true $BUDFILES/volume-perms
-  run_buildah from --quiet $WITH_POLICY_JSON ${target}
-  cid=$output
-  run_buildah mount ${cid}
-  root=$output
-  test ! -s $root/vol/subvol/subvolfile
-  run stat -c %f $root/vol/subvol
-  assert "$status" -eq 0 "status code from stat $root/vol/subvol"
-  expect_output "41ed" "stat($root/vol/subvol) [0x41ed = 040755]"
+  for layers in "" --layers ; do
+    target=volume-image$layers
+    run_buildah build $WITH_POLICY_JSON -t ${target} ${layers} --compat-volumes=true $BUDFILES/volume-perms
+    run_buildah from --quiet $WITH_POLICY_JSON ${target}
+    cid=$output
+    run_buildah mount ${cid}
+    root=$output
+    test ! -s $root/vol/subvol/subvolfile
+    run stat -c %f $root/vol/subvol
+    assert "$status" -eq 0 "status code from stat $root/vol/subvol"
+    expect_output "41ed" "stat($root/vol/subvol) [0x41ed = 040755]"
+  done
 }
 
 @test "bud-volume-ownership" {
