diff --git a/image.go b/image.go index e5665d36c21..9beddda6149 100644 --- a/image.go +++ b/image.go @@ -148,36 +148,6 @@ func expectedDockerDiffIDs(image docker.V2Image) int { return expected } -// Compute the media types which we need to attach to a layer, given the type of -// compression that we'll be applying. -func computeLayerMIMEType(what string, layerCompression archive.Compression) (omediaType, dmediaType string, err error) { - omediaType = v1.MediaTypeImageLayer - dmediaType = docker.V2S2MediaTypeUncompressedLayer - if layerCompression != archive.Uncompressed { - switch layerCompression { - case archive.Gzip: - omediaType = v1.MediaTypeImageLayerGzip - dmediaType = manifest.DockerV2Schema2LayerMediaType - logrus.Debugf("compressing %s with gzip", what) - case archive.Bzip2: - // Until the image specs define a media type for bzip2-compressed layers, even if we know - // how to decompress them, we can't try to compress layers with bzip2. - return "", "", errors.New("media type for bzip2-compressed layers is not defined") - case archive.Xz: - // Until the image specs define a media type for xz-compressed layers, even if we know - // how to decompress them, we can't try to compress layers with xz. - return "", "", errors.New("media type for xz-compressed layers is not defined") - case archive.Zstd: - // Until the image specs define a media type for zstd-compressed layers, even if we know - // how to decompress them, we can't try to compress layers with zstd. - return "", "", errors.New("media type for zstd-compressed layers is not defined") - default: - logrus.Debugf("compressing %s with unknown compressor(?)", what) - } - } - return omediaType, dmediaType, nil -} - // Extract the container's whole filesystem as a filesystem image, wrapped // in LUKS-compatible encryption. func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWorkloadOptions) (io.ReadCloser, error) { @@ -304,34 +274,32 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo }), errChan, nil } +type manifestBuilder interface { + addLayer(layerBlobSum digest.Digest, layerBlobSize int64, diffID digest.Digest) + computeLayerMIMEType(what string, layerCompression archive.Compression) error + buildHistory(extraImageContentDiff string, extraImageContentDiffDigest digest.Digest) error + manifestAndConfig() ([]byte, []byte, error) +} + +type dockerSchema2ManifestBuilder struct { + i *containerImageRef + mediaType string + dimage docker.V2Image + dmanifest docker.V2S2Manifest +} + // Build fresh copies of the container configuration structures so that we can edit them -// without making unintended changes to the original Builder. -func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, docker.V2Image, docker.V2S2Manifest, error) { +// without making unintended changes to the original Builder (Docker schema 2). +func newDockerSchema2ManifestBuilder(i *containerImageRef) (manifestBuilder, error) { created := time.Now().UTC() if i.created != nil { created = *i.created } - // Build an empty image, and then decode over it. - oimage := v1.Image{} - if err := json.Unmarshal(i.oconfig, &oimage); err != nil { - return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err - } - // Always replace this value, since we're newer than our base image. - oimage.Created = &created - // Clear the list of diffIDs, since we always repopulate it. 
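The new manifestBuilder interface gathers the format-specific bookkeeping behind four calls. As a rough illustration of the intended call order, here is a sketch written as if it sat next to the interface in this package; the driveManifestBuilder helper and the layerInfo struct are hypothetical, and the digest/archive types are the ones image.go already uses.

// Illustrative only: roughly how NewImageSource drives a manifestBuilder
// after this refactor. layerInfo is a hypothetical stand-in for the data
// the real extraction loop gathers per layer.
type layerInfo struct {
	blobSum digest.Digest // possibly-compressed blob digest
	size    int64         // blob size in bytes
	diffID  digest.Digest // uncompressed digest
}

func driveManifestBuilder(mb manifestBuilder, layers []layerInfo, compression archive.Compression) ([]byte, []byte, error) {
	for _, l := range layers {
		// Decide the layer media type for the compression in use (the real
		// loop skips this for blobs it reuses verbatim)...
		if err := mb.computeLayerMIMEType("layer", compression); err != nil {
			return nil, nil, err
		}
		// ...then record the blob in the manifest and its diffID in the config.
		mb.addLayer(l.blobSum, l.size, l.diffID)
	}
	// Append history entries once all layers are known; an empty string means
	// no extra image-content diff layer was added.
	if err := mb.buildHistory("", ""); err != nil {
		return nil, nil, err
	}
	// Serialize the format-specific manifest and config blobs.
	return mb.manifestAndConfig()
}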
- oimage.RootFS.Type = docker.TypeLayers - oimage.RootFS.DiffIDs = []digest.Digest{} - // Only clear the history if we're squashing, otherwise leave it be so that we can append - // entries to it. - if i.confidentialWorkload.Convert || i.squash || i.omitHistory { - oimage.History = []v1.History{} - } - // Build an empty image, and then decode over it. dimage := docker.V2Image{} if err := json.Unmarshal(i.dconfig, &dimage); err != nil { - return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err + return nil, err } // Set the parent, but only if we want to be compatible with "classic" docker build. if i.compatSetParent == types.OptionalBoolTrue { @@ -358,64 +326,409 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, // If we were supplied with a configuration, copy fields from it to // matching fields in both formats. - if err := config.Override(dimage.Config, &oimage.Config, i.overrideChanges, i.overrideConfig); err != nil { - return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, fmt.Errorf("applying changes: %w", err) + if err := config.OverrideDocker(dimage.Config, i.overrideChanges, i.overrideConfig); err != nil { + return nil, fmt.Errorf("applying changes: %w", err) } // If we're producing a confidential workload, override the command and // assorted other settings that aren't expected to work correctly. if i.confidentialWorkload.Convert { dimage.Config.Entrypoint = []string{"/entrypoint"} - oimage.Config.Entrypoint = []string{"/entrypoint"} dimage.Config.Cmd = nil - oimage.Config.Cmd = nil dimage.Config.User = "" - oimage.Config.User = "" dimage.Config.WorkingDir = "" - oimage.Config.WorkingDir = "" dimage.Config.Healthcheck = nil dimage.Config.Shell = nil dimage.Config.Volumes = nil - oimage.Config.Volumes = nil dimage.Config.ExposedPorts = nil - oimage.Config.ExposedPorts = nil } - // Build empty manifests. The Layers lists will be populated later. - omanifest := v1.Manifest{ - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - MediaType: v1.MediaTypeImageManifest, - Config: v1.Descriptor{ - MediaType: v1.MediaTypeImageConfig, + // Build empty manifest. The Layers lists will be populated later. + return &dockerSchema2ManifestBuilder{ + i: i, + mediaType: docker.V2S2MediaTypeUncompressedLayer, + dimage: dimage, + dmanifest: docker.V2S2Manifest{ + V2Versioned: docker.V2Versioned{ + SchemaVersion: 2, + MediaType: manifest.DockerV2Schema2MediaType, + }, + Config: docker.V2S2Descriptor{ + MediaType: manifest.DockerV2Schema2ConfigMediaType, + }, + Layers: []docker.V2S2Descriptor{}, }, - Layers: []v1.Descriptor{}, - Annotations: i.annotations, + }, nil +} + +func (mb *dockerSchema2ManifestBuilder) addLayer(layerBlobSum digest.Digest, layerBlobSize int64, diffID digest.Digest) { + dlayerDescriptor := docker.V2S2Descriptor{ + MediaType: mb.mediaType, + Digest: layerBlobSum, + Size: layerBlobSize, } + mb.dmanifest.Layers = append(mb.dmanifest.Layers, dlayerDescriptor) + // Note this layer in the list of diffIDs, again using the uncompressed digest. + mb.dimage.RootFS.DiffIDs = append(mb.dimage.RootFS.DiffIDs, diffID) +} - dmanifest := docker.V2S2Manifest{ - V2Versioned: docker.V2Versioned{ - SchemaVersion: 2, - MediaType: manifest.DockerV2Schema2MediaType, - }, - Config: docker.V2S2Descriptor{ - MediaType: manifest.DockerV2Schema2ConfigMediaType, +// Compute the media types which we need to attach to a layer, given the type of +// compression that we'll be applying. 
+func (mb *dockerSchema2ManifestBuilder) computeLayerMIMEType(what string, layerCompression archive.Compression) error { + dmediaType := docker.V2S2MediaTypeUncompressedLayer + if layerCompression != archive.Uncompressed { + switch layerCompression { + case archive.Gzip: + dmediaType = manifest.DockerV2Schema2LayerMediaType + logrus.Debugf("compressing %s with gzip", what) + case archive.Bzip2: + // Until the image specs define a media type for bzip2-compressed layers, even if we know + // how to decompress them, we can't try to compress layers with bzip2. + return errors.New("media type for bzip2-compressed layers is not defined") + case archive.Xz: + // Until the image specs define a media type for xz-compressed layers, even if we know + // how to decompress them, we can't try to compress layers with xz. + return errors.New("media type for xz-compressed layers is not defined") + case archive.Zstd: + // Until the image specs define a media type for zstd-compressed layers, even if we know + // how to decompress them, we can't try to compress layers with zstd. + return errors.New("media type for zstd-compressed layers is not defined") + default: + logrus.Debugf("compressing %s with unknown compressor(?)", what) + } + } + mb.mediaType = dmediaType + return nil +} + +func (mb *dockerSchema2ManifestBuilder) buildHistory(extraImageContentDiff string, extraImageContentDiffDigest digest.Digest) error { + // Build history notes in the image configurations. + appendHistory := func(history []v1.History, empty bool) { + for i := range history { + var created *time.Time + if history[i].Created != nil { + copiedTimestamp := *history[i].Created + created = &copiedTimestamp + } + if created == nil { + created = &time.Time{} + } + dnews := docker.V2S2History{ + Created: *created, + CreatedBy: history[i].CreatedBy, + Author: history[i].Author, + Comment: history[i].Comment, + EmptyLayer: empty, + } + mb.dimage.History = append(mb.dimage.History, dnews) + } + } + + // Keep track of how many entries the base image's history had + // before we started adding to it. + baseImageHistoryLen := len(mb.dimage.History) + + // Add history entries for prepended empty layers. + appendHistory(mb.i.preEmptyLayers, true) + // Add history entries for prepended API-supplied layers. + for _, h := range mb.i.preLayers { + appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer) + } + // Add a history entry for this layer, empty or not. + created := time.Now().UTC() + if mb.i.created != nil { + created = (*mb.i.created).UTC() + } + dnews := docker.V2S2History{ + Created: created, + CreatedBy: mb.i.createdBy, + Author: mb.dimage.Author, + EmptyLayer: mb.i.emptyLayer, + Comment: mb.i.historyComment, + } + mb.dimage.History = append(mb.dimage.History, dnews) + // Add a history entry for the extra image content if we added a layer for it. + // This diff was added to the list of layers before API-supplied layers that + // needed to be appended, and we need to keep the order of history entries for + // not-empty layers consistent with that. + if extraImageContentDiff != "" { + createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded()) + dnews := docker.V2S2History{ + Created: created, + CreatedBy: createdBy, + } + mb.dimage.History = append(mb.dimage.History, dnews) + } + // Add history entries for appended empty layers. + appendHistory(mb.i.postEmptyLayers, true) + // Add history entries for appended API-supplied layers. 
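Both builders implement the same compression policy: gzip is the only compressor with a defined layer media type here, and bzip2/xz/zstd are rejected before any data is written. Below is a standalone sketch of that mapping; the layerMediaTypes function is made up for illustration, and the string literals are the values the named media-type constants resolve to.

package main

import (
	"errors"
	"fmt"

	"github.com/containers/storage/pkg/archive"
)

// layerMediaTypes mirrors the policy above: default to the uncompressed
// types, switch to the gzip types for archive.Gzip, and refuse bzip2/xz/zstd,
// which this code treats as having no defined layer media type.
func layerMediaTypes(c archive.Compression) (ociType, dockerType string, err error) {
	ociType = "application/vnd.oci.image.layer.v1.tar"          // v1.MediaTypeImageLayer
	dockerType = "application/vnd.docker.image.rootfs.diff.tar" // docker.V2S2MediaTypeUncompressedLayer
	switch c {
	case archive.Uncompressed:
		// Keep the defaults.
	case archive.Gzip:
		ociType = "application/vnd.oci.image.layer.v1.tar+gzip"
		dockerType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
	case archive.Bzip2, archive.Xz, archive.Zstd:
		return "", "", errors.New("media type for this compression is not defined here")
	default:
		// The real code logs a debug message for unknown compressors and
		// falls through to the uncompressed defaults.
	}
	return ociType, dockerType, nil
}

func main() {
	o, d, err := layerMediaTypes(archive.Gzip)
	fmt.Println(o, d, err)
}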
+ for _, h := range mb.i.postLayers { + appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer) + } + + // Assemble a comment indicating which base image was used, if it wasn't + // just an image ID, and add it to the first history entry we added. + var fromComment string + if strings.Contains(mb.i.parent, mb.i.fromImageID) && mb.i.fromImageName != "" && !strings.HasPrefix(mb.i.fromImageID, mb.i.fromImageName) { + if mb.dimage.History[baseImageHistoryLen].Comment != "" { + fromComment = " " + } + fromComment += "FROM " + mb.i.fromImageName + } + mb.dimage.History[baseImageHistoryLen].Comment += fromComment + + // Confidence check that we didn't just create a mismatch between non-empty layers in the + // history and the number of diffIDs. Only applicable if the base image (if there was + // one) provided us at least one entry to use as a starting point. + if baseImageHistoryLen != 0 { + expectedDiffIDs := expectedDockerDiffIDs(mb.dimage) + if len(mb.dimage.RootFS.DiffIDs) != expectedDiffIDs { + return fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(mb.dimage.RootFS.DiffIDs)) + } + } + return nil +} + +func (mb *dockerSchema2ManifestBuilder) manifestAndConfig() ([]byte, []byte, error) { + // Encode the image configuration blob. + dconfig, err := json.Marshal(&mb.dimage) + if err != nil { + return nil, nil, fmt.Errorf("encoding %#v as json: %w", mb.dimage, err) + } + logrus.Debugf("Docker v2s2 config = %s", dconfig) + + // Add the configuration blob to the manifest. + mb.dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig) + mb.dmanifest.Config.Size = int64(len(dconfig)) + mb.dmanifest.Config.MediaType = manifest.DockerV2Schema2ConfigMediaType + + // Encode the manifest. + dmanifestbytes, err := json.Marshal(&mb.dmanifest) + if err != nil { + return nil, nil, fmt.Errorf("encoding %#v as json: %w", mb.dmanifest, err) + } + logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes) + + return dmanifestbytes, dconfig, nil +} + +type ociManifestBuilder struct { + i *containerImageRef + mediaType string + oimage v1.Image + omanifest v1.Manifest +} + +// Build fresh copies of the container configuration structures so that we can edit them +// without making unintended changes to the original Builder (OCI manifest). +func newOCIManifestBuilder(i *containerImageRef) (manifestBuilder, error) { + created := time.Now().UTC() + if i.created != nil { + created = *i.created + } + + // Build an empty image, and then decode over it. + oimage := v1.Image{} + if err := json.Unmarshal(i.oconfig, &oimage); err != nil { + return nil, err + } + // Always replace this value, since we're newer than our base image. + oimage.Created = &created + // Clear the list of diffIDs, since we always repopulate it. + oimage.RootFS.Type = docker.TypeLayers + oimage.RootFS.DiffIDs = []digest.Digest{} + // Only clear the history if we're squashing, otherwise leave it be so that we can append + // entries to it. + if i.confidentialWorkload.Convert || i.squash || i.omitHistory { + oimage.History = []v1.History{} + } + + // If we were supplied with a configuration, copy fields from it to + // matching fields in both formats. + if err := config.OverrideOCI(&oimage.Config, i.overrideChanges, i.overrideConfig); err != nil { + return nil, fmt.Errorf("applying changes: %w", err) + } + + // If we're producing a confidential workload, override the command and + // assorted other settings that aren't expected to work correctly. 
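The confidence check at the end of buildHistory leans on an invariant shared by both config formats: every history entry not marked as an empty layer must be backed by exactly one diffID. A minimal standalone sketch of the counting rule that expectedDockerDiffIDs/expectedOCIDiffIDs implement, using a pared-down history type:

package main

import "fmt"

// history is a pared-down stand-in for docker.V2S2History / v1.History.
type history struct {
	CreatedBy  string
	EmptyLayer bool
}

// expectedDiffIDs counts the history entries that should be backed by a
// layer on disk: one diffID per non-empty entry.
func expectedDiffIDs(entries []history) int {
	expected := 0
	for _, h := range entries {
		if !h.EmptyLayer {
			expected++
		}
	}
	return expected
}

func main() {
	entries := []history{
		{CreatedBy: "FROM base", EmptyLayer: false},
		{CreatedBy: "ENV FOO=bar", EmptyLayer: true},
		{CreatedBy: "RUN make", EmptyLayer: false},
	}
	diffIDs := 2 // layers recorded via addLayer
	if expectedDiffIDs(entries) != diffIDs {
		fmt.Println("internal error: history and layer counts disagree")
		return
	}
	fmt.Println("history and diffIDs are consistent")
}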
+ if i.confidentialWorkload.Convert { + oimage.Config.Entrypoint = []string{"/entrypoint"} + oimage.Config.Cmd = nil + oimage.Config.User = "" + oimage.Config.WorkingDir = "" + oimage.Config.Volumes = nil + oimage.Config.ExposedPorts = nil + } + + // Build empty manifest. The Layers lists will be populated later. + return &ociManifestBuilder{ + i: i, + // The default layer media type assumes no compression. + mediaType: v1.MediaTypeImageLayer, + oimage: oimage, + omanifest: v1.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + MediaType: v1.MediaTypeImageManifest, + Config: v1.Descriptor{ + MediaType: v1.MediaTypeImageConfig, + }, + Layers: []v1.Descriptor{}, + Annotations: i.annotations, }, - Layers: []docker.V2S2Descriptor{}, + }, nil +} + +func (mb *ociManifestBuilder) addLayer(layerBlobSum digest.Digest, layerBlobSize int64, diffID digest.Digest) { + olayerDescriptor := v1.Descriptor{ + MediaType: mb.mediaType, + Digest: layerBlobSum, + Size: layerBlobSize, } + mb.omanifest.Layers = append(mb.omanifest.Layers, olayerDescriptor) + // Note this layer in the list of diffIDs, again using the uncompressed digest. + mb.oimage.RootFS.DiffIDs = append(mb.oimage.RootFS.DiffIDs, diffID) +} - return oimage, omanifest, dimage, dmanifest, nil +// Compute the media types which we need to attach to a layer, given the type of +// compression that we'll be applying. +func (mb *ociManifestBuilder) computeLayerMIMEType(what string, layerCompression archive.Compression) error { + omediaType := v1.MediaTypeImageLayer + if layerCompression != archive.Uncompressed { + switch layerCompression { + case archive.Gzip: + omediaType = v1.MediaTypeImageLayerGzip + logrus.Debugf("compressing %s with gzip", what) + case archive.Bzip2: + // Until the image specs define a media type for bzip2-compressed layers, even if we know + // how to decompress them, we can't try to compress layers with bzip2. + return errors.New("media type for bzip2-compressed layers is not defined") + case archive.Xz: + // Until the image specs define a media type for xz-compressed layers, even if we know + // how to decompress them, we can't try to compress layers with xz. + return errors.New("media type for xz-compressed layers is not defined") + case archive.Zstd: + // Until the image specs define a media type for zstd-compressed layers, even if we know + // how to decompress them, we can't try to compress layers with zstd. + return errors.New("media type for zstd-compressed layers is not defined") + default: + logrus.Debugf("compressing %s with unknown compressor(?)", what) + } + } + mb.mediaType = omediaType + return nil } -func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemContext) (src types.ImageSource, err error) { - // Decide which type of manifest and configuration output we're going to provide. - manifestType := i.preferredManifestType - // If it's not a format we support, return an error. - if manifestType != v1.MediaTypeImageManifest && manifestType != manifest.DockerV2Schema2MediaType { - return nil, fmt.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)", - manifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType) +func (mb *ociManifestBuilder) buildHistory(extraImageContentDiff string, extraImageContentDiffDigest digest.Digest) error { + // Build history notes in the image configurations. 
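addLayer keeps the manifest and the config in step: the manifest records a descriptor for the (possibly compressed) blob, while RootFS.DiffIDs always records the uncompressed digest. A standalone sketch with the image-spec types; the digests are derived from placeholder strings purely for illustration.

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	var m v1.Manifest
	var img v1.Image

	// Stand-ins for a real layer: the blob digest is what lands in the
	// manifest, the diffID is what lands in the config.
	blobSum := digest.FromString("compressed layer bytes")
	diffID := digest.FromString("uncompressed layer bytes")

	// Manifest side: media type, blob digest, and blob size.
	m.Layers = append(m.Layers, v1.Descriptor{
		MediaType: v1.MediaTypeImageLayerGzip,
		Digest:    blobSum,
		Size:      1234,
	})
	// Config side: always the uncompressed digest ("layers" is the literal
	// behind docker.TypeLayers).
	img.RootFS.Type = "layers"
	img.RootFS.DiffIDs = append(img.RootFS.DiffIDs, diffID)

	fmt.Println(len(m.Layers), len(img.RootFS.DiffIDs)) // must stay equal
}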
+ appendHistory := func(history []v1.History, empty bool) { + for i := range history { + var created *time.Time + if history[i].Created != nil { + copiedTimestamp := *history[i].Created + created = &copiedTimestamp + } + onews := v1.History{ + Created: created, + CreatedBy: history[i].CreatedBy, + Author: history[i].Author, + Comment: history[i].Comment, + EmptyLayer: empty, + } + mb.oimage.History = append(mb.oimage.History, onews) + } + } + + // Keep track of how many entries the base image's history had + // before we started adding to it. + baseImageHistoryLen := len(mb.oimage.History) + + // Add history entries for prepended empty layers. + appendHistory(mb.i.preEmptyLayers, true) + // Add history entries for prepended API-supplied layers. + for _, h := range mb.i.preLayers { + appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer) } + // Add a history entry for this layer, empty or not. + created := time.Now().UTC() + if mb.i.created != nil { + created = (*mb.i.created).UTC() + } + onews := v1.History{ + Created: &created, + CreatedBy: mb.i.createdBy, + Author: mb.oimage.Author, + EmptyLayer: mb.i.emptyLayer, + Comment: mb.i.historyComment, + } + mb.oimage.History = append(mb.oimage.History, onews) + // Add a history entry for the extra image content if we added a layer for it. + // This diff was added to the list of layers before API-supplied layers that + // needed to be appended, and we need to keep the order of history entries for + // not-empty layers consistent with that. + if extraImageContentDiff != "" { + createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded()) + onews := v1.History{ + Created: &created, + CreatedBy: createdBy, + } + mb.oimage.History = append(mb.oimage.History, onews) + } + // Add history entries for appended empty layers. + appendHistory(mb.i.postEmptyLayers, true) + // Add history entries for appended API-supplied layers. + for _, h := range mb.i.postLayers { + appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer) + } + + // Assemble a comment indicating which base image was used, if it wasn't + // just an image ID, and add it to the first history entry we added. + var fromComment string + if strings.Contains(mb.i.parent, mb.i.fromImageID) && mb.i.fromImageName != "" && !strings.HasPrefix(mb.i.fromImageID, mb.i.fromImageName) { + if mb.oimage.History[baseImageHistoryLen].Comment != "" { + fromComment = " " + } + fromComment += "FROM " + mb.i.fromImageName + } + mb.oimage.History[baseImageHistoryLen].Comment += fromComment + + // Confidence check that we didn't just create a mismatch between non-empty layers in the + // history and the number of diffIDs. Only applicable if the base image (if there was + // one) provided us at least one entry to use as a starting point. + if baseImageHistoryLen != 0 { + expectedDiffIDs := expectedOCIDiffIDs(mb.oimage) + if len(mb.oimage.RootFS.DiffIDs) != expectedDiffIDs { + return fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(mb.oimage.RootFS.DiffIDs)) + } + } + return nil +} + +func (mb *ociManifestBuilder) manifestAndConfig() ([]byte, []byte, error) { + // Encode the image configuration blob. + oconfig, err := json.Marshal(&mb.oimage) + if err != nil { + return nil, nil, fmt.Errorf("encoding %#v as json: %w", mb.oimage, err) + } + logrus.Debugf("OCIv1 config = %s", oconfig) + + // Add the configuration blob to the manifest. 
+ mb.omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig) + mb.omanifest.Config.Size = int64(len(oconfig)) + mb.omanifest.Config.MediaType = v1.MediaTypeImageConfig + + // Encode the manifest. + omanifestbytes, err := json.Marshal(&mb.omanifest) + if err != nil { + return nil, nil, fmt.Errorf("encoding %#v as json: %w", mb.omanifest, err) + } + logrus.Debugf("OCIv1 manifest = %s", omanifestbytes) + + return omanifestbytes, oconfig, nil +} + +func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemContext) (src types.ImageSource, err error) { // These maps will let us check if a layer ID is part of one group or another. parentLayerIDs := make(map[string]bool) apiLayerIDs := make(map[string]bool) @@ -492,9 +805,21 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon // Build fresh copies of the configurations and manifest so that we don't mess with any // values in the Builder object itself. - oimage, omanifest, dimage, dmanifest, err := i.createConfigsAndManifests() - if err != nil { - return nil, err + var mb manifestBuilder + switch i.preferredManifestType { + case v1.MediaTypeImageManifest: + mb, err = newOCIManifestBuilder(i) + if err != nil { + return nil, err + } + case manifest.DockerV2Schema2MediaType: + mb, err = newDockerSchema2ManifestBuilder(i) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)", + i.preferredManifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType) } // Extract each layer and compute its digests, both compressed (if requested) and uncompressed. @@ -512,9 +837,6 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon if apiLayerIDs[layerID] { what = layerID } - // The default layer media type assumes no compression. - omediaType := v1.MediaTypeImageLayer - dmediaType := docker.V2S2MediaTypeUncompressedLayer // Look up this layer. var layerUncompressedDigest digest.Digest var layerUncompressedSize int64 @@ -552,21 +874,7 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon layerBlobSize := layerUncompressedSize diffID := layerUncompressedDigest // Note this layer in the manifest, using the appropriate blobsum. - olayerDescriptor := v1.Descriptor{ - MediaType: omediaType, - Digest: layerBlobSum, - Size: layerBlobSize, - } - omanifest.Layers = append(omanifest.Layers, olayerDescriptor) - dlayerDescriptor := docker.V2S2Descriptor{ - MediaType: dmediaType, - Digest: layerBlobSum, - Size: layerBlobSize, - } - dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor) - // Note this layer in the list of diffIDs, again using the uncompressed digest. - oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, diffID) - dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, diffID) + mb.addLayer(layerBlobSum, layerBlobSize, diffID) blobLayers[diffID] = blobLayerInfo{ ID: layerID, Size: layerBlobSize, @@ -574,8 +882,7 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon continue } // Figure out if we need to change the media type, in case we've changed the compression. - omediaType, dmediaType, err = computeLayerMIMEType(what, i.compression) - if err != nil { + if err := mb.computeLayerMIMEType(what, i.compression); err != nil { return nil, err } // Start reading either the layer or the whole container rootfs. 
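manifestAndConfig ties the two blobs together the same way in both formats: the config is marshaled first, and the manifest's Config descriptor is filled with the canonical digest and byte length of exactly that encoding. A standalone sketch of that linkage using the OCI types, with an empty image config for brevity:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/specs-go"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	img := v1.Image{} // stands in for the fully populated image config

	configBytes, err := json.Marshal(&img)
	if err != nil {
		panic(err)
	}

	m := v1.Manifest{
		Versioned: specs.Versioned{SchemaVersion: 2},
		MediaType: v1.MediaTypeImageManifest,
		Config: v1.Descriptor{
			MediaType: v1.MediaTypeImageConfig,
			// The descriptor must describe the exact bytes being shipped.
			Digest: digest.Canonical.FromBytes(configBytes),
			Size:   int64(len(configBytes)),
		},
	}

	manifestBytes, err := json.Marshal(&m)
	if err != nil {
		panic(err)
	}
	fmt.Printf("config %s (%d bytes), manifest %d bytes\n", m.Config.Digest, m.Config.Size, len(manifestBytes))
}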
@@ -720,187 +1027,19 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon } // Add a note in the manifest about the layer. The blobs are identified by their possibly- // compressed blob digests. - olayerDescriptor := v1.Descriptor{ - MediaType: omediaType, - Digest: destHasher.Digest(), - Size: size, - } - omanifest.Layers = append(omanifest.Layers, olayerDescriptor) - dlayerDescriptor := docker.V2S2Descriptor{ - MediaType: dmediaType, - Digest: destHasher.Digest(), - Size: size, - } - dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor) - // Add a note about the diffID, which is always the layer's uncompressed digest. - oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest()) - dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest()) - } - - // Build history notes in the image configurations. - appendHistory := func(history []v1.History, empty bool) { - for i := range history { - var created *time.Time - if history[i].Created != nil { - copiedTimestamp := *history[i].Created - created = &copiedTimestamp - } - onews := v1.History{ - Created: created, - CreatedBy: history[i].CreatedBy, - Author: history[i].Author, - Comment: history[i].Comment, - EmptyLayer: empty, - } - oimage.History = append(oimage.History, onews) - if created == nil { - created = &time.Time{} - } - dnews := docker.V2S2History{ - Created: *created, - CreatedBy: history[i].CreatedBy, - Author: history[i].Author, - Comment: history[i].Comment, - EmptyLayer: empty, - } - dimage.History = append(dimage.History, dnews) - } + mb.addLayer(destHasher.Digest(), size, srcHasher.Digest()) } // Only attempt to append history if history was not disabled explicitly. if !i.omitHistory { - // Keep track of how many entries the base image's history had - // before we started adding to it. - baseImageHistoryLen := len(oimage.History) - - // Add history entries for prepended empty layers. - appendHistory(i.preEmptyLayers, true) - // Add history entries for prepended API-supplied layers. - for _, h := range i.preLayers { - appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer) - } - // Add a history entry for this layer, empty or not. - created := time.Now().UTC() - if i.created != nil { - created = (*i.created).UTC() - } - onews := v1.History{ - Created: &created, - CreatedBy: i.createdBy, - Author: oimage.Author, - EmptyLayer: i.emptyLayer, - Comment: i.historyComment, - } - oimage.History = append(oimage.History, onews) - dnews := docker.V2S2History{ - Created: created, - CreatedBy: i.createdBy, - Author: dimage.Author, - EmptyLayer: i.emptyLayer, - Comment: i.historyComment, - } - dimage.History = append(dimage.History, dnews) - // Add a history entry for the extra image content if we added a layer for it. - // This diff was added to the list of layers before API-supplied layers that - // needed to be appended, and we need to keep the order of history entries for - // not-empty layers consistent with that. - if extraImageContentDiff != "" { - createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded()) - onews := v1.History{ - Created: &created, - CreatedBy: createdBy, - } - oimage.History = append(oimage.History, onews) - dnews := docker.V2S2History{ - Created: created, - CreatedBy: createdBy, - } - dimage.History = append(dimage.History, dnews) - } - // Add history entries for appended empty layers. 
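Inside the extraction loop, the blob digest passed to addLayer comes from the stream that is actually written out, while the diffID comes from the uncompressed stream. A simplified standalone sketch of that two-hasher arrangement; the gzip plumbing and countingWriter stand in for the real layer extraction and are not part of this change.

package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	layer := strings.NewReader("pretend this is a tarball of the layer")

	// srcHasher sees the uncompressed bytes (the diffID); destHasher sees
	// what is actually written out (the blob digest), here gzip-compressed.
	srcHasher := digest.Canonical.Digester()
	destHasher := digest.Canonical.Digester()

	counter := &countingWriter{w: destHasher.Hash()}
	gz := gzip.NewWriter(counter)
	// Tee the uncompressed stream into srcHasher while compressing it.
	if _, err := io.Copy(gz, io.TeeReader(layer, srcHasher.Hash())); err != nil {
		panic(err)
	}
	if err := gz.Close(); err != nil {
		panic(err)
	}

	fmt.Println("blob digest:", destHasher.Digest(), "size:", counter.n)
	fmt.Println("diffID:     ", srcHasher.Digest())
}

// countingWriter tracks how many compressed bytes were produced.
type countingWriter struct {
	w io.Writer
	n int64
}

func (c *countingWriter) Write(p []byte) (int, error) {
	n, err := c.w.Write(p)
	c.n += int64(n)
	return n, err
}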
- appendHistory(i.postEmptyLayers, true) - // Add history entries for appended API-supplied layers. - for _, h := range i.postLayers { - appendHistory([]v1.History{h.linkedLayer.History}, h.linkedLayer.History.EmptyLayer) - } - - // Assemble a comment indicating which base image was used, if it wasn't - // just an image ID, and add it to the first history entry we added. - var fromComment string - if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != "" && !strings.HasPrefix(i.fromImageID, i.fromImageName) { - if oimage.History[baseImageHistoryLen].Comment != "" { - fromComment = " " - } - fromComment += "FROM " + i.fromImageName - } - oimage.History[baseImageHistoryLen].Comment += fromComment - dimage.History[baseImageHistoryLen].Comment += fromComment - - // Confidence check that we didn't just create a mismatch between non-empty layers in the - // history and the number of diffIDs. Only applicable if the base image (if there was - // one) provided us at least one entry to use as a starting point. - if baseImageHistoryLen != 0 { - expectedDiffIDs := expectedOCIDiffIDs(oimage) - if len(oimage.RootFS.DiffIDs) != expectedDiffIDs { - return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs)) - } - expectedDiffIDs = expectedDockerDiffIDs(dimage) - if len(dimage.RootFS.DiffIDs) != expectedDiffIDs { - return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs)) - } + if err := mb.buildHistory(extraImageContentDiff, extraImageContentDiffDigest); err != nil { + return nil, err } } - // Encode the image configuration blob. - oconfig, err := json.Marshal(&oimage) + imageManifest, config, err := mb.manifestAndConfig() if err != nil { - return nil, fmt.Errorf("encoding %#v as json: %w", oimage, err) - } - logrus.Debugf("OCIv1 config = %s", oconfig) - - // Add the configuration blob to the manifest. - omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig) - omanifest.Config.Size = int64(len(oconfig)) - omanifest.Config.MediaType = v1.MediaTypeImageConfig - - // Encode the manifest. - omanifestbytes, err := json.Marshal(&omanifest) - if err != nil { - return nil, fmt.Errorf("encoding %#v as json: %w", omanifest, err) - } - logrus.Debugf("OCIv1 manifest = %s", omanifestbytes) - - // Encode the image configuration blob. - dconfig, err := json.Marshal(&dimage) - if err != nil { - return nil, fmt.Errorf("encoding %#v as json: %w", dimage, err) - } - logrus.Debugf("Docker v2s2 config = %s", dconfig) - - // Add the configuration blob to the manifest. - dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig) - dmanifest.Config.Size = int64(len(dconfig)) - dmanifest.Config.MediaType = manifest.DockerV2Schema2ConfigMediaType - - // Encode the manifest. - dmanifestbytes, err := json.Marshal(&dmanifest) - if err != nil { - return nil, fmt.Errorf("encoding %#v as json: %w", dmanifest, err) - } - logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes) - - // Decide which manifest and configuration blobs we'll actually output. 
- var config []byte - var imageManifest []byte - switch manifestType { - case v1.MediaTypeImageManifest: - imageManifest = omanifestbytes - config = oconfig - case manifest.DockerV2Schema2MediaType: - imageManifest = dmanifestbytes - config = dconfig - default: - panic("unreachable code: unsupported manifest type") + return nil, err } src = &containerImageSource{ path: path, @@ -914,7 +1053,7 @@ func (i *containerImageRef) NewImageSource(_ context.Context, _ *types.SystemCon config: config, configDigest: digest.Canonical.FromBytes(config), manifest: imageManifest, - manifestType: manifestType, + manifestType: i.preferredManifestType, blobDirectory: i.blobDirectory, blobLayers: blobLayers, } diff --git a/internal/config/override.go b/internal/config/override.go index a1dfebf6953..431b014cc57 100644 --- a/internal/config/override.go +++ b/internal/config/override.go @@ -64,74 +64,124 @@ func mergeEnv(a, b []string) []string { return results } -// Override takes a buildah docker config and an OCI ImageConfig, and applies a +func parseOverrideChanges(overrideChanges []string, overrideConfig *manifest.Schema2Config) (*manifest.Schema2Config, error) { + if len(overrideChanges) == 0 { + return overrideConfig, nil + } + if overrideConfig == nil { + overrideConfig = &manifest.Schema2Config{} + } + // Parse the set of changes as we would a Dockerfile. + changes := strings.Join(overrideChanges, "\n") + parsed, err := imagebuilder.ParseDockerfile(strings.NewReader(changes)) + if err != nil { + return overrideConfig, fmt.Errorf("parsing change set %+v: %w", changes, err) + } + // Create a dummy builder object to process configuration-related + // instructions. + subBuilder := imagebuilder.NewBuilder(nil) + // Convert the incoming data into an initial RunConfig. + subBuilder.RunConfig = *GoDockerclientConfigFromSchema2Config(overrideConfig) + // Process the change instructions one by one. + for _, node := range parsed.Children { + var step imagebuilder.Step + if err := step.Resolve(node); err != nil { + return overrideConfig, fmt.Errorf("resolving change %q: %w", node.Original, err) + } + if err := subBuilder.Run(&step, &configOnlyExecutor{}, true); err != nil { + return overrideConfig, fmt.Errorf("processing change %q: %w", node.Original, err) + } + } + // Pull settings out of the dummy builder's RunConfig. + return Schema2ConfigFromGoDockerclientConfig(&subBuilder.RunConfig), nil +} + +// OverrideOCI takes a buildah docker config and an OCI ImageConfig, and applies a // mixture of a slice of Dockerfile-style instructions and fields from a config // blob to them both -func Override(dconfig *docker.Config, oconfig *v1.ImageConfig, overrideChanges []string, overrideConfig *manifest.Schema2Config) error { - if len(overrideChanges) > 0 { - if overrideConfig == nil { - overrideConfig = &manifest.Schema2Config{} +func OverrideOCI(oconfig *v1.ImageConfig, overrideChanges []string, overrideConfig *manifest.Schema2Config) error { + overrideConfig, err := parseOverrideChanges(overrideChanges, overrideConfig) + if err != nil { + return err + } + + if overrideConfig != nil { + // Apply changes from a possibly-provided possibly-changed config struct. 
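parseOverrideChanges treats the change strings as a tiny Dockerfile: it joins them with newlines, parses the result, and replays each instruction against a throwaway imagebuilder builder seeded from the supplied Schema2Config. A standalone sketch of just the parsing half, using the same imagebuilder entry points; the replay against the dummy builder and executor is omitted.

package main

import (
	"fmt"
	"strings"

	"github.com/openshift/imagebuilder"
)

func main() {
	// Dockerfile-style overrides as they might arrive from a caller.
	overrideChanges := []string{
		`ENV FOO=bar`,
		`ENTRYPOINT ["/bin/sh", "-c"]`,
		`EXPOSE 8080`,
	}

	// Same first step as parseOverrideChanges: treat the changes as one
	// small Dockerfile.
	parsed, err := imagebuilder.ParseDockerfile(strings.NewReader(strings.Join(overrideChanges, "\n")))
	if err != nil {
		panic(err)
	}

	// Resolve each instruction; the real helper then runs these steps
	// against a dummy builder to fold them into a Schema2Config.
	for _, node := range parsed.Children {
		var step imagebuilder.Step
		if err := step.Resolve(node); err != nil {
			panic(fmt.Errorf("resolving change %q: %w", node.Original, err))
		}
		fmt.Printf("%s -> args %v\n", step.Command, step.Args)
	}
}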
+ oconfig.User = firstStringElseSecondString(overrideConfig.User, oconfig.User) + if len(overrideConfig.ExposedPorts) > 0 { + dexposedPorts := make(map[docker.Port]struct{}) + oexposedPorts := make(map[string]struct{}) + for port := range overrideConfig.ExposedPorts { + dexposedPorts[docker.Port(port)] = struct{}{} + } + for port := range oconfig.ExposedPorts { + oexposedPorts[port] = struct{}{} + } + for port := range overrideConfig.ExposedPorts { + oexposedPorts[string(port)] = struct{}{} + } + oconfig.ExposedPorts = oexposedPorts + } + if len(overrideConfig.Env) > 0 { + oconfig.Env = mergeEnv(oconfig.Env, overrideConfig.Env) } - // Parse the set of changes as we would a Dockerfile. - changes := strings.Join(overrideChanges, "\n") - parsed, err := imagebuilder.ParseDockerfile(strings.NewReader(changes)) - if err != nil { - return fmt.Errorf("parsing change set %+v: %w", changes, err) + oconfig.Entrypoint, oconfig.Cmd = firstSlicePairElseSecondSlicePair(overrideConfig.Entrypoint, overrideConfig.Cmd, oconfig.Entrypoint, oconfig.Cmd) + if len(overrideConfig.Volumes) > 0 { + if oconfig.Volumes == nil { + oconfig.Volumes = make(map[string]struct{}) + } + for volume := range overrideConfig.Volumes { + oconfig.Volumes[volume] = struct{}{} + } } - // Create a dummy builder object to process configuration-related - // instructions. - subBuilder := imagebuilder.NewBuilder(nil) - // Convert the incoming data into an initial RunConfig. - subBuilder.RunConfig = *GoDockerclientConfigFromSchema2Config(overrideConfig) - // Process the change instructions one by one. - for _, node := range parsed.Children { - var step imagebuilder.Step - if err := step.Resolve(node); err != nil { - return fmt.Errorf("resolving change %q: %w", node.Original, err) + oconfig.WorkingDir = firstStringElseSecondString(overrideConfig.WorkingDir, oconfig.WorkingDir) + if len(overrideConfig.Labels) > 0 { + if oconfig.Labels == nil { + oconfig.Labels = make(map[string]string) } - if err := subBuilder.Run(&step, &configOnlyExecutor{}, true); err != nil { - return fmt.Errorf("processing change %q: %w", node.Original, err) + for k, v := range overrideConfig.Labels { + oconfig.Labels[k] = v } } - // Pull settings out of the dummy builder's RunConfig. - overrideConfig = Schema2ConfigFromGoDockerclientConfig(&subBuilder.RunConfig) + oconfig.StopSignal = overrideConfig.StopSignal + } + return nil +} + +// OverrideDocker takes a buildah docker config and applies a mixture of a +// slice of Dockerfile-style instructions and fields from a config blob to it. +func OverrideDocker(dconfig *docker.Config, overrideChanges []string, overrideConfig *manifest.Schema2Config) error { + overrideConfig, err := parseOverrideChanges(overrideChanges, overrideConfig) + if err != nil { + return err } + + if overrideConfig != nil { // Apply changes from a possibly-provided possibly-changed config struct.
dconfig.Hostname = firstStringElseSecondString(overrideConfig.Hostname, dconfig.Hostname) dconfig.Domainname = firstStringElseSecondString(overrideConfig.Domainname, dconfig.Domainname) dconfig.User = firstStringElseSecondString(overrideConfig.User, dconfig.User) - oconfig.User = firstStringElseSecondString(overrideConfig.User, oconfig.User) dconfig.AttachStdin = overrideConfig.AttachStdin dconfig.AttachStdout = overrideConfig.AttachStdout dconfig.AttachStderr = overrideConfig.AttachStderr if len(overrideConfig.ExposedPorts) > 0 { dexposedPorts := make(map[docker.Port]struct{}) - oexposedPorts := make(map[string]struct{}) for port := range dconfig.ExposedPorts { dexposedPorts[port] = struct{}{} } for port := range overrideConfig.ExposedPorts { dexposedPorts[docker.Port(port)] = struct{}{} } - for port := range oconfig.ExposedPorts { - oexposedPorts[port] = struct{}{} - } - for port := range overrideConfig.ExposedPorts { - oexposedPorts[string(port)] = struct{}{} - } dconfig.ExposedPorts = dexposedPorts - oconfig.ExposedPorts = oexposedPorts } dconfig.Tty = overrideConfig.Tty dconfig.OpenStdin = overrideConfig.OpenStdin dconfig.StdinOnce = overrideConfig.StdinOnce if len(overrideConfig.Env) > 0 { dconfig.Env = mergeEnv(dconfig.Env, overrideConfig.Env) - oconfig.Env = mergeEnv(oconfig.Env, overrideConfig.Env) } dconfig.Entrypoint, dconfig.Cmd = firstSlicePairElseSecondSlicePair(overrideConfig.Entrypoint, overrideConfig.Cmd, dconfig.Entrypoint, dconfig.Cmd) - oconfig.Entrypoint, oconfig.Cmd = firstSlicePairElseSecondSlicePair(overrideConfig.Entrypoint, overrideConfig.Cmd, oconfig.Entrypoint, oconfig.Cmd) if overrideConfig.Healthcheck != nil { dconfig.Healthcheck = &docker.HealthConfig{ Test: append([]string{}, overrideConfig.Healthcheck.Test...), @@ -147,16 +197,11 @@ func Override(dconfig *docker.Config, oconfig *v1.ImageConfig, overrideChanges [ if dconfig.Volumes == nil { dconfig.Volumes = make(map[string]struct{}) } - if oconfig.Volumes == nil { - oconfig.Volumes = make(map[string]struct{}) - } for volume := range overrideConfig.Volumes { dconfig.Volumes[volume] = struct{}{} - oconfig.Volumes[volume] = struct{}{} } } dconfig.WorkingDir = firstStringElseSecondString(overrideConfig.WorkingDir, dconfig.WorkingDir) - oconfig.WorkingDir = firstStringElseSecondString(overrideConfig.WorkingDir, oconfig.WorkingDir) dconfig.NetworkDisabled = overrideConfig.NetworkDisabled dconfig.MacAddress = overrideConfig.MacAddress dconfig.OnBuild = overrideConfig.OnBuild @@ -164,16 +209,11 @@ func Override(dconfig *docker.Config, oconfig *v1.ImageConfig, overrideChanges [ if dconfig.Labels == nil { dconfig.Labels = make(map[string]string) } - if oconfig.Labels == nil { - oconfig.Labels = make(map[string]string) - } for k, v := range overrideConfig.Labels { dconfig.Labels[k] = v - oconfig.Labels[k] = v } } dconfig.StopSignal = overrideConfig.StopSignal - oconfig.StopSignal = overrideConfig.StopSignal dconfig.StopTimeout = overrideConfig.StopTimeout dconfig.Shell = firstSliceElseSecondSlice(overrideConfig.Shell, dconfig.Shell) }
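With Override split in two, callers update each config with the variant that matches the manifest format being produced. A sketch of what a call site could look like from elsewhere in this module (internal/config is only importable from inside it); the applyOverrides helper and the placement are hypothetical.

package buildah // illustrative placement inside the module

import (
	"github.com/containers/buildah/docker"
	"github.com/containers/buildah/internal/config"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// applyOverrides is a hypothetical helper showing the split call sites:
// the OCI config and the Docker config are now updated independently.
func applyOverrides(oconfig *v1.ImageConfig, dconfig *docker.Config, changes []string) error {
	// Apply Dockerfile-style changes to the OCI image config...
	if err := config.OverrideOCI(oconfig, changes, nil); err != nil {
		return err
	}
	// ...and the same changes to the Docker schema 2 config.
	return config.OverrideDocker(dconfig, changes, nil)
}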