Skip to content

Commit

Permalink
verify entire integrity
Browse files Browse the repository at this point in the history
Signed-off-by: Naoki MATSUMOTO <[email protected]>
  • Loading branch information
naoki9911 committed Apr 19, 2024
1 parent 15dfa7f commit a805339
Show file tree
Hide file tree
Showing 6 changed files with 243 additions and 73 deletions.
100 changes: 58 additions & 42 deletions pkg/di3fs/di3fs.go
Original file line number Diff line number Diff line change
Expand Up @@ -107,54 +107,62 @@ func (dn *Di3fsNode) readBaseFiles() ([]byte, error) {

func (dn *Di3fsNode) openFileInImage() (fs.FileHandle, uint32, syscall.Errno) {
if len(dn.data) != 0 {
} else if dn.meta.IsNew() {
patchBytes := make([]byte, dn.meta.CompressedSize)
_, err := dn.root.diffImageFile.ReadAt(patchBytes, dn.meta.Offset)
if err != nil {
log.Errorf("failed to read from diffImage offset=%d err=%s", dn.meta.Offset, err)
return 0, 0, syscall.EIO
}
patchBuf := bytes.NewBuffer(patchBytes)
patchReader, err := zstd.NewReader(patchBuf)
if err != nil {
log.Errorf("failed to create zstd Reader err=%s", err)
return 0, 0, syscall.EIO
}
defer patchReader.Close()
dn.data, err = io.ReadAll(patchReader)
if err != nil {
log.Errorf("failed to read with zstd Reader err=%s", err)
return 0, 0, syscall.EIO
}
} else if dn.meta.IsSame() {
data, err := dn.readBaseFiles()
if err != nil {
log.Errorf("failed to read from base: %v", err)
return 0, 0, syscall.EIO
}
dn.data = data
} else {
var patchReader io.Reader
patchBytes := make([]byte, dn.meta.CompressedSize)
_, err := dn.root.diffImageFile.ReadAt(patchBytes, dn.meta.Offset)
if err != nil {
log.Errorf("failed to read from diffImage offset=%d len=%d err=%s", dn.meta.Offset, len(patchBytes), err)
return 0, 0, syscall.EIO
}
patchReader = bytes.NewBuffer(patchBytes)
baseData, err := dn.readBaseFiles()
if err != nil {
log.Errorf("failed to read from base: %v", err)
return 0, 0, syscall.EIO
if dn.meta.IsNew() {
patchBytes := make([]byte, dn.meta.CompressedSize)
_, err := dn.root.diffImageFile.ReadAt(patchBytes, dn.meta.Offset)
if err != nil {
log.Errorf("failed to read from diffImage offset=%d err=%s", dn.meta.Offset, err)
return 0, 0, syscall.EIO
}
patchBuf := bytes.NewBuffer(patchBytes)
patchReader, err := zstd.NewReader(patchBuf)
if err != nil {
log.Errorf("failed to create zstd Reader err=%s", err)
return 0, 0, syscall.EIO
}
defer patchReader.Close()
dn.data, err = io.ReadAll(patchReader)
if err != nil {
log.Errorf("failed to read with zstd Reader err=%s", err)
return 0, 0, syscall.EIO
}
} else if dn.meta.IsSame() {
data, err := dn.readBaseFiles()
if err != nil {
log.Errorf("failed to read from base: %v", err)
return 0, 0, syscall.EIO
}
dn.data = data
} else {
var patchReader io.Reader
patchBytes := make([]byte, dn.meta.CompressedSize)
_, err := dn.root.diffImageFile.ReadAt(patchBytes, dn.meta.Offset)
if err != nil {
log.Errorf("failed to read from diffImage offset=%d len=%d err=%s", dn.meta.Offset, len(patchBytes), err)
return 0, 0, syscall.EIO
}
patchReader = bytes.NewBuffer(patchBytes)
baseData, err := dn.readBaseFiles()
if err != nil {
log.Errorf("failed to read from base: %v", err)
return 0, 0, syscall.EIO
}

newBytes, err := dn.plugin.Patch(baseData, patchReader)
if err != nil {
log.Errorf("Open failed(bsdiff) err=%v", err)
return 0, 0, syscall.EIO
}
dn.data = newBytes
log.Debugf("Successfully patched %s", dn.meta.Name)
}

newBytes, err := dn.plugin.Patch(baseData, patchReader)
err := dn.meta.Verify(dn.data)
if err != nil {
log.Errorf("Open failed(bsdiff) err=%v", err)
log.Errorf("failed to verify %s(%d): %v", dn.path, dn.meta.Type, err)
return 0, 0, syscall.EIO
}
dn.data = newBytes
log.Debugf("Successfully patched %s", dn.meta.Name)
}
return nil, fuse.FOPEN_KEEP_CACHE | fuse.FOPEN_CACHE_DIR, 0
}
Expand Down Expand Up @@ -205,6 +213,14 @@ func (dr *Di3fsNode) OnAdd(ctx context.Context) {
if dr.root.IsBase() && dr.meta.IsBaseRequired() {
log.Fatalf("invalid base image")
}

if !dr.meta.IsFile() {
err := dr.meta.Verify(nil)
if err != nil {
log.Fatalf("failed to verify %s: %v", dr.path, err)
}
}

// here, rootNode is initialized
//log.Debugf("base=%s patch=%s", dr.basePath, dr.patchPath)
for childfName := range dr.meta.Childs {
Expand Down
99 changes: 89 additions & 10 deletions pkg/image/fs.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,12 @@ import (
"fmt"
"io"
"os"
"slices"
"strings"
"syscall"

"github.com/klauspost/compress/zstd"
"github.com/opencontainers/go-digest"
)

type EntryType int
Expand Down Expand Up @@ -68,16 +70,18 @@ func UnmarshalJsonFromCompressed[T any](b []byte) (*T, error) {
}

type FileEntry struct {
Name string `json:"name"`
Size int `json:"size"`
Mode uint32 `json:"mode"`
UID uint32 `json:"uid"`
GID uint32 `json:"gid"`
Type EntryType `json:"type"`
RealPath string `json:"realPath,omitempty"`
Childs map[string]*FileEntry `json:"childs"`
CompressedSize int64 `json:"compressedSize,omitempty"`
Offset int64 `json:"offset,omitempty"`
Name string `json:"name"`
Size int `json:"size"`
Mode uint32 `json:"mode"`
UID uint32 `json:"uid"`
GID uint32 `json:"gid"`
RealPath string `json:"realPath,omitempty"`
Childs map[string]*FileEntry `json:"childs"`

Type EntryType `json:"type"`
CompressedSize int64 `json:"compressedSize,omitempty"`
Offset int64 `json:"offset,omitempty"`
Digest digest.Digest `json:"digest"`
}

func (fe *FileEntry) DeepCopy() *FileEntry {
Expand Down Expand Up @@ -139,6 +143,11 @@ func (fe FileEntry) IsSame() bool {
func (fe FileEntry) IsLink() bool {
return fe.Type == FILE_ENTRY_SYMLINK
}
// IsFile reports whether the entry represents a regular file in any of
// its states (newly added, diffed against a base, or unchanged), as
// opposed to a directory, symlink, or other entry kind.
func (fe FileEntry) IsFile() bool {
	switch fe.Type {
	case FILE_ENTRY_FILE_DIFF, FILE_ENTRY_FILE_NEW, FILE_ENTRY_FILE_SAME:
		return true
	default:
		return false
	}
}

func (fe FileEntry) IsBaseRequired() bool {
return fe.Type == FILE_ENTRY_FILE_DIFF ||
Expand Down Expand Up @@ -185,3 +194,73 @@ func (fe *FileEntry) lookupImpl(paths []string) (*FileEntry, error) {
}
return child.lookupImpl(paths[1:])
}

// feForDigest is the canonical projection of a FileEntry used as the
// digest input. It carries only the entry's logical metadata plus, for
// directories, the child digests — volatile fields such as Type, Offset,
// CompressedSize, and Digest itself are deliberately excluded so the
// digest is stable across re-packing.
//
// NOTE: encoding/json marshals struct fields in declaration order, and
// the digest is computed over the marshaled bytes — do not reorder or
// rename these fields/tags without a format version bump.
type feForDigest struct {
Name string `json:"name"`
Size int `json:"size"`
Mode uint32 `json:"mode"`
UID uint32 `json:"uid"`
GID uint32 `json:"gid"`
RealPath string `json:"realPath,omitempty"`
// Childs holds the digests of the entry's children in sorted name
// order (directories only); empty for non-directories.
Childs []digest.Digest `json:"childs"`
}

// feForDigest builds the digest-input view of this entry: its stable
// metadata fields plus, for directories, the digests of all children in
// sorted name order. It returns an error if any child has not yet been
// assigned a digest.
func (fe *FileEntry) feForDigest() (*feForDigest, error) {
	out := &feForDigest{
		Name:     fe.Name,
		Size:     fe.Size,
		Mode:     fe.Mode,
		UID:      fe.UID,
		GID:      fe.GID,
		RealPath: fe.RealPath,
		Childs:   []digest.Digest{},
	}

	if !fe.IsDir() {
		return out, nil
	}

	// Sort child names so the resulting digest is deterministic,
	// independent of map iteration order.
	names := make([]string, 0, len(fe.Childs))
	for n := range fe.Childs {
		names = append(names, n)
	}
	slices.Sort(names)

	for _, n := range names {
		child := fe.Childs[n]
		if child.Digest == "" {
			return nil, fmt.Errorf("child %s does not have digest", n)
		}
		out.Childs = append(out.Childs, child.Digest)
	}

	return out, nil
}

// GenerateDigest computes the integrity digest for this entry. The
// digest covers the entry's stable metadata (via feForDigest, which for
// directories includes the sorted child digests) and, for file entries,
// the uncompressed file body appended after the marshaled metadata.
//
// body is the uncompressed file content; it is ignored for non-file
// entries and may be nil for them.
func (fe *FileEntry) GenerateDigest(body []byte) (digest.Digest, error) {
	fed, err := fe.feForDigest()
	if err != nil {
		// BUG FIX: was `return "", nil`, which silently produced an
		// empty digest with no error, defeating verification.
		return "", fmt.Errorf("building digest input: %w", err)
	}
	feBytes, err := json.Marshal(fed)
	if err != nil {
		// BUG FIX: was `return "", nil` (error swallowed).
		return "", fmt.Errorf("marshaling digest input: %w", err)
	}

	// For regular files the content participates in the digest; for
	// directories/symlinks the metadata (and child digests) suffice.
	if fe.IsFile() {
		feBytes = append(feBytes, body...)
	}
	return digest.FromBytes(feBytes), nil
}

// Verify recomputes the digest for this entry and compares it against
// the recorded fe.Digest. body is the uncompressed file content for
// file entries (nil for directories and symlinks). It returns a non-nil
// error when digest generation fails or the digests do not match.
func (fe *FileEntry) Verify(body []byte) error {
	d, err := fe.GenerateDigest(body)
	if err != nil {
		// BUG FIX: was `return nil`, which treated a digest-generation
		// failure as a successful verification.
		return fmt.Errorf("generating digest: %w", err)
	}

	if d != fe.Digest {
		// Include both digests so mismatches are diagnosable from logs.
		return fmt.Errorf("digest mismatch: computed %s, expected %s", d, fe.Digest)
	}

	return nil
}
5 changes: 5 additions & 0 deletions pkg/image/merge.go
Original file line number Diff line number Diff line change
Expand Up @@ -289,6 +289,11 @@ func enqueueMergeTaskToQueue(lowerEntry, upperEntry *FileEntry, taskChan chan me
case FILE_ENTRY_FILE_SAME:
// lower must have FILE_NEW or FILE_DIFF
if lowerChild.HasBody() {
// upperChild's metadata can be updated
lowerChild.Mode = upperChild.Mode
lowerChild.UID = upperChild.UID
lowerChild.GID = upperChild.GID
lowerChild.Digest = upperChild.Digest
upperEntry.Childs[upperfName] = lowerChild
taskChan <- mergeTask{
lowerEntry: lowerChild,
Expand Down
Loading

0 comments on commit a805339

Please sign in to comment.