diff --git a/cmd/sszgen/forks.go b/cmd/sszgen/forks.go new file mode 100644 index 0000000..9c13996 --- /dev/null +++ b/cmd/sszgen/forks.go @@ -0,0 +1,39 @@ +// ssz: Go Simple Serialize (SSZ) codec library +// Copyright 2024 ssz Authors +// SPDX-License-Identifier: BSD-3-Clause + +package main + +// forkMapping maps fork names to fork values. This is used internally by the +// ssz codec generator to convert tags to values. +var forkMapping = map[string]string{ + "unknown": "Unknown", + "frontier": "Frontier", + "homestead": "Homestead", + "dao": "DAO", + "tangerine": "Tangerine", + "spurious": "Spurious", + "byzantium": "Byzantium", + "constantinople": "Constantinople", + "istanbul": "Istanbul", + "muir": "Muir", + "phase0": "Phase0", + "berlin": "Berlin", + "london": "London", + "altair": "Altair", + "arrow": "Arrow", + "gray": "Gray", + "bellatrix": "Bellatrix", + "paris": "Paris", + "merge": "Merge", + "shapella": "Shapella", + "shanghai": "Shanghai", + "capella": "Capella", + "dencun": "Dencun", + "cancun": "Cancun", + "deneb": "Deneb", + "pectra": "Pectra", + "prague": "Prague", + "electra": "Electra", + "future": "Future", +} diff --git a/cmd/sszgen/gen.go b/cmd/sszgen/gen.go index 3c153aa..a06ef34 100644 --- a/cmd/sszgen/gen.go +++ b/cmd/sszgen/gen.go @@ -9,8 +9,10 @@ import ( "fmt" "go/types" "html/template" + "io" "math" "sort" + "strings" ) const ( @@ -91,66 +93,115 @@ func generate(ctx *genContext, typ *sszContainer) ([]byte, error) { return bytes.Join(codes, []byte("\n")), nil } +// generateStaticSizeAccumulator is a helper to iterate over all the fields and +// accumulate the static sizes into a `size` variable based on fork constraints. +func generateStaticSizeAccumulator(w io.Writer, ctx *genContext, typ *sszContainer) { + for i := range typ.opsets { + switch { + case typ.forks[i] == "" && i == 0: + fmt.Fprintf(w, " size = ") + case typ.forks[i] == "" && typ.forks[i-1] == "": + fmt.Fprintf(w, " + ") + case typ.forks[i] == "" && typ.forks[i-1] != "": + fmt.Fprintf(w, "\n size += ") + case typ.forks[i] != "" && i > 0 && typ.forks[i-1] != typ.forks[i]: + fmt.Fprintf(w, "\n") + } + if typ.forks[i] != "" { + if i == 0 || typ.forks[i] != typ.forks[i-1] { + if typ.forks[i][0] == '!' { + fmt.Fprintf(w, " if sizer.Fork() < ssz.Fork%s {\n", typ.forks[i][1:]) + } else { + fmt.Fprintf(w, " if sizer.Fork() >= ssz.Fork%s {\n", typ.forks[i]) + } + fmt.Fprintf(w, " size += ") + } else { + fmt.Fprintf(w, " + ") + } + } + switch t := typ.opsets[i].(type) { + case *opsetStatic: + if t.bytes != nil { + if len(t.bytes) == 1 { + fmt.Fprintf(w, "%d", t.bytes[0]) + } else { + fmt.Fprintf(w, "%d*%d", t.bytes[0], t.bytes[1]) + } + } else { + typ := typ.types[i].(*types.Pointer).Elem().(*types.Named) + pkg := typ.Obj().Pkg() + if pkg.Path() == ctx.pkg.Path() { + fmt.Fprintf(w, "(*%s)(nil).SizeSSZ(sizer)", typ.Obj().Name()) + } else { + ctx.addImport(pkg.Path(), "") + fmt.Fprintf(w, "(*%s.%s)(nil).SizeSSZ(sizer)", pkg.Name(), typ.Obj().Name()) + } + } + case *opsetDynamic: + fmt.Fprintf(w, "%d", offsetBytes) + } + if typ.forks[i] != "" && (i == len(typ.forks)-1 || typ.forks[i] != typ.forks[i+1]) { + fmt.Fprintf(w, "\n }") + } + } + fmt.Fprintf(w, " \n") +} + func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { var b bytes.Buffer // Generate the code itself if typ.static { // Iterate through the fields to see if the size can be computed compile - // time or if runtime resolutions are needed - var runtime bool + // time or if runtime resolutions are needed. 
+ var ( + runtime bool + monolith bool + ) for i := range typ.opsets { if typ.opsets[i].(*opsetStatic).bytes == nil { runtime = true - break + } + if typ.forks[i] != "" { + monolith = true } } // If some types require runtime size determination, generate a helper // variable to run it on package init if runtime { fmt.Fprintf(&b, "// Cached static size computed on package init.\n") - fmt.Fprintf(&b, "var staticSizeCache%s = ", typ.named.Obj().Name()) - for i := range typ.opsets { - if bytes := typ.opsets[i].(*opsetStatic).bytes; bytes != nil { + fmt.Fprintf(&b, "var staticSizeCache%s = ssz.PrecomputeStaticSizeCache((*%s)(nil))\n\n", typ.named.Obj().Name(), typ.named.Obj().Name()) + + fmt.Fprintf(&b, "// SizeSSZ returns the total size of the static ssz object.\n") + fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(sizer *ssz.Sizer) (size uint32) {\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, " if fork := int(sizer.Fork()); fork < len(staticSizeCache%s) {\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, " return staticSizeCache%s[fork]\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, " }\n") + + generateStaticSizeAccumulator(&b, ctx, typ) + fmt.Fprintf(&b, " return size\n}\n") + } else { + fmt.Fprint(&b, "// SizeSSZ returns the total size of the static ssz object.\n") + if monolith { + fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(sizer *ssz.Sizer) (size uint32) {\n", typ.named.Obj().Name()) + generateStaticSizeAccumulator(&b, ctx, typ) + fmt.Fprintf(&b, " return size\n}\n") + } else { + fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(sizer *ssz.Sizer) uint32 {\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, " return ") + for i := range typ.opsets { + bytes := typ.opsets[i].(*opsetStatic).bytes if len(bytes) == 1 { fmt.Fprintf(&b, "%d", bytes[0]) } else { fmt.Fprintf(&b, "%d*%d", bytes[0], bytes[1]) } - } else { - typ := typ.types[i].(*types.Pointer).Elem().(*types.Named) - pkg := typ.Obj().Pkg() - if pkg.Path() == ctx.pkg.Path() { - fmt.Fprintf(&b, "(*%s)(nil).SizeSSZ()", typ.Obj().Name()) - } else { - ctx.addImport(pkg.Path(), "") - fmt.Fprintf(&b, "(*%s.%s)(nil).SizeSSZ()", pkg.Name(), typ.Obj().Name()) + if i < len(typ.opsets)-1 { + fmt.Fprint(&b, " + ") } } - if i < len(typ.opsets)-1 { - fmt.Fprint(&b, " + ") - } - } - fmt.Fprintf(&b, "\n\n// SizeSSZ returns the total size of the static ssz object.\n") - fmt.Fprintf(&b, "func (obj *%s) SizeSSZ() uint32 {\n", typ.named.Obj().Name()) - fmt.Fprintf(&b, " return staticSizeCache%s\n", typ.named.Obj().Name()) - fmt.Fprintf(&b, "}\n") - } else { - fmt.Fprint(&b, "// SizeSSZ returns the total size of the static ssz object.\n") - fmt.Fprintf(&b, "func (obj *%s) SizeSSZ() uint32 {\n", typ.named.Obj().Name()) - fmt.Fprint(&b, " return ") - for i := range typ.opsets { - bytes := typ.opsets[i].(*opsetStatic).bytes - if len(bytes) == 1 { - fmt.Fprintf(&b, "%d", bytes[0]) - } else { - fmt.Fprintf(&b, "%d*%d", bytes[0], bytes[1]) - } - if i < len(typ.opsets)-1 { - fmt.Fprint(&b, " + ") - } + fmt.Fprintf(&b, "\n}\n") } - fmt.Fprintf(&b, "\n}\n") } } else { // Iterate through the fields to see if the static size can be computed @@ -160,7 +211,6 @@ func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { if typ, ok := typ.opsets[i].(*opsetStatic); ok { if typ.bytes == nil { runtime = true - break } } } @@ -168,78 +218,88 @@ func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { // variable to run it on package init if runtime { fmt.Fprintf(&b, "// Cached static size computed on package init.\n") - fmt.Fprintf(&b, "var staticSizeCache%s = ", 
typ.named.Obj().Name()) - for i := range typ.opsets { - switch t := typ.opsets[i].(type) { - case *opsetStatic: - if t.bytes != nil { - if len(t.bytes) == 1 { - fmt.Fprintf(&b, "%d", t.bytes[0]) - } else { - fmt.Fprintf(&b, "%d*%d", t.bytes[0], t.bytes[1]) - } + fmt.Fprintf(&b, "var staticSizeCache%s = ssz.PrecomputeStaticSizeCache((*%s)(nil))\n\n", typ.named.Obj().Name(), typ.named.Obj().Name()) + + fmt.Fprintf(&b, "// SizeSSZ returns either the static size of the object if fixed == true, or\n// the total size otherwise.\n") + fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) {\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, " // Load static size if already precomputed, calculate otherwise\n") + fmt.Fprintf(&b, " if fork := int(sizer.Fork()); fork < len(staticSizeCache%s) {\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, " size = staticSizeCache%s[fork]\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, " } else {\n") + generateStaticSizeAccumulator(&b, ctx, typ) + fmt.Fprintf(&b, " }\n") + fmt.Fprintf(&b, " // Either return the static size or accumulate the dynamic too\n") + fmt.Fprintf(&b, " if (fixed) {\n") + fmt.Fprintf(&b, " return size\n") + fmt.Fprintf(&b, " }\n") + var ( + dynFields []string + dynOpsets []opset + dynForks []string + ) + for i := 0; i < len(typ.fields); i++ { + if _, ok := (typ.opsets[i]).(*opsetDynamic); ok { + dynFields = append(dynFields, typ.fields[i]) + dynOpsets = append(dynOpsets, typ.opsets[i]) + dynForks = append(dynForks, typ.forks[i]) + } + } + for i := range dynFields { + if dynForks[i] != "" && (i == 0 || dynForks[i] != dynForks[i-1]) { + if dynForks[i][0] == '!' { + fmt.Fprintf(&b, " if sizer.Fork() < ssz.Fork%s {\n", dynForks[i][1:]) } else { - typ := typ.types[i].(*types.Pointer).Elem().(*types.Named) - pkg := typ.Obj().Pkg() - if pkg.Path() == ctx.pkg.Path() { - fmt.Fprintf(&b, "(*%s)(nil).SizeSSZ()", typ.Obj().Name()) - } else { - ctx.addImport(pkg.Path(), "") - fmt.Fprintf(&b, "(*%s.%s)(nil).SizeSSZ()", pkg.Name(), typ.Obj().Name()) - } + fmt.Fprintf(&b, " if sizer.Fork() >= ssz.Fork%s {\n", dynForks[i]) } - case *opsetDynamic: - fmt.Fprintf(&b, "%d", offsetBytes) } - if i < len(typ.opsets)-1 { - fmt.Fprint(&b, " + ") + call := generateCall(dynOpsets[i].(*opsetDynamic).size, "", "sizer", "obj."+dynFields[i]) + fmt.Fprintf(&b, " size += ssz.%s\n", call) + if dynForks[i] != "" && (i == len(dynForks)-1 || dynForks[i] != dynForks[i+1]) { + fmt.Fprintf(&b, " }\n") } } + if dynForks[len(dynForks)-1] == "" { + fmt.Fprintf(&b, "\n") + } + fmt.Fprintf(&b, " return size\n") + fmt.Fprintf(&b, "}\n") + } else { fmt.Fprintf(&b, "\n\n// SizeSSZ returns either the static size of the object if fixed == true, or\n// the total size otherwise.\n") - fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(fixed bool) uint32 {\n", typ.named.Obj().Name()) - fmt.Fprintf(&b, " var size = uint32(staticSizeCache%s)\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) {\n", typ.named.Obj().Name()) + generateStaticSizeAccumulator(&b, ctx, typ) fmt.Fprintf(&b, " if (fixed) {\n") fmt.Fprintf(&b, " return size\n") fmt.Fprintf(&b, " }\n") - for i := range typ.opsets { - if opset, ok := typ.opsets[i].(*opsetDynamic); ok { - call := generateCall(opset.size, "", "obj."+typ.fields[i]) - fmt.Fprintf(&b, " size += ssz.%s\n", call) + + var ( + dynFields []string + dynOpsets []opset + dynForks []string + ) + for i := 0; i < len(typ.fields); i++ { + if _, ok := (typ.opsets[i]).(*opsetDynamic); ok { + dynFields = 
append(dynFields, typ.fields[i]) + dynOpsets = append(dynOpsets, typ.opsets[i]) + dynForks = append(dynForks, typ.forks[i]) } } - fmt.Fprintf(&b, "\n") - fmt.Fprintf(&b, " return size\n") - fmt.Fprintf(&b, "}\n") - } else { - fmt.Fprintf(&b, "\n\n// SizeSSZ returns either the static size of the object if fixed == true, or\n// the total size otherwise.\n") - fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(fixed bool) uint32 {\n", typ.named.Obj().Name()) - fmt.Fprintf(&b, " var size = uint32(") - for i := range typ.opsets { - switch t := typ.opsets[i].(type) { - case *opsetStatic: - if len(t.bytes) == 1 { - fmt.Fprintf(&b, "%d", t.bytes[0]) + for i := range dynFields { + if dynForks[i] != "" && (i == 0 || dynForks[i] != dynForks[i-1]) { + if dynForks[i][0] == '!' { + fmt.Fprintf(&b, " if sizer.Fork() < ssz.Fork%s {\n", dynForks[i][1:]) } else { - fmt.Fprintf(&b, "%d*%d", t.bytes[0], t.bytes[1]) + fmt.Fprintf(&b, " if sizer.Fork() >= ssz.Fork%s {\n", dynForks[i]) } - case *opsetDynamic: - fmt.Fprintf(&b, "%d", offsetBytes) } - if i < len(typ.opsets)-1 { - fmt.Fprint(&b, " + ") + call := generateCall(dynOpsets[i].(*opsetDynamic).size, "", "sizer", "obj."+dynFields[i]) + fmt.Fprintf(&b, " size += ssz.%s\n", call) + if dynForks[i] != "" && (i == len(dynForks)-1 || dynForks[i] != dynForks[i+1]) { + fmt.Fprintf(&b, " }\n") } } - fmt.Fprintf(&b, ")\n") - fmt.Fprintf(&b, " if (fixed) {\n") - fmt.Fprintf(&b, " return size\n") - fmt.Fprintf(&b, " }\n") - for i := range typ.opsets { - if opset, ok := typ.opsets[i].(*opsetDynamic); ok { - call := generateCall(opset.size, "", "obj."+typ.fields[i]) - fmt.Fprintf(&b, " size += ssz.%s\n", call) - } + if dynForks[len(dynForks)-1] == "" { + fmt.Fprintf(&b, "\n") } - fmt.Fprintf(&b, "\n") fmt.Fprintf(&b, " return size\n") fmt.Fprintf(&b, "}\n") } @@ -287,7 +347,7 @@ func generateDefineSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { field := typ.fields[i] switch opset := typ.opsets[i].(type) { case *opsetStatic: - call := generateCall(opset.define, "codec", "obj."+field, opset.bytes...) + call := generateCall(opset.define, typ.forks[i], "codec", "obj."+field, opset.bytes...) switch len(opset.bytes) { case 0: typ := typ.types[i].(*types.Pointer).Elem().(*types.Named) @@ -298,19 +358,32 @@ func generateDefineSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { fmt.Fprintf(&b, " ssz.%s // Field ("+indexRule+") - "+nameRule+" - %"+sizeRule+"d bytes\n", call, i, field, opset.bytes[0]*opset.bytes[1]) } case *opsetDynamic: - call := generateCall(opset.defineOffset, "codec", "obj."+field, opset.limits...) + call := generateCall(opset.defineOffset, typ.forks[i], "codec", "obj."+field, opset.limits...) fmt.Fprintf(&b, " ssz.%s // Offset ("+indexRule+") - "+nameRule+" - %"+sizeRule+"d bytes\n", call, i, field, offsetBytes) } } if !typ.static { fmt.Fprint(&b, "\n // Define the dynamic data (fields)\n") + var ( + dynIndices []int + dynFields []string + dynOpsets []opset + dynForks []string + ) for i := 0; i < len(typ.fields); i++ { - field := typ.fields[i] - if opset, ok := (typ.opsets[i]).(*opsetDynamic); ok { - call := generateCall(opset.defineContent, "codec", "obj."+field, opset.limits...) - fmt.Fprintf(&b, " ssz.%s // Field ("+indexRule+") - "+nameRule+" - ? 
bytes\n", call, i, field) + if _, ok := (typ.opsets[i]).(*opsetDynamic); ok { + dynIndices = append(dynIndices, i) + dynFields = append(dynFields, typ.fields[i]) + dynOpsets = append(dynOpsets, typ.opsets[i]) + dynForks = append(dynForks, typ.forks[i]) } } + for i := 0; i < len(dynFields); i++ { + opset := (dynOpsets[i]).(*opsetDynamic) + + call := generateCall(opset.defineContent, dynForks[i], "codec", "obj."+dynFields[i], opset.limits...) + fmt.Fprintf(&b, " ssz.%s // Field ("+indexRule+") - "+nameRule+" - ? bytes\n", call, dynIndices[i], dynFields[i]) + } } fmt.Fprint(&b, "}\n") return b.Bytes(), nil @@ -318,12 +391,14 @@ func generateDefineSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { // generateCall parses a Go template and fills it with the provided data. This // could be done more optimally, but we really don't care for a code generator. -func generateCall(tmpl string, recv string, field string, limits ...int) string { +func generateCall(tmpl string, fork string, recv string, field string, limits ...int) string { + // Generate the base call without taking forks into consideration t, err := template.New("").Parse(tmpl) if err != nil { panic(err) } d := map[string]interface{}{ + "Sizer": recv, "Codec": recv, "Field": field, } @@ -337,5 +412,21 @@ func generateCall(tmpl string, recv string, field string, limits ...int) string if err := t.Execute(buf, d); err != nil { panic(err) } - return string(buf.Bytes()) + call := string(buf.Bytes()) + + // If a fork filter was specified, inject it now into the call + if fork != "" { + // Mutate the call to the fork variant + call = strings.ReplaceAll(call, "(", "OnFork(") + + // Inject a fork filter as the last parameter + var filter string + if fork[0] == '!' { + filter = fmt.Sprintf("ssz.ForkFilter{Removed: ssz.Fork%s}", fork[1:]) + } else { + filter = fmt.Sprintf("ssz.ForkFilter{Added: ssz.Fork%s}", fork) + } + call = strings.ReplaceAll(call, ")", ","+filter+")") + } + return call } diff --git a/cmd/sszgen/opset.go b/cmd/sszgen/opset.go index 27f8470..6a7bb55 100644 --- a/cmd/sszgen/opset.go +++ b/cmd/sszgen/opset.go @@ -20,14 +20,14 @@ type opsetStatic struct { define string // DefineXYZ method for the ssz.Codec encode string // EncodeXYZ method for the ssz.Encoder decode string // DecodeXYZ method for the ssz.Decoder - bytes []int // Number of bytes in the ssz encoding (0 == unknown) + bytes []int // Number of bytes in the ssz encoding (nil == unknown) } // opsetDynamic is a group of methods that define how different pieces of an ssz // codec operates on a given dynamic type. Ideally these would be some go/types // function values, but alas too much pain, especially with generics. type opsetDynamic struct { - size string // SizeXYZ method for the SizeSSZ method + size string // SizeXYZ method for the ssz.Sizer defineOffset string // DefineXYZOffset method for the ssz.Codec defineContent string // DefineXYZContent method for the ssz.Codec encodeOffset string // EncodeXYZOffset method for the ssz.Encoder @@ -41,7 +41,7 @@ type opsetDynamic struct { // resolveBasicOpset retrieves the opset required to handle a basic struct // field. Yes, we could maybe have some of these be "computed" instead of hard // coded, but it makes things brittle for corner-cases. 
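
The fork injection in `generateCall` above is a plain string rewrite of the already-rendered call, and it relies on every opset template containing exactly one `(`/`)` pair (all the templates in opset.go satisfy this). A minimal standalone sketch of the transformation — the call text and fork name below are illustrative, not taken from the generator's templates:

```go
package main

import (
	"fmt"
	"strings"
)

// injectFork mirrors generateCall's fork handling: mutate the call to its
// OnFork variant, then append a ForkFilter literal as the last argument. A
// leading '!' on the fork name selects a Removed filter instead of an Added one.
func injectFork(call, fork string) string {
	call = strings.ReplaceAll(call, "(", "OnFork(")

	var filter string
	if fork[0] == '!' {
		filter = fmt.Sprintf("ssz.ForkFilter{Removed: ssz.Fork%s}", fork[1:])
	} else {
		filter = fmt.Sprintf("ssz.ForkFilter{Added: ssz.Fork%s}", fork)
	}
	return strings.ReplaceAll(call, ")", ","+filter+")")
}

func main() {
	fmt.Println(injectFork("DefineUint64(codec, &obj.Gas)", "Shanghai"))
	// DefineUint64OnFork(codec, &obj.Gas,ssz.ForkFilter{Added: ssz.ForkShanghai})
}
```

The missing space after the injected comma is presumably normalized when the generated file is passed through gofmt.
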
-func (p *parseContext) resolveBasicOpset(typ *types.Basic, tags *sizeTag) (opset, error) { +func (p *parseContext) resolveBasicOpset(typ *types.Basic, tags *sizeTag, pointer bool) (opset, error) { // Sanity check a few tag constraints relevant for all basic types if tags != nil { if tags.limit != nil { @@ -57,52 +57,97 @@ func (p *parseContext) resolveBasicOpset(typ *types.Basic, tags *sizeTag) (opset if tags != nil && tags.size[0] != 1 { return nil, fmt.Errorf("boolean basic type requires ssz-size=1: have %d", tags.size[0]) } - return &opsetStatic{ - "DefineBool({{.Codec}}, &{{.Field}})", - "EncodeBool({{.Codec}}, &{{.Field}})", - "DecodeBool({{.Codec}}, &{{.Field}})", - []int{1}, - }, nil + if !pointer { + return &opsetStatic{ + "DefineBool({{.Codec}}, &{{.Field}})", + "EncodeBool({{.Codec}}, &{{.Field}})", + "DecodeBool({{.Codec}}, &{{.Field}})", + []int{1}, + }, nil + } else { + return &opsetStatic{ + "DefineBoolPointer({{.Codec}}, &{{.Field}})", + "EncodeBoolPointer({{.Codec}}, &{{.Field}})", + "DecodeBoolPointer({{.Codec}}, &{{.Field}})", + []int{1}, + }, nil + } case types.Uint8: if tags != nil && tags.size[0] != 1 { return nil, fmt.Errorf("byte basic type requires ssz-size=1: have %d", tags.size[0]) } - return &opsetStatic{ - "DefineUint8({{.Codec}}, &{{.Field}})", - "EncodeUint8({{.Codec}}, &{{.Field}})", - "DecodeUint8({{.Codec}}, &{{.Field}})", - []int{1}, - }, nil + if !pointer { + return &opsetStatic{ + "DefineUint8({{.Codec}}, &{{.Field}})", + "EncodeUint8({{.Codec}}, &{{.Field}})", + "DecodeUint8({{.Codec}}, &{{.Field}})", + []int{1}, + }, nil + } else { + return &opsetStatic{ + "DefineUint8Pointer({{.Codec}}, &{{.Field}})", + "EncodeUint8Pointer({{.Codec}}, &{{.Field}})", + "DecodeUint8Pointer({{.Codec}}, &{{.Field}})", + []int{1}, + }, nil + } case types.Uint16: if tags != nil && tags.size[0] != 2 { return nil, fmt.Errorf("uint16 basic type requires ssz-size=2: have %d", tags.size[0]) } - return &opsetStatic{ - "DefineUint16({{.Codec}}, &{{.Field}})", - "EncodeUint16({{.Codec}}, &{{.Field}})", - "DecodeUint16({{.Codec}}, &{{.Field}})", - []int{2}, - }, nil + if !pointer { + return &opsetStatic{ + "DefineUint16({{.Codec}}, &{{.Field}})", + "EncodeUint16({{.Codec}}, &{{.Field}})", + "DecodeUint16({{.Codec}}, &{{.Field}})", + []int{2}, + }, nil + } else { + return &opsetStatic{ + "DefineUint16Pointer({{.Codec}}, &{{.Field}})", + "EncodeUint16Pointer({{.Codec}}, &{{.Field}})", + "DecodeUint16Pointer({{.Codec}}, &{{.Field}})", + []int{2}, + }, nil + } case types.Uint32: if tags != nil && tags.size[0] != 4 { return nil, fmt.Errorf("uint32 basic type requires ssz-size=4: have %d", tags.size[0]) } - return &opsetStatic{ - "DefineUint32({{.Codec}}, &{{.Field}})", - "EncodeUint32({{.Codec}}, &{{.Field}})", - "DecodeUint32({{.Codec}}, &{{.Field}})", - []int{4}, - }, nil + if !pointer { + return &opsetStatic{ + "DefineUint32({{.Codec}}, &{{.Field}})", + "EncodeUint32({{.Codec}}, &{{.Field}})", + "DecodeUint32({{.Codec}}, &{{.Field}})", + []int{4}, + }, nil + } else { + return &opsetStatic{ + "DefineUint32Pointer({{.Codec}}, &{{.Field}})", + "EncodeUint32Pointer({{.Codec}}, &{{.Field}})", + "DecodeUint32Pointer({{.Codec}}, &{{.Field}})", + []int{4}, + }, nil + } case types.Uint64: if tags != nil && tags.size[0] != 8 { return nil, fmt.Errorf("uint64 basic type requires ssz-size=8: have %d", tags.size[0]) } - return &opsetStatic{ - "DefineUint64({{.Codec}}, &{{.Field}})", - "EncodeUint64({{.Codec}}, &{{.Field}})", - "DecodeUint64({{.Codec}}, &{{.Field}})", - []int{8}, - }, nil + if 
!pointer { + return &opsetStatic{ + "DefineUint64({{.Codec}}, &{{.Field}})", + "EncodeUint64({{.Codec}}, &{{.Field}})", + "DecodeUint64({{.Codec}}, &{{.Field}})", + []int{8}, + }, nil + } else { + return &opsetStatic{ + "DefineUint64Pointer({{.Codec}}, &{{.Field}})", + "EncodeUint64Pointer({{.Codec}}, &{{.Field}})", + "DecodeUint64Pointer({{.Codec}}, &{{.Field}})", + []int{8}, + }, nil + } default: return nil, fmt.Errorf("unsupported basic type: %s", typ) } @@ -119,7 +164,7 @@ func (p *parseContext) resolveBitlistOpset(tags *sizeTag) (opset, error) { return nil, fmt.Errorf("slice of bits tag conflict: field supports [N] bits, tag wants %v bits", tags.limit) } return &opsetDynamic{ - "SizeSliceOfBits({{.Field}})", + "SizeSliceOfBits({{.Sizer}}, {{.Field}})", fmt.Sprintf("DefineSliceOfBitsOffset({{.Codec}}, &{{.Field}}, %d)", tags.limit[0]), // inject bit-cap directly fmt.Sprintf("DefineSliceOfBitsContent({{.Codec}}, &{{.Field}}, %d)", tags.limit[0]), // inject bit-cap directly "EncodeSliceOfBitsOffset({{.Codec}}, &{{.Field}})", @@ -130,7 +175,7 @@ func (p *parseContext) resolveBitlistOpset(tags *sizeTag) (opset, error) { }, nil } -func (p *parseContext) resolveArrayOpset(typ types.Type, size int, tags *sizeTag) (opset, error) { +func (p *parseContext) resolveArrayOpset(typ types.Type, size int, tags *sizeTag, pointer bool) (opset, error) { switch typ := typ.(type) { case *types.Basic: // Sanity check a few tag constraints relevant for all arrays of basic types @@ -146,12 +191,21 @@ func (p *parseContext) resolveArrayOpset(typ types.Type, size int, tags *sizeTag if len(tags.size) != 1 || tags.size[0] < (size-1)*8+1 || tags.size[0] > size*8 { return nil, fmt.Errorf("array of bits tag conflict: field supports %d-%d bits, tag wants %v bits", (size-1)*8+1, size*8, tags.size) } - return &opsetStatic{ - fmt.Sprintf("DefineArrayOfBits({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly - fmt.Sprintf("EncodeArrayOfBits({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly - fmt.Sprintf("DecodeArrayOfBits({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly - []int{size}, - }, nil + if !pointer { + return &opsetStatic{ + fmt.Sprintf("DefineArrayOfBits({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly + fmt.Sprintf("EncodeArrayOfBits({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly + fmt.Sprintf("DecodeArrayOfBits({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly + []int{size}, + }, nil + } else { + return &opsetStatic{ + fmt.Sprintf("DefineArrayOfBitsPointer({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly + fmt.Sprintf("EncodeArrayOfBitsPointer({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly + fmt.Sprintf("DecodeArrayOfBitsPointer({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly + []int{size}, + }, nil + } } // Not a bitvector, interpret as plain byte array if tags != nil { @@ -161,13 +215,22 @@ func (p *parseContext) resolveArrayOpset(typ types.Type, size int, tags *sizeTag return nil, fmt.Errorf("array of byte basic type tag conflict: field is %d bytes, tag wants %v bytes", size, tags.size) } } - return &opsetStatic{ - "DefineStaticBytes({{.Codec}}, &{{.Field}})", - "EncodeStaticBytes({{.Codec}}, &{{.Field}})", - "DecodeStaticBytes({{.Codec}}, &{{.Field}})", - []int{size}, - }, nil + if !pointer { + return &opsetStatic{ + "DefineStaticBytes({{.Codec}}, &{{.Field}})", + 
"EncodeStaticBytes({{.Codec}}, &{{.Field}})", + "DecodeStaticBytes({{.Codec}}, &{{.Field}})", + []int{size}, + }, nil + } else { + return &opsetStatic{ + "DefineStaticBytesPointer({{.Codec}}, &{{.Field}})", + "EncodeStaticBytesPointer({{.Codec}}, &{{.Field}})", + "DecodeStaticBytesPointer({{.Codec}}, &{{.Field}})", + []int{size}, + }, nil + } case types.Uint64: if tags != nil { if (len(tags.size) != 1 && len(tags.size) != 2) || @@ -176,13 +239,21 @@ func (p *parseContext) resolveArrayOpset(typ types.Type, size int, tags *sizeTag return nil, fmt.Errorf("array of byte basic type tag conflict: field is %d bytes, tag wants %v bytes", size, tags.size) } } - return &opsetStatic{ - "DefineArrayOfUint64s({{.Codec}}, &{{.Field}})", - "EncodeArrayOfUint64s({{.Codec}}, &{{.Field}})", - "DecodeArrayOfUint64s({{.Codec}}, &{{.Field}})", - []int{size, 8}, - }, nil - + if !pointer { + return &opsetStatic{ + "DefineArrayOfUint64s({{.Codec}}, &{{.Field}})", + "EncodeArrayOfUint64s({{.Codec}}, &{{.Field}})", + "DecodeArrayOfUint64s({{.Codec}}, &{{.Field}})", + []int{size, 8}, + }, nil + } else { + return &opsetStatic{ + "DefineArrayOfUint64sPointer({{.Codec}}, &{{.Field}})", + "EncodeArrayOfUint64sPointer({{.Codec}}, &{{.Field}})", + "DecodeArrayOfUint64sPointer({{.Codec}}, &{{.Field}})", + []int{size, 8}, + }, nil + } default: return nil, fmt.Errorf("unsupported array item basic type: %s", typ) } @@ -190,7 +261,7 @@ func (p *parseContext) resolveArrayOpset(typ types.Type, size int, tags *sizeTag return p.resolveArrayOfArrayOpset(typ.Elem(), size, int(typ.Len()), tags) case *types.Named: - return p.resolveArrayOpset(typ.Underlying(), size, tags) + return p.resolveArrayOpset(typ.Underlying(), size, tags, pointer) default: return nil, fmt.Errorf("unsupported array item type: %s", typ) @@ -262,7 +333,7 @@ func (p *parseContext) resolveSliceOpset(typ types.Type, tags *sizeTag) (opset, return nil, fmt.Errorf("dynamic slice of byte basic type tag conflict: needs [N] tag, has %v", tags.limit) } return &opsetDynamic{ - "SizeDynamicBytes({{.Field}})", + "SizeDynamicBytes({{.Sizer}}, {{.Field}})", "DefineDynamicBytesOffset({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "DefineDynamicBytesContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "EncodeDynamicBytesOffset({{.Codec}}, &{{.Field}})", @@ -297,7 +368,7 @@ func (p *parseContext) resolveSliceOpset(typ types.Type, tags *sizeTag) (opset, return nil, fmt.Errorf("dynamic slice of uint64 basic type tag conflict: needs [N] tag, has %v", tags.limit) } return &opsetDynamic{ - "SizeSliceOfUint64s({{.Field}})", + "SizeSliceOfUint64s({{.Sizer}}, {{.Field}})", "DefineSliceOfUint64sOffset({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "DefineSliceOfUint64sContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "EncodeSliceOfUint64sOffset({{.Codec}}, &{{.Field}})", @@ -319,7 +390,7 @@ func (p *parseContext) resolveSliceOpset(typ types.Type, tags *sizeTag) (opset, return nil, fmt.Errorf("dynamic slice of static objects type tag conflict: needs [N] tag, has %v", tags.limit) } return &opsetDynamic{ - "SizeSliceOfStaticObjects({{.Field}})", + "SizeSliceOfStaticObjects({{.Sizer}}, {{.Field}})", "DefineSliceOfStaticObjectsOffset({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "DefineSliceOfStaticObjectsContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "EncodeSliceOfStaticObjectsOffset({{.Codec}}, &{{.Field}})", @@ -337,7 +408,7 @@ func (p *parseContext) resolveSliceOpset(typ types.Type, tags *sizeTag) (opset, return nil, fmt.Errorf("dynamic slice of dynamic objects type tag conflict: needs [N] tag, has %v", 
tags.limit) } return &opsetDynamic{ - "SizeSliceOfDynamicObjects({{.Field}})", + "SizeSliceOfDynamicObjects({{.Sizer}}, {{.Field}})", "DefineSliceOfDynamicObjectsOffset({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "DefineSliceOfDynamicObjectsContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "EncodeSliceOfDynamicObjectsOffset({{.Codec}}, &{{.Field}})", @@ -393,7 +464,7 @@ func (p *parseContext) resolveSliceOfArrayOpset(typ types.Type, innerSize int, t return nil, fmt.Errorf("dynamic slice of array of byte basic type tag conflict: needs [N] tag, has %v", tags.limit) } return &opsetDynamic{ - "SizeSliceOfStaticBytes({{.Field}})", + "SizeSliceOfStaticBytes({{.Sizer}}, {{.Field}})", "DefineSliceOfStaticBytesOffset({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "DefineSliceOfStaticBytesContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "EncodeSliceOfStaticBytesOffset({{.Codec}}, &{{.Field}})", @@ -428,7 +499,7 @@ func (p *parseContext) resolveSliceOfSliceOpset(typ types.Type, tags *sizeTag) ( return nil, fmt.Errorf("dynamic slice of dynamic slice of byte basic type tag conflict: needs [N, M] ssz-max tag, has %v", tags.limit) } return &opsetDynamic{ - "SizeSliceOfDynamicBytes({{.Field}})", + "SizeSliceOfDynamicBytes({{.Sizer}}, {{.Field}})", "DefineSliceOfDynamicBytesOffset({{.Codec}}, &{{.Field}}, {{.MaxItems}}, {{.MaxSize}})", "DefineSliceOfDynamicBytesContent({{.Codec}}, &{{.Field}}, {{.MaxItems}}, {{.MaxSize}})", "EncodeSliceOfDynamicBytesOffset({{.Codec}}, &{{.Field}})", @@ -456,7 +527,7 @@ func (p *parseContext) resolvePointerOpset(typ *types.Pointer, tags *sizeTag) (o return nil, fmt.Errorf("uint256 basic type cannot have ssz-max tag") } if len(tags.size) != 1 || tags.size[0] != 32 { - return nil, fmt.Errorf("uint256 basic type tag conflict: filed is [32] bytes, tag wants %v", tags.size) + return nil, fmt.Errorf("uint256 basic type tag conflict: field is [32] bytes, tag wants %v", tags.size) } } return &opsetStatic{ @@ -472,7 +543,7 @@ func (p *parseContext) resolvePointerOpset(typ *types.Pointer, tags *sizeTag) (o return nil, fmt.Errorf("big.Int (uint256) basic type cannot have ssz-max tag") } if len(tags.size) != 1 || tags.size[0] != 32 { - return nil, fmt.Errorf("big.Int (uint256) basic type tag conflict: filed is [32] bytes, tag wants %v", tags.size) + return nil, fmt.Errorf("big.Int (uint256) basic type tag conflict: field is [32] bytes, tag wants %v", tags.size) } } return &opsetStatic{ @@ -498,7 +569,7 @@ func (p *parseContext) resolvePointerOpset(typ *types.Pointer, tags *sizeTag) (o return nil, fmt.Errorf("dynamic object type cannot have any ssz tags") } return &opsetDynamic{ - "SizeDynamicObject({{.Field}})", + "SizeDynamicObject({{.Sizer}}, {{.Field}})", "DefineDynamicObjectOffset({{.Codec}}, &{{.Field}})", "DefineDynamicObjectContent({{.Codec}}, &{{.Field}})", "EncodeDynamicObjectOffset({{.Codec}}, &{{.Field}})", @@ -508,5 +579,9 @@ func (p *parseContext) resolvePointerOpset(typ *types.Pointer, tags *sizeTag) (o nil, nil, }, nil } - return nil, fmt.Errorf("unsupported pointer type %s", typ.String()) + named, ok := typ.Elem().(*types.Named) + if !ok { + return nil, fmt.Errorf("unsupported pointer type %s", typ.String()) + } + return p.resolveOpset(named.Underlying(), tags, true) } diff --git a/cmd/sszgen/tags.go b/cmd/sszgen/tags.go index de81d57..8a8f365 100644 --- a/cmd/sszgen/tags.go +++ b/cmd/sszgen/tags.go @@ -14,22 +14,25 @@ const ( sszTagIdent = "ssz" sszSizeTagIdent = "ssz-size" sszMaxTagIdent = "ssz-max" + sszForkTagIdent = "ssz-fork" ) -// sizeTag describes the size restriction for 
types. +// sizeTag describes the restriction for types. type sizeTag struct { bits bool // whether the sizes are bits instead of bytes size []int // 0 means the size for that dimension is undefined limit []int // 0 means the limit for that dimension is undefined } -func parseTags(input string) (bool, *sizeTag, error) { +func parseTags(input string) (bool, *sizeTag, string, error) { if len(input) == 0 { - return false, nil, nil + return false, nil, "", nil } var ( ignore bool tags sizeTag + fork string + setTag = func(v int, ident string) { if ident == sszMaxTagIdent { tags.limit = append(tags.limit, v) @@ -41,7 +44,7 @@ func parseTags(input string) (bool, *sizeTag, error) { for _, tag := range strings.Fields(input) { parts := strings.Split(tag, ":") if len(parts) != 2 { - return false, nil, fmt.Errorf("invalid tag %s", tag) + return false, nil, "", fmt.Errorf("invalid tag %s", tag) } ident, remain := parts[0], strings.Trim(parts[1], "\"") switch ident { @@ -60,14 +63,28 @@ func parseTags(input string) (bool, *sizeTag, error) { } num, err := strconv.ParseInt(p, 10, 64) if err != nil { - return false, nil, err + return false, nil, "", err } setTag(int(num), ident) } + case sszForkTagIdent: + var negate bool + if remain[0] == '!' { + negate = true + remain = remain[1:] + } + if enum, ok := forkMapping[remain]; !ok { + return ignore, nil, "", fmt.Errorf("invalid fork tag %s", tag) + } else { + fork = enum + if negate { + fork = "!" + fork + } + } } } if tags.size == nil && tags.limit == nil { - return ignore, nil, nil + return ignore, nil, fork, nil } - return ignore, &tags, nil + return ignore, &tags, fork, nil } diff --git a/cmd/sszgen/types.go b/cmd/sszgen/types.go index e1c2183..fd46b63 100644 --- a/cmd/sszgen/types.go +++ b/cmd/sszgen/types.go @@ -13,9 +13,10 @@ type sszContainer struct { *types.Struct named *types.Named static bool - fields []string - types []types.Type - opsets []opset + fields []string // Name of the struct field + types []types.Type // Type of the struct field + opsets []opset // Opset for the struct field + forks []string // Fork constraint for the struct field } // makeContainer iterates over the fields of the struct and attempt to match each @@ -26,6 +27,7 @@ func (p *parseContext) makeContainer(named *types.Named, typ *types.Struct) (*ss fields []string types []types.Type opsets []opset + forks []string ) // Iterate over all the fields of the struct for i := 0; i < typ.NumFields(); i++ { @@ -34,7 +36,7 @@ func (p *parseContext) makeContainer(named *types.Named, typ *types.Struct) (*ss if !f.Exported() { continue } - ignore, tags, err := parseTags(typ.Tag(i)) + ignore, tags, fork, err := parseTags(typ.Tag(i)) if err != nil { return nil, fmt.Errorf("failed to parse field %s.%s tags: %v", named.Obj().Name(), f.Name(), err) } @@ -42,7 +44,7 @@ func (p *parseContext) makeContainer(named *types.Named, typ *types.Struct) (*ss continue } // Required field found, validate type with tag content - opset, err := p.resolveOpset(f.Type(), tags) + opset, err := p.resolveOpset(f.Type(), tags, false) if err != nil { return nil, fmt.Errorf("failed to validate field %s.%s: %v", named.Obj().Name(), f.Name(), err) } @@ -52,6 +54,7 @@ func (p *parseContext) makeContainer(named *types.Named, typ *types.Struct) (*ss fields = append(fields, f.Name()) types = append(types, f.Type()) opsets = append(opsets, opset) + forks = append(forks, fork) } return &sszContainer{ Struct: typ, @@ -60,6 +63,7 @@ func (p *parseContext) makeContainer(named *types.Named, typ *types.Struct) (*ss fields: 
fields, types: types, opsets: opsets, + forks: forks, }, nil } @@ -67,24 +71,32 @@ func (p *parseContext) makeContainer(named *types.Named, typ *types.Struct) (*ss // whether there's a collision between them, or if more tags are needed to fully // derive the size. If the type/tags are in sync and well-defined, an opset will // be returned that the generator can use to create the code. -func (p *parseContext) resolveOpset(typ types.Type, tags *sizeTag) (opset, error) { +func (p *parseContext) resolveOpset(typ types.Type, tags *sizeTag, pointer bool) (opset, error) { switch t := typ.(type) { case *types.Named: if isBitlist(typ) { return p.resolveBitlistOpset(tags) } - return p.resolveOpset(t.Underlying(), tags) + return p.resolveOpset(t.Underlying(), tags, pointer) case *types.Basic: - return p.resolveBasicOpset(t, tags) + return p.resolveBasicOpset(t, tags, pointer) case *types.Array: - return p.resolveArrayOpset(t.Elem(), int(t.Len()), tags) + return p.resolveArrayOpset(t.Elem(), int(t.Len()), tags, pointer) case *types.Slice: return p.resolveSliceOpset(t.Elem(), tags) case *types.Pointer: + switch tt := t.Elem().(type) { + case *types.Basic: + return p.resolveBasicOpset(tt, tags, true) + + case *types.Array: + return p.resolveArrayOpset(tt.Elem(), int(tt.Len()), tags, true) + + } return p.resolvePointerOpset(t, tags) } return nil, fmt.Errorf("unsupported type %s", typ.String()) diff --git a/codec.go b/codec.go index 061e9a0..47a11ea 100644 --- a/codec.go +++ b/codec.go @@ -15,6 +15,8 @@ import ( // define their schemas once and have that work for both operations at once // (with the same speed as explicitly typing them out would, of course). type Codec struct { + fork Fork // Context for cross-fork monolith types + enc *Encoder dec *Decoder has *Hasher @@ -66,6 +68,20 @@ func DefineBool[T ~bool](c *Codec, v *T) { HashBool(c.has, *v) } +// DefineBoolPointerOnFork defines the next field as a 1 byte boolean if present +// in a fork. +func DefineBoolPointerOnFork[T ~bool](c *Codec, v **T, filter ForkFilter) { + if c.enc != nil { + EncodeBoolPointerOnFork(c.enc, *v, filter) + return + } + if c.dec != nil { + DecodeBoolPointerOnFork(c.dec, v, filter) + return + } + HashBoolPointerOnFork(c.has, *v, filter) +} + // DefineUint8 defines the next field as a uint8. func DefineUint8[T ~uint8](c *Codec, n *T) { if c.enc != nil { @@ -79,6 +95,19 @@ func DefineUint8[T ~uint8](c *Codec, n *T) { HashUint8(c.has, *n) } +// DefineUint8PointerOnFork defines the next field as a uint8 if present in a fork. +func DefineUint8PointerOnFork[T ~uint8](c *Codec, n **T, filter ForkFilter) { + if c.enc != nil { + EncodeUint8PointerOnFork(c.enc, *n, filter) + return + } + if c.dec != nil { + DecodeUint8PointerOnFork(c.dec, n, filter) + return + } + HashUint8PointerOnFork(c.has, *n, filter) +} + // DefineUint16 defines the next field as a uint16. func DefineUint16[T ~uint16](c *Codec, n *T) { if c.enc != nil { @@ -92,6 +121,19 @@ func DefineUint16[T ~uint16](c *Codec, n *T) { HashUint16(c.has, *n) } +// DefineUint16PointerOnFork defines the next field as a uint16 if present in a fork. +func DefineUint16PointerOnFork[T ~uint16](c *Codec, n **T, filter ForkFilter) { + if c.enc != nil { + EncodeUint16PointerOnFork(c.enc, *n, filter) + return + } + if c.dec != nil { + DecodeUint16PointerOnFork(c.dec, n, filter) + return + } + HashUint16PointerOnFork(c.has, *n, filter) +} + // DefineUint32 defines the next field as a uint32. 
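
To see what these OnFork helpers are for: a container tagged with `ssz-fork` keeps a single schema across all forks, with gated fields declared as pointers so that absence can be represented as nil. A hedged sketch of the generated shape — the type and its fields are hypothetical, not from this PR:

```go
package types

import "github.com/karalabe/ssz"

type ExecutionPayloadMonolith struct {
	Timestamp     uint64
	ExcessBlobGas *uint64 `ssz-fork:"cancun"` // nil before Cancun
}

// DefineSSZ sketches the schema sszgen would emit (SizeSSZ omitted for brevity).
func (obj *ExecutionPayloadMonolith) DefineSSZ(codec *ssz.Codec) {
	ssz.DefineUint64(codec, &obj.Timestamp) // Field (0) - Timestamp     - 8 bytes
	ssz.DefineUint64PointerOnFork(codec, &obj.ExcessBlobGas,
		ssz.ForkFilter{Added: ssz.ForkCancun}) // Field (1) - ExcessBlobGas - 8 bytes
}
```

The pointer shape is what lets one Go struct represent every fork: a field filtered out for the current fork simply stays nil.
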
func DefineUint32[T ~uint32](c *Codec, n *T) { if c.enc != nil { @@ -105,6 +147,19 @@ func DefineUint32[T ~uint32](c *Codec, n *T) { HashUint32(c.has, *n) } +// DefineUint32PointerOnFork defines the next field as a uint32 if present in a fork. +func DefineUint32PointerOnFork[T ~uint32](c *Codec, n **T, filter ForkFilter) { + if c.enc != nil { + EncodeUint32PointerOnFork(c.enc, *n, filter) + return + } + if c.dec != nil { + DecodeUint32PointerOnFork(c.dec, n, filter) + return + } + HashUint32PointerOnFork(c.has, *n, filter) +} + // DefineUint64 defines the next field as a uint64. func DefineUint64[T ~uint64](c *Codec, n *T) { if c.enc != nil { @@ -118,6 +173,19 @@ func DefineUint64[T ~uint64](c *Codec, n *T) { HashUint64(c.has, *n) } +// DefineUint64PointerOnFork defines the next field as a uint64 if present in a fork. +func DefineUint64PointerOnFork[T ~uint64](c *Codec, n **T, filter ForkFilter) { + if c.enc != nil { + EncodeUint64PointerOnFork(c.enc, *n, filter) + return + } + if c.dec != nil { + DecodeUint64PointerOnFork(c.dec, n, filter) + return + } + HashUint64PointerOnFork(c.has, *n, filter) +} + // DefineUint256 defines the next field as a uint256. func DefineUint256(c *Codec, n **uint256.Int) { if c.enc != nil { @@ -131,6 +199,19 @@ func DefineUint256(c *Codec, n **uint256.Int) { HashUint256(c.has, *n) } +// DefineUint256OnFork defines the next field as a uint256 if present in a fork. +func DefineUint256OnFork(c *Codec, n **uint256.Int, filter ForkFilter) { + if c.enc != nil { + EncodeUint256OnFork(c.enc, *n, filter) + return + } + if c.dec != nil { + DecodeUint256OnFork(c.dec, n, filter) + return + } + HashUint256OnFork(c.has, *n, filter) +} + // DefineUint256BigInt defines the next field as a uint256. func DefineUint256BigInt(c *Codec, n **big.Int) { if c.enc != nil { @@ -144,6 +225,20 @@ func DefineUint256BigInt(c *Codec, n **big.Int) { HashUint256BigInt(c.has, *n) } +// DefineUint256BigIntOnFork defines the next field as a uint256 if present in a +// fork. +func DefineUint256BigIntOnFork(c *Codec, n **big.Int, filter ForkFilter) { + if c.enc != nil { + EncodeUint256BigIntOnFork(c.enc, *n, filter) + return + } + if c.dec != nil { + DecodeUint256BigIntOnFork(c.dec, n, filter) + return + } + HashUint256BigIntOnFork(c.has, *n, filter) +} + // DefineStaticBytes defines the next field as static binary blob. This method // can be used for byte arrays. func DefineStaticBytes[T commonBytesLengths](c *Codec, blob *T) { @@ -158,12 +253,26 @@ func DefineStaticBytes[T commonBytesLengths](c *Codec, blob *T) { HashStaticBytes(c.has, blob) } +// DefineStaticBytesPointerOnFork defines the next field as static binary blob if present +// in a fork. This method can be used for byte arrays. +func DefineStaticBytesPointerOnFork[T commonBytesLengths](c *Codec, blob **T, filter ForkFilter) { + if c.enc != nil { + EncodeStaticBytesPointerOnFork(c.enc, *blob, filter) + return + } + if c.dec != nil { + DecodeStaticBytesPointerOnFork(c.dec, blob, filter) + return + } + HashStaticBytesPointerOnFork(c.has, *blob, filter) +} + // DefineCheckedStaticBytes defines the next field as static binary blob. This // method can be used for plain byte slices, which is more expensive, since it // needs runtime size validation. 
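
One asymmetry worth noting in the pairs above: `uint256.Int` and `big.Int` fields are already pointer-shaped (`**T` in the define signatures), so their OnFork variants need no separate `Pointer` form, while value types such as static byte arrays get explicit `Pointer` wrappers. A usage sketch for the latter — hypothetical container, with the filter literal matching what sszgen injects for an `ssz-fork:"capella"` tag:

```go
package types

import "github.com/karalabe/ssz"

type SummaryMonolith struct {
	Root *[32]byte `ssz-fork:"capella"` // nil before Capella
}

// DefineSSZ sketches the single fork-gated field (SizeSSZ omitted for brevity).
func (obj *SummaryMonolith) DefineSSZ(codec *ssz.Codec) {
	ssz.DefineStaticBytesPointerOnFork(codec, &obj.Root, ssz.ForkFilter{Added: ssz.ForkCapella})
}
```
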
func DefineCheckedStaticBytes(c *Codec, blob *[]byte, size uint64) { if c.enc != nil { - EncodeCheckedStaticBytes(c.enc, *blob) + EncodeCheckedStaticBytes(c.enc, *blob, size) return } if c.dec != nil { @@ -186,6 +295,20 @@ func DefineDynamicBytesOffset(c *Codec, blob *[]byte, maxSize uint64) { HashDynamicBytes(c.has, *blob, maxSize) } +// DefineDynamicBytesOffsetOnFork defines the next field as dynamic binary blob +// if present in a fork. +func DefineDynamicBytesOffsetOnFork(c *Codec, blob *[]byte, maxSize uint64, filter ForkFilter) { + if c.enc != nil { + EncodeDynamicBytesOffsetOnFork(c.enc, *blob, filter) + return + } + if c.dec != nil { + DecodeDynamicBytesOffsetOnFork(c.dec, blob, filter) + return + } + HashDynamicBytesOnFork(c.has, *blob, maxSize, filter) +} + // DefineDynamicBytesContent defines the next field as dynamic binary blob. func DefineDynamicBytesContent(c *Codec, blob *[]byte, maxSize uint64) { if c.enc != nil { @@ -199,6 +322,20 @@ func DefineDynamicBytesContent(c *Codec, blob *[]byte, maxSize uint64) { // No hashing, done at the offset position } +// DefineDynamicBytesContentOnFork defines the next field as dynamic binary blob +// if present in a fork. +func DefineDynamicBytesContentOnFork(c *Codec, blob *[]byte, maxSize uint64, filter ForkFilter) { + if c.enc != nil { + EncodeDynamicBytesContentOnFork(c.enc, *blob, filter) + return + } + if c.dec != nil { + DecodeDynamicBytesContentOnFork(c.dec, blob, maxSize, filter) + return + } + // No hashing, done at the offset position +} + // DefineStaticObject defines the next field as a static ssz object. func DefineStaticObject[T newableStaticObject[U], U any](c *Codec, obj *T) { if c.enc != nil { @@ -212,6 +349,20 @@ func DefineStaticObject[T newableStaticObject[U], U any](c *Codec, obj *T) { HashStaticObject(c.has, *obj) } +// DefineStaticObjectOnFork defines the next field as a static ssz object if +// present in a fork. +func DefineStaticObjectOnFork[T newableStaticObject[U], U any](c *Codec, obj *T, filter ForkFilter) { + if c.enc != nil { + EncodeStaticObjectOnFork(c.enc, *obj, filter) + return + } + if c.dec != nil { + DecodeStaticObjectOnFork(c.dec, obj, filter) + return + } + HashStaticObjectOnFork(c.has, *obj, filter) +} + // DefineDynamicObjectOffset defines the next field as a dynamic ssz object. func DefineDynamicObjectOffset[T newableDynamicObject[U], U any](c *Codec, obj *T) { if c.enc != nil { @@ -225,6 +376,20 @@ func DefineDynamicObjectOffset[T newableDynamicObject[U], U any](c *Codec, obj * HashDynamicObject(c.has, *obj) } +// DefineDynamicObjectOffsetOnFork defines the next field as a dynamic ssz object +// if present in a fork. +func DefineDynamicObjectOffsetOnFork[T newableDynamicObject[U], U any](c *Codec, obj *T, filter ForkFilter) { + if c.enc != nil { + EncodeDynamicObjectOffsetOnFork(c.enc, *obj, filter) + return + } + if c.dec != nil { + DecodeDynamicObjectOffsetOnFork(c.dec, obj, filter) + return + } + HashDynamicObjectOnFork(c.has, *obj, filter) +} + // DefineDynamicObjectContent defines the next field as a dynamic ssz object. func DefineDynamicObjectContent[T newableDynamicObject[U], U any](c *Codec, obj *T) { if c.enc != nil { @@ -238,6 +403,20 @@ func DefineDynamicObjectContent[T newableDynamicObject[U], U any](c *Codec, obj // No hashing, done at the offset position } +// DefineDynamicObjectContentOnFork defines the next field as a dynamic ssz object +// if present in a fork. 
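
Dynamic fields keep their two-phase offset/content split under fork gating, and both halves must carry the same filter so that the fixed-size offset table and the trailing data heap stay in sync — which is why the generator threads one `forks[i]` value into both calls. An illustrative generated pair (type and field are hypothetical):

```go
package types

import "github.com/karalabe/ssz"

type PayloadMonolith struct {
	ExtraData []byte `ssz-max:"32" ssz-fork:"shanghai"`
}

// DefineSSZ sketches the offset/content pairing for one gated dynamic field.
func (obj *PayloadMonolith) DefineSSZ(codec *ssz.Codec) {
	// Define the static data (fields and dynamic offsets)
	ssz.DefineDynamicBytesOffsetOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Offset (0) - ExtraData - 4 bytes
	// Define the dynamic data (fields)
	ssz.DefineDynamicBytesContentOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Field  (0) - ExtraData - ? bytes
}
```
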
+func DefineDynamicObjectContentOnFork[T newableDynamicObject[U], U any](c *Codec, obj *T, filter ForkFilter) { + if c.enc != nil { + EncodeDynamicObjectContentOnFork(c.enc, *obj, filter) + return + } + if c.dec != nil { + DecodeDynamicObjectContentOnFork(c.dec, obj, filter) + return + } + // No hashing, done at the offset position +} + // DefineArrayOfBits defines the next field as a static array of (packed) bits. func DefineArrayOfBits[T commonBitsLengths](c *Codec, bits *T, size uint64) { if c.enc != nil { @@ -251,7 +430,22 @@ func DefineArrayOfBits[T commonBitsLengths](c *Codec, bits *T, size uint64) { HashArrayOfBits(c.has, bits) } -// DefineSliceOfBitsOffset defines the next field as a dynamic slice of (packed) bits. +// DefineArrayOfBitsPointerOnFork defines the next field as a static array of +// (packed) bits if present in a fork. +func DefineArrayOfBitsPointerOnFork[T commonBitsLengths](c *Codec, bits **T, size uint64, filter ForkFilter) { + if c.enc != nil { + EncodeArrayOfBitsPointerOnFork(c.enc, *bits, filter) + return + } + if c.dec != nil { + DecodeArrayOfBitsPointerOnFork(c.dec, bits, size, filter) + return + } + HashArrayOfBitsPointerOnFork(c.has, *bits, filter) +} + +// DefineSliceOfBitsOffset defines the next field as a dynamic slice of (packed) +// bits. func DefineSliceOfBitsOffset(c *Codec, bits *bitfield.Bitlist, maxBits uint64) { if c.enc != nil { EncodeSliceOfBitsOffset(c.enc, *bits) @@ -264,7 +458,22 @@ func DefineSliceOfBitsOffset(c *Codec, bits *bitfield.Bitlist, maxBits uint64) { HashSliceOfBits(c.has, *bits, maxBits) } -// DefineSliceOfBitsContent defines the next field as a dynamic slice of (packed) bits. +// DefineSliceOfBitsOffsetOnFork defines the next field as a dynamic slice of +// (packed) bits if present in a fork. +func DefineSliceOfBitsOffsetOnFork(c *Codec, bits *bitfield.Bitlist, maxBits uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfBitsOffsetOnFork(c.enc, *bits, filter) + return + } + if c.dec != nil { + DecodeSliceOfBitsOffsetOnFork(c.dec, bits, filter) + return + } + HashSliceOfBitsOnFork(c.has, *bits, maxBits, filter) +} + +// DefineSliceOfBitsContent defines the next field as a dynamic slice of (packed) +// bits. func DefineSliceOfBitsContent(c *Codec, bits *bitfield.Bitlist, maxBits uint64) { if c.enc != nil { EncodeSliceOfBitsContent(c.enc, *bits) @@ -277,6 +486,20 @@ func DefineSliceOfBitsContent(c *Codec, bits *bitfield.Bitlist, maxBits uint64) // No hashing, done at the offset position } +// DefineSliceOfBitsContentOnFork defines the next field as a dynamic slice of +// (packed) bits if present in a fork. +func DefineSliceOfBitsContentOnFork(c *Codec, bits *bitfield.Bitlist, maxBits uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfBitsContentOnFork(c.enc, *bits, filter) + return + } + if c.dec != nil { + DecodeSliceOfBitsContentOnFork(c.dec, bits, maxBits, filter) + return + } + // No hashing, done at the offset position +} + // DefineArrayOfUint64s defines the next field as a static array of uint64s. func DefineArrayOfUint64s[T commonUint64sLengths](c *Codec, ns *T) { if c.enc != nil { @@ -290,6 +513,20 @@ func DefineArrayOfUint64s[T commonUint64sLengths](c *Codec, ns *T) { HashArrayOfUint64s(c.has, ns) } +// DefineArrayOfUint64sPointerOnFork defines the next field as a static array of +// uint64s if present in a fork. 
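
As a reminder of the tag grammar driving these helpers: a bare fork name maps to an Added filter, while a `!`-prefixed name maps to a Removed one, for fields that exist only before a fork. A hedged sketch with hypothetical field names:

```go
package types

type HeaderMonolith struct {
	NewField *uint64 `ssz-fork:"deneb"`  // nil before Deneb
	OldField *uint64 `ssz-fork:"!deneb"` // nil from Deneb onwards
}

// For these two fields sszgen would emit, respectively:
//
//	ssz.DefineUint64PointerOnFork(codec, &obj.NewField, ssz.ForkFilter{Added: ssz.ForkDeneb})
//	ssz.DefineUint64PointerOnFork(codec, &obj.OldField, ssz.ForkFilter{Removed: ssz.ForkDeneb})
```
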
+func DefineArrayOfUint64sPointerOnFork[T commonUint64sLengths](c *Codec, ns **T, filter ForkFilter) { + if c.enc != nil { + EncodeArrayOfUint64sPointerOnFork(c.enc, *ns, filter) + return + } + if c.dec != nil { + DecodeArrayOfUint64sPointerOnFork(c.dec, ns, filter) + return + } + HashArrayOfUint64sPointerOnFork(c.has, *ns, filter) +} + // DefineSliceOfUint64sOffset defines the next field as a dynamic slice of uint64s. func DefineSliceOfUint64sOffset[T ~uint64](c *Codec, ns *[]T, maxItems uint64) { if c.enc != nil { @@ -303,6 +540,20 @@ func DefineSliceOfUint64sOffset[T ~uint64](c *Codec, ns *[]T, maxItems uint64) { HashSliceOfUint64s(c.has, *ns, maxItems) } +// DefineSliceOfUint64sOffsetOnFork defines the next field as a dynamic slice of +// uint64s if present in a fork. +func DefineSliceOfUint64sOffsetOnFork[T ~uint64](c *Codec, ns *[]T, maxItems uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfUint64sOffsetOnFork(c.enc, *ns, filter) + return + } + if c.dec != nil { + DecodeSliceOfUint64sOffsetOnFork(c.dec, ns, filter) + return + } + HashSliceOfUint64sOnFork(c.has, *ns, maxItems, filter) +} + // DefineSliceOfUint64sContent defines the next field as a dynamic slice of uint64s. func DefineSliceOfUint64sContent[T ~uint64](c *Codec, ns *[]T, maxItems uint64) { if c.enc != nil { @@ -316,6 +567,20 @@ func DefineSliceOfUint64sContent[T ~uint64](c *Codec, ns *[]T, maxItems uint64) // No hashing, done at the offset position } +// DefineSliceOfUint64sContentOnFork defines the next field as a dynamic slice of +// uint64s if present in a fork. +func DefineSliceOfUint64sContentOnFork[T ~uint64](c *Codec, ns *[]T, maxItems uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfUint64sContentOnFork(c.enc, *ns, filter) + return + } + if c.dec != nil { + DecodeSliceOfUint64sContentOnFork(c.dec, ns, maxItems, filter) + return + } + // No hashing, done at the offset position +} + // DefineArrayOfStaticBytes defines the next field as a static array of static // binary blobs. func DefineArrayOfStaticBytes[T commonBytesArrayLengths[U], U commonBytesLengths](c *Codec, blobs *T) { @@ -351,7 +616,7 @@ func DefineUnsafeArrayOfStaticBytes[T commonBytesLengths](c *Codec, blobs []T) { // which is more expensive since it needs runtime size validation. func DefineCheckedArrayOfStaticBytes[T commonBytesLengths](c *Codec, blobs *[]T, size uint64) { if c.enc != nil { - EncodeCheckedArrayOfStaticBytes(c.enc, *blobs) + EncodeCheckedArrayOfStaticBytes(c.enc, *blobs, size) return } if c.dec != nil { @@ -361,8 +626,8 @@ func DefineCheckedArrayOfStaticBytes[T commonBytesLengths](c *Codec, blobs *[]T, HashCheckedArrayOfStaticBytes(c.has, *blobs) } -// DefineSliceOfStaticBytesOffset defines the next field as a dynamic slice of static -// binary blobs. +// DefineSliceOfStaticBytesOffset defines the next field as a dynamic slice of +// static binary blobs. func DefineSliceOfStaticBytesOffset[T commonBytesLengths](c *Codec, bytes *[]T, maxItems uint64) { if c.enc != nil { EncodeSliceOfStaticBytesOffset(c.enc, *bytes) @@ -375,6 +640,20 @@ func DefineSliceOfStaticBytesOffset[T commonBytesLengths](c *Codec, bytes *[]T, HashSliceOfStaticBytes(c.has, *bytes, maxItems) } +// DefineSliceOfStaticBytesOffsetOnFork defines the next field as a dynamic slice +// of static binary blobs if present in a fork. 
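
The define-side filters above have a sizing counterpart: the sizer-threaded Size helpers feed generated SizeSSZ methods that now accumulate per fork instead of returning a constant. A sketch of the output shape for a dynamic monolith, with illustrative field layout and sizes:

```go
package types

import "github.com/karalabe/ssz"

type BodyMonolith struct {
	Timestamp     uint64
	ExcessBlobGas *uint64 `ssz-fork:"cancun"`
	ExtraData     []byte  `ssz-max:"32"`
}

// SizeSSZ mirrors the accumulator emitted by generateStaticSizeAccumulator.
func (obj *BodyMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) {
	size = 8 + 4 // Timestamp + ExtraData offset word
	if sizer.Fork() >= ssz.ForkCancun {
		size += 8 // ExcessBlobGas, only counted from Cancun on
	}
	if fixed {
		return size
	}
	size += ssz.SizeDynamicBytes(sizer, obj.ExtraData)
	return size
}
```
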
+func DefineSliceOfStaticBytesOffsetOnFork[T commonBytesLengths](c *Codec, bytes *[]T, maxItems uint64, filter ForkFilter) {
+	if c.enc != nil {
+		EncodeSliceOfStaticBytesOffsetOnFork(c.enc, *bytes, filter)
+		return
+	}
+	if c.dec != nil {
+		DecodeSliceOfStaticBytesOffsetOnFork(c.dec, bytes, filter)
+		return
+	}
+	HashSliceOfStaticBytesOnFork(c.has, *bytes, maxItems, filter)
+}
+
 // DefineSliceOfStaticBytesContent defines the next field as a dynamic slice of static
 // binary blobs.
 func DefineSliceOfStaticBytesContent[T commonBytesLengths](c *Codec, blobs *[]T, maxItems uint64) {
@@ -389,8 +668,8 @@ func DefineSliceOfStaticBytesContent[T commonBytesLengths](c *Codec, blobs *[]T,
 	// No hashing, done at the offset position
 }
 
-// DefineSliceOfDynamicBytesOffset defines the next field as a dynamic slice of dynamic
-// binary blobs.
+// DefineSliceOfStaticBytesContentOnFork defines the next field as a dynamic slice
+// of static binary blobs if present in a fork.
+func DefineSliceOfStaticBytesContentOnFork[T commonBytesLengths](c *Codec, blobs *[]T, maxItems uint64, filter ForkFilter) {
+	if c.enc != nil {
+		EncodeSliceOfStaticBytesContentOnFork(c.enc, *blobs, filter)
+		return
+	}
+	if c.dec != nil {
+		DecodeSliceOfStaticBytesContentOnFork(c.dec, blobs, maxItems, filter)
+		return
+	}
+	// No hashing, done at the offset position
+}
+
+// DefineSliceOfDynamicBytesOffset defines the next field as a dynamic slice of
+// dynamic binary blobs.
 func DefineSliceOfDynamicBytesOffset(c *Codec, blobs *[][]byte, maxItems uint64, maxSize uint64) {
 	if c.enc != nil {
 		EncodeSliceOfDynamicBytesOffset(c.enc, *blobs)
@@ -403,8 +696,22 @@ func DefineSliceOfDynamicBytesOffset(c *Codec, blobs *[][]byte, maxItems uint64,
 	HashSliceOfDynamicBytes(c.has, *blobs, maxItems, maxSize)
 }
 
-// DefineSliceOfDynamicBytesContent defines the next field as a dynamic slice of dynamic
-// binary blobs.
+// DefineSliceOfDynamicBytesOffsetOnFork defines the next field as a dynamic slice
+// of dynamic binary blobs if present in a fork.
+func DefineSliceOfDynamicBytesOffsetOnFork(c *Codec, blobs *[][]byte, maxItems uint64, maxSize uint64, filter ForkFilter) {
+	if c.enc != nil {
+		EncodeSliceOfDynamicBytesOffsetOnFork(c.enc, *blobs, filter)
+		return
+	}
+	if c.dec != nil {
+		DecodeSliceOfDynamicBytesOffsetOnFork(c.dec, blobs, filter)
+		return
+	}
+	HashSliceOfDynamicBytesOnFork(c.has, *blobs, maxItems, maxSize, filter)
+}
+
+// DefineSliceOfDynamicBytesContent defines the next field as a dynamic slice of
+// dynamic binary blobs.
 func DefineSliceOfDynamicBytesContent(c *Codec, blobs *[][]byte, maxItems uint64, maxSize uint64) {
 	if c.enc != nil {
 		EncodeSliceOfDynamicBytesContent(c.enc, *blobs)
@@ -417,8 +724,22 @@ func DefineSliceOfDynamicBytesContent(c *Codec, blobs *[][]byte, maxItems uint64
 	// No hashing, done at the offset position
 }
 
-// DefineSliceOfStaticObjectsOffset defines the next field as a dynamic slice of static
-// ssz objects.
+// DefineSliceOfDynamicBytesContentOnFork defines the next field as a dynamic
+// slice of dynamic binary blobs if present in a fork.
+func DefineSliceOfDynamicBytesContentOnFork(c *Codec, blobs *[][]byte, maxItems uint64, maxSize uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfDynamicBytesContentOnFork(c.enc, *blobs, filter) + return + } + if c.dec != nil { + DecodeSliceOfDynamicBytesContentOnFork(c.dec, blobs, maxItems, maxSize, filter) + return + } + // No hashing, done at the offset position +} + +// DefineSliceOfStaticObjectsOffset defines the next field as a dynamic slice of +// static ssz objects. func DefineSliceOfStaticObjectsOffset[T newableStaticObject[U], U any](c *Codec, objects *[]T, maxItems uint64) { if c.enc != nil { EncodeSliceOfStaticObjectsOffset(c.enc, *objects) @@ -431,6 +752,20 @@ func DefineSliceOfStaticObjectsOffset[T newableStaticObject[U], U any](c *Codec, HashSliceOfStaticObjects(c.has, *objects, maxItems) } +// DefineSliceOfStaticObjectsOffsetOnFork defines the next field as a dynamic +// slice of static ssz objects if present in a fork. +func DefineSliceOfStaticObjectsOffsetOnFork[T newableStaticObject[U], U any](c *Codec, objects *[]T, maxItems uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfStaticObjectsOffsetOnFork(c.enc, *objects, filter) + return + } + if c.dec != nil { + DecodeSliceOfStaticObjectsOffsetOnFork(c.dec, objects, filter) + return + } + HashSliceOfStaticObjectsOnFork(c.has, *objects, maxItems, filter) +} + // DefineSliceOfStaticObjectsContent defines the next field as a dynamic slice of static // ssz objects. func DefineSliceOfStaticObjectsContent[T newableStaticObject[U], U any](c *Codec, objects *[]T, maxItems uint64) { @@ -442,11 +777,25 @@ func DefineSliceOfStaticObjectsContent[T newableStaticObject[U], U any](c *Codec DecodeSliceOfStaticObjectsContent(c.dec, objects, maxItems) return } - // No hashing, done at the offset posiiton + // No hashing, done at the offset position } -// DefineSliceOfDynamicObjectsOffset defines the next field as a dynamic slice of dynamic -// ssz objects. +// DefineSliceOfStaticObjectsContentOnFork defines the next field as a dynamic +// slice of static ssz objects if present in a fork. +func DefineSliceOfStaticObjectsContentOnFork[T newableStaticObject[U], U any](c *Codec, objects *[]T, maxItems uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfStaticObjectsContentOnFork(c.enc, *objects, filter) + return + } + if c.dec != nil { + DecodeSliceOfStaticObjectsContentOnFork(c.dec, objects, maxItems, filter) + return + } + // No hashing, done at the offset position +} + +// DefineSliceOfDynamicObjectsOffset defines the next field as a dynamic slice of +// dynamic ssz objects. func DefineSliceOfDynamicObjectsOffset[T newableDynamicObject[U], U any](c *Codec, objects *[]T, maxItems uint64) { if c.enc != nil { EncodeSliceOfDynamicObjectsOffset(c.enc, *objects) @@ -459,8 +808,22 @@ func DefineSliceOfDynamicObjectsOffset[T newableDynamicObject[U], U any](c *Code HashSliceOfDynamicObjects(c.has, *objects, maxItems) } -// DefineSliceOfDynamicObjectsContent defines the next field as a dynamic slice of dynamic -// ssz objects. +// DefineSliceOfDynamicObjectsOffsetOnFork defines the next field as a dynamic +// slice of dynamic ssz objects if present in a fork. 
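
On the sizing side that these fork-gated schemas rely on, the per-type static size cache is no longer a single value but an array indexed by fork, filled at package init by ssz.PrecomputeStaticSizeCache on the library side of this change. Generated consumption per the gen.go hunks above, with a hypothetical type name:

```go
package types

import "github.com/karalabe/ssz"

// BlockMonolith stands in for any fork-spanning static container (fields and
// DefineSSZ elided for brevity).
type BlockMonolith struct{}

// Cached static size computed on package init, one entry per known fork.
var staticSizeCacheBlockMonolith = ssz.PrecomputeStaticSizeCache((*BlockMonolith)(nil))

// SizeSSZ prefers the precomputed per-fork entry, falling back to the
// fork-conditional accumulation for forks beyond the cache.
func (obj *BlockMonolith) SizeSSZ(sizer *ssz.Sizer) (size uint32) {
	if fork := int(sizer.Fork()); fork < len(staticSizeCacheBlockMonolith) {
		return staticSizeCacheBlockMonolith[fork]
	}
	// ...accumulation emitted by generateStaticSizeAccumulator...
	return size
}
```
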
+func DefineSliceOfDynamicObjectsOffsetOnFork[T newableDynamicObject[U], U any](c *Codec, objects *[]T, maxItems uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfDynamicObjectsOffsetOnFork(c.enc, *objects, filter) + return + } + if c.dec != nil { + DecodeSliceOfDynamicObjectsOffsetOnFork(c.dec, objects, filter) + return + } + HashSliceOfDynamicObjectsOnFork(c.has, *objects, maxItems, filter) +} + +// DefineSliceOfDynamicObjectsContent defines the next field as a dynamic slice +// of dynamic ssz objects. func DefineSliceOfDynamicObjectsContent[T newableDynamicObject[U], U any](c *Codec, objects *[]T, maxItems uint64) { if c.enc != nil { EncodeSliceOfDynamicObjectsContent(c.enc, *objects) @@ -472,3 +835,17 @@ func DefineSliceOfDynamicObjectsContent[T newableDynamicObject[U], U any](c *Cod } // No hashing, done at the offset position } + +// DefineSliceOfDynamicObjectsContentOnFork defines the next field as a dynamic +// slice of dynamic ssz objects if present in a fork. +func DefineSliceOfDynamicObjectsContentOnFork[T newableDynamicObject[U], U any](c *Codec, objects *[]T, maxItems uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfDynamicObjectsContentOnFork(c.enc, *objects, filter) + return + } + if c.dec != nil { + DecodeSliceOfDynamicObjectsContentOnFork(c.dec, objects, maxItems, filter) + return + } + // No hashing, done at the offset position +} diff --git a/decoder.go b/decoder.go index 08777cc..b87a9e3 100644 --- a/decoder.go +++ b/decoder.go @@ -57,9 +57,10 @@ type Decoder struct { inBufPtrs []uintptr // Stack of starting pointers from outer calls (buffered mode) inBufEnd uintptr // Ending pointer in the input buffer (buffered mode) - err error // Any write error to halt future encoding calls + err error // Any write error to halt future encoding calls + codec *Codec // Self-referencing to pass DefineSSZ calls through (API trick) + sizer *Sizer // Self-referencing to pass SizeSSZ call through (API trick) - codec *Codec // Self-referencing to pass DefineSSZ calls through (API trick) buf [32]byte // Integer conversion buffer bufInt uint256.Int // Big.Int conversion buffer (not pointer, alloc free) @@ -109,6 +110,24 @@ func DecodeBool[T ~bool](dec *Decoder, v *T) { } } +// DecodeBoolPointerOnFork parses a boolean if present in a fork. If not, the +// boolean pointer is set to nil. +// +// This method is similar to DecodeBool, but will also initialize the pointer +// if it is not allocated yet. +func DecodeBoolPointerOnFork[T ~bool](dec *Decoder, v **T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *v = nil + return + } + // Otherwise fall back to the standard decoder + if *v == nil { + *v = new(T) + } + DecodeBool(dec, *v) +} + // DecodeUint8 parses a uint8. func DecodeUint8[T ~uint8](dec *Decoder, n *T) { if dec.err != nil { @@ -128,6 +147,24 @@ func DecodeUint8[T ~uint8](dec *Decoder, n *T) { } } +// DecodeUint8PointerOnFork parses a uint8 if present in a fork. If not, the +// uint8 pointer is set to nil. +// +// This method is similar to DecodeUint8, but will also initialize the pointer +// if it is not allocated yet. 
+func DecodeUint8PointerOnFork[T ~uint8](dec *Decoder, n **T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *n = nil + return + } + // Otherwise fall back to the standard decoder + if *n == nil { + *n = new(T) + } + DecodeUint8(dec, *n) +} + // DecodeUint16 parses a uint16. func DecodeUint16[T ~uint16](dec *Decoder, n *T) { if dec.err != nil { @@ -147,6 +184,24 @@ func DecodeUint16[T ~uint16](dec *Decoder, n *T) { } } +// DecodeUint16PointerOnFork parses a uint16 if present in a fork. If not, the +// uint16 pointer is set to nil. +// +// This method is similar to DecodeUint16, but will also initialize the pointer +// if it is not allocated yet. +func DecodeUint16PointerOnFork[T ~uint16](dec *Decoder, n **T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *n = nil + return + } + // Otherwise fall back to the standard decoder + if *n == nil { + *n = new(T) + } + DecodeUint16(dec, *n) +} + // DecodeUint32 parses a uint32. func DecodeUint32[T ~uint32](dec *Decoder, n *T) { if dec.err != nil { @@ -166,6 +221,24 @@ func DecodeUint32[T ~uint32](dec *Decoder, n *T) { } } +// DecodeUint32PointerOnFork parses a uint32 if present in a fork. If not, the +// uint32 pointer is set to nil. +// +// This method is similar to DecodeUint32, but will also initialize the pointer +// if it is not allocated yet. +func DecodeUint32PointerOnFork[T ~uint32](dec *Decoder, n **T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *n = nil + return + } + // Otherwise fall back to the standard decoder + if *n == nil { + *n = new(T) + } + DecodeUint32(dec, *n) +} + // DecodeUint64 parses a uint64. func DecodeUint64[T ~uint64](dec *Decoder, n *T) { if dec.err != nil { @@ -185,6 +258,24 @@ func DecodeUint64[T ~uint64](dec *Decoder, n *T) { } } +// DecodeUint64PointerOnFork parses a uint64 if present in a fork. If not, the +// uint64 pointer is set to nil. +// +// This method is similar to DecodeUint64, but will also initialize the pointer +// if it is not allocated yet. +func DecodeUint64PointerOnFork[T ~uint64](dec *Decoder, n **T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *n = nil + return + } + // Otherwise fall back to the standard decoder + if *n == nil { + *n = new(T) + } + DecodeUint64(dec, *n) +} + // DecodeUint256 parses a uint256. func DecodeUint256(dec *Decoder, n **uint256.Int) { if dec.err != nil { @@ -214,6 +305,17 @@ func DecodeUint256(dec *Decoder, n **uint256.Int) { } } +// DecodeUint256OnFork parses a uint256 if present in a fork. +func DecodeUint256OnFork(dec *Decoder, n **uint256.Int, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *n = nil + return + } + // Otherwise fall back to the standard decoder + DecodeUint256(dec, n) +} + // DecodeUint256BigInt parses a uint256 into a big.Int. 
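// All of these helpers repeat the same gating predicate. Restated as a
// standalone function (a readability sketch only; the library inlines the
// check rather than calling a helper), a field is active in the half-open
// fork window [Added, Removed), with a zero Removed meaning the field is
// never removed:
//
//	func forkActive(fork Fork, filter ForkFilter) bool {
//	    return fork >= filter.Added &&
//	        (filter.Removed == ForkUnknown || fork < filter.Removed)
//	}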
func DecodeUint256BigInt(dec *Decoder, n **big.Int) { if dec.err != nil { @@ -239,6 +341,17 @@ func DecodeUint256BigInt(dec *Decoder, n **big.Int) { } } +// DecodeUint256BigIntOnFork parses a uint256 into a big.Int if present in a fork. +func DecodeUint256BigIntOnFork(dec *Decoder, n **big.Int, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *n = nil + return + } + // Otherwise fall back to the standard decoder + DecodeUint256BigInt(dec, n) +} + // DecodeStaticBytes parses a static binary blob. func DecodeStaticBytes[T commonBytesLengths](dec *Decoder, blob *T) { if dec.err != nil { @@ -261,6 +374,21 @@ func DecodeStaticBytes[T commonBytesLengths](dec *Decoder, blob *T) { } } +// DecodeStaticBytesPointerOnFork parses a static binary blob if present in a fork. +// If not, the bytes are set to nil. +func DecodeStaticBytesPointerOnFork[T commonBytesLengths](dec *Decoder, blob **T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *blob = nil + return + } + // Otherwise fall back to the standard decoder + if *blob == nil { + *blob = new(T) + } + DecodeStaticBytes(dec, *blob) +} + // DecodeCheckedStaticBytes parses a static binary blob. func DecodeCheckedStaticBytes(dec *Decoder, blob *[]byte, size uint64) { if dec.err != nil { @@ -285,11 +413,22 @@ func DecodeCheckedStaticBytes(dec *Decoder, blob *[]byte, size uint64) { } } -// DecodeDynamicBytesOffset parses a dynamic binary blob. +// DecodeDynamicBytesOffset parses the offset of a dynamic binary blob. func DecodeDynamicBytesOffset(dec *Decoder, blob *[]byte) { dec.decodeOffset(false) } +// DecodeDynamicBytesOffsetOnFork parses the offset of a dynamic binary blob if +// present in a fork. +func DecodeDynamicBytesOffsetOnFork(dec *Decoder, blob *[]byte, filter ForkFilter) { + // If the field is not active in the current fork, skip parsing the offset + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard decoder + DecodeDynamicBytesOffset(dec, blob) +} + // DecodeDynamicBytesContent is the lazy data reader of DecodeDynamicBytesOffset. func DecodeDynamicBytesContent(dec *Decoder, blob *[]byte, maxSize uint64) { if dec.err != nil { @@ -323,6 +462,17 @@ func DecodeDynamicBytesContent(dec *Decoder, blob *[]byte, maxSize uint64) { } } +// DecodeDynamicBytesContentOnFork is the lazy data reader of DecodeDynamicBytesOffsetOnFork. +func DecodeDynamicBytesContentOnFork(dec *Decoder, blob *[]byte, maxSize uint64, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *blob = nil + return + } + // Otherwise fall back to the standard decoder + DecodeDynamicBytesContent(dec, blob, maxSize) +} + // DecodeStaticObject parses a static ssz object. func DecodeStaticObject[T newableStaticObject[U], U any](dec *Decoder, obj *T) { if dec.err != nil { @@ -334,11 +484,32 @@ func DecodeStaticObject[T newableStaticObject[U], U any](dec *Decoder, obj *T) { (*obj).DefineSSZ(dec.codec) } +// DecodeStaticObjectOnFork parses a static ssz object if present in a fork.
+func DecodeStaticObjectOnFork[T newableStaticObject[U], U any](dec *Decoder, obj *T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *obj = nil + return + } + // Otherwise fall back to the standard decoder + DecodeStaticObject(dec, obj) +} + // DecodeDynamicObjectOffset parses a dynamic ssz object. func DecodeDynamicObjectOffset[T newableDynamicObject[U], U any](dec *Decoder, obj *T) { dec.decodeOffset(false) } +// DecodeDynamicObjectOffsetOnFork parses a dynamic ssz object if present in a fork. +func DecodeDynamicObjectOffsetOnFork[T newableDynamicObject[U], U any](dec *Decoder, obj *T, filter ForkFilter) { + // If the field is not active in the current fork, skip parsing the offset + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard decoder + DecodeDynamicObjectOffset(dec, obj) +} + // DecodeDynamicObjectContent is the lazy data reader of DecodeDynamicObjectOffset. func DecodeDynamicObjectContent[T newableDynamicObject[U], U any](dec *Decoder, obj *T) { if dec.err != nil { @@ -354,11 +525,22 @@ func DecodeDynamicObjectContent[T newableDynamicObject[U], U any](dec *Decoder, if *obj == nil { *obj = T(new(U)) } - dec.startDynamics((*obj).SizeSSZ(true)) + dec.startDynamics((*obj).SizeSSZ(dec.sizer, true)) (*obj).DefineSSZ(dec.codec) dec.flushDynamics() } +// DecodeDynamicObjectContentOnFork is the lazy data reader of DecodeDynamicObjectOffsetOnFork. +func DecodeDynamicObjectContentOnFork[T newableDynamicObject[U], U any](dec *Decoder, obj *T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *obj = nil + return + } + // Otherwise fall back to the standard decoder + DecodeDynamicObjectContent(dec, obj) +} + // DecodeArrayOfBits parses a static array of (packed) bits. func DecodeArrayOfBits[T commonBitsLengths](dec *Decoder, bits *T, size uint64) { if dec.err != nil { @@ -391,11 +573,37 @@ func DecodeArrayOfBits[T commonBitsLengths](dec *Decoder, bits *T, size uint64) } } +// DecodeArrayOfBitsPointerOnFork parses a static array of (packed) bits if present +// in a fork. If not, the bit array pointer is set to nil. +func DecodeArrayOfBitsPointerOnFork[T commonBitsLengths](dec *Decoder, bits **T, size uint64, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *bits = nil + return + } + // Otherwise fall back to the standard decoder + if *bits == nil { + *bits = new(T) + } + DecodeArrayOfBits(dec, *bits, size) +} + // DecodeSliceOfBitsOffset parses a dynamic slice of (packed) bits. func DecodeSliceOfBitsOffset(dec *Decoder, bitlist *bitfield.Bitlist) { dec.decodeOffset(false) } +// DecodeSliceOfBitsOffsetOnFork parses a dynamic slice of (packed) bits if present +// in a fork. 
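// The practical consequence of the OnFork decoders is that one monolith type
// can be decoded at any fork, with gated fields populated or nil-ed out
// accordingly. A sketch, reusing the hypothetical PayloadMono container from
// the codec helpers (parisBlob and shanghaiBlob are placeholder inputs):
//
//	obj := new(PayloadMono)
//	_ = ssz.DecodeFromBytes(parisBlob, obj, ssz.ForkParis)       // obj.Withdrawals == nil, gated out
//	_ = ssz.DecodeFromBytes(shanghaiBlob, obj, ssz.ForkShanghai) // obj.Withdrawals decoded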
+func DecodeSliceOfBitsOffsetOnFork(dec *Decoder, bitlist *bitfield.Bitlist, filter ForkFilter) { + // If the field is not active in the current fork, skip parsing the offset + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfBitsOffset(dec, bitlist) +} + // DecodeSliceOfBitsContent is the lazy data reader of DecodeSliceOfBitsOffset. func DecodeSliceOfBitsContent(dec *Decoder, bitlist *bitfield.Bitlist, maxBits uint64) { if dec.err != nil { @@ -444,6 +652,17 @@ func DecodeSliceOfBitsContent(dec *Decoder, bitlist *bitfield.Bitlist, maxBits u } } +// DecodeSliceOfBitsContentOnFork is the lazy data reader of DecodeSliceOfBitsOffsetOnFork. +func DecodeSliceOfBitsContentOnFork(dec *Decoder, bitlist *bitfield.Bitlist, maxBits uint64, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *bitlist = nil + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfBitsContent(dec, bitlist, maxBits) +} + // DecodeArrayOfUint64s parses a static array of uint64s. func DecodeArrayOfUint64s[T commonUint64sLengths](dec *Decoder, ns *T) { if dec.err != nil { @@ -474,11 +693,37 @@ func DecodeArrayOfUint64s[T commonUint64sLengths](dec *Decoder, ns *T) { } } +// DecodeArrayOfUint64sPointerOnFork parses a static array of uint64s if present +// in a fork. If not, the uint64 array pointer is set to nil. +func DecodeArrayOfUint64sPointerOnFork[T commonUint64sLengths](dec *Decoder, ns **T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *ns = nil + return + } + // Otherwise fall back to the standard decoder + if *ns == nil { + *ns = new(T) + } + DecodeArrayOfUint64s(dec, *ns) +} + // DecodeSliceOfUint64sOffset parses a dynamic slice of uint64s. func DecodeSliceOfUint64sOffset[T ~uint64](dec *Decoder, ns *[]T) { dec.decodeOffset(false) } +// DecodeSliceOfUint64sOffsetOnFork parses a dynamic slice of uint64s if present +// in a fork. +func DecodeSliceOfUint64sOffsetOnFork[T ~uint64](dec *Decoder, ns *[]T, filter ForkFilter) { + // If the field is not active in the current fork, skip parsing the offset + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfUint64sOffset(dec, ns) +} + // DecodeSliceOfUint64sContent is the lazy data reader of DecodeSliceOfUint64sOffset. func DecodeSliceOfUint64sContent[T ~uint64](dec *Decoder, ns *[]T, maxItems uint64) { if dec.err != nil { @@ -528,6 +773,17 @@ func DecodeSliceOfUint64sContent[T ~uint64](dec *Decoder, ns *[]T, maxItems uint } } +// DecodeSliceOfUint64sContentOnFork is the lazy data reader of DecodeSliceOfUint64sOffsetOnFork.
+func DecodeSliceOfUint64sContentOnFork[T ~uint64](dec *Decoder, ns *[]T, maxItems uint64, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *ns = nil + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfUint64sContent(dec, ns, maxItems) +} + // DecodeArrayOfStaticBytes parses a static array of static binary blobs. func DecodeArrayOfStaticBytes[T commonBytesArrayLengths[U], U commonBytesLengths](dec *Decoder, blobs *T) { // The code below should have used `(*blobs)[:]`, alas Go's generics compiler @@ -604,6 +860,17 @@ func DecodeSliceOfStaticBytesOffset[T commonBytesLengths](dec *Decoder, blobs *[ dec.decodeOffset(false) } +// DecodeSliceOfStaticBytesOffsetOnFork parses a dynamic slice of static binary +// blobs if present in a fork. +func DecodeSliceOfStaticBytesOffsetOnFork[T commonBytesLengths](dec *Decoder, blobs *[]T, filter ForkFilter) { + // If the field is not active in the current fork, skip parsing the offset + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfStaticBytesOffset(dec, blobs) +} + // DecodeSliceOfStaticBytesContent is the lazy data reader of DecodeSliceOfStaticBytesOffset. func DecodeSliceOfStaticBytesContent[T commonBytesLengths](dec *Decoder, blobs *[]T, maxItems uint64) { if dec.err != nil { @@ -663,11 +930,33 @@ func DecodeSliceOfStaticBytesContent[T commonBytesLengths](dec *Decoder, blobs * } } +// DecodeSliceOfStaticBytesContentOnFork is the lazy data reader of DecodeSliceOfStaticBytesOffsetOnFork. +func DecodeSliceOfStaticBytesContentOnFork[T commonBytesLengths](dec *Decoder, blobs *[]T, maxItems uint64, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *blobs = nil + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfStaticBytesContent(dec, blobs, maxItems) +} + // DecodeSliceOfDynamicBytesOffset parses a dynamic slice of dynamic binary blobs. func DecodeSliceOfDynamicBytesOffset(dec *Decoder, blobs *[][]byte) { dec.decodeOffset(false) } +// DecodeSliceOfDynamicBytesOffsetOnFork parses a dynamic slice of dynamic binary +// blobs if present in a fork. +func DecodeSliceOfDynamicBytesOffsetOnFork(dec *Decoder, blobs *[][]byte, filter ForkFilter) { + // If the field is not active in the current fork, skip parsing the offset + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfDynamicBytesOffset(dec, blobs) +} + // DecodeSliceOfDynamicBytesContent is the lazy data reader of DecodeSliceOfDynamicBytesOffset. func DecodeSliceOfDynamicBytesContent(dec *Decoder, blobs *[][]byte, maxItems uint64, maxSize uint64) { if dec.err != nil { @@ -723,11 +1012,33 @@ func DecodeSliceOfDynamicBytesContent(dec *Decoder, blobs *[][]byte, maxItems ui } } +// DecodeSliceOfDynamicBytesContentOnFork is the lazy data reader of DecodeSliceOfDynamicBytesOffsetOnFork. 
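// The explicit nil-ing in the ContentOnFork readers is what makes object
// reuse safe across forks: re-decoding into an already populated struct at
// an older fork clears fields added later instead of leaving stale values
// behind. A sketch with the hypothetical PayloadMono container (newBlob and
// oldBlob are placeholder inputs):
//
//	obj := new(PayloadMono)
//	_ = ssz.DecodeFromBytes(newBlob, obj, ssz.ForkShanghai) // Withdrawals set
//	_ = ssz.DecodeFromBytes(oldBlob, obj, ssz.ForkParis)    // Withdrawals nil again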
+func DecodeSliceOfDynamicBytesContentOnFork(dec *Decoder, blobs *[][]byte, maxItems uint64, maxSize uint64, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *blobs = nil + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfDynamicBytesContent(dec, blobs, maxItems, maxSize) +} + // DecodeSliceOfStaticObjectsOffset parses a dynamic slice of static ssz objects. func DecodeSliceOfStaticObjectsOffset[T newableStaticObject[U], U any](dec *Decoder, objects *[]T) { dec.decodeOffset(false) } +// DecodeSliceOfStaticObjectsOffsetOnFork parses a dynamic slice of static ssz +// objects if present in a fork. +func DecodeSliceOfStaticObjectsOffsetOnFork[T newableStaticObject[U], U any](dec *Decoder, objects *[]T, filter ForkFilter) { + // If the field is not active in the current fork, skip parsing the offset + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfStaticObjectsOffset(dec, objects) +} + // DecodeSliceOfStaticObjectsContent is the lazy data reader of DecodeSliceOfStaticObjectsOffset. func DecodeSliceOfStaticObjectsContent[T newableStaticObject[U], U any](dec *Decoder, objects *[]T, maxItems uint64) { if dec.err != nil { @@ -743,7 +1054,7 @@ func DecodeSliceOfStaticObjectsContent[T newableStaticObject[U], U any](dec *Dec // Compute the number of items based on the item size of the type var sizer T // SizeSSZ is on *U, objects is static, so nil T is fine - itemSize := sizer.SizeSSZ() + itemSize := sizer.SizeSSZ(dec.sizer) if size%itemSize != 0 { dec.err = fmt.Errorf("%w: length %d, item size %d", ErrDynamicStaticsIndivisible, size, itemSize) return @@ -774,11 +1085,33 @@ func DecodeSliceOfStaticObjectsContent[T newableStaticObject[U], U any](dec *Dec } } +// DecodeSliceOfStaticObjectsContentOnFork is the lazy data reader of DecodeSliceOfStaticObjectsOffsetOnFork. +func DecodeSliceOfStaticObjectsContentOnFork[T newableStaticObject[U], U any](dec *Decoder, objects *[]T, maxItems uint64, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *objects = nil + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfStaticObjectsContent(dec, objects, maxItems) +} + // DecodeSliceOfDynamicObjectsOffset parses a dynamic slice of dynamic ssz objects. func DecodeSliceOfDynamicObjectsOffset[T newableDynamicObject[U], U any](dec *Decoder, objects *[]T) { dec.decodeOffset(false) } +// DecodeSliceOfDynamicObjectsOffsetOnFork parses a dynamic slice of dynamic ssz +// objects if present in a fork. +func DecodeSliceOfDynamicObjectsOffsetOnFork[T newableDynamicObject[U], U any](dec *Decoder, objects *[]T, filter ForkFilter) { + // If the field is not active in the current fork, skip parsing the offset + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfDynamicObjectsOffset(dec, objects) +} + // DecodeSliceOfDynamicObjectsContent is the lazy data reader of DecodeSliceOfDynamicObjectsOffset. 
func DecodeSliceOfDynamicObjectsContent[T newableDynamicObject[U], U any](dec *Decoder, objects *[]T, maxItems uint64) { if dec.err != nil { @@ -835,6 +1168,17 @@ func DecodeSliceOfDynamicObjectsContent[T newableDynamicObject[U], U any](dec *D } } +// DecodeSliceOfDynamicObjectsContentOnFork is the lazy data reader of DecodeSliceOfDynamicObjectsOffsetOnFork. +func DecodeSliceOfDynamicObjectsContentOnFork[T newableDynamicObject[U], U any](dec *Decoder, objects *[]T, maxItems uint64, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *objects = nil + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfDynamicObjectsContent(dec, objects, maxItems) +} + // decodeOffset decodes the next uint32 as an offset and validates it. func (dec *Decoder) decodeOffset(list bool) { if dec.err != nil { @@ -871,7 +1215,7 @@ func (dec *Decoder) decodeOffset(list bool) { dec.offsets = append(dec.offsets, offset) } -// retrieveSize retrieves the length of the nest dynamic item based on the seen +// retrieveSize retrieves the length of the next dynamic item based on the seen // and cached offsets. func (dec *Decoder) retrieveSize() uint32 { // If sizes aren't yet available, pre-compute them all. The reason we use a diff --git a/encoder.go b/encoder.go index 2f262e1..3885723 100644 --- a/encoder.go +++ b/encoder.go @@ -8,6 +8,7 @@ import ( "encoding/binary" "io" "math/big" + "reflect" "unsafe" "github.com/holiman/uint256" @@ -19,6 +20,7 @@ var ( boolFalse = []byte{0x00} boolTrue = []byte{0x01} uint256Zero = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + bitlistZero = bitfield.NewBitlist(0) ) // Encoder is a wrapper around an io.Writer or a []byte buffer to implement SSZ @@ -71,6 +73,7 @@ type Encoder struct { err error // Any write error to halt future encoding calls codec *Codec // Self-referencing to pass DefineSSZ calls through (API trick) + sizer *Sizer // Self-referencing to pass SizeSSZ call through (API trick) buf [32]byte // Integer conversion buffer bufInt uint256.Int // Big.Int conversion buffer (not pointer, alloc free) @@ -99,6 +102,22 @@ func EncodeBool[T ~bool](enc *Encoder, v T) { } } +// EncodeBoolPointerOnFork serializes a boolean if present in a fork. +// +// Note, a nil pointer is serialized as false. +func EncodeBoolPointerOnFork[T ~bool](enc *Encoder, v *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + if v == nil { + EncodeBool[bool](enc, false) + return + } + EncodeBool(enc, *v) +} + // EncodeUint8 serializes a uint8. func EncodeUint8[T ~uint8](enc *Encoder, n T) { if enc.outWriter != nil { @@ -113,6 +132,22 @@ func EncodeUint8[T ~uint8](enc *Encoder, n T) { } } +// EncodeUint8PointerOnFork serializes a uint8 if present in a fork. +// +// Note, a nil pointer is serialized as zero. 
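// The pointer-taking encoders mirror the pointer-taking decoders: an
// inactive filter emits nothing at all, whereas an active one must always
// produce the field's bytes, falling back to the type's zero value for nil.
// In other words (illustrative only; the encoders are normally reached
// through the define layer, and filter is assumed active here):
//
//	EncodeUint64PointerOnFork(enc, (*uint64)(nil), filter) // writes 8 zero bytes
//	EncodeUint64PointerOnFork(enc, new(uint64), filter)    // identical output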
+func EncodeUint8PointerOnFork[T ~uint8](enc *Encoder, n *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + if n == nil { + EncodeUint8[uint8](enc, 0) + return + } + EncodeUint8(enc, *n) +} + // EncodeUint16 serializes a uint16. func EncodeUint16[T ~uint16](enc *Encoder, n T) { if enc.outWriter != nil { @@ -127,6 +162,22 @@ func EncodeUint16[T ~uint16](enc *Encoder, n T) { } } +// EncodeUint16PointerOnFork serializes a uint16 if present in a fork. +// +// Note, a nil pointer is serialized as zero. +func EncodeUint16PointerOnFork[T ~uint16](enc *Encoder, n *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + if n == nil { + EncodeUint16[uint16](enc, 0) + return + } + EncodeUint16(enc, *n) +} + // EncodeUint32 serializes a uint32. func EncodeUint32[T ~uint32](enc *Encoder, n T) { if enc.outWriter != nil { @@ -141,9 +192,24 @@ func EncodeUint32[T ~uint32](enc *Encoder, n T) { } } +// EncodeUint32PointerOnFork serializes a uint32 if present in a fork. +// +// Note, a nil pointer is serialized as zero. +func EncodeUint32PointerOnFork[T ~uint32](enc *Encoder, n *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + if n == nil { + EncodeUint32[uint32](enc, 0) + return + } + EncodeUint32(enc, *n) +} + // EncodeUint64 serializes a uint64. func EncodeUint64[T ~uint64](enc *Encoder, n T) { - // Nope, dive into actual encoding if enc.outWriter != nil { if enc.err != nil { return @@ -156,6 +222,22 @@ func EncodeUint64[T ~uint64](enc *Encoder, n T) { } } +// EncodeUint64PointerOnFork serializes a uint64 if present in a fork. +// +// Note, a nil pointer is serialized as zero. +func EncodeUint64PointerOnFork[T ~uint64](enc *Encoder, n *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + if n == nil { + EncodeUint64[uint64](enc, 0) + return + } + EncodeUint64(enc, *n) +} + // EncodeUint256 serializes a uint256. // // Note, a nil pointer is serialized as zero. @@ -180,7 +262,19 @@ func EncodeUint256(enc *Encoder, n *uint256.Int) { } } -// EncodeUint256BigInt serializes a big.Ing as uint256. +// EncodeUint256OnFork serializes a uint256 if present in a fork. +// +// Note, a nil pointer is serialized as zero. +func EncodeUint256OnFork(enc *Encoder, n *uint256.Int, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeUint256(enc, n) +} + +// EncodeUint256BigInt serializes a big.Int as uint256. // // Note, a nil pointer is serialized as zero. // Note, an overflow will be silently dropped. 
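// A typical consumer of the uint256 variant would be a London-style base fee
// field in a monolith container, assuming a matching DefineUint256OnFork
// wrapper on the define layer (only the encode/decode/hash halves appear in
// this hunk, so that wrapper name is an assumption):
//
//	ssz.DefineUint256OnFork(codec, &obj.BaseFee, ssz.ForkFilter{Added: ssz.ForkLondon})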
@@ -207,6 +301,20 @@ func EncodeUint256BigInt(enc *Encoder, n *big.Int) { } } +// EncodeUint256BigIntOnFork serializes a big.Int as uint256 if present in a +// fork. +// +// Note, a nil pointer is serialized as zero. +// Note, an overflow will be silently dropped. +func EncodeUint256BigIntOnFork(enc *Encoder, n *big.Int, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeUint256BigInt(enc, n) +} + // EncodeStaticBytes serializes a static binary blob. // // The blob is passed by pointer to avoid high stack copy costs and a potential @@ -227,8 +335,31 @@ func EncodeStaticBytes[T commonBytesLengths](enc *Encoder, blob *T) { } } +// EncodeStaticBytesPointerOnFork serializes a static binary blob if present in +// a fork. +// +// Note, a nil pointer is serialized as a zero-value blob. +func EncodeStaticBytesPointerOnFork[T commonBytesLengths](enc *Encoder, blob *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + if blob == nil { + enc.encodeZeroes(reflect.TypeFor[T]().Len()) + return + } + EncodeStaticBytes(enc, blob) +} + // EncodeCheckedStaticBytes serializes a static binary blob. -func EncodeCheckedStaticBytes(enc *Encoder, blob []byte) { +func EncodeCheckedStaticBytes(enc *Encoder, blob []byte, size uint64) { + // If the blob is nil, write a batch of zeroes and exit + if blob == nil { + enc.encodeZeroes(int(size)) + return + } + // Blob not nil, write the actual data content if enc.outWriter != nil { if enc.err != nil { return @@ -255,6 +386,17 @@ func EncodeDynamicBytesOffset(enc *Encoder, blob []byte) { enc.offset += uint32(len(blob)) } +// EncodeDynamicBytesOffsetOnFork serializes a dynamic binary blob if present in +// a fork. +func EncodeDynamicBytesOffsetOnFork(enc *Encoder, blob []byte, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeDynamicBytesOffset(enc, blob) +} + // EncodeDynamicBytesContent is the lazy data writer for EncodeDynamicBytesOffset. func EncodeDynamicBytesContent(enc *Encoder, blob []byte) { if enc.outWriter != nil { @@ -268,16 +410,47 @@ func EncodeDynamicBytesContent(enc *Encoder, blob []byte) { } } +// EncodeDynamicBytesContentOnFork is the lazy data writer for EncodeDynamicBytesOffsetOnFork. +func EncodeDynamicBytesContentOnFork(enc *Encoder, blob []byte, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeDynamicBytesContent(enc, blob) +} + // EncodeStaticObject serializes a static ssz object. -func EncodeStaticObject(enc *Encoder, obj StaticObject) { +// +// Note, nil will be encoded as a zero-value initialized object. +func EncodeStaticObject[T newableStaticObject[U], U any](enc *Encoder, obj T) { if enc.err != nil { return } + if obj == nil { + // If the object is nil, pull up its zero value. This will be very slow, + // but it should not happen in production, only during tests mostly. + obj = zeroValueStatic[T, U]() + } obj.DefineSSZ(enc.codec) } +// EncodeStaticObjectOnFork serializes a static ssz object if present in a fork. +// +// Note, nil will be encoded as a zero-value initialized object. +func EncodeStaticObjectOnFork[T newableStaticObject[U], U any](enc *Encoder, obj T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeStaticObject(enc, obj) +} + // EncodeDynamicObjectOffset serializes a dynamic ssz object. -func EncodeDynamicObjectOffset(enc *Encoder, obj DynamicObject) { +// +// Note, nil will be encoded as a zero-value initialized object. +func EncodeDynamicObjectOffset[T newableDynamicObject[U], U any](enc *Encoder, obj T) { if enc.outWriter != nil { if enc.err != nil { return @@ -288,18 +461,55 @@ func EncodeDynamicObjectOffset(enc *Encoder, obj DynamicObject) { binary.LittleEndian.PutUint32(enc.outBuffer, enc.offset) enc.outBuffer = enc.outBuffer[4:] } - enc.offset += obj.SizeSSZ(false) + // If the object is nil, pull up its zero value. This will be very slow, but + // it should not happen in production, only during tests mostly. + if obj == nil { + obj = zeroValueDynamic[T, U]() + } + enc.offset += obj.SizeSSZ(enc.sizer, false) +} + +// EncodeDynamicObjectOffsetOnFork serializes a dynamic ssz object if present in +// a fork. +// +// Note, nil will be encoded as a zero-value initialized object. +func EncodeDynamicObjectOffsetOnFork[T newableDynamicObject[U], U any](enc *Encoder, obj T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeDynamicObjectOffset(enc, obj) } // EncodeDynamicObjectContent is the lazy data writer for EncodeDynamicObjectOffset. -func EncodeDynamicObjectContent(enc *Encoder, obj DynamicObject) { +// +// Note, nil will be encoded as a zero-value initialized object. +func EncodeDynamicObjectContent[T newableDynamicObject[U], U any](enc *Encoder, obj T) { if enc.err != nil { return } - enc.offsetDynamics(obj.SizeSSZ(true)) + // If the object is nil, pull up its zero value. This will be very slow, but + // it should not happen in production, only during tests mostly. + if obj == nil { + obj = zeroValueDynamic[T, U]() + } + enc.offsetDynamics(obj.SizeSSZ(enc.sizer, true)) obj.DefineSSZ(enc.codec) } +// EncodeDynamicObjectContentOnFork is the lazy data writer for EncodeDynamicObjectOffsetOnFork. +// +// Note, nil will be encoded as a zero-value initialized object. +func EncodeDynamicObjectContentOnFork[T newableDynamicObject[U], U any](enc *Encoder, obj T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeDynamicObjectContent(enc, obj) +} + // EncodeArrayOfBits serializes a static array of (packed) bits.
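// zeroValueStatic and zeroValueDynamic are defined elsewhere in this change;
// judging by the "very slow" caveat they plausibly build a fully allocated
// object via reflection rather than a plain new(U), though that is an
// inference, not something this hunk shows. The observable contract is
// simpler: under an active filter, a nil object encodes byte-for-byte like a
// freshly allocated one (illustrative, assuming the Withdrawal example type):
//
//	var missing *Withdrawal // deliberately nil
//	EncodeStaticObjectOnFork(enc, missing, filter)         // zero-value bytes
//	EncodeStaticObjectOnFork(enc, new(Withdrawal), filter) // identical output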
func EncodeArrayOfBits[T commonBitsLengths](enc *Encoder, bits *T) { if enc.outWriter != nil { @@ -317,7 +527,26 @@ func EncodeArrayOfBits[T commonBitsLengths](enc *Encoder, bits *T) { } } +// EncodeArrayOfBitsPointerOnFork serializes a static array of (packed) bits if +// present in a fork. +// +// Note, a nil pointer is serialized as a zero-value bit array. +func EncodeArrayOfBitsPointerOnFork[T commonBitsLengths](enc *Encoder, bits *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + if bits == nil { + enc.encodeZeroes(reflect.TypeFor[T]().Len()) + return + } + EncodeArrayOfBits(enc, bits) +} + // EncodeSliceOfBitsOffset serializes a dynamic slice of (packed) bits. +// +// Note, a nil slice of bits is serialized as an empty bit list. func EncodeSliceOfBitsOffset(enc *Encoder, bits bitfield.Bitlist) { if enc.outWriter != nil { if enc.err != nil { @@ -329,22 +558,62 @@ func EncodeSliceOfBitsOffset(enc *Encoder, bits bitfield.Bitlist) { binary.LittleEndian.PutUint32(enc.outBuffer, enc.offset) enc.outBuffer = enc.outBuffer[4:] } - enc.offset += uint32(len(bits)) + if bits != nil { + enc.offset += uint32(len(bits)) + } else { + enc.offset += uint32(len(bitlistZero)) + } +} + +// EncodeSliceOfBitsOffsetOnFork serializes a dynamic slice of (packed) bits if +// present in a fork. +// +// Note, a nil slice of bits is serialized as an empty bit list. +func EncodeSliceOfBitsOffsetOnFork(enc *Encoder, bits bitfield.Bitlist, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfBitsOffset(enc, bits) } // EncodeSliceOfBitsContent is the lazy data writer for EncodeSliceOfBitsOffset. +// +// Note, a nil slice of bits is serialized as an empty bit list. func EncodeSliceOfBitsContent(enc *Encoder, bits bitfield.Bitlist) { if enc.outWriter != nil { if enc.err != nil { return } - _, enc.err = enc.outWriter.Write(bits) // bitfield.Bitlist already has the length bit set + if bits != nil { + _, enc.err = enc.outWriter.Write(bits) // bitfield.Bitlist already has the length bit set + } else { + _, enc.err = enc.outWriter.Write(bitlistZero) + } } else { - copy(enc.outBuffer, bits) - enc.outBuffer = enc.outBuffer[len(bits):] // bitfield.Bitlist already has the length bit set + if bits != nil { + copy(enc.outBuffer, bits) + enc.outBuffer = enc.outBuffer[len(bits):] // bitfield.Bitlist already has the length bit set + } else { + copy(enc.outBuffer, bitlistZero) + enc.outBuffer = enc.outBuffer[len(bitlistZero):] + } } } +// EncodeSliceOfBitsContentOnFork is the lazy data writer for EncodeSliceOfBitsOffsetOnFork. +// +// Note, a nil slice of bits is serialized as an empty bit list. +func EncodeSliceOfBitsContentOnFork(enc *Encoder, bits bitfield.Bitlist, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfBitsContent(enc, bits) +} + // EncodeArrayOfUint64s serializes a static array of uint64s. 
// // The reason the ns is passed by pointer and not by value is to prevent it from @@ -373,6 +642,23 @@ func EncodeArrayOfUint64s[T commonUint64sLengths](enc *Encoder, ns *T) { } } +// EncodeArrayOfUint64sPointerOnFork serializes a static array of uint64s if +// present in a fork. +// +// Note, a nil pointer is serialized as a uint64 array filled with zeroes. +func EncodeArrayOfUint64sPointerOnFork[T commonUint64sLengths](enc *Encoder, ns *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + if ns == nil { + enc.encodeZeroes(reflect.TypeFor[T]().Len() * 8) + return + } + EncodeArrayOfUint64s(enc, ns) +} + // EncodeSliceOfUint64sOffset serializes a dynamic slice of uint64s. func EncodeSliceOfUint64sOffset[T ~uint64](enc *Encoder, ns []T) { // Nope, dive into actual encoding @@ -391,6 +677,17 @@ func EncodeSliceOfUint64sOffset[T ~uint64](enc *Encoder, ns []T) { } } +// EncodeSliceOfUint64sOffsetOnFork serializes a dynamic slice of uint64s if +// present in a fork. +func EncodeSliceOfUint64sOffsetOnFork[T ~uint64](enc *Encoder, ns []T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfUint64sOffset(enc, ns) +} + // EncodeSliceOfUint64sContent is the lazy data writer for EncodeSliceOfUint64sOffset. func EncodeSliceOfUint64sContent[T ~uint64](enc *Encoder, ns []T) { if enc.outWriter != nil { @@ -409,6 +706,16 @@ func EncodeSliceOfUint64sContent[T ~uint64](enc *Encoder, ns []T) { } } +// EncodeSliceOfUint64sContentOnFork is the lazy data writer for EncodeSliceOfUint64sOffsetOnFork. +func EncodeSliceOfUint64sContentOnFork[T ~uint64](enc *Encoder, ns []T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfUint64sContent(enc, ns) +} + // EncodeArrayOfStaticBytes serializes a static array of static binary // blobs. // @@ -448,7 +755,12 @@ func EncodeUnsafeArrayOfStaticBytes[T commonBytesLengths](enc *Encoder, blobs [] // EncodeCheckedArrayOfStaticBytes serializes a static array of static binary // blobs. -func EncodeCheckedArrayOfStaticBytes[T commonBytesLengths](enc *Encoder, blobs []T) { +func EncodeCheckedArrayOfStaticBytes[T commonBytesLengths](enc *Encoder, blobs []T, size uint64) { + // If the blobs are nil, write a batch of zeroes and exit + if blobs == nil { + enc.encodeZeroes(int(size) * reflect.TypeFor[T]().Len()) + return + } + // Internally this method is essentially calling EncodeStaticBytes on all // the blobs in a loop. Practically, we've inlined that call to make things // a *lot* faster. @@ -488,6 +800,16 @@ func EncodeSliceOfStaticBytesOffset[T commonBytesLengths](enc *Encoder, blobs [] } } +// EncodeSliceOfStaticBytesOffsetOnFork serializes a dynamic slice of static binary +// blobs if present in a fork.
+func EncodeSliceOfStaticBytesOffsetOnFork[T commonBytesLengths](enc *Encoder, blobs []T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfStaticBytesOffset(enc, blobs) +} + // EncodeSliceOfStaticBytesContent is the lazy data writer for EncodeSliceOfStaticBytesOffset. func EncodeSliceOfStaticBytesContent[T commonBytesLengths](enc *Encoder, blobs []T) { // Internally this method is essentially calling EncodeStaticBytes on all @@ -512,7 +834,18 @@ func EncodeSliceOfStaticBytesContent[T commonBytesLengths](enc *Encoder, blobs [ } } -// EncodeSliceOfDynamicBytesOffset serializes a dynamic slice of dynamic binary blobs. +// EncodeSliceOfStaticBytesContentOnFork is the lazy data writer for EncodeSliceOfStaticBytesOffsetOnFork. +func EncodeSliceOfStaticBytesContentOnFork[T commonBytesLengths](enc *Encoder, blobs []T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfStaticBytesContent(enc, blobs) +} + +// EncodeSliceOfDynamicBytesOffset serializes a dynamic slice of dynamic binary +// blobs. func EncodeSliceOfDynamicBytesOffset(enc *Encoder, blobs [][]byte) { if enc.outWriter != nil { if enc.err != nil { @@ -529,6 +862,17 @@ func EncodeSliceOfDynamicBytesOffset(enc *Encoder, blobs [][]byte) { } } +// EncodeSliceOfDynamicBytesOffsetOnFork serializes a dynamic slice of dynamic +// binary blobs if present in a fork. +func EncodeSliceOfDynamicBytesOffsetOnFork(enc *Encoder, blobs [][]byte, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfDynamicBytesOffset(enc, blobs) +} + // EncodeSliceOfDynamicBytesContent is the lazy data writer for EncodeSliceOfDynamicBytesOffset. func EncodeSliceOfDynamicBytesContent(enc *Encoder, blobs [][]byte) { // Nope, dive into actual encoding @@ -577,6 +921,16 @@ func EncodeSliceOfDynamicBytesContent(enc *Encoder, blobs [][]byte) { } } +// EncodeSliceOfDynamicBytesContentOnFork is the lazy data writer for EncodeSliceOfDynamicBytesOffsetOnFork. +func EncodeSliceOfDynamicBytesContentOnFork(enc *Encoder, blobs [][]byte, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfDynamicBytesContent(enc, blobs) +} + // EncodeSliceOfStaticObjectsOffset serializes a dynamic slice of static ssz objects. func EncodeSliceOfStaticObjectsOffset[T StaticObject](enc *Encoder, objects []T) { if enc.outWriter != nil { @@ -590,8 +944,19 @@ func EncodeSliceOfStaticObjectsOffset[T StaticObject](enc *Encoder, objects []T) enc.outBuffer = enc.outBuffer[4:] } if items := len(objects); items > 0 { - enc.offset += uint32(items) * objects[0].SizeSSZ() + enc.offset += uint32(items) * objects[0].SizeSSZ(enc.sizer) + } +} + +// EncodeSliceOfStaticObjectsOffsetOnFork serializes a dynamic slice of static ssz +// objects if present in a fork.
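// Since every element of a static-object slice has the same size under a
// given fork, the offset pass needs just one SizeSSZ probe. With the 44-byte
// Withdrawal example type, a 16-element slice advances the running offset by
// a single multiplication:
//
//	enc.offset += uint32(16) * 44 // 704 bytes of slice payload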
+func EncodeSliceOfStaticObjectsOffsetOnFork[T StaticObject](enc *Encoder, objects []T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return } + // Otherwise fall back to the standard encoder + EncodeSliceOfStaticObjectsOffset(enc, objects) } // EncodeSliceOfStaticObjectsContent is the lazy data writer for EncodeSliceOfStaticObjectsOffset. @@ -604,7 +969,18 @@ func EncodeSliceOfStaticObjectsContent[T StaticObject](enc *Encoder, objects []T } } -// EncodeSliceOfDynamicObjectsOffset serializes a dynamic slice of dynamic ssz objects. +// EncodeSliceOfStaticObjectsContentOnFork is the lazy data writer for EncodeSliceOfStaticObjectsOffsetOnFork. +func EncodeSliceOfStaticObjectsContentOnFork[T StaticObject](enc *Encoder, objects []T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfStaticObjectsContent(enc, objects) +} + +// EncodeSliceOfDynamicObjectsOffset serializes a dynamic slice of dynamic ssz +// objects. func EncodeSliceOfDynamicObjectsOffset[T DynamicObject](enc *Encoder, objects []T) { if enc.outWriter != nil { if enc.err != nil { @@ -617,10 +993,21 @@ func EncodeSliceOfDynamicObjectsOffset[T DynamicObject](enc *Encoder, objects [] enc.outBuffer = enc.outBuffer[4:] } for _, obj := range objects { - enc.offset += 4 + obj.SizeSSZ(false) + enc.offset += 4 + obj.SizeSSZ(enc.sizer, false) } } +// EncodeSliceOfDynamicObjectsOffsetOnFork serializes a dynamic slice of dynamic +// ssz objects if present in a fork. +func EncodeSliceOfDynamicObjectsOffsetOnFork[T DynamicObject](enc *Encoder, objects []T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfDynamicObjectsOffset(enc, objects) +} + // EncodeSliceOfDynamicObjectsContent is the lazy data writer for EncodeSliceOfDynamicObjectsOffset. func EncodeSliceOfDynamicObjectsContent[T DynamicObject](enc *Encoder, objects []T) { enc.offsetDynamics(uint32(4 * len(objects))) @@ -638,14 +1025,14 @@ func EncodeSliceOfDynamicObjectsContent[T DynamicObject](enc *Encoder, objects [ binary.LittleEndian.PutUint32(enc.buf[:4], enc.offset) _, enc.err = enc.outWriter.Write(enc.buf[:4]) - enc.offset += obj.SizeSSZ(false) + enc.offset += obj.SizeSSZ(enc.sizer, false) } } else { for _, obj := range objects { binary.LittleEndian.PutUint32(enc.outBuffer, enc.offset) enc.outBuffer = enc.outBuffer[4:] - enc.offset += obj.SizeSSZ(false) + enc.offset += obj.SizeSSZ(enc.sizer, false) } } // Inline: @@ -657,13 +1044,53 @@ func EncodeSliceOfDynamicObjectsContent[T DynamicObject](enc *Encoder, objects [ if enc.err != nil { return } - enc.offsetDynamics(obj.SizeSSZ(true)) + enc.offsetDynamics(obj.SizeSSZ(enc.sizer, true)) obj.DefineSSZ(enc.codec) } } +// EncodeSliceOfDynamicObjectsContentOnFork is the lazy data writer for EncodeSliceOfDynamicObjectsOffsetOnFork. 
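// For dynamic elements no such shortcut exists: each element contributes a
// 4-byte offset word in the slice header plus its full encoding, hence the
// per-element loop. For example, two objects whose SizeSSZ(sizer, false)
// returns 100 and 150 advance the offset by (4+100)+(4+150) = 258 bytes:
//
//	for _, obj := range objects {
//	    enc.offset += 4 + obj.SizeSSZ(enc.sizer, false)
//	}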
+func EncodeSliceOfDynamicObjectsContentOnFork[T DynamicObject](enc *Encoder, objects []T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfDynamicObjectsContent(enc, objects) +} + // offsetDynamics marks the item being encoded as a dynamic type, setting the starting // offset for the dynamic fields. func (enc *Encoder) offsetDynamics(offset uint32) { enc.offset = offset } + +// encodeZeroes is a helper to append a bunch of zero values to the output stream. +// This method is mainly used for encoding uninitialized fields without allocating +// them beforehand. +func (enc *Encoder) encodeZeroes(size int) { + if enc.outWriter != nil { + if enc.err != nil { + return + } + for size >= 32 { + if _, enc.err = enc.outWriter.Write(uint256Zero); enc.err != nil { + return + } + size -= 32 + } + if size > 0 { + _, enc.err = enc.outWriter.Write(uint256Zero[:size]) + } + } else { + for size >= 32 { + copy(enc.outBuffer, uint256Zero) + enc.outBuffer = enc.outBuffer[32:] + size -= 32 + } + if size > 0 { + copy(enc.outBuffer, uint256Zero[:size]) + enc.outBuffer = enc.outBuffer[size:] + } + } +} diff --git a/example_asymmetric_test.go b/example_asymmetric_test.go index a9028c5..d50ccf8 100644 --- a/example_asymmetric_test.go +++ b/example_asymmetric_test.go @@ -17,7 +17,7 @@ type WithdrawalAsym struct { Amount uint64 `ssz-size:"8"` } -func (w *WithdrawalAsym) SizeSSZ() uint32 { return 44 } +func (w *WithdrawalAsym) SizeSSZ(siz *ssz.Sizer) uint32 { return 44 } func (w *WithdrawalAsym) DefineSSZ(codec *ssz.Codec) { codec.DefineEncoder(func(enc *ssz.Encoder) { @@ -41,11 +41,11 @@ func (w *WithdrawalAsym) DefineSSZ(codec *ssz.Codec) { } func ExampleEncodeAsymmetricObject() { - blob := make([]byte, (*WithdrawalAsym)(nil).SizeSSZ()) - if err := ssz.EncodeToBytes(blob, new(WithdrawalAsym)); err != nil { + blob := make([]byte, ssz.Size((*WithdrawalAsym)(nil), ssz.ForkUnknown)) + if err := ssz.EncodeToBytes(blob, new(WithdrawalAsym), ssz.ForkUnknown); err != nil { panic(err) } - hash := ssz.HashSequential(new(WithdrawalAsym)) + hash := ssz.HashSequential(new(WithdrawalAsym), ssz.ForkUnknown) fmt.Printf("ssz: %#x\nhash: %#x\n", blob, hash) // Output: diff --git a/example_checked_test.go b/example_checked_test.go index 453601c..bca5f0f 100644 --- a/example_checked_test.go +++ b/example_checked_test.go @@ -17,7 +17,7 @@ type WithdrawalChecked struct { Amount uint64 `ssz-size:"8"` } -func (w *WithdrawalChecked) SizeSSZ() uint32 { return 44 } +func (w *WithdrawalChecked) SizeSSZ(siz *ssz.Sizer) uint32 { return 44 } func (w *WithdrawalChecked) DefineSSZ(codec *ssz.Codec) { ssz.DefineUint64(codec, &w.Index) // Field (0) - Index - 8 bytes @@ -30,7 +30,7 @@ func ExampleDecodeCheckedObject() { blob := make([]byte, 44) obj := new(WithdrawalChecked) - if err := ssz.DecodeFromBytes(blob, obj); err != nil { + if err := ssz.DecodeFromBytes(blob, obj, ssz.ForkUnknown); err != nil { panic(err) } fmt.Printf("obj: %#x\n", obj) diff --git a/example_dynamic_test.go b/example_dynamic_test.go index 32b1d8c..1f244bc 100644 --- a/example_dynamic_test.go +++ b/example_dynamic_test.go @@ -32,16 +32,16 @@ type ExecutionPayload struct { Withdrawals []*Withdrawal `ssz-max:"16"` } -func (e *ExecutionPayload) SizeSSZ(fixed bool) uint32 { +func (e *ExecutionPayload) SizeSSZ(siz *ssz.Sizer, fixed bool) uint32 { // Start out with 
the static size size := uint32(512) if fixed { return size } // Append all the dynamic sizes - size += ssz.SizeDynamicBytes(e.ExtraData) // Field (10) - ExtraData - max 32 bytes (not enforced) - size += ssz.SizeSliceOfDynamicBytes(e.Transactions) // Field (13) - Transactions - max 1048576 items, 1073741824 bytes each (not enforced) - size += ssz.SizeSliceOfStaticObjects(e.Withdrawals) // Field (14) - Withdrawals - max 16 items, 44 bytes each (not enforced) + size += ssz.SizeDynamicBytes(siz, e.ExtraData) // Field (10) - ExtraData - max 32 bytes (not enforced) + size += ssz.SizeSliceOfDynamicBytes(siz, e.Transactions) // Field (13) - Transactions - max 1048576 items, 1073741824 bytes each (not enforced) + size += ssz.SizeSliceOfStaticObjects(siz, e.Withdrawals) // Field (14) - Withdrawals - max 16 items, 44 bytes each (not enforced) return size } @@ -72,8 +72,8 @@ func (e *ExecutionPayload) DefineSSZ(codec *ssz.Codec) { func ExampleEncodeDynamicObject() { obj := new(ExecutionPayload) - blob := make([]byte, obj.SizeSSZ(false)) - if err := ssz.EncodeToBytes(blob, obj); err != nil { + blob := make([]byte, ssz.Size(obj, ssz.ForkUnknown)) + if err := ssz.EncodeToBytes(blob, obj, ssz.ForkUnknown); err != nil { panic(err) } fmt.Printf("ssz: %#x\n", blob) diff --git a/example_static_test.go b/example_static_test.go index f433baa..4486222 100644 --- a/example_static_test.go +++ b/example_static_test.go @@ -20,7 +20,7 @@ type Withdrawal struct { Amount uint64 `ssz-size:"8"` } -func (w *Withdrawal) SizeSSZ() uint32 { return 44 } +func (w *Withdrawal) SizeSSZ(siz *ssz.Sizer) uint32 { return 44 } func (w *Withdrawal) DefineSSZ(codec *ssz.Codec) { ssz.DefineUint64(codec, &w.Index) // Field (0) - Index - 8 bytes @@ -31,10 +31,10 @@ func (w *Withdrawal) DefineSSZ(codec *ssz.Codec) { func ExampleEncodeStaticObject() { out := new(bytes.Buffer) - if err := ssz.EncodeToStream(out, new(Withdrawal)); err != nil { + if err := ssz.EncodeToStream(out, new(Withdrawal), ssz.ForkUnknown); err != nil { panic(err) } - hash := ssz.HashSequential(new(Withdrawal)) + hash := ssz.HashSequential(new(Withdrawal), ssz.ForkUnknown) fmt.Printf("ssz: %#x\nhash: %#x\n", out, hash) // Output: diff --git a/forks.go b/forks.go new file mode 100644 index 0000000..f131841 --- /dev/null +++ b/forks.go @@ -0,0 +1,89 @@ +// ssz: Go Simple Serialize (SSZ) codec library +// Copyright 2024 ssz Authors +// SPDX-License-Identifier: BSD-3-Clause + +package ssz + +// Fork is an enum with all the hard forks that Ethereum mainnet went through, +// which can be used to multiplex monolith types that can encode/decode across +// a range of forks, not just for one specific fork. +// +// These enums are only meaningful in relation to one another, but are completely +// meaningless numbers otherwise. Do not persist them across code versions.
+type Fork int + +const ( + ForkUnknown Fork = iota // Placeholder if forks haven't been specified (must be index 0) + + ForkFrontier // https://ethereum.org/en/history/#frontier + ForkHomestead // https://ethereum.org/en/history/#homestead + ForkDAO // https://ethereum.org/en/history/#dao-fork + ForkTangerine // https://ethereum.org/en/history/#tangerine-whistle + ForkSpurious // https://ethereum.org/en/history/#spurious-dragon + ForkByzantium // https://ethereum.org/en/history/#byzantium + ForkConstantinople // https://ethereum.org/en/history/#constantinople + ForkIstanbul // https://ethereum.org/en/history/#istanbul + ForkMuir // https://ethereum.org/en/history/#muir-glacier + ForkPhase0 // https://ethereum.org/en/history/#beacon-chain-genesis + ForkBerlin // https://ethereum.org/en/history/#berlin + ForkLondon // https://ethereum.org/en/history/#london + ForkAltair // https://ethereum.org/en/history/#altair + ForkArrow // https://ethereum.org/en/history/#arrow-glacier + ForkGray // https://ethereum.org/en/history/#gray-glacier + ForkBellatrix // https://ethereum.org/en/history/#bellatrix + ForkParis // https://ethereum.org/en/history/#paris + ForkShapella // https://ethereum.org/en/history/#shapella + ForkDencun // https://ethereum.org/en/history/#dencun + ForkPectra // https://ethereum.org/en/history/#pectra + + ForkFuture // Use this for specifying future features (must be last index, no gaps) + + ForkMerge = ForkParis // Common alias for Paris + ForkShanghai = ForkShapella // EL alias for Shapella + ForkCapella = ForkShapella // CL alias for Shapella + ForkCancun = ForkDencun // EL alias for Dencun + ForkDeneb = ForkDencun // CL alias for Dencun + ForkPrague = ForkPectra // EL alias for Pectra + ForkElectra = ForkPectra // CL alias for Pectra +) + +// ForkMapping maps fork names to fork values. This is used internally by the +// ssz codec generator to convert tags to values. +var ForkMapping = map[string]Fork{ + "unknown": ForkUnknown, + "frontier": ForkFrontier, + "homestead": ForkHomestead, + "dao": ForkDAO, + "tangerine": ForkTangerine, + "spurious": ForkSpurious, + "byzantium": ForkByzantium, + "constantinople": ForkConstantinople, + "istanbul": ForkIstanbul, + "muir": ForkMuir, + "phase0": ForkPhase0, + "berlin": ForkBerlin, + "london": ForkLondon, + "altair": ForkAltair, + "arrow": ForkArrow, + "gray": ForkGray, + "bellatrix": ForkBellatrix, + "paris": ForkParis, + "merge": ForkMerge, + "shapella": ForkShapella, + "shanghai": ForkShanghai, + "capella": ForkCapella, + "dencun": ForkDencun, + "cancun": ForkCancun, + "deneb": ForkDeneb, + "pectra": ForkPectra, + "prague": ForkPrague, + "electra": ForkElectra, + "future": ForkFuture, +} + +// ForkFilter can be used by the XXXOnFork methods inside monolithic types to +// define certain fields appearing only in certain forks. +type ForkFilter struct { + Added Fork + Removed Fork +} diff --git a/generics.go b/generics.go index 8b34f6e..24f558f 100644 --- a/generics.go +++ b/generics.go @@ -30,8 +30,8 @@ type newableDynamicObject[U any] interface { // generics compiler that it cannot represent arrays of arbitrary sizes with // one shorthand notation. 
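// ForkFilter expresses a half-open window: a field is encoded, decoded and
// hashed for forks in [Added, Removed), and a zero Removed leaves the field
// live indefinitely. For instance, a field introduced in Dencun and retired
// in Pectra would be gated as:
//
//	filter := ssz.ForkFilter{Added: ssz.ForkDencun, Removed: ssz.ForkPectra}
//	// inactive below ForkDencun, active in [ForkDencun, ForkPectra),
//	// inactive again from ForkPectra onwards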
type commonBytesLengths interface { - // fork | address | verkle-stem | hash | pubkey | committee | signature | bloom - ~[4]byte | ~[20]byte | ~[31]byte | ~[32]byte | ~[48]byte | ~[64]byte | ~[96]byte | ~[256]byte + // fork | nonce | address | verkle-stem | hash | pubkey | committee | signature | bloom + ~[4]byte | ~[8]byte | ~[20]byte | ~[31]byte | ~[32]byte | ~[48]byte | ~[64]byte | ~[96]byte | ~[256]byte } // commonUint64sLengths is a generic type whose purpose is to permit that fixed- diff --git a/hasher.go b/hasher.go index 51fb530..51d3b8a 100644 --- a/hasher.go +++ b/hasher.go @@ -9,6 +9,7 @@ import ( "encoding/binary" "math/big" bitops "math/bits" + "reflect" "runtime" "unsafe" @@ -27,6 +28,7 @@ const concurrencyThreshold = 65536 // Some helpers to avoid occasional allocations var ( + hasherZeroChunk = [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} hasherBoolFalse = [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} hasherBoolTrue = [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} @@ -52,7 +54,9 @@ type Hasher struct { groups []groupStats // Hashing progress tracking for the chunk groups layer int // Layer depth being hasher now - codec *Codec // Self-referencing to pass DefineSSZ calls through (API trick) + codec *Codec // Self-referencing to pass DefineSSZ calls through (API trick) + sizer *Sizer // Self-referencing to pass SizeSSZ call through (API trick) + bitbuf []byte // Bitlist conversion buffer } @@ -73,6 +77,22 @@ func HashBool[T ~bool](h *Hasher, v T) { } } +// HashBoolPointerOnFork hashes a boolean if present in a fork. +// +// Note, a nil pointer is hashed as zero. +func HashBoolPointerOnFork[T ~bool](h *Hasher, v *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + if v == nil { + HashBool[bool](h, false) + return + } + HashBool(h, *v) +} + // HashUint8 hashes a uint8. func HashUint8[T ~uint8](h *Hasher, n T) { var buffer [32]byte @@ -80,6 +100,22 @@ func HashUint8[T ~uint8](h *Hasher, n T) { h.insertChunk(buffer, 0) } +// HashUint8PointerOnFork hashes a uint8 if present in a fork. +// +// Note, a nil pointer is hashed as zero. +func HashUint8PointerOnFork[T ~uint8](h *Hasher, n *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + if n == nil { + HashUint8[uint8](h, 0) + return + } + HashUint8(h, *n) +} + // HashUint16 hashes a uint16. func HashUint16[T ~uint16](h *Hasher, n T) { var buffer [32]byte @@ -87,6 +123,22 @@ func HashUint16[T ~uint16](h *Hasher, n T) { h.insertChunk(buffer, 0) } +// HashUint16PointerOnFork hashes a uint16 if present in a fork. +// +// Note, a nil pointer is hashed as zero. 
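All the OnFork helpers added in this file share the same recurring guard. Spelled positively (a paraphrase for clarity, not a library API), a field gated by filter is live in fork iff:

fork >= filter.Added && (filter.Removed == ssz.ForkUnknown || fork < filter.Removed)

That is, the window is half-open, [Added, Removed), and the zero Removed value means the field never expires; ForkFilter{Added: ForkShapella, Removed: ForkDencun}, for example, would keep a field live on Shapella only.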
+func HashUint16PointerOnFork[T ~uint16](h *Hasher, n *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + if n == nil { + HashUint16[uint16](h, 0) + return + } + HashUint16(h, *n) +} + // HashUint32 hashes a uint32. func HashUint32[T ~uint32](h *Hasher, n T) { var buffer [32]byte @@ -94,6 +146,22 @@ func HashUint32[T ~uint32](h *Hasher, n T) { h.insertChunk(buffer, 0) } +// HashUint32PointerOnFork hashes a uint32 if present in a fork. +// +// Note, a nil pointer is hashed as zero. +func HashUint32PointerOnFork[T ~uint32](h *Hasher, n *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + if n == nil { + HashUint32[uint32](h, 0) + return + } + HashUint32(h, *n) +} + // HashUint64 hashes a uint64. func HashUint64[T ~uint64](h *Hasher, n T) { var buffer [32]byte @@ -101,6 +169,22 @@ func HashUint64[T ~uint64](h *Hasher, n T) { h.insertChunk(buffer, 0) } +// HashUint64PointerOnFork hashes a uint64 if present in a fork. +// +// Note, a nil pointer is hashed as zero. +func HashUint64PointerOnFork[T ~uint64](h *Hasher, n *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + if n == nil { + HashUint64[uint64](h, 0) + return + } + HashUint64(h, *n) +} + // HashUint256 hashes a uint256. // // Note, a nil pointer is hashed as zero. @@ -112,9 +196,22 @@ func HashUint256(h *Hasher, n *uint256.Int) { h.insertChunk(buffer, 0) } +// HashUint256OnFork hashes a uint256 if present in a fork. +// +// Note, a nil pointer is hashed as zero. +func HashUint256OnFork(h *Hasher, n *uint256.Int, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashUint256(h, n) +} + // HashUint256BigInt hashes a big.Int as uint256. // // Note, a nil pointer is hashed as zero. +// Note, an overflow will be silently dropped. func HashUint256BigInt(h *Hasher, n *big.Int) { var buffer [32]byte if n != nil { @@ -125,6 +222,19 @@ func HashUint256BigInt(h *Hasher, n *big.Int) { h.insertChunk(buffer, 0) } +// HashUint256BigIntOnFork hashes a big.Int as uint256 if present in a fork. +// +// Note, a nil pointer is hashed as zero. +// Note, an overflow will be silently dropped. +func HashUint256BigIntOnFork(h *Hasher, n *big.Int, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashUint256BigInt(h, n) +} + // HashStaticBytes hashes a static binary blob. 
//
// The blob is passed by pointer to avoid high stack copy costs and a potential
@@ -135,6 +245,26 @@ func HashStaticBytes[T commonBytesLengths](h *Hasher, blob *T) {
	h.hashBytes(unsafe.Slice(&(*blob)[0], len(*blob)))
}
+// HashStaticBytesPointerOnFork hashes a static binary blob if present in a fork.
+//
+// Note, a nil pointer is hashed as an empty binary blob.
+func HashStaticBytesPointerOnFork[T commonBytesLengths](h *Hasher, blob *T, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard hasher
+	if blob == nil {
+		// Go generics cannot do len(T{}), so we either allocate and bear the GC
+		// costs, or we use reflect. Both are kind of crappy.
+		//
+		// https://github.com/golang/go/issues/69100
+		h.hashBytesEmpty(reflect.TypeFor[T]().Len())
+		return
+	}
+	HashStaticBytes(h, blob)
+}
+
// HashCheckedStaticBytes hashes a static binary blob.
func HashCheckedStaticBytes(h *Hasher, blob []byte) {
	h.hashBytes(blob)
@@ -147,20 +277,60 @@ func HashDynamicBytes(h *Hasher, blob []byte, maxSize uint64) {
	h.ascendMixinLayer(uint64(len(blob)), (maxSize+31)/32)
}
+// HashDynamicBytesOnFork hashes a dynamic binary blob if present in a fork.
+func HashDynamicBytesOnFork(h *Hasher, blob []byte, maxSize uint64, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard hasher
+	HashDynamicBytes(h, blob, maxSize)
+}
+
// HashStaticObject hashes a static ssz object.
-func HashStaticObject(h *Hasher, obj StaticObject) {
+func HashStaticObject[T newableStaticObject[U], U any](h *Hasher, obj T) {
	h.descendLayer()
+	if obj == nil {
+		// If the object is nil, pull up its zero value. This will be very slow,
+		// but it should not happen in production, mostly only during tests.
+		obj = zeroValueStatic[T, U]()
+	}
	obj.DefineSSZ(h.codec)
	h.ascendLayer(0)
}
+// HashStaticObjectOnFork hashes a static ssz object if present in a fork.
+func HashStaticObjectOnFork[T newableStaticObject[U], U any](h *Hasher, obj T, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard hasher
+	HashStaticObject(h, obj)
+}
+
// HashDynamicObject hashes a dynamic ssz object.
-func HashDynamicObject(h *Hasher, obj DynamicObject) {
+func HashDynamicObject[T newableDynamicObject[U], U any](h *Hasher, obj T) {
	h.descendLayer()
+	if obj == nil {
+		// If the object is nil, pull up its zero value. This will be very slow,
+		// but it should not happen in production, mostly only during tests.
+		obj = zeroValueDynamic[T, U]()
+	}
	obj.DefineSSZ(h.codec)
	h.ascendLayer(0)
}
+// HashDynamicObjectOnFork hashes a dynamic ssz object if present in a fork.
+func HashDynamicObjectOnFork[T newableDynamicObject[U], U any](h *Hasher, obj T, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard hasher
+	HashDynamicObject(h, obj)
+}
+
// HashArrayOfBits hashes a static array of (packed) bits.
func HashArrayOfBits[T commonBitsLengths](h *Hasher, bits *T) {
	// The code below should have used `*bits[:]`, alas Go's generics compiler
@@ -168,8 +338,34 @@ func HashArrayOfBits[T commonBitsLengths](h *Hasher, bits *T) {
	h.hashBytes(unsafe.Slice(&(*bits)[0], len(*bits)))
}
+// HashArrayOfBitsPointerOnFork hashes a static array of (packed) bits if present
+// in a fork.
+func HashArrayOfBitsPointerOnFork[T commonBitsLengths](h *Hasher, bits *T, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard hasher
+	if bits == nil {
+		// Go generics cannot do len(T{}), so we either allocate and bear the GC
+		// costs, or we use reflect. Both are kind of crappy.
+		//
+		// https://github.com/golang/go/issues/69100
+		h.hashBytesEmpty(reflect.TypeFor[T]().Len())
+		return
+	}
+	HashArrayOfBits(h, bits)
+}
+
// HashSliceOfBits hashes a dynamic slice of (packed) bits.
+//
+// Note, a nil slice of bits is hashed as an empty bit list.
func HashSliceOfBits(h *Hasher, bits bitfield.Bitlist, maxBits uint64) {
+	// If the slice of bits is nil (i.e. uninitialized), hash it as empty
+	if bits == nil {
+		HashSliceOfBits(h, bitlistZero, maxBits)
+		return
+	}
	// Parse the bit-list into a hashable representation
	var (
		msb = uint8(bitops.Len8(bits[len(bits)-1])) - 1
@@ -197,6 +393,19 @@ func HashSliceOfBits(h *Hasher, bits bitfield.Bitlist, maxBits uint64) {
	h.ascendMixinLayer(size, (maxBits+255)/256)
}
+// HashSliceOfBitsOnFork hashes a dynamic slice of (packed) bits if present in a
+// fork.
+//
+// Note, a nil slice of bits is hashed as an empty bit list.
+func HashSliceOfBitsOnFork(h *Hasher, bits bitfield.Bitlist, maxBits uint64, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard hasher
+	HashSliceOfBits(h, bits, maxBits)
+}
+
// HashArrayOfUint64s hashes a static array of uint64s.
//
// The reason the ns is passed by pointer and not by value is to prevent it from
@@ -228,6 +437,23 @@ func HashArrayOfUint64s[T commonUint64sLengths](h *Hasher, ns *T) {
	h.ascendLayer(0)
}
+// HashArrayOfUint64sPointerOnFork hashes a static array of uint64s if present
+// in a fork.
+func HashArrayOfUint64sPointerOnFork[T commonUint64sLengths](h *Hasher, ns *T, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard hasher
+	if ns == nil {
+		h.descendLayer()
+		h.insertBlobChunksEmpty(reflect.TypeFor[T]().Len() * 8)
+		h.ascendLayer(0)
+		return
+	}
+	HashArrayOfUint64s(h, ns)
+}
+
// HashSliceOfUint64s hashes a dynamic slice of uint64s.
func HashSliceOfUint64s[T ~uint64](h *Hasher, ns []T, maxItems uint64) {
	h.descendMixinLayer()
@@ -253,6 +479,16 @@ func HashSliceOfUint64s[T ~uint64](h *Hasher, ns []T, maxItems uint64) {
	h.ascendMixinLayer(uint64(len(ns)), (maxItems*8+31)/32)
}
+// HashSliceOfUint64sOnFork hashes a dynamic slice of uint64s if present in a fork.
+func HashSliceOfUint64sOnFork[T ~uint64](h *Hasher, ns []T, maxItems uint64, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashSliceOfUint64s(h, ns, maxItems) +} + // HashArrayOfStaticBytes hashes a static array of static binary blobs. // // The reason the blobs is passed by pointer and not by value is to prevent it @@ -297,6 +533,17 @@ func HashSliceOfStaticBytes[T commonBytesLengths](h *Hasher, blobs []T, maxItems h.ascendMixinLayer(uint64(len(blobs)), maxItems) } +// HashSliceOfStaticBytesOnFork hashes a dynamic slice of static binary blobs if +// present in a fork. +func HashSliceOfStaticBytesOnFork[T commonBytesLengths](h *Hasher, blobs []T, maxItems uint64, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashSliceOfStaticBytes(h, blobs, maxItems) +} + // HashSliceOfDynamicBytes hashes a dynamic slice of dynamic binary blobs. func HashSliceOfDynamicBytes(h *Hasher, blobs [][]byte, maxItems uint64, maxSize uint64) { h.descendMixinLayer() @@ -308,13 +555,24 @@ func HashSliceOfDynamicBytes(h *Hasher, blobs [][]byte, maxItems uint64, maxSize h.ascendMixinLayer(uint64(len(blobs)), maxItems) } +// HashSliceOfDynamicBytesOnFork hashes a dynamic slice of dynamic binary blobs +// if present in a fork. +func HashSliceOfDynamicBytesOnFork(h *Hasher, blobs [][]byte, maxItems uint64, maxSize uint64, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashSliceOfDynamicBytes(h, blobs, maxItems, maxSize) +} + // HashSliceOfStaticObjects hashes a dynamic slice of static ssz objects. func HashSliceOfStaticObjects[T StaticObject](h *Hasher, objects []T, maxItems uint64) { h.descendMixinLayer() defer h.ascendMixinLayer(uint64(len(objects)), maxItems) // If threading is disabled, or hashing nothing, do it sequentially - if !h.threads || len(objects) == 0 || len(objects)*int(Size(objects[0])) < concurrencyThreshold { + if !h.threads || len(objects) == 0 || len(objects)*int(Size(objects[0], h.codec.fork)) < concurrencyThreshold { for _, obj := range objects { h.descendLayer() obj.DefineSSZ(h.codec) @@ -367,6 +625,17 @@ func HashSliceOfStaticObjects[T StaticObject](h *Hasher, objects []T, maxItems u } } +// HashSliceOfStaticObjectsOnFork hashes a dynamic slice of static ssz objects +// if present in a fork. +func HashSliceOfStaticObjectsOnFork[T StaticObject](h *Hasher, objects []T, maxItems uint64, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashSliceOfStaticObjects(h, objects, maxItems) +} + // HashSliceOfDynamicObjects hashes a dynamic slice of dynamic ssz objects. 
func HashSliceOfDynamicObjects[T DynamicObject](h *Hasher, objects []T, maxItems uint64) { h.descendMixinLayer() @@ -378,6 +647,17 @@ func HashSliceOfDynamicObjects[T DynamicObject](h *Hasher, objects []T, maxItems h.ascendMixinLayer(uint64(len(objects)), maxItems) } +// HashSliceOfDynamicObjectsOnFork hashes a dynamic slice of dynamic ssz objects +// if present in a fork. +func HashSliceOfDynamicObjectsOnFork[T DynamicObject](h *Hasher, objects []T, maxItems uint64, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashSliceOfDynamicObjects(h, objects, maxItems) +} + // hashBytes either appends the blob to the hasher's scratch space if it's small // enough to fit into a single chunk, or chunks it up and merkleizes it first. func (h *Hasher) hashBytes(blob []byte) { @@ -394,6 +674,21 @@ func (h *Hasher) hashBytes(blob []byte) { h.ascendLayer(0) } +// hashBytesEmpty is analogous to hashBytes, but where the input is all zeroes, +// so it's passed by length, not by content. This allows hashing zero pointers +// without allocating them first. +func (h *Hasher) hashBytesEmpty(size int) { + // If the blob is small, accumulate as a single chunk + if size <= 32 { + h.insertChunk(hasherZeroChunk, 0) + return + } + // Otherwise hash it as its own tree + h.descendLayer() + h.insertBlobChunksEmpty(size) + h.ascendLayer(0) +} + // insertChunk adds a chunk to the accumulators, collapsing matching pairs. func (h *Hasher) insertChunk(chunk [32]byte, depth int) { // Insert the chunk into the accumulator @@ -467,6 +762,16 @@ func (h *Hasher) insertBlobChunks(blob []byte) { } } +// insertBlobChunksEmpty is analogous to insertBlobChunks, but where the input +// is all zeroes, so it's passed by length, not by content. This allows hashing +// zero pointers without allocating them first. +func (h *Hasher) insertBlobChunksEmpty(size int) { + for size > 0 { // will insert a full chunk for the last segment + h.insertChunk(hasherZeroChunk, 0) + size -= 32 + } +} + // descendLayer starts a new hashing layer, acting as a barrier to prevent the // chunks from being collapsed into previous pending ones. func (h *Hasher) descendLayer() { diff --git a/sizer.go b/sizer.go index 892a5bb..9f30898 100644 --- a/sizer.go +++ b/sizer.go @@ -4,35 +4,57 @@ package ssz -import "github.com/prysmaticlabs/go-bitfield" +import ( + "github.com/prysmaticlabs/go-bitfield" +) + +// Sizer is an SSZ static and dynamic size computer. +type Sizer struct { + codec *Codec // Self-referencing to have access to fork contexts +} + +// Fork retrieves the current fork (if any) that the sizer is operating in. +func (siz *Sizer) Fork() Fork { + return siz.codec.fork +} // SizeDynamicBytes returns the serialized size of the dynamic part of a dynamic // blob. -func SizeDynamicBytes(blobs []byte) uint32 { +func SizeDynamicBytes(siz *Sizer, blobs []byte) uint32 { return uint32(len(blobs)) } // SizeSliceOfBits returns the serialized size of the dynamic part of a slice of // bits. -func SizeSliceOfBits(bits bitfield.Bitlist) uint32 { - return uint32(len(bits)) +// +// Note, a nil slice of bits is sized as an empty bit list. 
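With the Sizer threaded through, hand-written dynamic types forward it into the size helpers. A minimal sketch under the new signatures (the type and its fields are hypothetical):

type blobTxSketch struct {
	Nonce uint64
	Data  []byte
}

func (t *blobTxSketch) SizeSSZ(siz *ssz.Sizer, fixed bool) (size uint32) {
	size = 8 + 4 // 8-byte Nonce, plus the 4-byte offset of the dynamic Data
	if fixed {
		return size
	}
	size += ssz.SizeDynamicBytes(siz, t.Data) // dynamic tail of Data
	return size
}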
+func SizeSliceOfBits(siz *Sizer, bits bitfield.Bitlist) uint32 {
+	if bits != nil {
+		return uint32(len(bits))
+	}
+	return uint32(len(bitlistZero))
}
// SizeSliceOfUint64s returns the serialized size of the dynamic part of a dynamic
// list of uint64s.
-func SizeSliceOfUint64s[T ~uint64](ns []T) uint32 {
+func SizeSliceOfUint64s[T ~uint64](siz *Sizer, ns []T) uint32 {
	return uint32(len(ns)) * 8
}
// SizeDynamicObject returns the serialized size of the dynamic part of a dynamic
// object.
-func SizeDynamicObject[T DynamicObject](obj T) uint32 {
-	return obj.SizeSSZ(false)
+func SizeDynamicObject[T newableDynamicObject[U], U any](siz *Sizer, obj T) uint32 {
+	if obj == nil {
+		// If the object is nil, pull up its zero value. This will be very slow,
+		// but it should not happen in production, mostly only during tests.
+		obj = zeroValueDynamic[T, U]()
+	}
+	return obj.SizeSSZ(siz, false)
}
// SizeSliceOfStaticBytes returns the serialized size of the dynamic part of a dynamic
// list of static blobs.
-func SizeSliceOfStaticBytes[T commonBytesLengths](blobs []T) uint32 {
+func SizeSliceOfStaticBytes[T commonBytesLengths](siz *Sizer, blobs []T) uint32 {
	if len(blobs) == 0 {
		return 0
	}
@@ -41,7 +63,7 @@ func SizeSliceOfStaticBytes[T commonBytesLengths](blobs []T) uint32 {
// SizeSliceOfDynamicBytes returns the serialized size of the dynamic part of a dynamic
// list of dynamic blobs.
-func SizeSliceOfDynamicBytes(blobs [][]byte) uint32 {
+func SizeSliceOfDynamicBytes(siz *Sizer, blobs [][]byte) uint32 {
	var size uint32
	for _, blob := range blobs {
		size += uint32(4 + len(blob)) // 4-byte offset + dynamic data later
@@ -51,19 +73,19 @@ func SizeSliceOfDynamicBytes(blobs [][]byte) uint32 {
// SizeSliceOfStaticObjects returns the serialized size of the dynamic part of a dynamic
// list of static objects.
-func SizeSliceOfStaticObjects[T StaticObject](objects []T) uint32 {
+func SizeSliceOfStaticObjects[T StaticObject](siz *Sizer, objects []T) uint32 {
	if len(objects) == 0 {
		return 0
	}
-	return uint32(len(objects)) * objects[0].SizeSSZ()
+	return uint32(len(objects)) * objects[0].SizeSSZ(siz)
}
// SizeSliceOfDynamicObjects returns the serialized size of the dynamic part of
// a dynamic list of dynamic objects.
-func SizeSliceOfDynamicObjects[T DynamicObject](objects []T) uint32 {
+func SizeSliceOfDynamicObjects[T DynamicObject](siz *Sizer, objects []T) uint32 {
	var size uint32
	for _, obj := range objects {
-		size += 4 + obj.SizeSSZ(false) // 4-byte offset + dynamic data later
+		size += 4 + obj.SizeSSZ(siz, false) // 4-byte offset + dynamic data later
	}
	return size
}
diff --git a/ssz.go b/ssz.go
index 9beed9c..42acef2 100644
--- a/ssz.go
+++ b/ssz.go
@@ -29,7 +29,7 @@ type StaticObject interface {
	// Note, StaticObject.SizeSSZ and DynamicObject.SizeSSZ deliberately clash
	// to allow the compiler to detect placing one or the other in reversed data
	// slots on an SSZ containers.
-	SizeSSZ() uint32
+	SizeSSZ(siz *Sizer) uint32
}
// DynamicObject defines the methods a type needs to implement to be used as a
@@ -43,7 +43,7 @@ type DynamicObject interface {
	// Note, StaticObject.SizeSSZ and DynamicObject.SizeSSZ deliberately clash
	// to allow the compiler to detect placing one or the other in reversed data
	// slots on an SSZ containers.
- SizeSSZ(fixed bool) uint32 + SizeSSZ(siz *Sizer, fixed bool) uint32 } // encoderPool is a pool of SSZ encoders to reuse some tiny internal helpers @@ -52,6 +52,7 @@ var encoderPool = sync.Pool{ New: func() any { codec := &Codec{enc: new(Encoder)} codec.enc.codec = codec + codec.enc.sizer = &Sizer{codec: codec} return codec }, } @@ -62,6 +63,7 @@ var decoderPool = sync.Pool{ New: func() any { codec := &Codec{dec: new(Decoder)} codec.dec.codec = codec + codec.dec.sizer = &Sizer{codec: codec} return codec }, } @@ -72,66 +74,85 @@ var hasherPool = sync.Pool{ New: func() any { codec := &Codec{has: new(Hasher)} codec.has.codec = codec + codec.has.sizer = &Sizer{codec: codec} return codec }, } +// sizerPool is a pool of SSZ sizers to reuse some tiny internal helpers +// without hitting Go's GC constantly. +var sizerPool = sync.Pool{ + New: func() any { + return &Sizer{codec: new(Codec)} + }, +} + // EncodeToStream serializes the object into a data stream. Do not use this // method with a bytes.Buffer to write into a []byte slice, as that will do // double the byte copying. For that use case, use EncodeToBytes instead. -func EncodeToStream(w io.Writer, obj Object) error { +func EncodeToStream(w io.Writer, obj Object, fork Fork) error { codec := encoderPool.Get().(*Codec) defer encoderPool.Put(codec) - codec.enc.outWriter, codec.enc.err = w, nil + codec.fork, codec.enc.outWriter = fork, w switch v := obj.(type) { case StaticObject: v.DefineSSZ(codec) case DynamicObject: - codec.enc.offsetDynamics(v.SizeSSZ(true)) + codec.enc.offsetDynamics(v.SizeSSZ(codec.enc.sizer, true)) v.DefineSSZ(codec) default: panic(fmt.Sprintf("unsupported type: %T", obj)) } + // Retrieve any errors, zero out the sink and return + err := codec.enc.err + codec.enc.outWriter = nil - return codec.enc.err + codec.enc.err = nil + + return err } // EncodeToBytes serializes the object into a byte buffer. Don't use this method // if you want to then write the buffer into a stream via some writer, as that // would double the memory use for the temporary buffer. For that use case, use // EncodeToStream instead. -func EncodeToBytes(buf []byte, obj Object) error { +func EncodeToBytes(buf []byte, obj Object, fork Fork) error { // Sanity check that we have enough space to serialize into - if size := Size(obj); int(size) > len(buf) { + if size := Size(obj, fork); int(size) > len(buf) { return fmt.Errorf("%w: buffer %d bytes, object %d bytes", ErrBufferTooSmall, len(buf), size) } codec := encoderPool.Get().(*Codec) defer encoderPool.Put(codec) - codec.enc.outBuffer, codec.enc.err = buf, nil + codec.fork, codec.enc.outBuffer = fork, buf switch v := obj.(type) { case StaticObject: v.DefineSSZ(codec) case DynamicObject: - codec.enc.offsetDynamics(v.SizeSSZ(true)) + codec.enc.offsetDynamics(v.SizeSSZ(codec.enc.sizer, true)) v.DefineSSZ(codec) default: panic(fmt.Sprintf("unsupported type: %T", obj)) } + // Retrieve any errors, zero out the sink and return + err := codec.enc.err + codec.enc.outBuffer = nil - return codec.enc.err + codec.enc.err = nil + + return err } // DecodeFromStream parses an object with the given size out of a stream. Do not // use this method with a bytes.Buffer to read from a []byte slice, as that will // double the byte copying. For that use case, use DecodeFromBytes instead. 
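All public entry points now take the target fork explicitly; fork-oblivious types simply pass ssz.ForkUnknown. A migration sketch using the Withdrawal example type updated earlier in this diff:

// Old: ssz.EncodeToStream(out, obj) and ssz.DecodeFromBytes(blob, obj)
// New: the fork rides along with every call
out := new(bytes.Buffer)
if err := ssz.EncodeToStream(out, new(Withdrawal), ssz.ForkUnknown); err != nil {
	panic(err)
}
obj := new(Withdrawal)
if err := ssz.DecodeFromBytes(out.Bytes(), obj, ssz.ForkUnknown); err != nil {
	panic(err)
}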
-func DecodeFromStream(r io.Reader, obj Object, size uint32) error { +func DecodeFromStream(r io.Reader, obj Object, size uint32, fork Fork) error { // Retrieve a new decoder codec and set its data source codec := decoderPool.Get().(*Codec) defer decoderPool.Put(codec) - codec.dec.inReader = r + codec.fork, codec.dec.inReader = fork, r // Start a decoding round with length enforcement in place codec.dec.descendIntoSlot(size) @@ -140,7 +161,7 @@ func DecodeFromStream(r io.Reader, obj Object, size uint32) error { case StaticObject: v.DefineSSZ(codec) case DynamicObject: - codec.dec.startDynamics(v.SizeSSZ(true)) + codec.dec.startDynamics(v.SizeSSZ(codec.dec.sizer, true)) v.DefineSSZ(codec) codec.dec.flushDynamics() default: @@ -161,7 +182,7 @@ func DecodeFromStream(r io.Reader, obj Object, size uint32) error { // if you want to first read the buffer from a stream via some reader, as that // would double the memory use for the temporary buffer. For that use case, use // DecodeFromStream instead. -func DecodeFromBytes(blob []byte, obj Object) error { +func DecodeFromBytes(blob []byte, obj Object, fork Fork) error { // Reject decoding from an empty slice if len(blob) == 0 { return io.ErrUnexpectedEOF @@ -170,6 +191,7 @@ func DecodeFromBytes(blob []byte, obj Object) error { codec := decoderPool.Get().(*Codec) defer decoderPool.Put(codec) + codec.fork = fork codec.dec.inBuffer = blob codec.dec.inBufEnd = uintptr(unsafe.Pointer(&blob[0])) + uintptr(len(blob)) @@ -180,7 +202,7 @@ func DecodeFromBytes(blob []byte, obj Object) error { case StaticObject: v.DefineSSZ(codec) case DynamicObject: - codec.dec.startDynamics(v.SizeSSZ(true)) + codec.dec.startDynamics(v.SizeSSZ(codec.dec.sizer, true)) v.DefineSSZ(codec) codec.dec.flushDynamics() default: @@ -201,11 +223,13 @@ func DecodeFromBytes(blob []byte, obj Object) error { // HashSequential computes the ssz merkle root of the object on a single thread. // This is useful for processing small objects with stable runtime and O(1) GC // guarantees. -func HashSequential(obj Object) [32]byte { +func HashSequential(obj Object, fork Fork) [32]byte { codec := hasherPool.Get().(*Codec) defer hasherPool.Put(codec) defer codec.has.Reset() + codec.fork = fork + codec.has.descendLayer() obj.DefineSSZ(codec) codec.has.ascendLayer(0) @@ -220,12 +244,14 @@ func HashSequential(obj Object) [32]byte { // concurrent threads (iff some data segments are large enough to be worth it). This // is useful for processing large objects, but will place a bigger load on your CPU // and GC; and might be more variable timing wise depending on other load. -func HashConcurrent(obj Object) [32]byte { +func HashConcurrent(obj Object, fork Fork) [32]byte { codec := hasherPool.Get().(*Codec) defer hasherPool.Put(codec) defer codec.has.Reset() + codec.fork = fork codec.has.threads = true + codec.has.descendLayer() obj.DefineSSZ(codec) codec.has.ascendLayer(0) @@ -233,18 +259,24 @@ func HashConcurrent(obj Object) [32]byte { if len(codec.has.chunks) != 1 { panic(fmt.Sprintf("unfinished hashing: left %v", codec.has.groups)) } + codec.has.threads = false return codec.has.chunks[0] } // Size retrieves the size of a ssz object, independent if it's a static or a // dynamic one. 
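For monolith types, both the reported size and the merkle root become functions of the fork. A sketch using one of this patch's own test types (whether the numbers actually differ depends on which fields that type gates, which this hunk does not show):

payload := new(types.ExecutionPayloadMonolith)

sizeParis := ssz.Size(payload, ssz.ForkParis)       // layout as of Paris
sizeShanghai := ssz.Size(payload, ssz.ForkShanghai) // layout as of Shanghai, larger if fields were added since
root := ssz.HashSequential(payload, ssz.ForkShanghai)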
-func Size(obj Object) uint32 { +func Size(obj Object, fork Fork) uint32 { + sizer := sizerPool.Get().(*Sizer) + defer sizerPool.Put(sizer) + + sizer.codec.fork = fork + var size uint32 switch v := obj.(type) { case StaticObject: - size = v.SizeSSZ() + size = v.SizeSSZ(sizer) case DynamicObject: - size = v.SizeSSZ(false) + size = v.SizeSSZ(sizer, false) default: panic(fmt.Sprintf("unsupported type: %T", obj)) } diff --git a/tests/consensus_specs_test.go b/tests/consensus_specs_test.go index 95fe40c..77f4540 100644 --- a/tests/consensus_specs_test.go +++ b/tests/consensus_specs_test.go @@ -11,6 +11,7 @@ import ( "math/rand" "os" "path/filepath" + "reflect" "strings" "sync" "testing" @@ -48,13 +49,26 @@ func commonPrefix(a []byte, b []byte) []byte { // TestConsensusSpecBasics iterates over the basic container tests from the // consensus spec tests repo and runs the encoding/decoding/hashing round. func TestConsensusSpecBasics(t *testing.T) { + // Run through all the basic tests as simple types testConsensusSpecBasicType[*types.SingleFieldTestStruct](t, "SingleFieldTestStruct") testConsensusSpecBasicType[*types.SmallTestStruct](t, "SmallTestStruct") testConsensusSpecBasicType[*types.FixedTestStruct](t, "FixedTestStruct") testConsensusSpecBasicType[*types.BitsStruct](t, "BitsStruct") + + // Add monolith variations to the basic types + testConsensusSpecBasicType[*types.SingleFieldTestStructMonolith](t, "SingleFieldTestStruct") + testConsensusSpecBasicType[*types.SmallTestStructMonolith](t, "SmallTestStruct") + testConsensusSpecBasicType[*types.FixedTestStructMonolith](t, "FixedTestStruct") + testConsensusSpecBasicType[*types.BitsStructMonolith](t, "BitsStruct") } func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind string) { + // Sanity check that the zero values can be handled before diving into the + // actual test datasets. This is mostly to catch implementation faults with + // uninitialized field handling. + t.Run(fmt.Sprintf("zero/%s", kind), func(t *testing.T) { + testZeroValue[T, U](t, ssz.ForkUnknown) + }) // Filter out the valid tests for this specific type path := filepath.Join(consensusSpecTestsBasicsRoot, "valid") @@ -95,11 +109,11 @@ func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind st // from yaml and check that too, but hex-in-yaml makes everything // beyond annoying. C'est la vie. 
obj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ))); err != nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkUnknown); err != nil { t.Fatalf("failed to decode SSZ stream: %v", err) } blob := new(bytes.Buffer) - if err := ssz.EncodeToStream(blob, obj); err != nil { + if err := ssz.EncodeToStream(blob, obj, ssz.ForkUnknown); err != nil { t.Fatalf("failed to re-encode SSZ stream: %v", err) } if !bytes.Equal(blob.Bytes(), inSSZ) { @@ -108,11 +122,11 @@ func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind st blob, inSSZ, len(prefix), blob.Bytes()[len(prefix):], inSSZ[len(prefix):]) } obj = T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkUnknown); err != nil { t.Fatalf("failed to decode SSZ buffer: %v", err) } - bin := make([]byte, ssz.Size(obj)) - if err := ssz.EncodeToBytes(bin, obj); err != nil { + bin := make([]byte, ssz.Size(obj, ssz.ForkUnknown)) + if err := ssz.EncodeToBytes(bin, obj, ssz.ForkUnknown); err != nil { t.Fatalf("failed to re-encode SSZ buffer: %v", err) } if !bytes.Equal(bin, inSSZ) { @@ -122,14 +136,14 @@ func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind st } // Encoder/decoder seems to work, check if the size reported by the // encoded object actually matches the encoded stream - if size := ssz.Size(obj); size != uint32(len(inSSZ)) { + if size := ssz.Size(obj, ssz.ForkUnknown); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } - hash := ssz.HashSequential(obj) + hash := ssz.HashSequential(obj, ssz.ForkUnknown) if fmt.Sprintf("%#x", hash) != inRoot.Root { t.Fatalf("sequential merkle root mismatch: have %#x, want %s", hash, inRoot.Root) } - hash = ssz.HashConcurrent(obj) + hash = ssz.HashConcurrent(obj, ssz.ForkUnknown) if fmt.Sprintf("%#x", hash) != inRoot.Root { t.Fatalf("concurrent merkle root mismatch: have %#x, want %s", hash, inRoot.Root) } @@ -163,11 +177,11 @@ func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind st } // Try to decode, it should fail obj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ))); err == nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkUnknown); err == nil { t.Fatalf("succeeded in decoding invalid SSZ stream") } obj = T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj); err == nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkUnknown); err == nil { t.Fatalf("succeeded in decoding invalid SSZ buffer") } }) @@ -177,6 +191,7 @@ func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind st // TestConsensusSpecs iterates over all the (supported) consensus SSZ types and // runs the encoding/decoding/hashing round. 
func TestConsensusSpecs(t *testing.T) { + // Run through all the consensus specs as simple types testConsensusSpecType[*types.AggregateAndProof](t, "AggregateAndProof", "altair", "bellatrix", "capella", "deneb", "eip7594", "phase0", "whisk") testConsensusSpecType[*types.Attestation](t, "Attestation", "altair", "bellatrix", "capella", "deneb", "eip7594", "phase0", "whisk") testConsensusSpecType[*types.AttestationData](t, "AttestationData") @@ -189,6 +204,7 @@ func TestConsensusSpecs(t *testing.T) { testConsensusSpecType[*types.BeaconBlockBodyDeneb](t, "BeaconBlockBody", "deneb", "eip7594") testConsensusSpecType[*types.BeaconBlockHeader](t, "BeaconBlockHeader") testConsensusSpecType[*types.BeaconState](t, "BeaconState", "phase0") + testConsensusSpecType[*types.BeaconStateAltair](t, "BeaconState", "altair") testConsensusSpecType[*types.BeaconStateCapella](t, "BeaconState", "capella") testConsensusSpecType[*types.BeaconStateDeneb](t, "BeaconState", "deneb") testConsensusSpecType[*types.BLSToExecutionChange](t, "BLSToExecutionChange") @@ -219,10 +235,24 @@ func TestConsensusSpecs(t *testing.T) { testConsensusSpecType[*types.VoluntaryExit](t, "VoluntaryExit") testConsensusSpecType[*types.Withdrawal](t, "Withdrawal") + // Add monolith variations to the consensus types + testConsensusSpecType[*types.BeaconBlockBodyMonolith](t, "BeaconBlockBody", "phase0", "altair", "bellatrix", "capella", "deneb") + testConsensusSpecType[*types.BeaconStateMonolith](t, "BeaconState", "phase0", "altair", "bellatrix", "capella", "deneb") + testConsensusSpecType[*types.ExecutionPayloadMonolith](t, "ExecutionPayload", "bellatrix", "capella", "deneb") + testConsensusSpecType[*types.ExecutionPayloadMonolith2](t, "ExecutionPayload", "bellatrix", "capella", "deneb") + testConsensusSpecType[*types.ExecutionPayloadHeaderMonolith](t, "ExecutionPayloadHeader", "bellatrix", "capella", "deneb") + testConsensusSpecType[*types.ValidatorMonolith](t, "Validator") + // Add some API variations to test different codec implementations testConsensusSpecType[*types.ExecutionPayloadVariation](t, "ExecutionPayload", "bellatrix") testConsensusSpecType[*types.HistoricalBatchVariation](t, "HistoricalBatch") testConsensusSpecType[*types.WithdrawalVariation](t, "Withdrawal") + testConsensusSpecType[*types.AttestationVariation1](t, "Attestation", "altair", "bellatrix", "capella", "deneb", "eip7594", "phase0", "whisk") + testConsensusSpecType[*types.AttestationVariation2](t, "Attestation", "altair", "bellatrix", "capella", "deneb", "eip7594", "phase0", "whisk") + testConsensusSpecType[*types.AttestationVariation3](t, "Attestation", "altair", "bellatrix", "capella", "deneb", "eip7594", "phase0", "whisk") + testConsensusSpecType[*types.AttestationDataVariation1](t, "AttestationData") + testConsensusSpecType[*types.AttestationDataVariation2](t, "AttestationData") + testConsensusSpecType[*types.AttestationDataVariation3](t, "AttestationData") // Iterate over all the untouched tests and report them // forks, err := os.ReadDir(consensusSpecTestsRoot) @@ -271,6 +301,13 @@ func testConsensusSpecType[T newableObject[U], U any](t *testing.T, kind string, } // Some specific fork was requested, look that up explicitly for _, fork := range forks { + // Sanity check that the zero values can be handled before diving into the + // actual test datasets. This is mostly to catch implementation faults with + // uninitialized field handling. 
+ t.Run(fmt.Sprintf("zero/%s/%s", fork, kind), func(t *testing.T) { + testZeroValue[T, U](t, ssz.ForkMapping[fork]) + }) + // Zero value on this specific fork ok, pull in the consensus dataset path := filepath.Join(consensusSpecTestsRoot, fork, "ssz_static", kind, "ssz_random") tests, err := os.ReadDir(path) @@ -288,7 +325,7 @@ func testConsensusSpecType[T newableObject[U], U any](t *testing.T, kind string, // Run all the subtests found in the folder for _, test := range tests { - t.Run(fmt.Sprintf("%s/%s/%s", fork, kind, test.Name()), func(t *testing.T) { + t.Run(fmt.Sprintf("%s/%s/%s", fork, reflect.TypeFor[U]().Name(), test.Name()), func(t *testing.T) { // Parse the input SSZ data and the expected root for the test inSnappy, err := os.ReadFile(filepath.Join(path, test.Name(), "serialized.ssz_snappy")) if err != nil { @@ -312,11 +349,11 @@ func testConsensusSpecType[T newableObject[U], U any](t *testing.T, kind string, // from yaml and check that too, but hex-in-yaml makes everything // beyond annoying. C'est la vie. obj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ))); err != nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkMapping[fork]); err != nil { t.Fatalf("failed to decode SSZ stream: %v", err) } blob := new(bytes.Buffer) - if err := ssz.EncodeToStream(blob, obj); err != nil { + if err := ssz.EncodeToStream(blob, obj, ssz.ForkMapping[fork]); err != nil { t.Fatalf("failed to re-encode SSZ stream: %v", err) } if !bytes.Equal(blob.Bytes(), inSSZ) { @@ -325,11 +362,11 @@ func testConsensusSpecType[T newableObject[U], U any](t *testing.T, kind string, blob, inSSZ, len(prefix), blob.Bytes()[len(prefix):], inSSZ[len(prefix):]) } obj = T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { t.Fatalf("failed to decode SSZ buffer: %v", err) } - bin := make([]byte, ssz.Size(obj)) - if err := ssz.EncodeToBytes(bin, obj); err != nil { + bin := make([]byte, ssz.Size(obj, ssz.ForkMapping[fork])) + if err := ssz.EncodeToBytes(bin, obj, ssz.ForkMapping[fork]); err != nil { t.Fatalf("failed to re-encode SSZ buffer: %v", err) } if !bytes.Equal(bin, inSSZ) { @@ -339,14 +376,14 @@ func testConsensusSpecType[T newableObject[U], U any](t *testing.T, kind string, } // Encoder/decoder seems to work, check if the size reported by the // encoded object actually matches the encoded stream - if size := ssz.Size(obj); size != uint32(len(inSSZ)) { + if size := ssz.Size(obj, ssz.ForkMapping[fork]); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } - hash := ssz.HashSequential(obj) + hash := ssz.HashSequential(obj, ssz.ForkMapping[fork]) if fmt.Sprintf("%#x", hash) != inRoot.Root { t.Fatalf("sequential merkle root mismatch: have %#x, want %s", hash, inRoot.Root) } - hash = ssz.HashConcurrent(obj) + hash = ssz.HashConcurrent(obj, ssz.ForkMapping[fork]) if fmt.Sprintf("%#x", hash) != inRoot.Root { t.Fatalf("concurrent merkle root mismatch: have %#x, want %s", hash, inRoot.Root) } @@ -358,16 +395,16 @@ func testConsensusSpecType[T newableObject[U], U any](t *testing.T, kind string, // BenchmarkConsensusSpecs iterates over all the (supported) consensus SSZ types and // runs the encoding/decoding/hashing benchmark round. 
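The consensus spec test folders are keyed by fork name, so the suites translate names through ForkMapping; note that EL and CL aliases resolve to the same enum value (per forks.go above):

fork := ssz.ForkMapping["deneb"]          // ssz.ForkDeneb, i.e. ssz.ForkDencun
same := fork == ssz.ForkMapping["cancun"] // true: Cancun and Deneb both alias Dencun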
func BenchmarkConsensusSpecs(b *testing.B) { - benchmarkConsensusSpecType[*types.ExecutionPayloadVariation](b, "bellatrix", "ExecutionPayload") - benchmarkConsensusSpecType[*types.AggregateAndProof](b, "deneb", "AggregateAndProof") benchmarkConsensusSpecType[*types.Attestation](b, "deneb", "Attestation") benchmarkConsensusSpecType[*types.AttestationData](b, "deneb", "AttestationData") benchmarkConsensusSpecType[*types.AttesterSlashing](b, "deneb", "AttesterSlashing") benchmarkConsensusSpecType[*types.BeaconBlock](b, "phase0", "BeaconBlock") benchmarkConsensusSpecType[*types.BeaconBlockBodyDeneb](b, "deneb", "BeaconBlockBody") + benchmarkConsensusSpecType[*types.BeaconBlockBodyMonolith](b, "deneb", "BeaconBlockBody") benchmarkConsensusSpecType[*types.BeaconBlockHeader](b, "deneb", "BeaconBlockHeader") - benchmarkConsensusSpecType[*types.BeaconState](b, "phase0", "BeaconState") + benchmarkConsensusSpecType[*types.BeaconStateDeneb](b, "deneb", "BeaconState") + benchmarkConsensusSpecType[*types.BeaconStateMonolith](b, "deneb", "BeaconState") benchmarkConsensusSpecType[*types.BLSToExecutionChange](b, "deneb", "BLSToExecutionChange") benchmarkConsensusSpecType[*types.Checkpoint](b, "deneb", "Checkpoint") benchmarkConsensusSpecType[*types.Deposit](b, "deneb", "Deposit") @@ -376,7 +413,9 @@ func BenchmarkConsensusSpecs(b *testing.B) { benchmarkConsensusSpecType[*types.Eth1Block](b, "deneb", "Eth1Block") benchmarkConsensusSpecType[*types.Eth1Data](b, "deneb", "Eth1Data") benchmarkConsensusSpecType[*types.ExecutionPayloadDeneb](b, "deneb", "ExecutionPayload") + benchmarkConsensusSpecType[*types.ExecutionPayloadMonolith](b, "deneb", "ExecutionPayload") benchmarkConsensusSpecType[*types.ExecutionPayloadHeaderDeneb](b, "deneb", "ExecutionPayloadHeader") + benchmarkConsensusSpecType[*types.ExecutionPayloadHeaderMonolith](b, "deneb", "ExecutionPayloadHeader") benchmarkConsensusSpecType[*types.Fork](b, "deneb", "Fork") benchmarkConsensusSpecType[*types.HistoricalBatch](b, "deneb", "HistoricalBatch") benchmarkConsensusSpecType[*types.HistoricalSummary](b, "deneb", "HistoricalSummary") @@ -406,22 +445,22 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.Fatalf("failed to parse snappy ssz binary: %v", err) } inObj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), inObj, uint32(len(inSSZ))); err != nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), inObj, uint32(len(inSSZ)), ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) } // Start the benchmarks for all the different operations - b.Run(fmt.Sprintf("%s/encode-stream", kind), func(b *testing.B) { + b.Run(fmt.Sprintf("%s/encode-stream", reflect.TypeOf(inObj).Elem().Name()), func(b *testing.B) { b.SetBytes(int64(len(inSSZ))) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - if err := ssz.EncodeToStream(io.Discard, inObj); err != nil { + if err := ssz.EncodeToStream(io.Discard, inObj, ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to encode SSZ stream: %v", err) } } }) - b.Run(fmt.Sprintf("%s/encode-buffer", kind), func(b *testing.B) { + b.Run(fmt.Sprintf("%s/encode-buffer", reflect.TypeOf(inObj).Elem().Name()), func(b *testing.B) { blob := make([]byte, len(inSSZ)) b.SetBytes(int64(len(inSSZ))) @@ -429,12 +468,12 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - if err := ssz.EncodeToBytes(blob, inObj); err != nil { + if err := ssz.EncodeToBytes(blob, inObj, 
ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to encode SSZ bytes: %v", err) } } }) - b.Run(fmt.Sprintf("%s/decode-stream", kind), func(b *testing.B) { + b.Run(fmt.Sprintf("%s/decode-stream", reflect.TypeOf(inObj).Elem().Name()), func(b *testing.B) { obj := T(new(U)) r := bytes.NewReader(inSSZ) @@ -443,13 +482,13 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - if err := ssz.DecodeFromStream(r, obj, uint32(len(inSSZ))); err != nil { + if err := ssz.DecodeFromStream(r, obj, uint32(len(inSSZ)), ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) } r.Reset(inSSZ) } }) - b.Run(fmt.Sprintf("%s/decode-buffer", kind), func(b *testing.B) { + b.Run(fmt.Sprintf("%s/decode-buffer", reflect.TypeOf(inObj).Elem().Name()), func(b *testing.B) { obj := T(new(U)) b.SetBytes(int64(len(inSSZ))) @@ -457,14 +496,14 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - if err := ssz.DecodeFromBytes(inSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) } } }) - b.Run(fmt.Sprintf("%s/merkleize-sequential", kind), func(b *testing.B) { + b.Run(fmt.Sprintf("%s/merkleize-sequential", reflect.TypeOf(inObj).Elem().Name()), func(b *testing.B) { obj := T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) } b.SetBytes(int64(len(inSSZ))) @@ -472,12 +511,12 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - ssz.HashSequential(obj) + ssz.HashSequential(obj, ssz.ForkMapping[fork]) } }) - b.Run(fmt.Sprintf("%s/merkleize-concurrent", kind), func(b *testing.B) { + b.Run(fmt.Sprintf("%s/merkleize-concurrent", reflect.TypeOf(inObj).Elem().Name()), func(b *testing.B) { obj := T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) } b.SetBytes(int64(len(inSSZ))) @@ -485,7 +524,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - ssz.HashConcurrent(obj) + ssz.HashConcurrent(obj, ssz.ForkMapping[fork]) } }) } @@ -531,6 +570,18 @@ func FuzzConsensusSpecsBeaconBlockHeader(f *testing.F) { func FuzzConsensusSpecsBeaconState(f *testing.F) { fuzzConsensusSpecType[*types.BeaconState](f, "BeaconState") } +func FuzzConsensusSpecsBeaconStateAltair(f *testing.F) { + fuzzConsensusSpecType[*types.BeaconStateAltair](f, "BeaconState") +} +func FuzzConsensusSpecsBeaconStateBellatrix(f *testing.F) { + fuzzConsensusSpecType[*types.BeaconStateBellatrix](f, "BeaconState") +} +func FuzzConsensusSpecsBeaconStateCapella(f *testing.F) { + fuzzConsensusSpecType[*types.BeaconStateCapella](f, "BeaconState") +} +func FuzzConsensusSpecsBeaconStateDeneb(f *testing.F) { + fuzzConsensusSpecType[*types.BeaconStateDeneb](f, "BeaconState") +} func FuzzConsensusSpecsBLSToExecutionChange(f *testing.F) { fuzzConsensusSpecType[*types.BLSToExecutionChange](f, "BLSToExecutionChange") } @@ -613,6 +664,19 @@ func FuzzConsensusSpecsWithdrawal(f *testing.F) { fuzzConsensusSpecType[*types.Withdrawal](f, "Withdrawal") } +func 
FuzzConsensusSpecsBeaconBlockBodyMonolith(f *testing.F) { + fuzzConsensusSpecType[*types.BeaconBlockBodyMonolith](f, "BeaconBlockBody") +} +func FuzzConsensusSpecsBeaconStateMonolith(f *testing.F) { + fuzzConsensusSpecType[*types.BeaconStateMonolith](f, "BeaconState") +} +func FuzzConsensusSpecsExecutionPayloadMonolith(f *testing.F) { + fuzzConsensusSpecType[*types.ExecutionPayloadMonolith](f, "ExecutionPayload") +} +func FuzzConsensusSpecsExecutionPayloadHeaderMonolith(f *testing.F) { + fuzzConsensusSpecType[*types.ExecutionPayloadHeaderMonolith](f, "ExecutionPayloadHeader") +} + func FuzzConsensusSpecsExecutionPayloadVariation(f *testing.F) { fuzzConsensusSpecType[*types.ExecutionPayloadVariation](f, "ExecutionPayload") } @@ -653,7 +717,7 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) f.Fatalf("failed to parse snappy ssz binary: %v", err) } obj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ))); err == nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkFuture); err == nil { // Stash away all valid ssz streams so we can play with decoding // into previously used objects valids = append(valids, inSSZ) @@ -670,11 +734,11 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) // Try the stream encoder/decoder obj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ))); err == nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkFuture); err == nil { // Stream decoder succeeded, make sure it re-encodes correctly and // that the buffer decoder also succeeds parsing blob := new(bytes.Buffer) - if err := ssz.EncodeToStream(blob, obj); err != nil { + if err := ssz.EncodeToStream(blob, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to re-encode stream: %v", err) } if !bytes.Equal(blob.Bytes(), inSSZ) { @@ -682,27 +746,27 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) t.Fatalf("re-encoded stream mismatch: have %x, want %x, common prefix %d, have left %x, want left %x", blob, inSSZ, len(prefix), blob.Bytes()[len(prefix):], inSSZ[len(prefix):]) } - if err := ssz.DecodeFromBytes(inSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to decode buffer: %v", err) } // Sanity check that hashing and size retrieval works - hash1 := ssz.HashSequential(obj) - hash2 := ssz.HashConcurrent(obj) + hash1 := ssz.HashSequential(obj, ssz.ForkFuture) + hash2 := ssz.HashConcurrent(obj, ssz.ForkFuture) if hash1 != hash2 { t.Fatalf("sequential/concurrent hash mismatch: sequencial %x, concurrent %x", hash1, hash2) } - if size := ssz.Size(obj); size != uint32(len(inSSZ)) { + if size := ssz.Size(obj, ssz.ForkFuture); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } valid = true } // Try the buffer encoder/decoder obj = T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj); err == nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkFuture); err == nil { // Buffer decoder succeeded, make sure it re-encodes correctly and // that the stream decoder also succeeds parsing - bin := make([]byte, ssz.Size(obj)) - if err := ssz.EncodeToBytes(bin, obj); err != nil { + bin := make([]byte, ssz.Size(obj, ssz.ForkFuture)) + if err := ssz.EncodeToBytes(bin, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to re-encode buffer: 
%v", err) } if !bytes.Equal(bin, inSSZ) { @@ -710,16 +774,16 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) t.Fatalf("re-encoded buffer mismatch: have %x, want %x, common prefix %d, have left %x, want left %x", bin, inSSZ, len(prefix), bin[len(prefix):], inSSZ[len(prefix):]) } - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ))); err != nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkFuture); err != nil { t.Fatalf("failed to decode stream: %v", err) } // Sanity check that hashing and size retrieval works - hash1 := ssz.HashSequential(obj) - hash2 := ssz.HashConcurrent(obj) + hash1 := ssz.HashSequential(obj, ssz.ForkFuture) + hash2 := ssz.HashConcurrent(obj, ssz.ForkFuture) if hash1 != hash2 { t.Fatalf("sequential/concurrent hash mismatch: sequencial %x, concurrent %x", hash1, hash2) } - if size := ssz.Size(obj); size != uint32(len(inSSZ)) { + if size := ssz.Size(obj, ssz.ForkFuture); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } } @@ -730,14 +794,14 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) // Try the stream encoder/decoder into a prepped object obj = T(new(U)) - if err := ssz.DecodeFromBytes(vSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(vSSZ, obj, ssz.ForkFuture); err != nil { panic(err) // we've already decoded this, cannot fail } - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ))); err != nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkFuture); err != nil { t.Fatalf("failed to decode stream into used object: %v", err) } blob := new(bytes.Buffer) - if err := ssz.EncodeToStream(blob, obj); err != nil { + if err := ssz.EncodeToStream(blob, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to re-encode stream from used object: %v", err) } if !bytes.Equal(blob.Bytes(), inSSZ) { @@ -745,24 +809,24 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) t.Fatalf("re-encoded stream from used object mismatch: have %x, want %x, common prefix %d, have left %x, want left %x", blob, inSSZ, len(prefix), blob.Bytes()[len(prefix):], inSSZ[len(prefix):]) } - hash1 := ssz.HashSequential(obj) - hash2 := ssz.HashConcurrent(obj) + hash1 := ssz.HashSequential(obj, ssz.ForkFuture) + hash2 := ssz.HashConcurrent(obj, ssz.ForkFuture) if hash1 != hash2 { t.Fatalf("sequential/concurrent hash mismatch: sequencial %x, concurrent %x", hash1, hash2) } - if size := ssz.Size(obj); size != uint32(len(inSSZ)) { + if size := ssz.Size(obj, ssz.ForkFuture); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } // Try the buffer encoder/decoder into a prepped object obj = T(new(U)) - if err := ssz.DecodeFromBytes(vSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(vSSZ, obj, ssz.ForkFuture); err != nil { panic(err) // we've already decoded this, cannot fail } - if err := ssz.DecodeFromBytes(inSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to decode buffer into used object: %v", err) } - bin := make([]byte, ssz.Size(obj)) - if err := ssz.EncodeToBytes(bin, obj); err != nil { + bin := make([]byte, ssz.Size(obj, ssz.ForkFuture)) + if err := ssz.EncodeToBytes(bin, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to re-encode buffer from used 
object: %v", err) } if !bytes.Equal(bin, inSSZ) { @@ -770,12 +834,12 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) t.Fatalf("re-encoded buffer from used object mismatch: have %x, want %x, common prefix %d, have left %x, want left %x", blob, inSSZ, len(prefix), bin[len(prefix):], inSSZ[len(prefix):]) } - hash1 = ssz.HashSequential(obj) - hash2 = ssz.HashConcurrent(obj) + hash1 = ssz.HashSequential(obj, ssz.ForkFuture) + hash2 = ssz.HashConcurrent(obj, ssz.ForkFuture) if hash1 != hash2 { t.Fatalf("sequential/concurrent hash mismatch: sequencial %x, concurrent %x", hash1, hash2) } - if size := ssz.Size(obj); size != uint32(len(inSSZ)) { + if size := ssz.Size(obj, ssz.ForkFuture); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } } diff --git a/tests/corner_cases_test.go b/tests/corner_cases_test.go index 82d5687..23d1b13 100644 --- a/tests/corner_cases_test.go +++ b/tests/corner_cases_test.go @@ -19,19 +19,19 @@ import ( func TestDecodeMissized(t *testing.T) { obj := new(testMissizedType) - blob := make([]byte, obj.SizeSSZ()+1) - if err := ssz.DecodeFromBytes(blob, obj); !errors.Is(err, ssz.ErrObjectSlotSizeMismatch) { + blob := make([]byte, ssz.Size(obj, ssz.ForkUnknown)+1) + if err := ssz.DecodeFromBytes(blob, obj, ssz.ForkUnknown); !errors.Is(err, ssz.ErrObjectSlotSizeMismatch) { t.Errorf("decode from bytes error mismatch: have %v, want %v", err, ssz.ErrObjectSlotSizeMismatch) } - if err := ssz.DecodeFromStream(bytes.NewReader(blob), obj, uint32(len(blob))); !errors.Is(err, ssz.ErrObjectSlotSizeMismatch) { + if err := ssz.DecodeFromStream(bytes.NewReader(blob), obj, uint32(len(blob)), ssz.ForkUnknown); !errors.Is(err, ssz.ErrObjectSlotSizeMismatch) { t.Errorf("decode from stream error mismatch: have %v, want %v", err, ssz.ErrObjectSlotSizeMismatch) } - blob = make([]byte, obj.SizeSSZ()-1) - if err := ssz.DecodeFromBytes(blob, obj); !errors.Is(err, io.ErrUnexpectedEOF) { + blob = make([]byte, ssz.Size(obj, ssz.ForkUnknown)-1) + if err := ssz.DecodeFromBytes(blob, obj, ssz.ForkUnknown); !errors.Is(err, io.ErrUnexpectedEOF) { t.Errorf("decode from bytes error mismatch: have %v, want %v", err, io.ErrUnexpectedEOF) } - if err := ssz.DecodeFromStream(bytes.NewReader(blob), obj, uint32(len(blob))); !errors.Is(err, io.ErrUnexpectedEOF) { + if err := ssz.DecodeFromStream(bytes.NewReader(blob), obj, uint32(len(blob)), ssz.ForkUnknown); !errors.Is(err, io.ErrUnexpectedEOF) { t.Errorf("decode from stream error mismatch: have %v, want %v", err, io.ErrUnexpectedEOF) } } @@ -40,7 +40,7 @@ type testMissizedType struct { A, B uint64 } -func (t *testMissizedType) SizeSSZ() uint32 { return 16 } +func (t *testMissizedType) SizeSSZ(sizer *ssz.Sizer) uint32 { return 16 } func (t *testMissizedType) DefineSSZ(codec *ssz.Codec) { ssz.DefineUint64(codec, &t.A) ssz.DefineUint64(codec, &t.B) @@ -50,11 +50,11 @@ func (t *testMissizedType) DefineSSZ(codec *ssz.Codec) { func TestEncodeOversized(t *testing.T) { obj := new(testMissizedType) - blob := make([]byte, obj.SizeSSZ()-1) - if err := ssz.EncodeToBytes(blob, obj); !errors.Is(err, ssz.ErrBufferTooSmall) { + blob := make([]byte, ssz.Size(obj, ssz.ForkUnknown)-1) + if err := ssz.EncodeToBytes(blob, obj, ssz.ForkUnknown); !errors.Is(err, ssz.ErrBufferTooSmall) { t.Errorf("encode to bytes error mismatch: have %v, want %v", err, ssz.ErrBufferTooSmall) } - if err := ssz.EncodeToStream(&testEncodeOversizedStream{blob}, obj); err == nil { + if err := 
ssz.EncodeToStream(&testEncodeOversizedStream{blob}, obj, ssz.ForkUnknown); err == nil { t.Errorf("encode to stream error mismatch: have nil, want stream full") // wonky, but should be fine } } @@ -85,7 +85,7 @@ func TestZeroCounterOffset(t *testing.T) { if err != nil { panic(err) } - err = ssz.DecodeFromBytes(inSSZ, new(types.ExecutionPayload)) + err = ssz.DecodeFromBytes(inSSZ, new(types.ExecutionPayload), ssz.ForkUnknown) if !errors.Is(err, ssz.ErrZeroCounterOffset) { t.Errorf("decode error mismatch: have %v, want %v", err, ssz.ErrZeroCounterOffset) } @@ -97,7 +97,7 @@ func TestInvalidBoolean(t *testing.T) { if err != nil { panic(err) } - err = ssz.DecodeFromBytes(inSSZ, new(types.Validator)) + err = ssz.DecodeFromBytes(inSSZ, new(types.Validator), ssz.ForkUnknown) if !errors.Is(err, ssz.ErrInvalidBoolean) { t.Errorf("decode error mismatch: have %v, want %v", err, ssz.ErrInvalidBoolean) } diff --git a/tests/testtypes/consensus-spec-tests/gen_aggregate_and_proof_ssz.go b/tests/testtypes/consensus-spec-tests/gen_aggregate_and_proof_ssz.go index 62a33c4..21f4c57 100644 --- a/tests/testtypes/consensus-spec-tests/gen_aggregate_and_proof_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_aggregate_and_proof_ssz.go @@ -6,12 +6,12 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *AggregateAndProof) SizeSSZ(fixed bool) uint32 { - var size = uint32(8 + 4 + 96) +func (obj *AggregateAndProof) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 8 + 4 + 96 if fixed { return size } - size += ssz.SizeDynamicObject(obj.Aggregate) + size += ssz.SizeDynamicObject(sizer, obj.Aggregate) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_data_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_data_ssz.go index 7d05f35..1629e34 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attestation_data_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_data_ssz.go @@ -5,11 +5,15 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheAttestationData = 8 + 8 + 32 + (*Checkpoint)(nil).SizeSSZ() + (*Checkpoint)(nil).SizeSSZ() +var staticSizeCacheAttestationData = ssz.PrecomputeStaticSizeCache((*AttestationData)(nil)) // SizeSSZ returns the total size of the static ssz object. -func (obj *AttestationData) SizeSSZ() uint32 { - return staticSizeCacheAttestationData +func (obj *AttestationData) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestationData) { + return staticSizeCacheAttestationData[fork] + } + size = 8 + 8 + 32 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + return size } // DefineSSZ defines how an object is encoded/decoded. diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_1_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_1_ssz.go new file mode 100644 index 0000000..0879436 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_1_ssz.go @@ -0,0 +1,30 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. 
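Taken together, the test changes above show the new public contract: every top-level helper (ssz.Size, ssz.EncodeToBytes/EncodeToStream, ssz.DecodeFromBytes/DecodeFromStream, ssz.HashSequential/HashConcurrent) now takes the fork as an explicit trailing argument, and SizeSSZ is no longer callable bare, which is why the corner-case tests switch from obj.SizeSSZ() to ssz.Size(obj, ssz.ForkUnknown). A caller-side sketch of the same contract follows; the helper signatures and fork constants are taken from this diff, the Checkpoint fixture and surrounding program are illustrative only:

package main

import (
	"bytes"
	"fmt"

	"github.com/karalabe/ssz"

	types "github.com/karalabe/ssz/tests/testtypes/consensus-spec-tests"
)

func main() {
	obj := new(types.Checkpoint) // any generated fixture type would do

	// Sizing, encoding and decoding all require an explicit fork now.
	blob := make([]byte, ssz.Size(obj, ssz.ForkUnknown))
	if err := ssz.EncodeToBytes(blob, obj, ssz.ForkUnknown); err != nil {
		panic(err)
	}
	if err := ssz.DecodeFromBytes(blob, obj, ssz.ForkUnknown); err != nil {
		panic(err)
	}
	// The streaming variants mirror the byte-slice ones.
	var buf bytes.Buffer
	if err := ssz.EncodeToStream(&buf, obj, ssz.ForkUnknown); err != nil {
		panic(err)
	}
	if err := ssz.DecodeFromStream(&buf, obj, uint32(buf.Len()), ssz.ForkUnknown); err != nil {
		panic(err)
	}
	// Hashing is fork-aware too; sequential and concurrent must agree.
	fmt.Println(ssz.HashSequential(obj, ssz.ForkUnknown) == ssz.HashConcurrent(obj, ssz.ForkUnknown))
}

The regenerated AttestationDataVariation fixtures below show what the fork plumbing looks like on the producing side.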
+var staticSizeCacheAttestationDataVariation1 = ssz.PrecomputeStaticSizeCache((*AttestationDataVariation1)(nil)) + +// SizeSSZ returns the total size of the static ssz object. +func (obj *AttestationDataVariation1) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestationDataVariation1) { + return staticSizeCacheAttestationDataVariation1[fork] + } + if sizer.Fork() >= ssz.ForkFuture { + size += 8 + } + size += 8 + 8 + 32 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *AttestationDataVariation1) DefineSSZ(codec *ssz.Codec) { + ssz.DefineUint64PointerOnFork(codec, &obj.Future, ssz.ForkFilter{Added: ssz.ForkFuture}) // Field (0) - Future - 8 bytes + ssz.DefineUint64(codec, &obj.Slot) // Field (1) - Slot - 8 bytes + ssz.DefineUint64(codec, &obj.Index) // Field (2) - Index - 8 bytes + ssz.DefineStaticBytes(codec, &obj.BeaconBlockHash) // Field (3) - BeaconBlockHash - 32 bytes + ssz.DefineStaticObject(codec, &obj.Source) // Field (4) - Source - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.Target) // Field (5) - Target - ? bytes (Checkpoint) +} diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_2_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_2_ssz.go new file mode 100644 index 0000000..615e127 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_2_ssz.go @@ -0,0 +1,31 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheAttestationDataVariation2 = ssz.PrecomputeStaticSizeCache((*AttestationDataVariation2)(nil)) + +// SizeSSZ returns the total size of the static ssz object. +func (obj *AttestationDataVariation2) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestationDataVariation2) { + return staticSizeCacheAttestationDataVariation2[fork] + } + size = 8 + 8 + 32 + if sizer.Fork() >= ssz.ForkFuture { + size += 8 + } + size += (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *AttestationDataVariation2) DefineSSZ(codec *ssz.Codec) { + ssz.DefineUint64(codec, &obj.Slot) // Field (0) - Slot - 8 bytes + ssz.DefineUint64(codec, &obj.Index) // Field (1) - Index - 8 bytes + ssz.DefineStaticBytes(codec, &obj.BeaconBlockHash) // Field (2) - BeaconBlockHash - 32 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.Future, ssz.ForkFilter{Added: ssz.ForkFuture}) // Field (3) - Future - 8 bytes + ssz.DefineStaticObject(codec, &obj.Source) // Field (4) - Source - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.Target) // Field (5) - Target - ? bytes (Checkpoint) +} diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_3_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_3_ssz.go new file mode 100644 index 0000000..d24b625 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_3_ssz.go @@ -0,0 +1,30 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. 
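Both variation files above lean on the same two mechanisms: a package-init cache holding one static size per fork, and a `fork < len(cache)` guard that falls back to live computation for forks beyond the table (ForkFuture in these fixtures). The library's PrecomputeStaticSizeCache implementation is not part of this diff, so here is a self-contained toy model of the idea; none of these types are the library's own:

package main

import "fmt"

// Fork mirrors the ordered fork constants; ForkFuture is the open-ended
// sentinel that deliberately falls outside the precomputed table.
type Fork int

const (
	ForkUnknown Fork = iota
	ForkAltair
	ForkFuture
)

// Sizer is modelled as nothing more than a fork carrier, since the generated
// code in this diff only ever calls sizer.Fork() on it.
type Sizer struct{ fork Fork }

func (s *Sizer) Fork() Fork { return s.fork }

// sizeSSZ mimics a generated static SizeSSZ: a flat base plus one field that
// only exists from Altair onwards.
func sizeSSZ(s *Sizer) uint32 {
	size := uint32(8 + 32)
	if s.Fork() >= ForkAltair {
		size += 8
	}
	return size
}

func main() {
	// Precompute one entry per known fork, as the package-init cache does.
	cache := make([]uint32, ForkFuture)
	for f := ForkUnknown; f < ForkFuture; f++ {
		cache[f] = sizeSSZ(&Sizer{fork: f})
	}
	fmt.Println(cache) // [40 48]

	// The generated guard: cached forks hit the table, newer ones recompute.
	s := &Sizer{fork: ForkFuture}
	if fork := int(s.Fork()); fork < len(cache) {
		fmt.Println(cache[fork])
	} else {
		fmt.Println(sizeSSZ(s)) // 48, computed live
	}
}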
+var staticSizeCacheAttestationDataVariation3 = ssz.PrecomputeStaticSizeCache((*AttestationDataVariation3)(nil)) + +// SizeSSZ returns the total size of the static ssz object. +func (obj *AttestationDataVariation3) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestationDataVariation3) { + return staticSizeCacheAttestationDataVariation3[fork] + } + size = 8 + 8 + 32 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + if sizer.Fork() >= ssz.ForkFuture { + size += 8 + } + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *AttestationDataVariation3) DefineSSZ(codec *ssz.Codec) { + ssz.DefineUint64(codec, &obj.Slot) // Field (0) - Slot - 8 bytes + ssz.DefineUint64(codec, &obj.Index) // Field (1) - Index - 8 bytes + ssz.DefineStaticBytes(codec, &obj.BeaconBlockHash) // Field (2) - BeaconBlockHash - 32 bytes + ssz.DefineStaticObject(codec, &obj.Source) // Field (3) - Source - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.Target) // Field (4) - Target - ? bytes (Checkpoint) + ssz.DefineUint64PointerOnFork(codec, &obj.Future, ssz.ForkFilter{Added: ssz.ForkFuture}) // Field (5) - Future - 8 bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go index 6eb49ff..f7b08d7 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go @@ -5,16 +5,22 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheAttestation = 4 + (*AttestationData)(nil).SizeSSZ() + 96 +var staticSizeCacheAttestation = ssz.PrecomputeStaticSizeCache((*Attestation)(nil)) // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *Attestation) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCacheAttestation) +func (obj *Attestation) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestation) { + size = staticSizeCacheAttestation[fork] + } else { + size = 4 + (*AttestationData)(nil).SizeSSZ(sizer) + 96 + } + // Either return the static size or accumulate the dynamic too if fixed { return size } - size += ssz.SizeSliceOfBits(obj.AggregationBits) + size += ssz.SizeSliceOfBits(sizer, obj.AggregationBits) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_variation_1_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_1_ssz.go new file mode 100644 index 0000000..6fe098e --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_1_ssz.go @@ -0,0 +1,41 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheAttestationVariation1 = ssz.PrecomputeStaticSizeCache((*AttestationVariation1)(nil)) + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. 
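The three AttestationData variations place the fork-gated Future field first, in the middle, and last, which forces the generator's accumulator to break its `size = ...` run at every possible position; compare the three SizeSSZ bodies to see the `size +=` resumption after each gated block. The caller-visible effect is simply a fork-dependent size. A quick check, hedged in that the 40-byte Checkpoint figure is an assumption consistent with its use elsewhere rather than a number stated in this diff:

package main

import (
	"fmt"

	"github.com/karalabe/ssz"
	types "github.com/karalabe/ssz/tests/testtypes/consensus-spec-tests"
)

func main() {
	obj := new(types.AttestationDataVariation1)
	// Pre-Future forks: 8 + 8 + 32 plus two Checkpoints (40 bytes each,
	// assumed), i.e. 128 bytes.
	fmt.Println(ssz.Size(obj, ssz.ForkDeneb))
	// From ForkFuture on, the gated uint64 pointer adds 8 more: 136 bytes.
	fmt.Println(ssz.Size(obj, ssz.ForkFuture))
}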
+func (obj *AttestationVariation1) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestationVariation1) { + size = staticSizeCacheAttestationVariation1[fork] + } else { + if sizer.Fork() >= ssz.ForkFuture { + size += 8 + } + size += 4 + (*AttestationData)(nil).SizeSSZ(sizer) + 96 + } + // Either return the static size or accumulate the dynamic too + if fixed { + return size + } + size += ssz.SizeSliceOfBits(sizer, obj.AggregationBits) + + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *AttestationVariation1) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineUint64PointerOnFork(codec, &obj.Future, ssz.ForkFilter{Added: ssz.ForkFuture}) // Field (0) - Future - 8 bytes + ssz.DefineSliceOfBitsOffset(codec, &obj.AggregationBits, 2048) // Offset (1) - AggregationBits - 4 bytes + ssz.DefineStaticObject(codec, &obj.Data) // Field (2) - Data - ? bytes (AttestationData) + ssz.DefineStaticBytes(codec, &obj.Signature) // Field (3) - Signature - 96 bytes + + // Define the dynamic data (fields) + ssz.DefineSliceOfBitsContent(codec, &obj.AggregationBits, 2048) // Field (1) - AggregationBits - ? bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_variation_2_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_2_ssz.go new file mode 100644 index 0000000..c280c37 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_2_ssz.go @@ -0,0 +1,42 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheAttestationVariation2 = ssz.PrecomputeStaticSizeCache((*AttestationVariation2)(nil)) + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. +func (obj *AttestationVariation2) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestationVariation2) { + size = staticSizeCacheAttestationVariation2[fork] + } else { + size = 4 + (*AttestationData)(nil).SizeSSZ(sizer) + if sizer.Fork() >= ssz.ForkFuture { + size += 8 + } + size += 96 + } + // Either return the static size or accumulate the dynamic too + if fixed { + return size + } + size += ssz.SizeSliceOfBits(sizer, obj.AggregationBits) + + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *AttestationVariation2) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineSliceOfBitsOffset(codec, &obj.AggregationBits, 2048) // Offset (0) - AggregationBits - 4 bytes + ssz.DefineStaticObject(codec, &obj.Data) // Field (1) - Data - ? bytes (AttestationData) + ssz.DefineUint64PointerOnFork(codec, &obj.Future, ssz.ForkFilter{Added: ssz.ForkFuture}) // Field (2) - Future - 8 bytes + ssz.DefineStaticBytes(codec, &obj.Signature) // Field (3) - Signature - 96 bytes + + // Define the dynamic data (fields) + ssz.DefineSliceOfBitsContent(codec, &obj.AggregationBits, 2048) // Field (0) - AggregationBits - ? 
bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_variation_3_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_3_ssz.go new file mode 100644 index 0000000..80f35f2 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_3_ssz.go @@ -0,0 +1,41 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheAttestationVariation3 = ssz.PrecomputeStaticSizeCache((*AttestationVariation3)(nil)) + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. +func (obj *AttestationVariation3) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestationVariation3) { + size = staticSizeCacheAttestationVariation3[fork] + } else { + size = 4 + (*AttestationData)(nil).SizeSSZ(sizer) + 96 + if sizer.Fork() >= ssz.ForkFuture { + size += 8 + } + } + // Either return the static size or accumulate the dynamic too + if fixed { + return size + } + size += ssz.SizeSliceOfBits(sizer, obj.AggregationBits) + + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *AttestationVariation3) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineSliceOfBitsOffset(codec, &obj.AggregationBits, 2048) // Offset (0) - AggregationBits - 4 bytes + ssz.DefineStaticObject(codec, &obj.Data) // Field (1) - Data - ? bytes (AttestationData) + ssz.DefineStaticBytes(codec, &obj.Signature) // Field (2) - Signature - 96 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.Future, ssz.ForkFilter{Added: ssz.ForkFuture}) // Field (3) - Future - 8 bytes + + // Define the dynamic data (fields) + ssz.DefineSliceOfBitsContent(codec, &obj.AggregationBits, 2048) // Field (0) - AggregationBits - ? bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_attester_slashing_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attester_slashing_ssz.go index 97638fd..56c3188 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attester_slashing_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attester_slashing_ssz.go @@ -6,13 +6,13 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *AttesterSlashing) SizeSSZ(fixed bool) uint32 { - var size = uint32(4 + 4) +func (obj *AttesterSlashing) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 4 + 4 if fixed { return size } - size += ssz.SizeDynamicObject(obj.Attestation1) - size += ssz.SizeDynamicObject(obj.Attestation2) + size += ssz.SizeDynamicObject(sizer, obj.Attestation1) + size += ssz.SizeDynamicObject(sizer, obj.Attestation2) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_altair_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_altair_ssz.go index c6f3178..83542f6 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_altair_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_altair_ssz.go @@ -5,20 +5,26 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. 
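AttesterSlashing above is the simplest dynamic migration in the batch: its fixed region is the literal 4 + 4 and no size cache is emitted, because each dynamic field contributes only its 4-byte offset to the static part and nothing needs resolving at init. As a reminder of what those offsets mean on the wire, a self-contained layout sketch with stand-in payloads (not real Attestation encodings):

package main

import (
	"encoding/binary"
	"fmt"
)

// Toy layout math for a container with two dynamic fields, mirroring
// AttesterSlashing's `size = 4 + 4` fixed region: the static part holds two
// little-endian uint32 offsets pointing past it into the heap area.
func main() {
	att1 := []byte{0xaa, 0xbb}       // stand-in for Attestation1's encoding
	att2 := []byte{0xcc, 0xdd, 0xee} // stand-in for Attestation2's encoding

	fixed := uint32(4 + 4)
	off1 := fixed                     // first heap item starts right after the offsets
	off2 := fixed + uint32(len(att1)) // second follows the first

	out := make([]byte, 0, int(fixed)+len(att1)+len(att2))
	out = binary.LittleEndian.AppendUint32(out, off1)
	out = binary.LittleEndian.AppendUint32(out, off2)
	out = append(out, att1...)
	out = append(out, att2...)
	fmt.Printf("% x\n", out) // 08 00 00 00 0a 00 00 00 aa bb cc dd ee
}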
-var staticSizeCacheBeaconBlockBodyAltair = 96 + (*Eth1Data)(nil).SizeSSZ() + 32 + 4 + 4 + 4 + 4 + 4 + (*SyncAggregate)(nil).SizeSSZ() +var staticSizeCacheBeaconBlockBodyAltair = ssz.PrecomputeStaticSizeCache((*BeaconBlockBodyAltair)(nil)) // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *BeaconBlockBodyAltair) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCacheBeaconBlockBodyAltair) +func (obj *BeaconBlockBodyAltair) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconBlockBodyAltair) { + size = staticSizeCacheBeaconBlockBodyAltair[fork] + } else { + size = 96 + (*Eth1Data)(nil).SizeSSZ(sizer) + 32 + 4 + 4 + 4 + 4 + 4 + (*SyncAggregate)(nil).SizeSSZ(sizer) + } + // Either return the static size or accumulate the dynamic too if fixed { return size } - size += ssz.SizeSliceOfStaticObjects(obj.ProposerSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.AttesterSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.Attestations) - size += ssz.SizeSliceOfStaticObjects(obj.Deposits) - size += ssz.SizeSliceOfStaticObjects(obj.VoluntaryExits) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.ProposerSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.AttesterSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.Attestations) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Deposits) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.VoluntaryExits) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_bellatrix_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_bellatrix_ssz.go index a424f55..5bf3bfe 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_bellatrix_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_bellatrix_ssz.go @@ -5,21 +5,27 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheBeaconBlockBodyBellatrix = 96 + (*Eth1Data)(nil).SizeSSZ() + 32 + 4 + 4 + 4 + 4 + 4 + (*SyncAggregate)(nil).SizeSSZ() + 4 +var staticSizeCacheBeaconBlockBodyBellatrix = ssz.PrecomputeStaticSizeCache((*BeaconBlockBodyBellatrix)(nil)) // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. 
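The BeaconBlockBodyAltair rewrite above shows the other half of the plumbing: every ssz.Size* helper now takes the sizer as its first argument. The helpers' implementations are not in this diff; a plausible shape for the static-slice case, inferred purely from the call sites and hedged accordingly:

package sketch

import "github.com/karalabe/ssz"

// sizeSliceOfStaticObjects is NOT the library's code, just the shape its
// call sites imply: static items share one per-fork size, so a slice costs
// n * item-size in the dynamic area.
func sizeSliceOfStaticObjects[T interface{ SizeSSZ(*ssz.Sizer) uint32 }](sizer *ssz.Sizer, objs []T) uint32 {
	if len(objs) == 0 {
		return 0
	}
	return uint32(len(objs)) * objs[0].SizeSSZ(sizer)
}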
-func (obj *BeaconBlockBodyBellatrix) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCacheBeaconBlockBodyBellatrix) +func (obj *BeaconBlockBodyBellatrix) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconBlockBodyBellatrix) { + size = staticSizeCacheBeaconBlockBodyBellatrix[fork] + } else { + size = 96 + (*Eth1Data)(nil).SizeSSZ(sizer) + 32 + 4 + 4 + 4 + 4 + 4 + (*SyncAggregate)(nil).SizeSSZ(sizer) + 4 + } + // Either return the static size or accumulate the dynamic too if fixed { return size } - size += ssz.SizeSliceOfStaticObjects(obj.ProposerSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.AttesterSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.Attestations) - size += ssz.SizeSliceOfStaticObjects(obj.Deposits) - size += ssz.SizeSliceOfStaticObjects(obj.VoluntaryExits) - size += ssz.SizeDynamicObject(obj.ExecutionPayload) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.ProposerSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.AttesterSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.Attestations) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Deposits) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.VoluntaryExits) + size += ssz.SizeDynamicObject(sizer, obj.ExecutionPayload) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_capella_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_capella_ssz.go index 39ec490..237ec22 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_capella_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_capella_ssz.go @@ -5,22 +5,28 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheBeaconBlockBodyCapella = 96 + (*Eth1Data)(nil).SizeSSZ() + 32 + 4 + 4 + 4 + 4 + 4 + (*SyncAggregate)(nil).SizeSSZ() + 4 + 4 +var staticSizeCacheBeaconBlockBodyCapella = ssz.PrecomputeStaticSizeCache((*BeaconBlockBodyCapella)(nil)) // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. 
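All of these dynamic bodies follow the same two-mode contract: SizeSSZ(sizer, true) reports only the static region, which is what a parent container needs for its offset layout, while SizeSSZ(sizer, false) adds the dynamic tails on top. A minimal self-contained illustration of that contract (toy type, not library code):

package main

import "fmt"

// toyBody has one value field and one dynamic field, so its static region is
// the value plus a 4-byte offset, exactly like the generated bodies above.
type toyBody struct {
	payload []byte // stand-in for one dynamic field's encoding
}

func (t *toyBody) sizeSSZ(fixed bool) uint32 {
	size := uint32(8 + 4) // one uint64 field + one 4-byte offset
	if fixed {
		return size
	}
	size += uint32(len(t.payload))
	return size
}

func main() {
	b := &toyBody{payload: make([]byte, 100)}
	fmt.Println(b.sizeSSZ(true))  // 12  — contribution to a parent's fixed region
	fmt.Println(b.sizeSSZ(false)) // 112 — full encoding size
}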
-func (obj *BeaconBlockBodyCapella) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCacheBeaconBlockBodyCapella) +func (obj *BeaconBlockBodyCapella) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconBlockBodyCapella) { + size = staticSizeCacheBeaconBlockBodyCapella[fork] + } else { + size = 96 + (*Eth1Data)(nil).SizeSSZ(sizer) + 32 + 4 + 4 + 4 + 4 + 4 + (*SyncAggregate)(nil).SizeSSZ(sizer) + 4 + 4 + } + // Either return the static size or accumulate the dynamic too if fixed { return size } - size += ssz.SizeSliceOfStaticObjects(obj.ProposerSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.AttesterSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.Attestations) - size += ssz.SizeSliceOfStaticObjects(obj.Deposits) - size += ssz.SizeSliceOfStaticObjects(obj.VoluntaryExits) - size += ssz.SizeDynamicObject(obj.ExecutionPayload) - size += ssz.SizeSliceOfStaticObjects(obj.BlsToExecutionChanges) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.ProposerSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.AttesterSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.Attestations) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Deposits) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.VoluntaryExits) + size += ssz.SizeDynamicObject(sizer, obj.ExecutionPayload) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.BlsToExecutionChanges) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_deneb_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_deneb_ssz.go index 118aaf6..b42bdf8 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_deneb_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_deneb_ssz.go @@ -5,23 +5,29 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheBeaconBlockBodyDeneb = 96 + (*Eth1Data)(nil).SizeSSZ() + 32 + 4 + 4 + 4 + 4 + 4 + (*SyncAggregate)(nil).SizeSSZ() + 4 + 4 + 4 +var staticSizeCacheBeaconBlockBodyDeneb = ssz.PrecomputeStaticSizeCache((*BeaconBlockBodyDeneb)(nil)) // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. 
-func (obj *BeaconBlockBodyDeneb) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCacheBeaconBlockBodyDeneb) +func (obj *BeaconBlockBodyDeneb) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconBlockBodyDeneb) { + size = staticSizeCacheBeaconBlockBodyDeneb[fork] + } else { + size = 96 + (*Eth1Data)(nil).SizeSSZ(sizer) + 32 + 4 + 4 + 4 + 4 + 4 + (*SyncAggregate)(nil).SizeSSZ(sizer) + 4 + 4 + 4 + } + // Either return the static size or accumulate the dynamic too if fixed { return size } - size += ssz.SizeSliceOfStaticObjects(obj.ProposerSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.AttesterSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.Attestations) - size += ssz.SizeSliceOfStaticObjects(obj.Deposits) - size += ssz.SizeSliceOfStaticObjects(obj.VoluntaryExits) - size += ssz.SizeDynamicObject(obj.ExecutionPayload) - size += ssz.SizeSliceOfStaticObjects(obj.BlsToExecutionChanges) - size += ssz.SizeSliceOfStaticBytes(obj.BlobKzgCommitments) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.ProposerSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.AttesterSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.Attestations) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Deposits) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.VoluntaryExits) + size += ssz.SizeDynamicObject(sizer, obj.ExecutionPayload) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.BlsToExecutionChanges) + size += ssz.SizeSliceOfStaticBytes(sizer, obj.BlobKzgCommitments) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_monolith_ssz.go new file mode 100644 index 0000000..6c3af26 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_monolith_ssz.go @@ -0,0 +1,77 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheBeaconBlockBodyMonolith = ssz.PrecomputeStaticSizeCache((*BeaconBlockBodyMonolith)(nil)) + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. 
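The monolith fixture that follows collapses every fork's beacon block body into one Go struct: sizing branches on sizer.Fork(), and the codec definitions use the new OnFork helpers parameterised by an ssz.ForkFilter. Only the filter's Added field appears in this diff; a Removed counterpart is a plausible guess from the `sizer.Fork() < ssz.ForkAltair` branches in the monolith state type further down, so treat the sketch below as an assumed reconstruction rather than the library's definition:

package sketch

// Fork stands in for the library's fork enum.
type Fork uint32

// ForkFilter is an assumed shape: Added is visible in this diff, Removed is
// inferred from the fork-removal branches, not confirmed.
type ForkFilter struct {
	Added   Fork // first fork at which the field exists
	Removed Fork // first fork at which it is dropped again (zero: never)
}

// live captures when a filtered field participates in the encoding.
func live(fork Fork, filter ForkFilter) bool {
	return fork >= filter.Added && (filter.Removed == 0 || fork < filter.Removed)
}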
+func (obj *BeaconBlockBodyMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconBlockBodyMonolith) { + size = staticSizeCacheBeaconBlockBodyMonolith[fork] + } else { + size = 96 + (*Eth1Data)(nil).SizeSSZ(sizer) + 32 + 4 + 4 + 4 + 4 + 4 + if sizer.Fork() >= ssz.ForkAltair { + size += (*SyncAggregate)(nil).SizeSSZ(sizer) + } + if sizer.Fork() >= ssz.ForkBellatrix { + size += 4 + } + if sizer.Fork() >= ssz.ForkCapella { + size += 4 + } + if sizer.Fork() >= ssz.ForkDeneb { + size += 4 + } + } + // Either return the static size or accumulate the dynamic too + if fixed { + return size + } + size += ssz.SizeSliceOfStaticObjects(sizer, obj.ProposerSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.AttesterSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.Attestations) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Deposits) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.VoluntaryExits) + if sizer.Fork() >= ssz.ForkBellatrix { + size += ssz.SizeDynamicObject(sizer, obj.ExecutionPayload) + } + if sizer.Fork() >= ssz.ForkCapella { + size += ssz.SizeSliceOfStaticObjects(sizer, obj.BlsToExecutionChanges) + } + if sizer.Fork() >= ssz.ForkDeneb { + size += ssz.SizeSliceOfStaticBytes(sizer, obj.BlobKzgCommitments) + } + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *BeaconBlockBodyMonolith) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineStaticBytes(codec, &obj.RandaoReveal) // Field ( 0) - RandaoReveal - 96 bytes + ssz.DefineStaticObject(codec, &obj.Eth1Data) // Field ( 1) - Eth1Data - ? bytes (Eth1Data) + ssz.DefineStaticBytes(codec, &obj.Graffiti) // Field ( 2) - Graffiti - 32 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.ProposerSlashings, 16) // Offset ( 3) - ProposerSlashings - 4 bytes + ssz.DefineSliceOfDynamicObjectsOffset(codec, &obj.AttesterSlashings, 2) // Offset ( 4) - AttesterSlashings - 4 bytes + ssz.DefineSliceOfDynamicObjectsOffset(codec, &obj.Attestations, 128) // Offset ( 5) - Attestations - 4 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Deposits, 16) // Offset ( 6) - Deposits - 4 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.VoluntaryExits, 16) // Offset ( 7) - VoluntaryExits - 4 bytes + ssz.DefineStaticObjectOnFork(codec, &obj.SyncAggregate, ssz.ForkFilter{Added: ssz.ForkAltair}) // Field ( 8) - SyncAggregate - ? bytes (SyncAggregate) + ssz.DefineDynamicObjectOffsetOnFork(codec, &obj.ExecutionPayload, ssz.ForkFilter{Added: ssz.ForkBellatrix}) // Offset ( 9) - ExecutionPayload - 4 bytes + ssz.DefineSliceOfStaticObjectsOffsetOnFork(codec, &obj.BlsToExecutionChanges, 16, ssz.ForkFilter{Added: ssz.ForkCapella}) // Offset (10) - BlsToExecutionChanges - 4 bytes + ssz.DefineSliceOfStaticBytesOffsetOnFork(codec, &obj.BlobKzgCommitments, 4096, ssz.ForkFilter{Added: ssz.ForkDeneb}) // Offset (11) - BlobKzgCommitments - 4 bytes + + // Define the dynamic data (fields) + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.ProposerSlashings, 16) // Field ( 3) - ProposerSlashings - ? bytes + ssz.DefineSliceOfDynamicObjectsContent(codec, &obj.AttesterSlashings, 2) // Field ( 4) - AttesterSlashings - ? bytes + ssz.DefineSliceOfDynamicObjectsContent(codec, &obj.Attestations, 128) // Field ( 5) - Attestations - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Deposits, 16) // Field ( 6) - Deposits - ? 
bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.VoluntaryExits, 16) // Field ( 7) - VoluntaryExits - ? bytes + ssz.DefineDynamicObjectContentOnFork(codec, &obj.ExecutionPayload, ssz.ForkFilter{Added: ssz.ForkBellatrix}) // Field ( 9) - ExecutionPayload - ? bytes + ssz.DefineSliceOfStaticObjectsContentOnFork(codec, &obj.BlsToExecutionChanges, 16, ssz.ForkFilter{Added: ssz.ForkCapella}) // Field (10) - BlsToExecutionChanges - ? bytes + ssz.DefineSliceOfStaticBytesContentOnFork(codec, &obj.BlobKzgCommitments, 4096, ssz.ForkFilter{Added: ssz.ForkDeneb}) // Field (11) - BlobKzgCommitments - ? bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_ssz.go index 484c1ca..260d7cb 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_ssz.go @@ -5,20 +5,26 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheBeaconBlockBody = 96 + (*Eth1Data)(nil).SizeSSZ() + 32 + 4 + 4 + 4 + 4 + 4 +var staticSizeCacheBeaconBlockBody = ssz.PrecomputeStaticSizeCache((*BeaconBlockBody)(nil)) // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *BeaconBlockBody) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCacheBeaconBlockBody) +func (obj *BeaconBlockBody) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconBlockBody) { + size = staticSizeCacheBeaconBlockBody[fork] + } else { + size = 96 + (*Eth1Data)(nil).SizeSSZ(sizer) + 32 + 4 + 4 + 4 + 4 + 4 + } + // Either return the static size or accumulate the dynamic too if fixed { return size } - size += ssz.SizeSliceOfStaticObjects(obj.ProposerSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.AttesterSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.Attestations) - size += ssz.SizeSliceOfStaticObjects(obj.Deposits) - size += ssz.SizeSliceOfStaticObjects(obj.VoluntaryExits) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.ProposerSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.AttesterSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.Attestations) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Deposits) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.VoluntaryExits) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_header_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_header_ssz.go index dc9b0d7..6a0ee7f 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_header_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_header_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. 
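Even fully fork-agnostic static types change only their signature: BeaconBlockHeader below still returns the flat literal 8 + 8 + 32 + 32 + 32, it merely accepts a sizer it never reads. That uniformity suggests the object interfaces now look roughly like this; an assumed shape, since the interface definitions themselves are not part of this diff:

package sketch

import "github.com/karalabe/ssz"

// Assumed interface shapes implied by the generated methods in this diff.
type staticObject interface {
	SizeSSZ(sizer *ssz.Sizer) uint32
	DefineSSZ(codec *ssz.Codec)
}

type dynamicObject interface {
	SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32
	DefineSSZ(codec *ssz.Codec)
}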
-func (obj *BeaconBlockHeader) SizeSSZ() uint32 { +func (obj *BeaconBlockHeader) SizeSSZ(sizer *ssz.Sizer) uint32 { return 8 + 8 + 32 + 32 + 32 } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_ssz.go index e7bc25f..b09e670 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_ssz.go @@ -6,12 +6,12 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *BeaconBlock) SizeSSZ(fixed bool) uint32 { - var size = uint32(8 + 8 + 32 + 32 + 4) +func (obj *BeaconBlock) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 8 + 8 + 32 + 32 + 4 if fixed { return size } - size += ssz.SizeDynamicObject(obj.Body) + size += ssz.SizeDynamicObject(sizer, obj.Body) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_altair_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_altair_ssz.go new file mode 100644 index 0000000..032848d --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_altair_ssz.go @@ -0,0 +1,70 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheBeaconStateAltair = ssz.PrecomputeStaticSizeCache((*BeaconStateAltair)(nil)) + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. +func (obj *BeaconStateAltair) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconStateAltair) { + size = staticSizeCacheBeaconStateAltair[fork] + } else { + size = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ(sizer) + (*BeaconBlockHeader)(nil).SizeSSZ(sizer) + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ(sizer) + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + 4 + 4 + 1 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + 4 + (*SyncCommittee)(nil).SizeSSZ(sizer) + (*SyncCommittee)(nil).SizeSSZ(sizer) + } + // Either return the static size or accumulate the dynamic too + if fixed { + return size + } + size += ssz.SizeSliceOfStaticBytes(sizer, obj.HistoricalRoots) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Eth1DataVotes) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Validators) + size += ssz.SizeSliceOfUint64s(sizer, obj.Balances) + size += ssz.SizeDynamicBytes(sizer, obj.PreviousEpochParticipation) + size += ssz.SizeDynamicBytes(sizer, obj.CurrentEpochParticipation) + size += ssz.SizeSliceOfUint64s(sizer, obj.InactivityScores) + + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *BeaconStateAltair) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineUint64(codec, &obj.GenesisTime) // Field ( 0) - GenesisTime - 8 bytes + ssz.DefineCheckedStaticBytes(codec, &obj.GenesisValidatorsRoot, 32) // Field ( 1) - GenesisValidatorsRoot - 32 bytes + ssz.DefineUint64(codec, &obj.Slot) // Field ( 2) - Slot - 8 bytes + ssz.DefineStaticObject(codec, &obj.Fork) // Field ( 3) - Fork - ? bytes (Fork) + ssz.DefineStaticObject(codec, &obj.LatestBlockHeader) // Field ( 4) - LatestBlockHeader - ? 
bytes (BeaconBlockHeader) + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.BlockRoots[:]) // Field ( 5) - BlockRoots - 262144 bytes + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.StateRoots[:]) // Field ( 6) - StateRoots - 262144 bytes + ssz.DefineSliceOfStaticBytesOffset(codec, &obj.HistoricalRoots, 16777216) // Offset ( 7) - HistoricalRoots - 4 bytes + ssz.DefineStaticObject(codec, &obj.Eth1Data) // Field ( 8) - Eth1Data - ? bytes (Eth1Data) + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Eth1DataVotes, 2048) // Offset ( 9) - Eth1DataVotes - 4 bytes + ssz.DefineUint64(codec, &obj.Eth1DepositIndex) // Field (10) - Eth1DepositIndex - 8 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Validators, 1099511627776) // Offset (11) - Validators - 4 bytes + ssz.DefineSliceOfUint64sOffset(codec, &obj.Balances, 1099511627776) // Offset (12) - Balances - 4 bytes + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.RandaoMixes[:]) // Field (13) - RandaoMixes - 2097152 bytes + ssz.DefineArrayOfUint64s(codec, &obj.Slashings) // Field (14) - Slashings - 65536 bytes + ssz.DefineDynamicBytesOffset(codec, &obj.PreviousEpochParticipation, 1099511627776) // Offset (15) - PreviousEpochParticipation - 4 bytes + ssz.DefineDynamicBytesOffset(codec, &obj.CurrentEpochParticipation, 1099511627776) // Offset (16) - CurrentEpochParticipation - 4 bytes + ssz.DefineArrayOfBits(codec, &obj.JustificationBits, 4) // Field (17) - JustificationBits - 1 bytes + ssz.DefineStaticObject(codec, &obj.PreviousJustifiedCheckpoint) // Field (18) - PreviousJustifiedCheckpoint - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.CurrentJustifiedCheckpoint) // Field (19) - CurrentJustifiedCheckpoint - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.FinalizedCheckpoint) // Field (20) - FinalizedCheckpoint - ? bytes (Checkpoint) + ssz.DefineSliceOfUint64sOffset(codec, &obj.InactivityScores, 1099511627776) // Offset (21) - InactivityScores - 4 bytes + ssz.DefineStaticObject(codec, &obj.CurrentSyncCommittee) // Field (22) - CurrentSyncCommittee - ? bytes (SyncCommittee) + ssz.DefineStaticObject(codec, &obj.NextSyncCommittee) // Field (23) - NextSyncCommittee - ? bytes (SyncCommittee) + + // Define the dynamic data (fields) + ssz.DefineSliceOfStaticBytesContent(codec, &obj.HistoricalRoots, 16777216) // Field ( 7) - HistoricalRoots - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Eth1DataVotes, 2048) // Field ( 9) - Eth1DataVotes - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Validators, 1099511627776) // Field (11) - Validators - ? bytes + ssz.DefineSliceOfUint64sContent(codec, &obj.Balances, 1099511627776) // Field (12) - Balances - ? bytes + ssz.DefineDynamicBytesContent(codec, &obj.PreviousEpochParticipation, 1099511627776) // Field (15) - PreviousEpochParticipation - ? bytes + ssz.DefineDynamicBytesContent(codec, &obj.CurrentEpochParticipation, 1099511627776) // Field (16) - CurrentEpochParticipation - ? bytes + ssz.DefineSliceOfUint64sContent(codec, &obj.InactivityScores, 1099511627776) // Field (21) - InactivityScores - ? bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_bellatrix_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_bellatrix_ssz.go new file mode 100644 index 0000000..88cd65e --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_bellatrix_ssz.go @@ -0,0 +1,73 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. 
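BeaconStateAltair's DefineSSZ above makes the two-pass convention easy to see: the static pass registers value fields and 4-byte offsets in field order, then the dynamic pass registers each offset's content under the same capacity bound (16777216 for HistoricalRoots, 1099511627776 for the validator-indexed slices). A minimal hand-written container following the same convention; a sketch that assumes the non-OnFork helpers keep exactly the signatures shown in this diff:

package sketch

import "github.com/karalabe/ssz"

// toyState has one value field and one dynamic field, mirroring the
// offset/content pairing used by the generated code.
type toyState struct {
	Slot  uint64
	Roots [][32]byte
}

func (t *toyState) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) {
	size = 8 + 4 // Slot + the 4-byte offset of Roots
	if fixed {
		return size
	}
	size += ssz.SizeSliceOfStaticBytes(sizer, t.Roots)
	return size
}

func (t *toyState) DefineSSZ(codec *ssz.Codec) {
	// Static pass: value fields and dynamic-field offsets, in field order.
	ssz.DefineUint64(codec, &t.Slot)
	ssz.DefineSliceOfStaticBytesOffset(codec, &t.Roots, 1024)
	// Dynamic pass: contents, same order and bounds as their offsets.
	ssz.DefineSliceOfStaticBytesContent(codec, &t.Roots, 1024)
}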
+ +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheBeaconStateBellatrix = ssz.PrecomputeStaticSizeCache((*BeaconStateBellatrix)(nil)) + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. +func (obj *BeaconStateBellatrix) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconStateBellatrix) { + size = staticSizeCacheBeaconStateBellatrix[fork] + } else { + size = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ(sizer) + (*BeaconBlockHeader)(nil).SizeSSZ(sizer) + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ(sizer) + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + 4 + 4 + 1 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + 4 + (*SyncCommittee)(nil).SizeSSZ(sizer) + (*SyncCommittee)(nil).SizeSSZ(sizer) + 4 + } + // Either return the static size or accumulate the dynamic too + if fixed { + return size + } + size += ssz.SizeSliceOfStaticBytes(sizer, obj.HistoricalRoots) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Eth1DataVotes) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Validators) + size += ssz.SizeSliceOfUint64s(sizer, obj.Balances) + size += ssz.SizeDynamicBytes(sizer, obj.PreviousEpochParticipation) + size += ssz.SizeDynamicBytes(sizer, obj.CurrentEpochParticipation) + size += ssz.SizeSliceOfUint64s(sizer, obj.InactivityScores) + size += ssz.SizeDynamicObject(sizer, obj.LatestExecutionPayloadHeader) + + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *BeaconStateBellatrix) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineUint64(codec, &obj.GenesisTime) // Field ( 0) - GenesisTime - 8 bytes + ssz.DefineStaticBytes(codec, &obj.GenesisValidatorsRoot) // Field ( 1) - GenesisValidatorsRoot - 32 bytes + ssz.DefineUint64(codec, &obj.Slot) // Field ( 2) - Slot - 8 bytes + ssz.DefineStaticObject(codec, &obj.Fork) // Field ( 3) - Fork - ? bytes (Fork) + ssz.DefineStaticObject(codec, &obj.LatestBlockHeader) // Field ( 4) - LatestBlockHeader - ? bytes (BeaconBlockHeader) + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.BlockRoots[:]) // Field ( 5) - BlockRoots - 262144 bytes + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.StateRoots[:]) // Field ( 6) - StateRoots - 262144 bytes + ssz.DefineSliceOfStaticBytesOffset(codec, &obj.HistoricalRoots, 16777216) // Offset ( 7) - HistoricalRoots - 4 bytes + ssz.DefineStaticObject(codec, &obj.Eth1Data) // Field ( 8) - Eth1Data - ? 
bytes (Eth1Data) + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Eth1DataVotes, 2048) // Offset ( 9) - Eth1DataVotes - 4 bytes + ssz.DefineUint64(codec, &obj.Eth1DepositIndex) // Field (10) - Eth1DepositIndex - 8 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Validators, 1099511627776) // Offset (11) - Validators - 4 bytes + ssz.DefineSliceOfUint64sOffset(codec, &obj.Balances, 1099511627776) // Offset (12) - Balances - 4 bytes + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.RandaoMixes[:]) // Field (13) - RandaoMixes - 2097152 bytes + ssz.DefineArrayOfUint64s(codec, &obj.Slashings) // Field (14) - Slashings - 65536 bytes + ssz.DefineDynamicBytesOffset(codec, &obj.PreviousEpochParticipation, 1099511627776) // Offset (15) - PreviousEpochParticipation - 4 bytes + ssz.DefineDynamicBytesOffset(codec, &obj.CurrentEpochParticipation, 1099511627776) // Offset (16) - CurrentEpochParticipation - 4 bytes + ssz.DefineArrayOfBits(codec, &obj.JustificationBits, 4) // Field (17) - JustificationBits - 1 bytes + ssz.DefineStaticObject(codec, &obj.PreviousJustifiedCheckpoint) // Field (18) - PreviousJustifiedCheckpoint - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.CurrentJustifiedCheckpoint) // Field (19) - CurrentJustifiedCheckpoint - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.FinalizedCheckpoint) // Field (20) - FinalizedCheckpoint - ? bytes (Checkpoint) + ssz.DefineSliceOfUint64sOffset(codec, &obj.InactivityScores, 1099511627776) // Offset (21) - InactivityScores - 4 bytes + ssz.DefineStaticObject(codec, &obj.CurrentSyncCommittee) // Field (22) - CurrentSyncCommittee - ? bytes (SyncCommittee) + ssz.DefineStaticObject(codec, &obj.NextSyncCommittee) // Field (23) - NextSyncCommittee - ? bytes (SyncCommittee) + ssz.DefineDynamicObjectOffset(codec, &obj.LatestExecutionPayloadHeader) // Offset (24) - LatestExecutionPayloadHeader - 4 bytes + + // Define the dynamic data (fields) + ssz.DefineSliceOfStaticBytesContent(codec, &obj.HistoricalRoots, 16777216) // Field ( 7) - HistoricalRoots - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Eth1DataVotes, 2048) // Field ( 9) - Eth1DataVotes - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Validators, 1099511627776) // Field (11) - Validators - ? bytes + ssz.DefineSliceOfUint64sContent(codec, &obj.Balances, 1099511627776) // Field (12) - Balances - ? bytes + ssz.DefineDynamicBytesContent(codec, &obj.PreviousEpochParticipation, 1099511627776) // Field (15) - PreviousEpochParticipation - ? bytes + ssz.DefineDynamicBytesContent(codec, &obj.CurrentEpochParticipation, 1099511627776) // Field (16) - CurrentEpochParticipation - ? bytes + ssz.DefineSliceOfUint64sContent(codec, &obj.InactivityScores, 1099511627776) // Field (21) - InactivityScores - ? bytes + ssz.DefineDynamicObjectContent(codec, &obj.LatestExecutionPayloadHeader) // Field (24) - LatestExecutionPayloadHeader - ? bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_capella_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_capella_ssz.go index 68d19dc..b413fc5 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_state_capella_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_capella_ssz.go @@ -5,24 +5,30 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. 
-var staticSizeCacheBeaconStateCapella = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ() + (*BeaconBlockHeader)(nil).SizeSSZ() + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ() + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + 4 + 4 + 1 + (*Checkpoint)(nil).SizeSSZ() + (*Checkpoint)(nil).SizeSSZ() + (*Checkpoint)(nil).SizeSSZ() + 4 + (*SyncCommittee)(nil).SizeSSZ() + (*SyncCommittee)(nil).SizeSSZ() + 4 + 8 + 8 + 4 +var staticSizeCacheBeaconStateCapella = ssz.PrecomputeStaticSizeCache((*BeaconStateCapella)(nil)) // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *BeaconStateCapella) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCacheBeaconStateCapella) +func (obj *BeaconStateCapella) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconStateCapella) { + size = staticSizeCacheBeaconStateCapella[fork] + } else { + size = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ(sizer) + (*BeaconBlockHeader)(nil).SizeSSZ(sizer) + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ(sizer) + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + 4 + 4 + 1 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + 4 + (*SyncCommittee)(nil).SizeSSZ(sizer) + (*SyncCommittee)(nil).SizeSSZ(sizer) + 4 + 8 + 8 + 4 + } + // Either return the static size or accumulate the dynamic too if fixed { return size } - size += ssz.SizeSliceOfStaticBytes(obj.HistoricalRoots) - size += ssz.SizeSliceOfStaticObjects(obj.Eth1DataVotes) - size += ssz.SizeSliceOfStaticObjects(obj.Validators) - size += ssz.SizeSliceOfUint64s(obj.Balances) - size += ssz.SizeDynamicBytes(obj.PreviousEpochParticipation) - size += ssz.SizeDynamicBytes(obj.CurrentEpochParticipation) - size += ssz.SizeSliceOfUint64s(obj.InactivityScores) - size += ssz.SizeDynamicObject(obj.LatestExecutionPayloadHeader) - size += ssz.SizeSliceOfStaticObjects(obj.HistoricalSummaries) + size += ssz.SizeSliceOfStaticBytes(sizer, obj.HistoricalRoots) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Eth1DataVotes) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Validators) + size += ssz.SizeSliceOfUint64s(sizer, obj.Balances) + size += ssz.SizeDynamicBytes(sizer, obj.PreviousEpochParticipation) + size += ssz.SizeDynamicBytes(sizer, obj.CurrentEpochParticipation) + size += ssz.SizeSliceOfUint64s(sizer, obj.InactivityScores) + size += ssz.SizeDynamicObject(sizer, obj.LatestExecutionPayloadHeader) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.HistoricalSummaries) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_deneb.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_deneb.go deleted file mode 100644 index b57597e..0000000 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_state_deneb.go +++ /dev/null @@ -1,72 +0,0 @@ -// Code generated by github.com/karalabe/ssz. DO NOT EDIT. - -package consensus_spec_tests - -import "github.com/karalabe/ssz" - -// Cached static size computed on package init. 
-var staticSizeCacheBeaconStateDeneb = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ() + (*BeaconBlockHeader)(nil).SizeSSZ() + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ() + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + 4 + 4 + 1 + (*Checkpoint)(nil).SizeSSZ() + (*Checkpoint)(nil).SizeSSZ() + (*Checkpoint)(nil).SizeSSZ() + 4 + (*SyncCommittee)(nil).SizeSSZ() + (*SyncCommittee)(nil).SizeSSZ() + 4 + 8 + 8 + 4 - -// SizeSSZ returns either the static size of the object if fixed == true, or -// the total size otherwise. -func (obj *BeaconStateDeneb) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCacheBeaconStateDeneb) - if fixed { - return size - } - size += ssz.SizeSliceOfStaticBytes(obj.HistoricalRoots) - size += ssz.SizeSliceOfStaticObjects(obj.Eth1DataVotes) - size += ssz.SizeSliceOfStaticObjects(obj.Validators) - size += ssz.SizeSliceOfUint64s(obj.Balances) - size += ssz.SizeDynamicBytes(obj.PreviousEpochParticipation) - size += ssz.SizeDynamicBytes(obj.CurrentEpochParticipation) - size += ssz.SizeSliceOfUint64s(obj.InactivityScores) - size += ssz.SizeDynamicObject(obj.LatestExecutionPayloadHeader) - size += ssz.SizeSliceOfStaticObjects(obj.HistoricalSummaries) - - return size -} - -// DefineSSZ defines how an object is encoded/decoded. -func (obj *BeaconStateDeneb) DefineSSZ(codec *ssz.Codec) { - // Define the static data (fields and dynamic offsets) - ssz.DefineUint64(codec, &obj.GenesisTime) // Field ( 0) - GenesisTime - 8 bytes - ssz.DefineStaticBytes(codec, &obj.GenesisValidatorsRoot) // Field ( 1) - GenesisValidatorsRoot - 32 bytes - ssz.DefineUint64(codec, &obj.Slot) // Field ( 2) - Slot - 8 bytes - ssz.DefineStaticObject(codec, &obj.Fork) // Field ( 3) - Fork - ? bytes (Fork) - ssz.DefineStaticObject(codec, &obj.LatestBlockHeader) // Field ( 4) - LatestBlockHeader - ? bytes (BeaconBlockHeader) - ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.BlockRoots[:]) // Field ( 5) - BlockRoots - 262144 bytes - ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.StateRoots[:]) // Field ( 6) - StateRoots - 262144 bytes - ssz.DefineSliceOfStaticBytesOffset(codec, &obj.HistoricalRoots,16777216) // Offset ( 7) - HistoricalRoots - 4 bytes - ssz.DefineStaticObject(codec, &obj.Eth1Data) // Field ( 8) - Eth1Data - ? bytes (Eth1Data) - ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Eth1DataVotes,2048) // Offset ( 9) - Eth1DataVotes - 4 bytes - ssz.DefineUint64(codec, &obj.Eth1DepositIndex) // Field (10) - Eth1DepositIndex - 8 bytes - ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Validators,1099511627776) // Offset (11) - Validators - 4 bytes - ssz.DefineSliceOfUint64sOffset(codec, &obj.Balances,1099511627776) // Offset (12) - Balances - 4 bytes - ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.RandaoMixes[:]) // Field (13) - RandaoMixes - 2097152 bytes - ssz.DefineArrayOfUint64s(codec, &obj.Slashings) // Field (14) - Slashings - 65536 bytes - ssz.DefineDynamicBytesOffset(codec, &obj.PreviousEpochParticipation,1099511627776) // Offset (15) - PreviousEpochParticipation - 4 bytes - ssz.DefineDynamicBytesOffset(codec, &obj.CurrentEpochParticipation,1099511627776) // Offset (16) - CurrentEpochParticipation - 4 bytes - ssz.DefineArrayOfBits(codec, &obj.JustificationBits, 4) // Field (17) - JustificationBits - 1 bytes - ssz.DefineStaticObject(codec, &obj.PreviousJustifiedCheckpoint) // Field (18) - PreviousJustifiedCheckpoint - ? bytes (Checkpoint) - ssz.DefineStaticObject(codec, &obj.CurrentJustifiedCheckpoint) // Field (19) - CurrentJustifiedCheckpoint - ? 
bytes (Checkpoint) - ssz.DefineStaticObject(codec, &obj.FinalizedCheckpoint) // Field (20) - FinalizedCheckpoint - ? bytes (Checkpoint) - ssz.DefineSliceOfUint64sOffset(codec, &obj.InactivityScores,1099511627776) // Offset (21) - InactivityScores - 4 bytes - ssz.DefineStaticObject(codec, &obj.CurrentSyncCommittee) // Field (22) - CurrentSyncCommittee - ? bytes (SyncCommittee) - ssz.DefineStaticObject(codec, &obj.NextSyncCommittee) // Field (23) - NextSyncCommittee - ? bytes (SyncCommittee) - ssz.DefineDynamicObjectOffset(codec, &obj.LatestExecutionPayloadHeader) // Offset (24) - LatestExecutionPayloadHeader - 4 bytes - ssz.DefineUint64(codec, &obj.NextWithdrawalIndex) // Field (25) - NextWithdrawalIndex - 8 bytes - ssz.DefineUint64(codec, &obj.NextWithdrawalValidatorIndex) // Field (26) - NextWithdrawalValidatorIndex - 8 bytes - ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.HistoricalSummaries,16777216) // Offset (27) - HistoricalSummaries - 4 bytes - - // Define the dynamic data (fields) - ssz.DefineSliceOfStaticBytesContent(codec, &obj.HistoricalRoots, 16777216) // Field ( 7) - HistoricalRoots - ? bytes - ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Eth1DataVotes, 2048) // Field ( 9) - Eth1DataVotes - ? bytes - ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Validators, 1099511627776) // Field (11) - Validators - ? bytes - ssz.DefineSliceOfUint64sContent(codec, &obj.Balances, 1099511627776) // Field (12) - Balances - ? bytes - ssz.DefineDynamicBytesContent(codec, &obj.PreviousEpochParticipation, 1099511627776) // Field (15) - PreviousEpochParticipation - ? bytes - ssz.DefineDynamicBytesContent(codec, &obj.CurrentEpochParticipation, 1099511627776) // Field (16) - CurrentEpochParticipation - ? bytes - ssz.DefineSliceOfUint64sContent(codec, &obj.InactivityScores, 1099511627776) // Field (21) - InactivityScores - ? bytes - ssz.DefineDynamicObjectContent(codec, &obj.LatestExecutionPayloadHeader) // Field (24) - LatestExecutionPayloadHeader - ? bytes - ssz.DefineSliceOfStaticObjectsContent(codec, &obj.HistoricalSummaries, 16777216) // Field (27) - HistoricalSummaries - ? bytes -} \ No newline at end of file diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_deneb_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_deneb_ssz.go new file mode 100644 index 0000000..8896b7d --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_deneb_ssz.go @@ -0,0 +1,78 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheBeaconStateDeneb = ssz.PrecomputeStaticSizeCache((*BeaconStateDeneb)(nil)) + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. 
+func (obj *BeaconStateDeneb) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconStateDeneb) { + size = staticSizeCacheBeaconStateDeneb[fork] + } else { + size = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ(sizer) + (*BeaconBlockHeader)(nil).SizeSSZ(sizer) + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ(sizer) + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + 4 + 4 + 1 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + 4 + (*SyncCommittee)(nil).SizeSSZ(sizer) + (*SyncCommittee)(nil).SizeSSZ(sizer) + 4 + 8 + 8 + 4 + } + // Either return the static size or accumulate the dynamic too + if fixed { + return size + } + size += ssz.SizeSliceOfStaticBytes(sizer, obj.HistoricalRoots) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Eth1DataVotes) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Validators) + size += ssz.SizeSliceOfUint64s(sizer, obj.Balances) + size += ssz.SizeDynamicBytes(sizer, obj.PreviousEpochParticipation) + size += ssz.SizeDynamicBytes(sizer, obj.CurrentEpochParticipation) + size += ssz.SizeSliceOfUint64s(sizer, obj.InactivityScores) + size += ssz.SizeDynamicObject(sizer, obj.LatestExecutionPayloadHeader) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.HistoricalSummaries) + + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *BeaconStateDeneb) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineUint64(codec, &obj.GenesisTime) // Field ( 0) - GenesisTime - 8 bytes + ssz.DefineStaticBytes(codec, &obj.GenesisValidatorsRoot) // Field ( 1) - GenesisValidatorsRoot - 32 bytes + ssz.DefineUint64(codec, &obj.Slot) // Field ( 2) - Slot - 8 bytes + ssz.DefineStaticObject(codec, &obj.Fork) // Field ( 3) - Fork - ? bytes (Fork) + ssz.DefineStaticObject(codec, &obj.LatestBlockHeader) // Field ( 4) - LatestBlockHeader - ? bytes (BeaconBlockHeader) + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.BlockRoots[:]) // Field ( 5) - BlockRoots - 262144 bytes + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.StateRoots[:]) // Field ( 6) - StateRoots - 262144 bytes + ssz.DefineSliceOfStaticBytesOffset(codec, &obj.HistoricalRoots, 16777216) // Offset ( 7) - HistoricalRoots - 4 bytes + ssz.DefineStaticObject(codec, &obj.Eth1Data) // Field ( 8) - Eth1Data - ? 
bytes (Eth1Data) + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Eth1DataVotes, 2048) // Offset ( 9) - Eth1DataVotes - 4 bytes + ssz.DefineUint64(codec, &obj.Eth1DepositIndex) // Field (10) - Eth1DepositIndex - 8 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Validators, 1099511627776) // Offset (11) - Validators - 4 bytes + ssz.DefineSliceOfUint64sOffset(codec, &obj.Balances, 1099511627776) // Offset (12) - Balances - 4 bytes + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.RandaoMixes[:]) // Field (13) - RandaoMixes - 2097152 bytes + ssz.DefineArrayOfUint64s(codec, &obj.Slashings) // Field (14) - Slashings - 65536 bytes + ssz.DefineDynamicBytesOffset(codec, &obj.PreviousEpochParticipation, 1099511627776) // Offset (15) - PreviousEpochParticipation - 4 bytes + ssz.DefineDynamicBytesOffset(codec, &obj.CurrentEpochParticipation, 1099511627776) // Offset (16) - CurrentEpochParticipation - 4 bytes + ssz.DefineArrayOfBits(codec, &obj.JustificationBits, 4) // Field (17) - JustificationBits - 1 bytes + ssz.DefineStaticObject(codec, &obj.PreviousJustifiedCheckpoint) // Field (18) - PreviousJustifiedCheckpoint - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.CurrentJustifiedCheckpoint) // Field (19) - CurrentJustifiedCheckpoint - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.FinalizedCheckpoint) // Field (20) - FinalizedCheckpoint - ? bytes (Checkpoint) + ssz.DefineSliceOfUint64sOffset(codec, &obj.InactivityScores, 1099511627776) // Offset (21) - InactivityScores - 4 bytes + ssz.DefineStaticObject(codec, &obj.CurrentSyncCommittee) // Field (22) - CurrentSyncCommittee - ? bytes (SyncCommittee) + ssz.DefineStaticObject(codec, &obj.NextSyncCommittee) // Field (23) - NextSyncCommittee - ? bytes (SyncCommittee) + ssz.DefineDynamicObjectOffset(codec, &obj.LatestExecutionPayloadHeader) // Offset (24) - LatestExecutionPayloadHeader - 4 bytes + ssz.DefineUint64(codec, &obj.NextWithdrawalIndex) // Field (25) - NextWithdrawalIndex - 8 bytes + ssz.DefineUint64(codec, &obj.NextWithdrawalValidatorIndex) // Field (26) - NextWithdrawalValidatorIndex - 8 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.HistoricalSummaries, 16777216) // Offset (27) - HistoricalSummaries - 4 bytes + + // Define the dynamic data (fields) + ssz.DefineSliceOfStaticBytesContent(codec, &obj.HistoricalRoots, 16777216) // Field ( 7) - HistoricalRoots - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Eth1DataVotes, 2048) // Field ( 9) - Eth1DataVotes - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Validators, 1099511627776) // Field (11) - Validators - ? bytes + ssz.DefineSliceOfUint64sContent(codec, &obj.Balances, 1099511627776) // Field (12) - Balances - ? bytes + ssz.DefineDynamicBytesContent(codec, &obj.PreviousEpochParticipation, 1099511627776) // Field (15) - PreviousEpochParticipation - ? bytes + ssz.DefineDynamicBytesContent(codec, &obj.CurrentEpochParticipation, 1099511627776) // Field (16) - CurrentEpochParticipation - ? bytes + ssz.DefineSliceOfUint64sContent(codec, &obj.InactivityScores, 1099511627776) // Field (21) - InactivityScores - ? bytes + ssz.DefineDynamicObjectContent(codec, &obj.LatestExecutionPayloadHeader) // Field (24) - LatestExecutionPayloadHeader - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.HistoricalSummaries, 16777216) // Field (27) - HistoricalSummaries - ? 
bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go new file mode 100644 index 0000000..123143a --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go @@ -0,0 +1,110 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheBeaconStateMonolith = ssz.PrecomputeStaticSizeCache((*BeaconStateMonolith)(nil)) + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. +func (obj *BeaconStateMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconStateMonolith) { + size = staticSizeCacheBeaconStateMonolith[fork] + } else { + size = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ(sizer) + (*BeaconBlockHeader)(nil).SizeSSZ(sizer) + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ(sizer) + 4 + 8 + 4 + 4 + 65536*32 + if sizer.Fork() >= ssz.ForkUnknown { + size += 8192 * 8 + } + if sizer.Fork() < ssz.ForkAltair { + size += 4 + 4 + } + if sizer.Fork() >= ssz.ForkAltair { + size += 4 + 4 + } + size += 1 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + if sizer.Fork() >= ssz.ForkAltair { + size += 4 + (*SyncCommittee)(nil).SizeSSZ(sizer) + (*SyncCommittee)(nil).SizeSSZ(sizer) + } + if sizer.Fork() >= ssz.ForkBellatrix { + size += 4 + } + if sizer.Fork() >= ssz.ForkCapella { + size += 8 + 8 + 4 + } + } + // Either return the static size or accumulate the dynamic too + if fixed { + return size + } + size += ssz.SizeSliceOfStaticBytes(sizer, obj.HistoricalRoots) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Eth1DataVotes) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Validators) + size += ssz.SizeSliceOfUint64s(sizer, obj.Balances) + if sizer.Fork() < ssz.ForkAltair { + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.PreviousEpochAttestations) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.CurrentEpochAttestations) + } + if sizer.Fork() >= ssz.ForkAltair { + size += ssz.SizeDynamicBytes(sizer, obj.PreviousEpochParticipation) + size += ssz.SizeDynamicBytes(sizer, obj.CurrentEpochParticipation) + size += ssz.SizeSliceOfUint64s(sizer, obj.InactivityScores) + } + if sizer.Fork() >= ssz.ForkBellatrix { + size += ssz.SizeDynamicObject(sizer, obj.LatestExecutionPayloadHeader) + } + if sizer.Fork() >= ssz.ForkCapella { + size += ssz.SizeSliceOfStaticObjects(sizer, obj.HistoricalSummaries) + } + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *BeaconStateMonolith) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineUint64(codec, &obj.GenesisTime) // Field ( 0) - GenesisTime - 8 bytes + ssz.DefineStaticBytes(codec, &obj.GenesisValidatorsRoot) // Field ( 1) - GenesisValidatorsRoot - 32 bytes + ssz.DefineUint64(codec, &obj.Slot) // Field ( 2) - Slot - 8 bytes + ssz.DefineStaticObject(codec, &obj.Fork) // Field ( 3) - Fork - ? bytes (Fork) + ssz.DefineStaticObject(codec, &obj.LatestBlockHeader) // Field ( 4) - LatestBlockHeader - ? 
bytes (BeaconBlockHeader) + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.BlockRoots[:]) // Field ( 5) - BlockRoots - 262144 bytes + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.StateRoots[:]) // Field ( 6) - StateRoots - 262144 bytes + ssz.DefineSliceOfStaticBytesOffset(codec, &obj.HistoricalRoots, 16777216) // Offset ( 7) - HistoricalRoots - 4 bytes + ssz.DefineStaticObject(codec, &obj.Eth1Data) // Field ( 8) - Eth1Data - ? bytes (Eth1Data) + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Eth1DataVotes, 2048) // Offset ( 9) - Eth1DataVotes - 4 bytes + ssz.DefineUint64(codec, &obj.Eth1DepositIndex) // Field (10) - Eth1DepositIndex - 8 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Validators, 1099511627776) // Offset (11) - Validators - 4 bytes + ssz.DefineSliceOfUint64sOffset(codec, &obj.Balances, 1099511627776) // Offset (12) - Balances - 4 bytes + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.RandaoMixes[:]) // Field (13) - RandaoMixes - 2097152 bytes + ssz.DefineArrayOfUint64sPointerOnFork(codec, &obj.Slashings, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (14) - Slashings - 65536 bytes + ssz.DefineSliceOfDynamicObjectsOffsetOnFork(codec, &obj.PreviousEpochAttestations, 4096, ssz.ForkFilter{Removed: ssz.ForkAltair}) // Offset (15) - PreviousEpochAttestations - 4 bytes + ssz.DefineSliceOfDynamicObjectsOffsetOnFork(codec, &obj.CurrentEpochAttestations, 4096, ssz.ForkFilter{Removed: ssz.ForkAltair}) // Offset (16) - CurrentEpochAttestations - 4 bytes + ssz.DefineDynamicBytesOffsetOnFork(codec, &obj.PreviousEpochParticipation, 1099511627776, ssz.ForkFilter{Added: ssz.ForkAltair}) // Offset (17) - PreviousEpochParticipation - 4 bytes + ssz.DefineDynamicBytesOffsetOnFork(codec, &obj.CurrentEpochParticipation, 1099511627776, ssz.ForkFilter{Added: ssz.ForkAltair}) // Offset (18) - CurrentEpochParticipation - 4 bytes + ssz.DefineArrayOfBits(codec, &obj.JustificationBits, 4) // Field (19) - JustificationBits - 1 bytes + ssz.DefineStaticObject(codec, &obj.PreviousJustifiedCheckpoint) // Field (20) - PreviousJustifiedCheckpoint - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.CurrentJustifiedCheckpoint) // Field (21) - CurrentJustifiedCheckpoint - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.FinalizedCheckpoint) // Field (22) - FinalizedCheckpoint - ? bytes (Checkpoint) + ssz.DefineSliceOfUint64sOffsetOnFork(codec, &obj.InactivityScores, 1099511627776, ssz.ForkFilter{Added: ssz.ForkAltair}) // Offset (23) - InactivityScores - 4 bytes + ssz.DefineStaticObjectOnFork(codec, &obj.CurrentSyncCommittee, ssz.ForkFilter{Added: ssz.ForkAltair}) // Field (24) - CurrentSyncCommittee - ? bytes (SyncCommittee) + ssz.DefineStaticObjectOnFork(codec, &obj.NextSyncCommittee, ssz.ForkFilter{Added: ssz.ForkAltair}) // Field (25) - NextSyncCommittee - ? 
bytes (SyncCommittee) + ssz.DefineDynamicObjectOffsetOnFork(codec, &obj.LatestExecutionPayloadHeader, ssz.ForkFilter{Added: ssz.ForkBellatrix}) // Offset (26) - LatestExecutionPayloadHeader - 4 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.NextWithdrawalIndex, ssz.ForkFilter{Added: ssz.ForkCapella}) // Field (27) - NextWithdrawalIndex - 8 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.NextWithdrawalValidatorIndex, ssz.ForkFilter{Added: ssz.ForkCapella}) // Field (28) - NextWithdrawalValidatorIndex - 8 bytes + ssz.DefineSliceOfStaticObjectsOffsetOnFork(codec, &obj.HistoricalSummaries, 16777216, ssz.ForkFilter{Added: ssz.ForkCapella}) // Offset (29) - HistoricalSummaries - 4 bytes + + // Define the dynamic data (fields) + ssz.DefineSliceOfStaticBytesContent(codec, &obj.HistoricalRoots, 16777216) // Field ( 7) - HistoricalRoots - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Eth1DataVotes, 2048) // Field ( 9) - Eth1DataVotes - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Validators, 1099511627776) // Field (11) - Validators - ? bytes + ssz.DefineSliceOfUint64sContent(codec, &obj.Balances, 1099511627776) // Field (12) - Balances - ? bytes + ssz.DefineSliceOfDynamicObjectsContentOnFork(codec, &obj.PreviousEpochAttestations, 4096, ssz.ForkFilter{Removed: ssz.ForkAltair}) // Field (15) - PreviousEpochAttestations - ? bytes + ssz.DefineSliceOfDynamicObjectsContentOnFork(codec, &obj.CurrentEpochAttestations, 4096, ssz.ForkFilter{Removed: ssz.ForkAltair}) // Field (16) - CurrentEpochAttestations - ? bytes + ssz.DefineDynamicBytesContentOnFork(codec, &obj.PreviousEpochParticipation, 1099511627776, ssz.ForkFilter{Added: ssz.ForkAltair}) // Field (17) - PreviousEpochParticipation - ? bytes + ssz.DefineDynamicBytesContentOnFork(codec, &obj.CurrentEpochParticipation, 1099511627776, ssz.ForkFilter{Added: ssz.ForkAltair}) // Field (18) - CurrentEpochParticipation - ? bytes + ssz.DefineSliceOfUint64sContentOnFork(codec, &obj.InactivityScores, 1099511627776, ssz.ForkFilter{Added: ssz.ForkAltair}) // Field (23) - InactivityScores - ? bytes + ssz.DefineDynamicObjectContentOnFork(codec, &obj.LatestExecutionPayloadHeader, ssz.ForkFilter{Added: ssz.ForkBellatrix}) // Field (26) - LatestExecutionPayloadHeader - ? bytes + ssz.DefineSliceOfStaticObjectsContentOnFork(codec, &obj.HistoricalSummaries, 16777216, ssz.ForkFilter{Added: ssz.ForkCapella}) // Field (29) - HistoricalSummaries - ? bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_ssz.go index ff04879..054e131 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_state_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_ssz.go @@ -5,21 +5,27 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheBeaconState = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ() + (*BeaconBlockHeader)(nil).SizeSSZ() + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ() + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + 4 + 4 + 1 + (*Checkpoint)(nil).SizeSSZ() + (*Checkpoint)(nil).SizeSSZ() + (*Checkpoint)(nil).SizeSSZ() +var staticSizeCacheBeaconState = ssz.PrecomputeStaticSizeCache((*BeaconState)(nil)) // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. 
-func (obj *BeaconState) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCacheBeaconState) +func (obj *BeaconState) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconState) { + size = staticSizeCacheBeaconState[fork] + } else { + size = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ(sizer) + (*BeaconBlockHeader)(nil).SizeSSZ(sizer) + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ(sizer) + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + 4 + 4 + 1 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + } + // Either return the static size or accumulate the dynamic too if fixed { return size } - size += ssz.SizeSliceOfStaticBytes(obj.HistoricalRoots) - size += ssz.SizeSliceOfStaticObjects(obj.Eth1DataVotes) - size += ssz.SizeSliceOfStaticObjects(obj.Validators) - size += ssz.SizeSliceOfUint64s(obj.Balances) - size += ssz.SizeSliceOfDynamicObjects(obj.PreviousEpochAttestations) - size += ssz.SizeSliceOfDynamicObjects(obj.CurrentEpochAttestations) + size += ssz.SizeSliceOfStaticBytes(sizer, obj.HistoricalRoots) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Eth1DataVotes) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Validators) + size += ssz.SizeSliceOfUint64s(sizer, obj.Balances) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.PreviousEpochAttestations) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.CurrentEpochAttestations) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_bits_struct_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_bits_struct_monolith_ssz.go new file mode 100644 index 0000000..44e6dfe --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_bits_struct_monolith_ssz.go @@ -0,0 +1,37 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. +func (obj *BitsStructMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + if sizer.Fork() >= ssz.ForkUnknown { + size += 4 + 1 + } + size += 1 + 4 + 1 + if fixed { + return size + } + if sizer.Fork() >= ssz.ForkUnknown { + size += ssz.SizeSliceOfBits(sizer, obj.A) + } + size += ssz.SizeSliceOfBits(sizer, obj.D) + + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *BitsStructMonolith) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineSliceOfBitsOffsetOnFork(codec, &obj.A, 5, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Offset (0) - A - 4 bytes + ssz.DefineArrayOfBitsPointerOnFork(codec, &obj.B, 2, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (1) - B - 1 bytes + ssz.DefineArrayOfBits(codec, &obj.C, 1) // Field (2) - C - 1 bytes + ssz.DefineSliceOfBitsOffset(codec, &obj.D, 6) // Offset (3) - D - 4 bytes + ssz.DefineArrayOfBits(codec, &obj.E, 8) // Field (4) - E - 1 bytes + + // Define the dynamic data (fields) + ssz.DefineSliceOfBitsContentOnFork(codec, &obj.A, 5, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (0) - A - ? bytes + ssz.DefineSliceOfBitsContent(codec, &obj.D, 6) // Field (3) - D - ? 
bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_bits_struct_ssz.go b/tests/testtypes/consensus-spec-tests/gen_bits_struct_ssz.go index 1115077..884417d 100644 --- a/tests/testtypes/consensus-spec-tests/gen_bits_struct_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_bits_struct_ssz.go @@ -6,13 +6,13 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *BitsStruct) SizeSSZ(fixed bool) uint32 { - var size = uint32(4 + 1 + 1 + 4 + 1) +func (obj *BitsStruct) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 4 + 1 + 1 + 4 + 1 if fixed { return size } - size += ssz.SizeSliceOfBits(obj.A) - size += ssz.SizeSliceOfBits(obj.D) + size += ssz.SizeSliceOfBits(sizer, obj.A) + size += ssz.SizeSliceOfBits(sizer, obj.D) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_bls_to_execution_change_ssz.go b/tests/testtypes/consensus-spec-tests/gen_bls_to_execution_change_ssz.go index ba72149..184fc13 100644 --- a/tests/testtypes/consensus-spec-tests/gen_bls_to_execution_change_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_bls_to_execution_change_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *BLSToExecutionChange) SizeSSZ() uint32 { +func (obj *BLSToExecutionChange) SizeSSZ(sizer *ssz.Sizer) uint32 { return 8 + 48 + 20 } diff --git a/tests/testtypes/consensus-spec-tests/gen_checkpoint_ssz.go b/tests/testtypes/consensus-spec-tests/gen_checkpoint_ssz.go index 676dbe8..2da5dc7 100644 --- a/tests/testtypes/consensus-spec-tests/gen_checkpoint_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_checkpoint_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *Checkpoint) SizeSSZ() uint32 { +func (obj *Checkpoint) SizeSSZ(sizer *ssz.Sizer) uint32 { return 8 + 32 } diff --git a/tests/testtypes/consensus-spec-tests/gen_deposit_data_ssz.go b/tests/testtypes/consensus-spec-tests/gen_deposit_data_ssz.go index d2e22c9..58360c6 100644 --- a/tests/testtypes/consensus-spec-tests/gen_deposit_data_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_deposit_data_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *DepositData) SizeSSZ() uint32 { +func (obj *DepositData) SizeSSZ(sizer *ssz.Sizer) uint32 { return 48 + 32 + 8 + 96 } diff --git a/tests/testtypes/consensus-spec-tests/gen_deposit_message_ssz.go b/tests/testtypes/consensus-spec-tests/gen_deposit_message_ssz.go index ee36e66..879400a 100644 --- a/tests/testtypes/consensus-spec-tests/gen_deposit_message_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_deposit_message_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. 
-func (obj *DepositMessage) SizeSSZ() uint32 { +func (obj *DepositMessage) SizeSSZ(sizer *ssz.Sizer) uint32 { return 48 + 32 + 8 } diff --git a/tests/testtypes/consensus-spec-tests/gen_deposit_ssz.go b/tests/testtypes/consensus-spec-tests/gen_deposit_ssz.go index ad1d662..158c192 100644 --- a/tests/testtypes/consensus-spec-tests/gen_deposit_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_deposit_ssz.go @@ -5,11 +5,15 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheDeposit = 33*32 + (*DepositData)(nil).SizeSSZ() +var staticSizeCacheDeposit = ssz.PrecomputeStaticSizeCache((*Deposit)(nil)) // SizeSSZ returns the total size of the static ssz object. -func (obj *Deposit) SizeSSZ() uint32 { - return staticSizeCacheDeposit +func (obj *Deposit) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if fork := int(sizer.Fork()); fork < len(staticSizeCacheDeposit) { + return staticSizeCacheDeposit[fork] + } + size = 33*32 + (*DepositData)(nil).SizeSSZ(sizer) + return size } // DefineSSZ defines how an object is encoded/decoded. diff --git a/tests/testtypes/consensus-spec-tests/gen_eth1_block_ssz.go b/tests/testtypes/consensus-spec-tests/gen_eth1_block_ssz.go index bee1ebf..e09d4f1 100644 --- a/tests/testtypes/consensus-spec-tests/gen_eth1_block_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_eth1_block_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *Eth1Block) SizeSSZ() uint32 { +func (obj *Eth1Block) SizeSSZ(sizer *ssz.Sizer) uint32 { return 8 + 32 + 8 } diff --git a/tests/testtypes/consensus-spec-tests/gen_eth1_data_ssz.go b/tests/testtypes/consensus-spec-tests/gen_eth1_data_ssz.go index 806049d..1e3be89 100644 --- a/tests/testtypes/consensus-spec-tests/gen_eth1_data_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_eth1_data_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *Eth1Data) SizeSSZ() uint32 { +func (obj *Eth1Data) SizeSSZ(sizer *ssz.Sizer) uint32 { return 32 + 8 + 32 } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_capella_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_capella_ssz.go index 59a0a58..eef108b 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_capella_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_capella_ssz.go @@ -6,14 +6,14 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. 
-func (obj *ExecutionPayloadCapella) SizeSSZ(fixed bool) uint32 { - var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 + 4) +func (obj *ExecutionPayloadCapella) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 + 4 if fixed { return size } - size += ssz.SizeDynamicBytes(obj.ExtraData) - size += ssz.SizeSliceOfDynamicBytes(obj.Transactions) - size += ssz.SizeSliceOfStaticObjects(obj.Withdrawals) + size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) + size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Withdrawals) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_deneb_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_deneb_ssz.go index 37ff652..f9cc135 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_deneb_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_deneb_ssz.go @@ -6,14 +6,14 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *ExecutionPayloadDeneb) SizeSSZ(fixed bool) uint32 { - var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 + 4 + 8 + 8) +func (obj *ExecutionPayloadDeneb) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 + 4 + 8 + 8 if fixed { return size } - size += ssz.SizeDynamicBytes(obj.ExtraData) - size += ssz.SizeSliceOfDynamicBytes(obj.Transactions) - size += ssz.SizeSliceOfStaticObjects(obj.Withdrawals) + size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) + size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Withdrawals) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_capella_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_capella_ssz.go index 377bc6c..ce48a20 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_capella_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_capella_ssz.go @@ -6,12 +6,12 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *ExecutionPayloadHeaderCapella) SizeSSZ(fixed bool) uint32 { - var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32 + 32) +func (obj *ExecutionPayloadHeaderCapella) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32 + 32 if fixed { return size } - size += ssz.SizeDynamicBytes(obj.ExtraData) + size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_deneb_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_deneb_ssz.go index 9c7c824..a787aed 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_deneb_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_deneb_ssz.go @@ -6,12 +6,12 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. 
-func (obj *ExecutionPayloadHeaderDeneb) SizeSSZ(fixed bool) uint32 { - var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32 + 32 + 8 + 8) +func (obj *ExecutionPayloadHeaderDeneb) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32 + 32 + 8 + 8 if fixed { return size } - size += ssz.SizeDynamicBytes(obj.ExtraData) + size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go new file mode 100644 index 0000000..579de25 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go @@ -0,0 +1,53 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. +func (obj *ExecutionPayloadHeaderMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + if sizer.Fork() >= ssz.ForkFrontier { + size += 4 + } + size += 32 + 32 + 32 + if sizer.Fork() >= ssz.ForkShanghai { + size += 32 + } + if sizer.Fork() >= ssz.ForkCancun { + size += 8 + 8 + } + if fixed { + return size + } + if sizer.Fork() >= ssz.ForkFrontier { + size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) + } + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *ExecutionPayloadHeaderMonolith) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineStaticBytes(codec, &obj.ParentHash) // Field ( 0) - ParentHash - 32 bytes + ssz.DefineStaticBytes(codec, &obj.FeeRecipient) // Field ( 1) - FeeRecipient - 20 bytes + ssz.DefineStaticBytes(codec, &obj.StateRoot) // Field ( 2) - StateRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.ReceiptsRoot) // Field ( 3) - ReceiptsRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.LogsBloom) // Field ( 4) - LogsBloom - 256 bytes + ssz.DefineStaticBytes(codec, &obj.PrevRandao) // Field ( 5) - PrevRandao - 32 bytes + ssz.DefineUint64(codec, &obj.BlockNumber) // Field ( 6) - BlockNumber - 8 bytes + ssz.DefineUint64(codec, &obj.GasLimit) // Field ( 7) - GasLimit - 8 bytes + ssz.DefineUint64(codec, &obj.GasUsed) // Field ( 8) - GasUsed - 8 bytes + ssz.DefineUint64(codec, &obj.Timestamp) // Field ( 9) - Timestamp - 8 bytes + ssz.DefineDynamicBytesOffsetOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkFrontier}) // Offset (10) - ExtraData - 4 bytes + ssz.DefineStaticBytes(codec, &obj.BaseFeePerGas) // Field (11) - BaseFeePerGas - 32 bytes + ssz.DefineStaticBytes(codec, &obj.BlockHash) // Field (12) - BlockHash - 32 bytes + ssz.DefineStaticBytes(codec, &obj.TransactionsRoot) // Field (13) - TransactionsRoot - 32 bytes + ssz.DefineStaticBytesPointerOnFork(codec, &obj.WithdrawalRoot, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Field (14) - WithdrawalRoot - 32 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.BlobGasUsed, ssz.ForkFilter{Added: ssz.ForkCancun}) // Field (15) - BlobGasUsed - 8 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.ExcessBlobGas, ssz.ForkFilter{Added: ssz.ForkCancun}) // Field (16) - ExcessBlobGas - 8 bytes + + // Define the dynamic data (fields) + ssz.DefineDynamicBytesContentOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: 
ssz.ForkFrontier}) // Field (10) - ExtraData - ? bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_ssz.go index 777c73b..2e571e8 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_ssz.go @@ -6,12 +6,12 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *ExecutionPayloadHeader) SizeSSZ(fixed bool) uint32 { - var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32) +func (obj *ExecutionPayloadHeader) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32 if fixed { return size } - size += ssz.SizeDynamicBytes(obj.ExtraData) + size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_2_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_2_ssz.go new file mode 100644 index 0000000..8ce32e1 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_2_ssz.go @@ -0,0 +1,62 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. +func (obj *ExecutionPayloadMonolith2) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + if sizer.Fork() >= ssz.ForkFrontier { + size += 4 + } + if sizer.Fork() >= ssz.ForkUnknown { + size += 32 + } + size += 32 + 4 + if sizer.Fork() >= ssz.ForkShanghai { + size += 4 + } + if sizer.Fork() >= ssz.ForkCancun { + size += 8 + 8 + } + if fixed { + return size + } + if sizer.Fork() >= ssz.ForkFrontier { + size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) + } + size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions) + if sizer.Fork() >= ssz.ForkShanghai { + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Withdrawals) + } + return size +} + +// DefineSSZ defines how an object is encoded/decoded. 
+func (obj *ExecutionPayloadMonolith2) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineStaticBytes(codec, &obj.ParentHash) // Field ( 0) - ParentHash - 32 bytes + ssz.DefineStaticBytes(codec, &obj.FeeRecipient) // Field ( 1) - FeeRecipient - 20 bytes + ssz.DefineStaticBytes(codec, &obj.StateRoot) // Field ( 2) - StateRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.ReceiptsRoot) // Field ( 3) - ReceiptsRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.LogsBloom) // Field ( 4) - LogsBloom - 256 bytes + ssz.DefineStaticBytes(codec, &obj.PrevRandao) // Field ( 5) - PrevRandao - 32 bytes + ssz.DefineUint64(codec, &obj.BlockNumber) // Field ( 6) - BlockNumber - 8 bytes + ssz.DefineUint64(codec, &obj.GasLimit) // Field ( 7) - GasLimit - 8 bytes + ssz.DefineUint64(codec, &obj.GasUsed) // Field ( 8) - GasUsed - 8 bytes + ssz.DefineUint64(codec, &obj.Timestamp) // Field ( 9) - Timestamp - 8 bytes + ssz.DefineDynamicBytesOffsetOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkFrontier}) // Offset (10) - ExtraData - 4 bytes + ssz.DefineUint256BigIntOnFork(codec, &obj.BaseFeePerGas, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (11) - BaseFeePerGas - 32 bytes + ssz.DefineStaticBytes(codec, &obj.BlockHash) // Field (12) - BlockHash - 32 bytes + ssz.DefineSliceOfDynamicBytesOffset(codec, &obj.Transactions, 1048576, 1073741824) // Offset (13) - Transactions - 4 bytes + ssz.DefineSliceOfStaticObjectsOffsetOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Offset (14) - Withdrawals - 4 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.BlobGasUsed, ssz.ForkFilter{Added: ssz.ForkCancun}) // Field (15) - BlobGasUsed - 8 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.ExcessBlobGas, ssz.ForkFilter{Added: ssz.ForkCancun}) // Field (16) - ExcessBlobGas - 8 bytes + + // Define the dynamic data (fields) + ssz.DefineDynamicBytesContentOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkFrontier}) // Field (10) - ExtraData - ? bytes + ssz.DefineSliceOfDynamicBytesContent(codec, &obj.Transactions, 1048576, 1073741824) // Field (13) - Transactions - ? bytes + ssz.DefineSliceOfStaticObjectsContentOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Field (14) - Withdrawals - ? bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go new file mode 100644 index 0000000..31fde12 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go @@ -0,0 +1,67 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. 
+func (obj *ExecutionPayloadMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + if sizer.Fork() >= ssz.ForkFrontier { + size += 4 + } + if sizer.Fork() >= ssz.ForkUnknown { + size += 32 + } + size += 32 + if sizer.Fork() >= ssz.ForkUnknown { + size += 4 + } + if sizer.Fork() >= ssz.ForkShanghai { + size += 4 + } + if sizer.Fork() >= ssz.ForkCancun { + size += 8 + 8 + } + if fixed { + return size + } + if sizer.Fork() >= ssz.ForkFrontier { + size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) + } + if sizer.Fork() >= ssz.ForkUnknown { + size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions) + } + if sizer.Fork() >= ssz.ForkShanghai { + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Withdrawals) + } + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *ExecutionPayloadMonolith) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineStaticBytes(codec, &obj.ParentHash) // Field ( 0) - ParentHash - 32 bytes + ssz.DefineStaticBytes(codec, &obj.FeeRecipient) // Field ( 1) - FeeRecipient - 20 bytes + ssz.DefineStaticBytes(codec, &obj.StateRoot) // Field ( 2) - StateRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.ReceiptsRoot) // Field ( 3) - ReceiptsRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.LogsBloom) // Field ( 4) - LogsBloom - 256 bytes + ssz.DefineStaticBytes(codec, &obj.PrevRandao) // Field ( 5) - PrevRandao - 32 bytes + ssz.DefineUint64(codec, &obj.BlockNumber) // Field ( 6) - BlockNumber - 8 bytes + ssz.DefineUint64(codec, &obj.GasLimit) // Field ( 7) - GasLimit - 8 bytes + ssz.DefineUint64(codec, &obj.GasUsed) // Field ( 8) - GasUsed - 8 bytes + ssz.DefineUint64(codec, &obj.Timestamp) // Field ( 9) - Timestamp - 8 bytes + ssz.DefineDynamicBytesOffsetOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkFrontier}) // Offset (10) - ExtraData - 4 bytes + ssz.DefineUint256OnFork(codec, &obj.BaseFeePerGas, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (11) - BaseFeePerGas - 32 bytes + ssz.DefineStaticBytes(codec, &obj.BlockHash) // Field (12) - BlockHash - 32 bytes + ssz.DefineSliceOfDynamicBytesOffsetOnFork(codec, &obj.Transactions, 1048576, 1073741824, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Offset (13) - Transactions - 4 bytes + ssz.DefineSliceOfStaticObjectsOffsetOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Offset (14) - Withdrawals - 4 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.BlobGasUsed, ssz.ForkFilter{Added: ssz.ForkCancun}) // Field (15) - BlobGasUsed - 8 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.ExcessBlobGas, ssz.ForkFilter{Added: ssz.ForkCancun}) // Field (16) - ExcessBlobGas - 8 bytes + + // Define the dynamic data (fields) + ssz.DefineDynamicBytesContentOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkFrontier}) // Field (10) - ExtraData - ? bytes + ssz.DefineSliceOfDynamicBytesContentOnFork(codec, &obj.Transactions, 1048576, 1073741824, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (13) - Transactions - ? bytes + ssz.DefineSliceOfStaticObjectsContentOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Field (14) - Withdrawals - ? 
bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_ssz.go index ae3dfa9..b1e1f6a 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_ssz.go @@ -6,13 +6,13 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *ExecutionPayload) SizeSSZ(fixed bool) uint32 { - var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4) +func (obj *ExecutionPayload) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 if fixed { return size } - size += ssz.SizeDynamicBytes(obj.ExtraData) - size += ssz.SizeSliceOfDynamicBytes(obj.Transactions) + size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) + size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_variation_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_variation_ssz.go index ef6c78f..147e237 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_variation_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_variation_ssz.go @@ -6,13 +6,13 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *ExecutionPayloadVariation) SizeSSZ(fixed bool) uint32 { - var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4) +func (obj *ExecutionPayloadVariation) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 if fixed { return size } - size += ssz.SizeDynamicBytes(obj.ExtraData) - size += ssz.SizeSliceOfDynamicBytes(obj.Transactions) + size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) + size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_fixed_test_struct_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_fixed_test_struct_monolith_ssz.go new file mode 100644 index 0000000..9451427 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_fixed_test_struct_monolith_ssz.go @@ -0,0 +1,20 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// SizeSSZ returns the total size of the static ssz object. +func (obj *FixedTestStructMonolith) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if sizer.Fork() >= ssz.ForkUnknown { + size += 1 + 8 + 4 + } + return size +} + +// DefineSSZ defines how an object is encoded/decoded. 
+func (obj *FixedTestStructMonolith) DefineSSZ(codec *ssz.Codec) { + ssz.DefineUint8PointerOnFork(codec, &obj.A, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (0) - A - 1 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.B, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (1) - B - 8 bytes + ssz.DefineUint32PointerOnFork(codec, &obj.C, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (2) - C - 4 bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_fixed_test_struct_ssz.go b/tests/testtypes/consensus-spec-tests/gen_fixed_test_struct_ssz.go index cfebc25..c9c5f25 100644 --- a/tests/testtypes/consensus-spec-tests/gen_fixed_test_struct_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_fixed_test_struct_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *FixedTestStruct) SizeSSZ() uint32 { +func (obj *FixedTestStruct) SizeSSZ(sizer *ssz.Sizer) uint32 { return 1 + 8 + 4 } diff --git a/tests/testtypes/consensus-spec-tests/gen_fork_ssz.go b/tests/testtypes/consensus-spec-tests/gen_fork_ssz.go index 28623d5..cf87868 100644 --- a/tests/testtypes/consensus-spec-tests/gen_fork_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_fork_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *Fork) SizeSSZ() uint32 { +func (obj *Fork) SizeSSZ(sizer *ssz.Sizer) uint32 { return 4 + 4 + 8 } diff --git a/tests/testtypes/consensus-spec-tests/gen_historical_batch_ssz.go b/tests/testtypes/consensus-spec-tests/gen_historical_batch_ssz.go index 9ce8ed2..a2541f3 100644 --- a/tests/testtypes/consensus-spec-tests/gen_historical_batch_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_historical_batch_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *HistoricalBatch) SizeSSZ() uint32 { +func (obj *HistoricalBatch) SizeSSZ(sizer *ssz.Sizer) uint32 { return 8192*32 + 8192*32 } diff --git a/tests/testtypes/consensus-spec-tests/gen_historical_batch_variation_ssz.go b/tests/testtypes/consensus-spec-tests/gen_historical_batch_variation_ssz.go index 8abb8ce..93a2c47 100644 --- a/tests/testtypes/consensus-spec-tests/gen_historical_batch_variation_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_historical_batch_variation_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *HistoricalBatchVariation) SizeSSZ() uint32 { +func (obj *HistoricalBatchVariation) SizeSSZ(sizer *ssz.Sizer) uint32 { return 8192*32 + 8192*32 } diff --git a/tests/testtypes/consensus-spec-tests/gen_historical_summary_ssz.go b/tests/testtypes/consensus-spec-tests/gen_historical_summary_ssz.go index 22372b0..741dc0b 100644 --- a/tests/testtypes/consensus-spec-tests/gen_historical_summary_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_historical_summary_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. 
-func (obj *HistoricalSummary) SizeSSZ() uint32 { +func (obj *HistoricalSummary) SizeSSZ(sizer *ssz.Sizer) uint32 { return 32 + 32 } diff --git a/tests/testtypes/consensus-spec-tests/gen_indexed_attestation_ssz.go b/tests/testtypes/consensus-spec-tests/gen_indexed_attestation_ssz.go index c595974..ff47f22 100644 --- a/tests/testtypes/consensus-spec-tests/gen_indexed_attestation_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_indexed_attestation_ssz.go @@ -5,16 +5,22 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheIndexedAttestation = 4 + (*AttestationData)(nil).SizeSSZ() + 96 +var staticSizeCacheIndexedAttestation = ssz.PrecomputeStaticSizeCache((*IndexedAttestation)(nil)) // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *IndexedAttestation) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCacheIndexedAttestation) +func (obj *IndexedAttestation) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheIndexedAttestation) { + size = staticSizeCacheIndexedAttestation[fork] + } else { + size = 4 + (*AttestationData)(nil).SizeSSZ(sizer) + 96 + } + // Either return the static size or accumulate the dynamic too if fixed { return size } - size += ssz.SizeSliceOfUint64s(obj.AttestationIndices) + size += ssz.SizeSliceOfUint64s(sizer, obj.AttestationIndices) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_pending_attestation_ssz.go b/tests/testtypes/consensus-spec-tests/gen_pending_attestation_ssz.go index 11db1b8..c805ff3 100644 --- a/tests/testtypes/consensus-spec-tests/gen_pending_attestation_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_pending_attestation_ssz.go @@ -5,16 +5,22 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCachePendingAttestation = 4 + (*AttestationData)(nil).SizeSSZ() + 8 + 8 +var staticSizeCachePendingAttestation = ssz.PrecomputeStaticSizeCache((*PendingAttestation)(nil)) // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *PendingAttestation) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCachePendingAttestation) +func (obj *PendingAttestation) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCachePendingAttestation) { + size = staticSizeCachePendingAttestation[fork] + } else { + size = 4 + (*AttestationData)(nil).SizeSSZ(sizer) + 8 + 8 + } + // Either return the static size or accumulate the dynamic too if fixed { return size } - size += ssz.SizeSliceOfBits(obj.AggregationBits) + size += ssz.SizeSliceOfBits(sizer, obj.AggregationBits) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_proposer_slashing_ssz.go b/tests/testtypes/consensus-spec-tests/gen_proposer_slashing_ssz.go index 4a95b9a..4cbd415 100644 --- a/tests/testtypes/consensus-spec-tests/gen_proposer_slashing_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_proposer_slashing_ssz.go @@ -5,11 +5,15 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. 
-var staticSizeCacheProposerSlashing = (*SignedBeaconBlockHeader)(nil).SizeSSZ() + (*SignedBeaconBlockHeader)(nil).SizeSSZ() +var staticSizeCacheProposerSlashing = ssz.PrecomputeStaticSizeCache((*ProposerSlashing)(nil)) // SizeSSZ returns the total size of the static ssz object. -func (obj *ProposerSlashing) SizeSSZ() uint32 { - return staticSizeCacheProposerSlashing +func (obj *ProposerSlashing) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if fork := int(sizer.Fork()); fork < len(staticSizeCacheProposerSlashing) { + return staticSizeCacheProposerSlashing[fork] + } + size = (*SignedBeaconBlockHeader)(nil).SizeSSZ(sizer) + (*SignedBeaconBlockHeader)(nil).SizeSSZ(sizer) + return size } // DefineSSZ defines how an object is encoded/decoded. diff --git a/tests/testtypes/consensus-spec-tests/gen_signed_beacon_block_header_ssz.go b/tests/testtypes/consensus-spec-tests/gen_signed_beacon_block_header_ssz.go index e4c1d0c..34bc309 100644 --- a/tests/testtypes/consensus-spec-tests/gen_signed_beacon_block_header_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_signed_beacon_block_header_ssz.go @@ -5,11 +5,15 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheSignedBeaconBlockHeader = (*BeaconBlockHeader)(nil).SizeSSZ() + 96 +var staticSizeCacheSignedBeaconBlockHeader = ssz.PrecomputeStaticSizeCache((*SignedBeaconBlockHeader)(nil)) // SizeSSZ returns the total size of the static ssz object. -func (obj *SignedBeaconBlockHeader) SizeSSZ() uint32 { - return staticSizeCacheSignedBeaconBlockHeader +func (obj *SignedBeaconBlockHeader) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if fork := int(sizer.Fork()); fork < len(staticSizeCacheSignedBeaconBlockHeader) { + return staticSizeCacheSignedBeaconBlockHeader[fork] + } + size = (*BeaconBlockHeader)(nil).SizeSSZ(sizer) + 96 + return size } // DefineSSZ defines how an object is encoded/decoded. diff --git a/tests/testtypes/consensus-spec-tests/gen_signed_bls_to_execution_change_ssz.go b/tests/testtypes/consensus-spec-tests/gen_signed_bls_to_execution_change_ssz.go index f79df41..0e114e0 100644 --- a/tests/testtypes/consensus-spec-tests/gen_signed_bls_to_execution_change_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_signed_bls_to_execution_change_ssz.go @@ -5,11 +5,15 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheSignedBLSToExecutionChange = (*BLSToExecutionChange)(nil).SizeSSZ() + 96 +var staticSizeCacheSignedBLSToExecutionChange = ssz.PrecomputeStaticSizeCache((*SignedBLSToExecutionChange)(nil)) // SizeSSZ returns the total size of the static ssz object. -func (obj *SignedBLSToExecutionChange) SizeSSZ() uint32 { - return staticSizeCacheSignedBLSToExecutionChange +func (obj *SignedBLSToExecutionChange) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if fork := int(sizer.Fork()); fork < len(staticSizeCacheSignedBLSToExecutionChange) { + return staticSizeCacheSignedBLSToExecutionChange[fork] + } + size = (*BLSToExecutionChange)(nil).SizeSSZ(sizer) + 96 + return size } // DefineSSZ defines how an object is encoded/decoded. 
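The hunks above all follow one migration pattern: each hand-rolled scalar staticSizeCacheX is replaced by a per-fork table built by ssz.PrecomputeStaticSizeCache, and SizeSSZ becomes a bounds-checked lookup into that table, recomputing live only when the sizer reports a fork newer than the precomputed range. A minimal hand-written equivalent of what sszgen emits, using only calls visible in this diff (the Outer type and its fields are illustrative, not part of the test suite):

package consensus_spec_tests

import "github.com/karalabe/ssz"

// Outer is a hypothetical static container wrapping another static object,
// mirroring the shape of ProposerSlashing above.
type Outer struct {
	Header *SignedBeaconBlockHeader
	Nonce  uint64
}

// One cached static size per known fork, computed on package init.
var staticSizeCacheOuter = ssz.PrecomputeStaticSizeCache((*Outer)(nil))

// SizeSSZ returns the total size of the static ssz object.
func (obj *Outer) SizeSSZ(sizer *ssz.Sizer) (size uint32) {
	if fork := int(sizer.Fork()); fork < len(staticSizeCacheOuter) {
		return staticSizeCacheOuter[fork] // precomputed fast path
	}
	// Fork newer than the precomputed table: resolve the size live
	size = (*SignedBeaconBlockHeader)(nil).SizeSSZ(sizer) + 8
	return size
}

// DefineSSZ defines how an object is encoded/decoded.
func (obj *Outer) DefineSSZ(codec *ssz.Codec) {
	ssz.DefineStaticObject(codec, &obj.Header) // Field (0) - Header - ? bytes
	ssz.DefineUint64(codec, &obj.Nonce)        // Field (1) - Nonce - 8 bytes
}

Note that the initializer only falls back to the live-computation branch during package init, when the table is still empty; afterwards every call on a known fork is a single slice index.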
diff --git a/tests/testtypes/consensus-spec-tests/gen_signed_voluntary_exit_ssz.go b/tests/testtypes/consensus-spec-tests/gen_signed_voluntary_exit_ssz.go index c8827bd..b29f8ec 100644 --- a/tests/testtypes/consensus-spec-tests/gen_signed_voluntary_exit_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_signed_voluntary_exit_ssz.go @@ -5,11 +5,15 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheSignedVoluntaryExit = (*VoluntaryExit)(nil).SizeSSZ() + 96 +var staticSizeCacheSignedVoluntaryExit = ssz.PrecomputeStaticSizeCache((*SignedVoluntaryExit)(nil)) // SizeSSZ returns the total size of the static ssz object. -func (obj *SignedVoluntaryExit) SizeSSZ() uint32 { - return staticSizeCacheSignedVoluntaryExit +func (obj *SignedVoluntaryExit) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if fork := int(sizer.Fork()); fork < len(staticSizeCacheSignedVoluntaryExit) { + return staticSizeCacheSignedVoluntaryExit[fork] + } + size = (*VoluntaryExit)(nil).SizeSSZ(sizer) + 96 + return size } // DefineSSZ defines how an object is encoded/decoded. diff --git a/tests/testtypes/consensus-spec-tests/gen_single_field_test_struct_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_single_field_test_struct_monolith_ssz.go new file mode 100644 index 0000000..86b5874 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_single_field_test_struct_monolith_ssz.go @@ -0,0 +1,18 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// SizeSSZ returns the total size of the static ssz object. +func (obj *SingleFieldTestStructMonolith) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if sizer.Fork() >= ssz.ForkUnknown { + size += 1 + } + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *SingleFieldTestStructMonolith) DefineSSZ(codec *ssz.Codec) { + ssz.DefineUint8PointerOnFork(codec, &obj.A, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (0) - A - 1 bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_single_field_test_struct_ssz.go b/tests/testtypes/consensus-spec-tests/gen_single_field_test_struct_ssz.go index 4790439..a101db8 100644 --- a/tests/testtypes/consensus-spec-tests/gen_single_field_test_struct_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_single_field_test_struct_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *SingleFieldTestStruct) SizeSSZ() uint32 { +func (obj *SingleFieldTestStruct) SizeSSZ(sizer *ssz.Sizer) uint32 { return 1 } diff --git a/tests/testtypes/consensus-spec-tests/gen_small_test_struct_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_small_test_struct_monolith_ssz.go new file mode 100644 index 0000000..3996ab2 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_small_test_struct_monolith_ssz.go @@ -0,0 +1,20 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// SizeSSZ returns the total size of the static ssz object. +func (obj *SmallTestStructMonolith) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if sizer.Fork() >= ssz.ForkUnknown { + size += 2 + } + size += 2 + return size +} + +// DefineSSZ defines how an object is encoded/decoded. 
+func (obj *SmallTestStructMonolith) DefineSSZ(codec *ssz.Codec) { + ssz.DefineUint16PointerOnFork(codec, &obj.A, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (0) - A - 2 bytes + ssz.DefineUint16(codec, &obj.B) // Field (1) - B - 2 bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_small_test_struct_ssz.go b/tests/testtypes/consensus-spec-tests/gen_small_test_struct_ssz.go index 020e433..8191ad5 100644 --- a/tests/testtypes/consensus-spec-tests/gen_small_test_struct_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_small_test_struct_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *SmallTestStruct) SizeSSZ() uint32 { +func (obj *SmallTestStruct) SizeSSZ(sizer *ssz.Sizer) uint32 { return 2 + 2 } diff --git a/tests/testtypes/consensus-spec-tests/gen_sync_aggregate_ssz.go b/tests/testtypes/consensus-spec-tests/gen_sync_aggregate_ssz.go index a5eb1ed..f1d2183 100644 --- a/tests/testtypes/consensus-spec-tests/gen_sync_aggregate_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_sync_aggregate_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *SyncAggregate) SizeSSZ() uint32 { +func (obj *SyncAggregate) SizeSSZ(sizer *ssz.Sizer) uint32 { return 64 + 96 } diff --git a/tests/testtypes/consensus-spec-tests/gen_sync_committee_ssz.go b/tests/testtypes/consensus-spec-tests/gen_sync_committee_ssz.go index 7be6c36..abf2e0e 100644 --- a/tests/testtypes/consensus-spec-tests/gen_sync_committee_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_sync_committee_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *SyncCommittee) SizeSSZ() uint32 { +func (obj *SyncCommittee) SizeSSZ(sizer *ssz.Sizer) uint32 { return 512*48 + 48 } diff --git a/tests/testtypes/consensus-spec-tests/gen_validator_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_validator_monolith_ssz.go new file mode 100644 index 0000000..173a9db --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_validator_monolith_ssz.go @@ -0,0 +1,27 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// SizeSSZ returns the total size of the static ssz object. +func (obj *ValidatorMonolith) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + size = 48 + 32 + 8 + if sizer.Fork() >= ssz.ForkUnknown { + size += 1 + } + size += 8 + 8 + 8 + 8 + return size +} + +// DefineSSZ defines how an object is encoded/decoded. 
+func (obj *ValidatorMonolith) DefineSSZ(codec *ssz.Codec) { + ssz.DefineStaticBytes(codec, &obj.Pubkey) // Field (0) - Pubkey - 48 bytes + ssz.DefineStaticBytes(codec, &obj.WithdrawalCredentials) // Field (1) - WithdrawalCredentials - 32 bytes + ssz.DefineUint64(codec, &obj.EffectiveBalance) // Field (2) - EffectiveBalance - 8 bytes + ssz.DefineBoolPointerOnFork(codec, &obj.Slashed, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (3) - Slashed - 1 bytes + ssz.DefineUint64(codec, &obj.ActivationEligibilityEpoch) // Field (4) - ActivationEligibilityEpoch - 8 bytes + ssz.DefineUint64(codec, &obj.ActivationEpoch) // Field (5) - ActivationEpoch - 8 bytes + ssz.DefineUint64(codec, &obj.ExitEpoch) // Field (6) - ExitEpoch - 8 bytes + ssz.DefineUint64(codec, &obj.WithdrawableEpoch) // Field (7) - WithdrawableEpoch - 8 bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_validator_ssz.go b/tests/testtypes/consensus-spec-tests/gen_validator_ssz.go index 6e40251..ec7ff1b 100644 --- a/tests/testtypes/consensus-spec-tests/gen_validator_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_validator_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *Validator) SizeSSZ() uint32 { +func (obj *Validator) SizeSSZ(sizer *ssz.Sizer) uint32 { return 48 + 32 + 8 + 1 + 8 + 8 + 8 + 8 } diff --git a/tests/testtypes/consensus-spec-tests/gen_voluntary_exit_ssz.go b/tests/testtypes/consensus-spec-tests/gen_voluntary_exit_ssz.go index 14853a0..9f8d355 100644 --- a/tests/testtypes/consensus-spec-tests/gen_voluntary_exit_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_voluntary_exit_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *VoluntaryExit) SizeSSZ() uint32 { +func (obj *VoluntaryExit) SizeSSZ(sizer *ssz.Sizer) uint32 { return 8 + 8 } diff --git a/tests/testtypes/consensus-spec-tests/gen_withdrawal_ssz.go b/tests/testtypes/consensus-spec-tests/gen_withdrawal_ssz.go index 021d327..8373ec9 100644 --- a/tests/testtypes/consensus-spec-tests/gen_withdrawal_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_withdrawal_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *Withdrawal) SizeSSZ() uint32 { +func (obj *Withdrawal) SizeSSZ(sizer *ssz.Sizer) uint32 { return 8 + 8 + 20 + 8 } diff --git a/tests/testtypes/consensus-spec-tests/gen_withdrawal_variation_ssz.go b/tests/testtypes/consensus-spec-tests/gen_withdrawal_variation_ssz.go index 5e1fa65..ce74ad3 100644 --- a/tests/testtypes/consensus-spec-tests/gen_withdrawal_variation_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_withdrawal_variation_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. 
-func (obj *WithdrawalVariation) SizeSSZ() uint32 { +func (obj *WithdrawalVariation) SizeSSZ(sizer *ssz.Sizer) uint32 { return 8 + 8 + 20 + 8 } diff --git a/tests/testtypes/consensus-spec-tests/types_consensus.go b/tests/testtypes/consensus-spec-tests/types_consensus.go index 13d06ea..908b69d 100644 --- a/tests/testtypes/consensus-spec-tests/types_consensus.go +++ b/tests/testtypes/consensus-spec-tests/types_consensus.go @@ -42,7 +42,10 @@ import ( //go:generate go run -cover ../../../cmd/sszgen -type ExecutionPayloadDeneb -out gen_execution_payload_deneb_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type ExecutionPayloadHeaderDeneb -out gen_execution_payload_header_deneb_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type BeaconState -out gen_beacon_state_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type BeaconStateAltair -out gen_beacon_state_altair_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type BeaconStateBellatrix -out gen_beacon_state_bellatrix_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type BeaconStateCapella -out gen_beacon_state_capella_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type BeaconStateDeneb -out gen_beacon_state_deneb_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type BeaconBlockBody -out gen_beacon_block_body_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type BeaconBlockBodyAltair -out gen_beacon_block_body_altair_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type BeaconBlockBodyBellatrix -out gen_beacon_block_body_bellatrix_ssz.go @@ -62,7 +65,7 @@ type Address [20]byte // LogsBloom is a standalone mock of go-ethereum's types.LogsBloom type LogsBloom [256]byte -// Roots is a helper type to foce a generator quirk. +// Roots is a helper type to force a generator quirk. 
type Roots [8192]Hash type AggregateAndProof struct { @@ -195,6 +198,61 @@ type BeaconState struct { FinalizedCheckpoint *Checkpoint } +type BeaconStateAltair struct { + GenesisTime uint64 + GenesisValidatorsRoot []byte `ssz-size:"32"` + Slot uint64 + Fork *Fork + LatestBlockHeader *BeaconBlockHeader + BlockRoots [8192][32]byte + StateRoots [8192][32]byte + HistoricalRoots [][32]byte `ssz-max:"16777216"` + Eth1Data *Eth1Data + Eth1DataVotes []*Eth1Data `ssz-max:"2048"` + Eth1DepositIndex uint64 + Validators []*Validator `ssz-max:"1099511627776"` + Balances []uint64 `ssz-max:"1099511627776"` + RandaoMixes [65536][32]byte + Slashings [8192]uint64 + PreviousEpochParticipation []byte `ssz-max:"1099511627776"` + CurrentEpochParticipation []byte `ssz-max:"1099511627776"` + JustificationBits [1]byte `ssz-size:"4" ssz:"bits"` + PreviousJustifiedCheckpoint *Checkpoint + CurrentJustifiedCheckpoint *Checkpoint + FinalizedCheckpoint *Checkpoint + InactivityScores []uint64 `ssz-max:"1099511627776"` + CurrentSyncCommittee *SyncCommittee + NextSyncCommittee *SyncCommittee +} + +type BeaconStateBellatrix struct { + GenesisTime uint64 + GenesisValidatorsRoot [32]byte + Slot uint64 + Fork *Fork + LatestBlockHeader *BeaconBlockHeader + BlockRoots [8192][32]byte + StateRoots [8192][32]byte + HistoricalRoots [][32]byte `ssz-max:"16777216"` + Eth1Data *Eth1Data + Eth1DataVotes []*Eth1Data `ssz-max:"2048"` + Eth1DepositIndex uint64 + Validators []*Validator `ssz-max:"1099511627776"` + Balances []uint64 `ssz-max:"1099511627776"` + RandaoMixes [65536][32]byte + Slashings [8192]uint64 + PreviousEpochParticipation []byte `ssz-max:"1099511627776"` + CurrentEpochParticipation []byte `ssz-max:"1099511627776"` + JustificationBits [1]byte `ssz-size:"4" ssz:"bits"` + PreviousJustifiedCheckpoint *Checkpoint + CurrentJustifiedCheckpoint *Checkpoint + FinalizedCheckpoint *Checkpoint + InactivityScores []uint64 `ssz-max:"1099511627776"` + CurrentSyncCommittee *SyncCommittee + NextSyncCommittee *SyncCommittee + LatestExecutionPayloadHeader *ExecutionPayloadHeader +} + type BeaconStateCapella struct { GenesisTime uint64 GenesisValidatorsRoot [32]byte diff --git a/tests/testtypes/consensus-spec-tests/types_monoliths.go b/tests/testtypes/consensus-spec-tests/types_monoliths.go new file mode 100644 index 0000000..4b2917b --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/types_monoliths.go @@ -0,0 +1,166 @@ +// ssz: Go Simple Serialize (SSZ) codec library +// Copyright 2024 ssz Authors +// SPDX-License-Identifier: BSD-3-Clause + +package consensus_spec_tests + +import ( + "math/big" + + "github.com/holiman/uint256" + "github.com/prysmaticlabs/go-bitfield" +) + +//go:generate go run -cover ../../../cmd/sszgen -type SingleFieldTestStructMonolith -out gen_single_field_test_struct_monolith_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type SmallTestStructMonolith -out gen_small_test_struct_monolith_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type FixedTestStructMonolith -out gen_fixed_test_struct_monolith_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type BitsStructMonolith -out gen_bits_struct_monolith_ssz.go + +//go:generate go run -cover ../../../cmd/sszgen -type ExecutionPayloadMonolith -out gen_execution_payload_monolith_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type ExecutionPayloadMonolith2 -out gen_execution_payload_monolith_2_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type ExecutionPayloadHeaderMonolith -out gen_execution_payload_header_monolith_ssz.go 
+//go:generate go run -cover ../../../cmd/sszgen -type BeaconBlockBodyMonolith -out gen_beacon_block_body_monolith_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type BeaconStateMonolith -out gen_beacon_state_monolith_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type ValidatorMonolith -out gen_validator_monolith_ssz.go + +type SingleFieldTestStructMonolith struct { + A *byte `ssz-fork:"unknown"` +} + +type SmallTestStructMonolith struct { + A *uint16 `ssz-fork:"unknown"` + B uint16 +} + +type FixedTestStructMonolith struct { + A *uint8 `ssz-fork:"unknown"` + B *uint64 `ssz-fork:"unknown"` + C *uint32 `ssz-fork:"unknown"` +} + +type BitsStructMonolith struct { + A bitfield.Bitlist `ssz-max:"5" ssz-fork:"unknown"` + B *[1]byte `ssz-size:"2" ssz:"bits" ssz-fork:"unknown"` + C [1]byte `ssz-size:"1" ssz:"bits"` + D bitfield.Bitlist `ssz-max:"6"` + E [1]byte `ssz-size:"8" ssz:"bits"` +} + +type BeaconBlockBodyMonolith struct { + RandaoReveal [96]byte + Eth1Data *Eth1Data + Graffiti [32]byte + ProposerSlashings []*ProposerSlashing `ssz-max:"16"` + AttesterSlashings []*AttesterSlashing `ssz-max:"2"` + Attestations []*Attestation `ssz-max:"128"` + Deposits []*Deposit `ssz-max:"16"` + VoluntaryExits []*SignedVoluntaryExit `ssz-max:"16"` + SyncAggregate *SyncAggregate ` ssz-fork:"altair"` + ExecutionPayload *ExecutionPayloadMonolith ` ssz-fork:"bellatrix"` + BlsToExecutionChanges []*SignedBLSToExecutionChange `ssz-max:"16" ssz-fork:"capella"` + BlobKzgCommitments [][48]byte `ssz-max:"4096" ssz-fork:"deneb"` +} + +type BeaconStateMonolith struct { + GenesisTime uint64 + GenesisValidatorsRoot [32]byte + Slot uint64 + Fork *Fork + LatestBlockHeader *BeaconBlockHeader + BlockRoots [8192][32]byte + StateRoots [8192][32]byte + HistoricalRoots [][32]byte `ssz-max:"16777216"` + Eth1Data *Eth1Data + Eth1DataVotes []*Eth1Data `ssz-max:"2048"` + Eth1DepositIndex uint64 + Validators []*Validator `ssz-max:"1099511627776"` + Balances []uint64 `ssz-max:"1099511627776"` + RandaoMixes [65536][32]byte + Slashings *[8192]uint64 `ssz-fork:"unknown"` + PreviousEpochAttestations []*PendingAttestation `ssz-max:"4096" ssz-fork:"!altair"` + CurrentEpochAttestations []*PendingAttestation `ssz-max:"4096" ssz-fork:"!altair"` + PreviousEpochParticipation []byte `ssz-max:"1099511627776" ssz-fork:"altair"` + CurrentEpochParticipation []byte `ssz-max:"1099511627776" ssz-fork:"altair"` + JustificationBits [1]byte `ssz-size:"4" ssz:"bits"` + PreviousJustifiedCheckpoint *Checkpoint + CurrentJustifiedCheckpoint *Checkpoint + FinalizedCheckpoint *Checkpoint + InactivityScores []uint64 `ssz-max:"1099511627776" ssz-fork:"altair"` + CurrentSyncCommittee *SyncCommittee ` ssz-fork:"altair"` + NextSyncCommittee *SyncCommittee ` ssz-fork:"altair"` + LatestExecutionPayloadHeader *ExecutionPayloadHeaderMonolith ` ssz-fork:"bellatrix"` + NextWithdrawalIndex *uint64 ` ssz-fork:"capella"` + NextWithdrawalValidatorIndex *uint64 ` ssz-fork:"capella"` + HistoricalSummaries []*HistoricalSummary `ssz-max:"16777216" ssz-fork:"capella"` +} + +type ExecutionPayloadMonolith struct { + ParentHash Hash + FeeRecipient Address + StateRoot Hash + ReceiptsRoot Hash + LogsBloom LogsBloom + PrevRandao Hash + BlockNumber uint64 + GasLimit uint64 + GasUsed uint64 + Timestamp uint64 + ExtraData []byte `ssz-max:"32" ssz-fork:"frontier"` + BaseFeePerGas *uint256.Int `ssz-fork:"unknown"` + BlockHash Hash + Transactions [][]byte `ssz-max:"1048576,1073741824" ssz-fork:"unknown"` + Withdrawals []*Withdrawal `ssz-max:"16" ssz-fork:"shanghai"` + 
BlobGasUsed   *uint64 ` ssz-fork:"cancun"`
+	ExcessBlobGas *uint64 ` ssz-fork:"cancun"`
+}
+
+type ExecutionPayloadMonolith2 struct {
+	ParentHash    Hash
+	FeeRecipient  Address
+	StateRoot     Hash
+	ReceiptsRoot  Hash
+	LogsBloom     LogsBloom
+	PrevRandao    Hash
+	BlockNumber   uint64
+	GasLimit      uint64
+	GasUsed       uint64
+	Timestamp     uint64
+	ExtraData     []byte   `ssz-max:"32" ssz-fork:"frontier"`
+	BaseFeePerGas *big.Int `ssz-fork:"unknown"`
+	BlockHash     Hash
+	Transactions  [][]byte      `ssz-max:"1048576,1073741824"`
+	Withdrawals   []*Withdrawal `ssz-max:"16" ssz-fork:"shanghai"`
+	BlobGasUsed   *uint64       ` ssz-fork:"cancun"`
+	ExcessBlobGas *uint64       ` ssz-fork:"cancun"`
+}
+
+type ExecutionPayloadHeaderMonolith struct {
+	ParentHash       [32]byte
+	FeeRecipient     [20]byte
+	StateRoot        [32]byte
+	ReceiptsRoot     [32]byte
+	LogsBloom        [256]byte
+	PrevRandao       [32]byte
+	BlockNumber      uint64
+	GasLimit         uint64
+	GasUsed          uint64
+	Timestamp        uint64
+	ExtraData        []byte `ssz-max:"32" ssz-fork:"frontier"`
+	BaseFeePerGas    [32]byte
+	BlockHash        [32]byte
+	TransactionsRoot [32]byte
+	WithdrawalRoot   *[32]byte `ssz-fork:"shanghai"`
+	BlobGasUsed      *uint64   `ssz-fork:"cancun"`
+	ExcessBlobGas    *uint64   `ssz-fork:"cancun"`
+}
+
+type ValidatorMonolith struct {
+	Pubkey                     [48]byte
+	WithdrawalCredentials      [32]byte
+	EffectiveBalance           uint64
+	Slashed                    *bool `ssz-fork:"unknown"`
+	ActivationEligibilityEpoch uint64
+	ActivationEpoch            uint64
+	ExitEpoch                  uint64
+	WithdrawableEpoch          uint64
+}
diff --git a/tests/testtypes/consensus-spec-tests/types_variation.go b/tests/testtypes/consensus-spec-tests/types_variation.go
index ef6b6e3..6a9e4d8 100644
--- a/tests/testtypes/consensus-spec-tests/types_variation.go
+++ b/tests/testtypes/consensus-spec-tests/types_variation.go
@@ -6,11 +6,19 @@ package consensus_spec_tests
 
 import (
 	"math/big"
+
+	"github.com/prysmaticlabs/go-bitfield"
 )
 
 //go:generate go run -cover ../../../cmd/sszgen -type WithdrawalVariation -out gen_withdrawal_variation_ssz.go
 //go:generate go run -cover ../../../cmd/sszgen -type HistoricalBatchVariation -out gen_historical_batch_variation_ssz.go
 //go:generate go run -cover ../../../cmd/sszgen -type ExecutionPayloadVariation -out gen_execution_payload_variation_ssz.go
+//go:generate go run -cover ../../../cmd/sszgen -type AttestationVariation1 -out gen_attestation_variation_1_ssz.go
+//go:generate go run -cover ../../../cmd/sszgen -type AttestationVariation2 -out gen_attestation_variation_2_ssz.go
+//go:generate go run -cover ../../../cmd/sszgen -type AttestationVariation3 -out gen_attestation_variation_3_ssz.go
+//go:generate go run -cover ../../../cmd/sszgen -type AttestationDataVariation1 -out gen_attestation_data_variation_1_ssz.go
+//go:generate go run -cover ../../../cmd/sszgen -type AttestationDataVariation2 -out gen_attestation_data_variation_2_ssz.go
+//go:generate go run -cover ../../../cmd/sszgen -type AttestationDataVariation3 -out gen_attestation_data_variation_3_ssz.go
 
 type WithdrawalVariation struct {
 	Index uint64
@@ -40,3 +48,50 @@ type ExecutionPayloadVariation struct {
 	BlockHash    Hash
 	Transactions [][]byte `ssz-max:"1048576,1073741824"`
 }
+
+// The types below test that fork constraints generate correct code for types
+// needing runtime size resolution (embedded static objects), with the gated field at various positions.
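For reference, here is a minimal hand-written sketch of the size accumulator the generator should emit for the AttestationDataVariation1 type added just below. It is an assumption modeled on the generated ValidatorMonolith.SizeSSZ earlier in this patch, not actual sszgen output, and it assumes Slot is a uint64 alias (8 bytes):

// Sketch only (assumed generator output, by analogy with the generated
// monolith codecs in this diff): ungated fields are summed unconditionally,
// the gated Future field is only counted from the future fork onwards.
func (obj *AttestationDataVariation1) SizeSSZ(sizer *ssz.Sizer) (size uint32) {
	if sizer.Fork() >= ssz.ForkFuture {
		size += 8 // Future (*uint64) only exists on the placeholder future fork
	}
	size += 8 + 8 + 32                        // Slot + Index + BeaconBlockHash
	size += (*Checkpoint)(nil).SizeSSZ(sizer) // Source, embedded static object
	size += (*Checkpoint)(nil).SizeSSZ(sizer) // Target, embedded static object
	return size
}

Since the Checkpoint sizes are only known at runtime, the real generated code would additionally memoize these per-fork sums via PrecomputeStaticSizeCache (see utils.go further down).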
+
+type AttestationVariation1 struct {
+	Future          *uint64          `ssz-fork:"future"` // Currently unused field
+	AggregationBits bitfield.Bitlist `ssz-max:"2048"`
+	Data            *AttestationData
+	Signature       [96]byte
+}
+type AttestationVariation2 struct {
+	AggregationBits bitfield.Bitlist `ssz-max:"2048"`
+	Data            *AttestationData
+	Future          *uint64 `ssz-fork:"future"` // Currently unused field
+	Signature       [96]byte
+}
+type AttestationVariation3 struct {
+	AggregationBits bitfield.Bitlist `ssz-max:"2048"`
+	Data            *AttestationData
+	Signature       [96]byte
+	Future          *uint64 `ssz-fork:"future"` // Currently unused field
+}
+
+type AttestationDataVariation1 struct {
+	Future          *uint64 `ssz-fork:"future"` // Currently unused field
+	Slot            Slot
+	Index           uint64
+	BeaconBlockHash Hash
+	Source          *Checkpoint
+	Target          *Checkpoint
+}
+type AttestationDataVariation2 struct {
+	Slot            Slot
+	Index           uint64
+	BeaconBlockHash Hash
+	Future          *uint64 `ssz-fork:"future"` // Currently unused field
+	Source          *Checkpoint
+	Target          *Checkpoint
+}
+type AttestationDataVariation3 struct {
+	Slot            Slot
+	Index           uint64
+	BeaconBlockHash Hash
+	Source          *Checkpoint
+	Target          *Checkpoint
+	Future          *uint64 `ssz-fork:"future"` // Currently unused field
+}
diff --git a/tests/zeroval_test.go b/tests/zeroval_test.go
new file mode 100644
index 0000000..0ee6c8c
--- /dev/null
+++ b/tests/zeroval_test.go
@@ -0,0 +1,77 @@
+// ssz: Go Simple Serialize (SSZ) codec library
+// Copyright 2024 ssz Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package tests
+
+import (
+	"bytes"
+	"reflect"
+	"testing"
+
+	"github.com/karalabe/ssz"
+)
+
+// testZeroValue runs a number of encoding/decoding/hashing variations on the
+// zero value of the input type to check that the SSZ implementation correctly
+// handles uninitialized fields.
+func testZeroValue[T newableObject[U], U any](t *testing.T, fork ssz.Fork) {
+	// Verify that streaming/buffering encoding of a zero value results in the
+	// same binary (maybe incorrect, we just want to see that they're the same).
+	str1 := new(bytes.Buffer)
+	if err := ssz.EncodeToStream(str1, T(new(U)), fork); err != nil {
+		t.Fatalf("failed to stream-encode zero-value object: %v", err)
+	}
+	bin1 := make([]byte, ssz.Size(T(new(U)), fork))
+	if err := ssz.EncodeToBytes(bin1, T(new(U)), fork); err != nil {
+		t.Fatalf("failed to buffer-encode zero-value object: %v", err)
+	}
+	if !bytes.Equal(str1.Bytes(), bin1) {
+		t.Fatalf("zero-value encoding mismatch: stream %x, buffer %x", str1.Bytes(), bin1)
+	}
+	// Decode the previous encoding in both streaming/buffering mode and check
+	// that the produced objects are the same.
+	obj1 := T(new(U))
+	if err := ssz.DecodeFromStream(bytes.NewReader(bin1), obj1, uint32(len(bin1)), fork); err != nil {
+		t.Fatalf("failed to stream-decode zero-value object: %v", err)
+	}
+	obj2 := T(new(U))
+	if err := ssz.DecodeFromBytes(bin1, obj2, fork); err != nil {
+		t.Fatalf("failed to buffer-decode zero-value object: %v", err)
+	}
+	if !reflect.DeepEqual(obj1, obj2) {
+		t.Fatalf("zero-value decoding mismatch: stream %+v, buffer %+v", obj1, obj2)
+	}
+	// We can't compare the decoded zero-value to the true zero-value, as pointer
+	// nil-ness might differ. To verify that the decoding was successful, do yet
+	// another round of encodings and compare those to the original ones.
+	str2 := new(bytes.Buffer)
+	if err := ssz.EncodeToStream(str2, obj1, fork); err != nil {
+		t.Fatalf("failed to stream-encode decoded object: %v", err)
+	}
+	bin2 := make([]byte, ssz.Size(obj1, fork))
+	if err := ssz.EncodeToBytes(bin2, obj1, fork); err != nil {
+		t.Fatalf("failed to buffer-encode decoded object: %v", err)
+	}
+	if !bytes.Equal(str2.Bytes(), bin2) {
+		t.Fatalf("re-encoding mismatch: stream %x, buffer %x", str2.Bytes(), bin2)
+	}
+	if !bytes.Equal(bin1, bin2) {
+		t.Fatalf("re-encoding mismatch: zero-value %x, decoded %x", bin1, bin2)
+	}
+	// Encoding/decoding seems to work, hash the zero-value and re-encoded value
+	// in both sequential/concurrent modes and verify the results.
+	hashes := map[string][32]byte{
+		"zero-value-sequential": ssz.HashSequential(T(new(U)), fork),
+		"zero-value-concurrent": ssz.HashConcurrent(T(new(U)), fork),
+		"decoded-sequential":    ssz.HashSequential(obj1, fork),
+		"decoded-concurrent":    ssz.HashConcurrent(obj1, fork),
+	}
+	for key1, hash1 := range hashes {
+		for key2, hash2 := range hashes {
+			if hash1 != hash2 {
+				t.Errorf("hash mismatch: %s %x, %s %x", key1, hash1, key2, hash2)
+			}
+		}
+	}
+}
diff --git a/utils.go b/utils.go
new file mode 100644
index 0000000..ffef378
--- /dev/null
+++ b/utils.go
@@ -0,0 +1,35 @@
+// ssz: Go Simple Serialize (SSZ) codec library
+// Copyright 2024 ssz Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package ssz
+
+import "fmt"
+
+// PrecomputeStaticSizeCache is a helper to precompute the static SSZ sizes of
+// a monolith type across the different forks.
+//
+// For non-monolith types that are constant across forks (or are not meant to
+// be used across forks), all the sizes will be the same, so callers might as
+// well hard-code the size instead.
+func PrecomputeStaticSizeCache(obj Object) []uint32 {
+	var (
+		sizes = make([]uint32, ForkFuture)
+		sizer = &Sizer{codec: new(Codec)}
+	)
+	switch v := obj.(type) {
+	case StaticObject:
+		for fork := 0; fork < len(sizes); fork++ {
+			sizer.codec.fork = Fork(fork)
+			sizes[fork] = v.SizeSSZ(sizer)
+		}
+	case DynamicObject:
+		for fork := 0; fork < len(sizes); fork++ {
+			sizer.codec.fork = Fork(fork)
+			sizes[fork] = v.SizeSSZ(sizer, true)
+		}
+	default:
+		panic(fmt.Sprintf("unsupported type: %T", obj))
+	}
+	return sizes
+}
diff --git a/zeroes.go b/zeroes.go
new file mode 100644
index 0000000..09091a3
--- /dev/null
+++ b/zeroes.go
@@ -0,0 +1,46 @@
+// ssz: Go Simple Serialize (SSZ) codec library
+// Copyright 2024 ssz Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package ssz
+
+import (
+	"reflect"
+	"sync"
+)
+
+// zeroCache contains zero-values for objects that got hit during codec
+// operations. This is a global sync map, meaning it will be slow to access,
+// but encoding/hashing zero values should not happen in production code; it's
+// more of a sanity thing to handle weird corner-cases without blowing up.
+var zeroCache = new(sync.Map)
+
+// zeroValueStatic retrieves a previously created (or creates one on the fly)
+// zero value for a static object, to support operating on half-initialized
+// objects (useful mainly for tests, but it also avoids crashes in case of bad
+// calling parameters).
+func zeroValueStatic[T newableStaticObject[U], U any]() T { + kind := reflect.TypeFor[U]() + + if val, ok := zeroCache.Load(kind); ok { + return val.(T) + } + val := T(new(U)) + zeroCache.Store(kind, val) + return val +} + +// zeroValueDynamic retrieves a previously created (or creates one on the fly) +// zero value for a dynamic object to support operating on half-initialized +// objects (useful for tests mainly, but can also avoid crashes in case of bad +// calling parameters). +func zeroValueDynamic[T newableDynamicObject[U], U any]() T { + kind := reflect.TypeFor[U]() + + if val, ok := zeroCache.Load(kind); ok { + return val.(T) + } + val := T(new(U)) + zeroCache.Store(kind, val) + return val +}
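To round off the review, a small self-contained usage sketch of the new PrecomputeStaticSizeCache helper; the Nonce type and its two methods are hypothetical stand-ins for what sszgen would normally generate:

package main

import (
	"fmt"

	"github.com/karalabe/ssz"
)

// Nonce is a hypothetical static object; its methods mimic the shape of the
// sszgen-generated codecs seen in this patch.
type Nonce struct {
	Value uint64
}

func (n *Nonce) SizeSSZ(sizer *ssz.Sizer) uint32 { return 8 } // one uint64, no fork-gated fields
func (n *Nonce) DefineSSZ(codec *ssz.Codec)      { ssz.DefineUint64(codec, &n.Value) }

func main() {
	// One entry per fork, indexed by the numeric ssz.Fork value; for a type
	// with no fork-gated fields every entry is the same.
	sizes := ssz.PrecomputeStaticSizeCache((*Nonce)(nil))
	fmt.Println(len(sizes), sizes[0]) // int(ssz.ForkFuture) entries, each 8
}

Since the returned slice is allocated with make([]uint32, ForkFuture), indexing it with any known fork value is safe, which is what makes the per-fork cache lookup in the generated monolith codecs straightforward.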