diff --git a/cmd/bee/cmd/start.go b/cmd/bee/cmd/start.go index e659d125116..9403dc69571 100644 --- a/cmd/bee/cmd/start.go +++ b/cmd/bee/cmd/start.go @@ -27,6 +27,7 @@ import ( chaincfg "github.com/ethersphere/bee/v2/pkg/config" "github.com/ethersphere/bee/v2/pkg/crypto" "github.com/ethersphere/bee/v2/pkg/crypto/clef" + "github.com/ethersphere/bee/v2/pkg/dynamicaccess" "github.com/ethersphere/bee/v2/pkg/keystore" filekeystore "github.com/ethersphere/bee/v2/pkg/keystore/file" memkeystore "github.com/ethersphere/bee/v2/pkg/keystore/mem" @@ -293,7 +294,7 @@ func buildBeeNode(ctx context.Context, c *command, cmd *cobra.Command, logger lo neighborhoodSuggester = c.config.GetString(optionNameNeighborhoodSuggester) } - b, err := node.NewBee(ctx, c.config.GetString(optionNameP2PAddr), signerConfig.publicKey, signerConfig.signer, networkID, logger, signerConfig.libp2pPrivateKey, signerConfig.pssPrivateKey, &node.Options{ + b, err := node.NewBee(ctx, c.config.GetString(optionNameP2PAddr), signerConfig.publicKey, signerConfig.signer, networkID, logger, signerConfig.libp2pPrivateKey, signerConfig.pssPrivateKey, signerConfig.session, &node.Options{ DataDir: c.config.GetString(optionNameDataDir), CacheCapacity: c.config.GetUint64(optionNameCacheCapacity), DBOpenFilesLimit: c.config.GetUint64(optionNameDBOpenFilesLimit), @@ -373,6 +374,7 @@ type signerConfig struct { publicKey *ecdsa.PublicKey libp2pPrivateKey *ecdsa.PrivateKey pssPrivateKey *ecdsa.PrivateKey + session dynamicaccess.Session } func waitForClef(logger log.Logger, maxRetries uint64, endpoint string) (externalSigner *external.ExternalSigner, err error) { @@ -403,6 +405,7 @@ func (c *command) configureSigner(cmd *cobra.Command, logger log.Logger) (config var signer crypto.Signer var password string var publicKey *ecdsa.PublicKey + var session dynamicaccess.Session if p := c.config.GetString(optionNamePassword); p != "" { password = p } else if pf := c.config.GetString(optionNamePasswordFile); pf != "" { @@ -475,6 +478,7 @@ func (c *command) configureSigner(cmd *cobra.Command, logger log.Logger) (config } signer = crypto.NewDefaultSigner(swarmPrivateKey) publicKey = &swarmPrivateKey.PublicKey + session = dynamicaccess.NewDefaultSession(swarmPrivateKey) } logger.Info("swarm public key", "public_key", hex.EncodeToString(crypto.EncodeSecp256k1PublicKey(publicKey))) @@ -513,6 +517,7 @@ func (c *command) configureSigner(cmd *cobra.Command, logger log.Logger) (config publicKey: publicKey, libp2pPrivateKey: libp2pPrivateKey, pssPrivateKey: pssPrivateKey, + session: session, }, nil } diff --git a/openapi/Swarm.yaml b/openapi/Swarm.yaml index 7bcd224a7d0..d4024534449 100644 --- a/openapi/Swarm.yaml +++ b/openapi/Swarm.yaml @@ -89,6 +89,135 @@ paths: default: description: Default response + "/grantee": + post: + summary: "Create grantee list" + tags: + - ACT + parameters: + - in: header + schema: + $ref: "SwarmCommon.yaml#/components/parameters/SwarmPostageBatchId" + name: swarm-postage-batch-id + required: true + - in: header + schema: + $ref: "SwarmCommon.yaml#/components/parameters/SwarmTagParameter" + name: swarm-tag + required: false + - in: header + schema: + $ref: "SwarmCommon.yaml#/components/parameters/SwarmPinParameter" + name: swarm-pin + required: false + - in: header + schema: + $ref: "SwarmCommon.yaml#/components/parameters/SwarmDeferredUpload" + name: swarm-deferred-upload + required: false + - in: header + schema: + $ref: "SwarmCommon.yaml#/components/parameters/SwarmActHistoryAddress" + name: swarm-act-history-address + required: false + 
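      # Editor's aside (illustrative only, not part of the spec): a create request body
      # simply lists hex-encoded, compressed secp256k1 public keys, e.g.
      #   {"grantees": ["02ab12…", "03cd34…"]}
      # and the 201 response carries the encrypted grantee-list reference and the new
      # history reference, as defined by ActGranteesOperationResponse in SwarmCommon.yaml.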
requestBody: + required: true + content: + application/json: + schema: + $ref: "SwarmCommon.yaml#/components/schemas/ActGranteesCreateRequest" + responses: + "201": + description: Ok + content: + application/json: + schema: + $ref: "SwarmCommon.yaml#/components/schemas/ActGranteesOperationResponse" + "400": + $ref: "SwarmCommon.yaml#/components/responses/400" + "500": + $ref: "SwarmCommon.yaml#/components/responses/500" + + "/grantee/{reference}": + get: + summary: "Get grantee list" + tags: + - ACT + parameters: + - in: path + name: reference + schema: + $ref: "SwarmCommon.yaml#/components/schemas/SwarmEncryptedReference" + required: true + description: Grantee list reference + responses: + "200": + description: Ok + content: + application/json: + schema: + type: array + items: + $ref: "SwarmCommon.yaml#/components/schemas/PublicKey" + "404": + $ref: "SwarmCommon.yaml#/components/responses/404" + "500": + $ref: "SwarmCommon.yaml#/components/responses/500" + patch: + summary: "Update grantee list" + description: "Add or remove grantees from an existing grantee list" + tags: + - ACT + parameters: + - in: path + name: reference + schema: + $ref: "SwarmCommon.yaml#/components/schemas/SwarmEncryptedReference" + required: true + description: Grantee list reference + - in: header + schema: + $ref: "SwarmCommon.yaml#/components/parameters/SwarmActHistoryAddress" + name: swarm-act-history-address + required: true + - in: header + schema: + $ref: "SwarmCommon.yaml#/components/parameters/SwarmPostageBatchId" + name: swarm-postage-batch-id + required: true + - in: header + schema: + $ref: "SwarmCommon.yaml#/components/parameters/SwarmTagParameter" + name: swarm-tag + required: false + - in: header + schema: + $ref: "SwarmCommon.yaml#/components/parameters/SwarmPinParameter" + name: swarm-pin + required: false + - in: header + schema: + $ref: "SwarmCommon.yaml#/components/parameters/SwarmDeferredUpload" + name: swarm-deferred-upload + required: false + requestBody: + required: true + content: + application/json: + schema: + $ref: "SwarmCommon.yaml#/components/schemas/ActGranteesPatchRequest" + responses: + "200": + description: Ok + content: + application/json: + schema: + $ref: "SwarmCommon.yaml#/components/schemas/ActGranteesOperationResponse" + "400": + $ref: "SwarmCommon.yaml#/components/responses/400" + "500": + $ref: "SwarmCommon.yaml#/components/responses/500" + "/bytes": post: summary: "Upload data" @@ -167,6 +296,9 @@ paths: - $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyStrategyParameter" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyFallbackModeParameter" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmChunkRetrievalTimeoutParameter" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActTimestamp" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActPublisher" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActHistoryAddress" responses: "200": description: Retrieved content specified by reference @@ -190,6 +322,9 @@ paths: $ref: "SwarmCommon.yaml#/components/schemas/SwarmAddress" required: true description: Swarm address of chunk + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActTimestamp" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActPublisher" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActHistoryAddress" responses: "200": description: Chunk exists @@ -208,6 +343,8 @@ paths: parameters: - $ref: "SwarmCommon.yaml#/components/parameters/SwarmTagParameter" - $ref: 
"SwarmCommon.yaml#/components/parameters/SwarmPostageBatchId" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmAct" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActHistoryAddress" requestBody: description: Chunk binary data that has to have at least 8 bytes. content: @@ -223,6 +360,8 @@ paths: description: Tag UID if it was passed to the request `swarm-tag` header. schema: $ref: "SwarmCommon.yaml#/components/schemas/Uid" + "swarm-act-history-address": + $ref: "SwarmCommon.yaml#/components/headers/SwarmActHistoryAddress" content: application/json: schema: @@ -279,6 +418,8 @@ paths: - $ref: "SwarmCommon.yaml#/components/parameters/SwarmPostageBatchId" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmDeferredUpload" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyLevelParameter" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmAct" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActHistoryAddress" requestBody: content: multipart/form-data: @@ -305,6 +446,8 @@ paths: $ref: "SwarmCommon.yaml#/components/headers/SwarmTag" "etag": $ref: "SwarmCommon.yaml#/components/headers/ETag" + "swarm-act-history-address": + $ref: "SwarmCommon.yaml#/components/headers/SwarmActHistoryAddress" content: application/json: schema: @@ -334,6 +477,9 @@ paths: - $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyStrategyParameter" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmRedundancyFallbackModeParameter" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmChunkRetrievalTimeoutParameter" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActTimestamp" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActPublisher" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActHistoryAddress" responses: "200": description: Ok @@ -363,6 +509,9 @@ paths: $ref: "SwarmCommon.yaml#/components/schemas/SwarmAddress" required: true description: Swarm address of chunk + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActTimestamp" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActPublisher" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActHistoryAddress" responses: "200": description: Chunk exists @@ -749,6 +898,8 @@ paths: $ref: "SwarmCommon.yaml#/components/parameters/SwarmPostageBatchId" name: swarm-postage-batch-id required: true + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmAct" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActHistoryAddress" requestBody: required: true description: The SOC binary data is composed of the span (8 bytes) and the at most 4KB payload. 
@@ -764,6 +915,9 @@ paths: application/json: schema: $ref: "SwarmCommon.yaml#/components/schemas/ReferenceResponse" + headers: + "swarm-act-history-address": + $ref: "SwarmCommon.yaml#/components/headers/SwarmActHistoryAddress" "400": $ref: "SwarmCommon.yaml#/components/responses/400" "401": @@ -801,6 +955,8 @@ paths: description: "Feed indexing scheme (default: sequence)" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmPinParameter" - $ref: "SwarmCommon.yaml#/components/parameters/SwarmPostageBatchId" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmAct" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActHistoryAddress" responses: "201": description: Created @@ -808,6 +964,9 @@ paths: application/json: schema: $ref: "SwarmCommon.yaml#/components/schemas/ReferenceResponse" + headers: + "swarm-act-history-address": + $ref: "SwarmCommon.yaml#/components/headers/SwarmActHistoryAddress" "400": $ref: "SwarmCommon.yaml#/components/responses/400" "401": @@ -1150,6 +1309,9 @@ paths: $ref: "SwarmCommon.yaml#/components/parameters/SwarmCache" name: swarm-cache required: false + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActTimestamp" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActPublisher" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActHistoryAddress" responses: "200": description: Retrieved chunk content @@ -1177,6 +1339,9 @@ paths: $ref: "SwarmCommon.yaml#/components/schemas/SwarmAddress" required: true description: Swarm address of chunk + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActTimestamp" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActPublisher" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActHistoryAddress" responses: "200": description: Chunk exists diff --git a/openapi/SwarmCommon.yaml b/openapi/SwarmCommon.yaml index 84d690fc64f..620f3ed8aff 100644 --- a/openapi/SwarmCommon.yaml +++ b/openapi/SwarmCommon.yaml @@ -87,6 +87,36 @@ components: ghostBalance: $ref: "#/components/schemas/BigInt" + ActGranteesCreateRequest: + type: object + properties: + grantees: + type: array + items: + $ref: "#/components/schemas/PublicKey" + + ActGranteesPatchRequest: + type: object + properties: + add: + type: array + items: + $ref: "#/components/schemas/PublicKey" + description: List of grantees to add + revoke: + type: array + items: + $ref: "#/components/schemas/PublicKey" + description: List of grantees to revoke future access from + + ActGranteesOperationResponse: + type: object + properties: + ref: + $ref: "#/components/schemas/SwarmEncryptedReference" + historyref: + $ref: "#/components/schemas/SwarmEncryptedReference" + Balance: type: object properties: @@ -853,7 +883,7 @@ components: reserveSize: type: integer reserveSizeWithinRadius: - type: interger + type: integer pullsyncRate: type: number storageRadius: @@ -977,6 +1007,12 @@ components: schema: $ref: "#/components/schemas/HexString" + SwarmActHistoryAddress: + description: "Swarm address reference to the new ACT history entry" + schema: + $ref: "#/components/schemas/SwarmAddress" + required: false + ETag: description: | The RFC7232 ETag header field in a response provides the current entity- @@ -1137,6 +1173,40 @@ components: required: false description: "Determines if the download data should be cached on the node. 
By default the download will be cached" + SwarmAct: + in: header + name: swarm-act + schema: + type: boolean + default: "false" + required: false + description: "Determines if the uploaded data should be treated as ACT content" + + SwarmActPublisher: + in: header + name: swarm-act-publisher + schema: + $ref: "#/components/schemas/PublicKey" + required: false + description: "ACT content publisher's public key" + + SwarmActHistoryAddress: + in: header + name: swarm-act-history-address + schema: + $ref: "#/components/schemas/SwarmAddress" + required: false + description: "ACT history reference address" + + SwarmActTimestamp: + in: header + name: swarm-act-timestamp + schema: + type: integer + format: int64 + required: false + description: "ACT history Unix timestamp" + responses: "200": description: OK. diff --git a/openapi/SwarmDebug.yaml b/openapi/SwarmDebug.yaml index e2091171729..ff51a96300a 100644 --- a/openapi/SwarmDebug.yaml +++ b/openapi/SwarmDebug.yaml @@ -201,6 +201,9 @@ paths: $ref: "SwarmCommon.yaml#/components/schemas/SwarmAddress" required: true description: Swarm address of chunk + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActTimestamp" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActPublisher" + - $ref: "SwarmCommon.yaml#/components/parameters/SwarmActHistoryAddress" responses: "200": description: Chunk exists diff --git a/pkg/api/api.go b/pkg/api/api.go index 72100cc0d9b..ec5f4a508ad 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -30,6 +30,7 @@ import ( "github.com/ethersphere/bee/v2/pkg/accounting" "github.com/ethersphere/bee/v2/pkg/auth" "github.com/ethersphere/bee/v2/pkg/crypto" + "github.com/ethersphere/bee/v2/pkg/dynamicaccess" "github.com/ethersphere/bee/v2/pkg/feeds" "github.com/ethersphere/bee/v2/pkg/file/pipeline" "github.com/ethersphere/bee/v2/pkg/file/pipeline/builder" @@ -85,6 +86,10 @@ const ( SwarmRedundancyFallbackModeHeader = "Swarm-Redundancy-Fallback-Mode" SwarmChunkRetrievalTimeoutHeader = "Swarm-Chunk-Retrieval-Timeout" SwarmLookAheadBufferSizeHeader = "Swarm-Lookahead-Buffer-Size" + SwarmActHeader = "Swarm-Act" + SwarmActTimestampHeader = "Swarm-Act-Timestamp" + SwarmActPublisherHeader = "Swarm-Act-Publisher" + SwarmActHistoryAddressHeader = "Swarm-Act-History-Address" ImmutableHeader = "Immutable" GasPriceHeader = "Gas-Price" @@ -117,6 +122,8 @@ var ( errBatchUnusable = errors.New("batch not usable") errUnsupportedDevNodeOperation = errors.New("operation not supported in dev mode") errOperationSupportedOnlyInFullMode = errors.New("operation is supported only in full mode") + errActDownload = errors.New("act download failed") + errActUpload = errors.New("act upload failed") ) // Storer interface provides the functionality required from the local storage @@ -147,6 +154,7 @@ type Service struct { feedFactory feeds.Factory signer crypto.Signer post postage.Service + dac dynamicaccess.Controller postageContract postagecontract.Interface probe *Probe metricsRegistry *prometheus.Registry @@ -245,6 +253,7 @@ type ExtraOptions struct { Pss pss.Interface FeedFactory feeds.Factory Post postage.Service + Dac dynamicaccess.Controller PostageContract postagecontract.Interface Staking staking.Contract Steward steward.Interface @@ -328,6 +337,7 @@ func (s *Service) Configure(signer crypto.Signer, auth auth.Authenticator, trace s.pss = e.Pss s.feedFactory = e.FeedFactory s.post = e.Post + s.dac = e.Dac s.postageContract = e.PostageContract s.steward = e.Steward s.stakingContract = e.Staking diff --git a/pkg/api/api_test.go 
b/pkg/api/api_test.go index 6a96812a908..8a6781f7f98 100644 --- a/pkg/api/api_test.go +++ b/pkg/api/api_test.go @@ -28,6 +28,8 @@ import ( "github.com/ethersphere/bee/v2/pkg/auth" mockauth "github.com/ethersphere/bee/v2/pkg/auth/mock" "github.com/ethersphere/bee/v2/pkg/crypto" + "github.com/ethersphere/bee/v2/pkg/dynamicaccess" + mockdac "github.com/ethersphere/bee/v2/pkg/dynamicaccess/mock" "github.com/ethersphere/bee/v2/pkg/feeds" "github.com/ethersphere/bee/v2/pkg/file/pipeline" "github.com/ethersphere/bee/v2/pkg/file/pipeline/builder" @@ -102,6 +104,7 @@ type testServerOptions struct { PostageContract postagecontract.Interface StakingContract staking.Contract Post postage.Service + Dac dynamicaccess.Controller Steward steward.Interface WsHeaders http.Header Authenticator auth.Authenticator @@ -152,6 +155,9 @@ func newTestServer(t *testing.T, o testServerOptions) (*http.Client, *websocket. if o.Post == nil { o.Post = mockpost.New() } + if o.Dac == nil { + o.Dac = mockdac.New() + } if o.BatchStore == nil { o.BatchStore = mockbatchstore.New(mockbatchstore.WithAcceptAllExistsFunc()) // default is with accept-all Exists() func } @@ -198,6 +204,7 @@ func newTestServer(t *testing.T, o testServerOptions) (*http.Client, *websocket. Pss: o.Pss, FeedFactory: o.Feeds, Post: o.Post, + Dac: o.Dac, PostageContract: o.PostageContract, Steward: o.Steward, SyncStatus: o.SyncStatus, diff --git a/pkg/api/bytes.go b/pkg/api/bytes.go index 9b5d9b902d6..0eefb581c1b 100644 --- a/pkg/api/bytes.go +++ b/pkg/api/bytes.go @@ -15,7 +15,7 @@ import ( "github.com/ethersphere/bee/v2/pkg/file/redundancy" "github.com/ethersphere/bee/v2/pkg/jsonhttp" "github.com/ethersphere/bee/v2/pkg/postage" - storage "github.com/ethersphere/bee/v2/pkg/storage" + "github.com/ethersphere/bee/v2/pkg/storage" "github.com/ethersphere/bee/v2/pkg/swarm" "github.com/ethersphere/bee/v2/pkg/tracing" "github.com/gorilla/mux" @@ -33,12 +33,14 @@ func (s *Service) bytesUploadHandler(w http.ResponseWriter, r *http.Request) { defer span.Finish() headers := struct { - BatchID []byte `map:"Swarm-Postage-Batch-Id" validate:"required"` - SwarmTag uint64 `map:"Swarm-Tag"` - Pin bool `map:"Swarm-Pin"` - Deferred *bool `map:"Swarm-Deferred-Upload"` - Encrypt bool `map:"Swarm-Encrypt"` - RLevel redundancy.Level `map:"Swarm-Redundancy-Level"` + BatchID []byte `map:"Swarm-Postage-Batch-Id" validate:"required"` + SwarmTag uint64 `map:"Swarm-Tag"` + Pin bool `map:"Swarm-Pin"` + Deferred *bool `map:"Swarm-Deferred-Upload"` + Encrypt bool `map:"Swarm-Encrypt"` + RLevel redundancy.Level `map:"Swarm-Redundancy-Level"` + Act bool `map:"Swarm-Act"` + HistoryAddress swarm.Address `map:"Swarm-Act-History-Address"` }{} if response := s.mapStructure(r.Header, &headers); response != nil { response("invalid header params", logger, w) @@ -100,7 +102,7 @@ func (s *Service) bytesUploadHandler(w http.ResponseWriter, r *http.Request) { } p := requestPipelineFn(putter, headers.Encrypt, headers.RLevel) - address, err := p(ctx, r.Body) + reference, err := p(ctx, r.Body) if err != nil { logger.Debug("split write all failed", "error", err) logger.Error(nil, "split write all failed") @@ -114,9 +116,17 @@ func (s *Service) bytesUploadHandler(w http.ResponseWriter, r *http.Request) { return } - span.SetTag("root_address", address) + encryptedReference := reference + if headers.Act { + encryptedReference, err = s.actEncryptionHandler(r.Context(), w, putter, reference, headers.HistoryAddress) + if err != nil { + jsonhttp.InternalServerError(w, errActUpload) + return + } + } + 
span.SetTag("root_address", encryptedReference) - err = putter.Done(address) + err = putter.Done(reference) if err != nil { logger.Debug("done split failed", "error", err) logger.Error(nil, "done split failed") @@ -133,7 +143,7 @@ func (s *Service) bytesUploadHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set("Access-Control-Expose-Headers", SwarmTagHeader) jsonhttp.Created(w, bytesPostResponse{ - Reference: address, + Reference: encryptedReference, }) } @@ -149,11 +159,16 @@ func (s *Service) bytesGetHandler(w http.ResponseWriter, r *http.Request) { return } + address := paths.Address + if v := getAddressFromContext(r.Context()); !v.IsZero() { + address = v + } + additionalHeaders := http.Header{ ContentTypeHeader: {"application/octet-stream"}, } - s.downloadHandler(logger, w, r, paths.Address, additionalHeaders, true, false) + s.downloadHandler(logger, w, r, address, additionalHeaders, true, false) } func (s *Service) bytesHeadHandler(w http.ResponseWriter, r *http.Request) { @@ -167,11 +182,16 @@ func (s *Service) bytesHeadHandler(w http.ResponseWriter, r *http.Request) { return } + address := paths.Address + if v := getAddressFromContext(r.Context()); !v.IsZero() { + address = v + } + getter := s.storer.Download(true) - ch, err := getter.Get(r.Context(), paths.Address) + ch, err := getter.Get(r.Context(), address) if err != nil { - logger.Debug("get root chunk failed", "chunk_address", paths.Address, "error", err) + logger.Debug("get root chunk failed", "chunk_address", address, "error", err) logger.Error(nil, "get rook chunk failed") w.WriteHeader(http.StatusNotFound) return diff --git a/pkg/api/bzz.go b/pkg/api/bzz.go index 65ded851f12..dc26b26667a 100644 --- a/pkg/api/bzz.go +++ b/pkg/api/bzz.go @@ -30,8 +30,8 @@ import ( "github.com/ethersphere/bee/v2/pkg/log" "github.com/ethersphere/bee/v2/pkg/manifest" "github.com/ethersphere/bee/v2/pkg/postage" - storage "github.com/ethersphere/bee/v2/pkg/storage" - storer "github.com/ethersphere/bee/v2/pkg/storer" + "github.com/ethersphere/bee/v2/pkg/storage" + "github.com/ethersphere/bee/v2/pkg/storer" "github.com/ethersphere/bee/v2/pkg/swarm" "github.com/ethersphere/bee/v2/pkg/topology" "github.com/ethersphere/bee/v2/pkg/tracing" @@ -63,14 +63,16 @@ func (s *Service) bzzUploadHandler(w http.ResponseWriter, r *http.Request) { defer span.Finish() headers := struct { - ContentType string `map:"Content-Type,mimeMediaType" validate:"required"` - BatchID []byte `map:"Swarm-Postage-Batch-Id" validate:"required"` - SwarmTag uint64 `map:"Swarm-Tag"` - Pin bool `map:"Swarm-Pin"` - Deferred *bool `map:"Swarm-Deferred-Upload"` - Encrypt bool `map:"Swarm-Encrypt"` - IsDir bool `map:"Swarm-Collection"` - RLevel redundancy.Level `map:"Swarm-Redundancy-Level"` + ContentType string `map:"Content-Type,mimeMediaType" validate:"required"` + BatchID []byte `map:"Swarm-Postage-Batch-Id" validate:"required"` + SwarmTag uint64 `map:"Swarm-Tag"` + Pin bool `map:"Swarm-Pin"` + Deferred *bool `map:"Swarm-Deferred-Upload"` + Encrypt bool `map:"Swarm-Encrypt"` + IsDir bool `map:"Swarm-Collection"` + RLevel redundancy.Level `map:"Swarm-Redundancy-Level"` + Act bool `map:"Swarm-Act"` + HistoryAddress swarm.Address `map:"Swarm-Act-History-Address"` }{} if response := s.mapStructure(r.Header, &headers); response != nil { response("invalid header params", logger, w) @@ -132,10 +134,10 @@ func (s *Service) bzzUploadHandler(w http.ResponseWriter, r *http.Request) { } if headers.IsDir || headers.ContentType == multiPartFormData { - s.dirUploadHandler(ctx, logger, 
span, ow, r, putter, r.Header.Get(ContentTypeHeader), headers.Encrypt, tag, headers.RLevel) + s.dirUploadHandler(ctx, logger, span, ow, r, putter, r.Header.Get(ContentTypeHeader), headers.Encrypt, tag, headers.RLevel, headers.Act, headers.HistoryAddress) return } - s.fileUploadHandler(ctx, logger, span, ow, r, putter, headers.Encrypt, tag, headers.RLevel) + s.fileUploadHandler(ctx, logger, span, ow, r, putter, headers.Encrypt, tag, headers.RLevel, headers.Act, headers.HistoryAddress) } // fileUploadResponse is returned when an HTTP request to upload a file is successful @@ -155,6 +157,8 @@ func (s *Service) fileUploadHandler( encrypt bool, tagID uint64, rLevel redundancy.Level, + act bool, + historyAddress swarm.Address, ) { queries := struct { FileName string `map:"name" validate:"startsnotwith=/"` @@ -260,6 +264,15 @@ func (s *Service) fileUploadHandler( } logger.Debug("store", "manifest_reference", manifestReference) + encryptedReference := manifestReference + if act { + encryptedReference, err = s.actEncryptionHandler(r.Context(), w, putter, manifestReference, historyAddress) + if err != nil { + jsonhttp.InternalServerError(w, errActUpload) + return + } + } + err = putter.Done(manifestReference) if err != nil { logger.Debug("done split failed", "error", err) @@ -268,18 +281,18 @@ func (s *Service) fileUploadHandler( ext.LogError(span, err, olog.String("action", "putter.Done")) return } - span.LogFields(olog.Bool("success", true)) - span.SetTag("root_address", manifestReference) + span.SetTag("root_address", encryptedReference) if tagID != 0 { w.Header().Set(SwarmTagHeader, fmt.Sprint(tagID)) span.SetTag("tagID", tagID) } - w.Header().Set(ETagHeader, fmt.Sprintf("%q", manifestReference.String())) + w.Header().Set(ETagHeader, fmt.Sprintf("%q", encryptedReference.String())) w.Header().Set("Access-Control-Expose-Headers", SwarmTagHeader) + jsonhttp.Created(w, bzzUploadResponse{ - Reference: manifestReference, + Reference: encryptedReference, }) } @@ -295,11 +308,16 @@ func (s *Service) bzzDownloadHandler(w http.ResponseWriter, r *http.Request) { return } + address := paths.Address + if v := getAddressFromContext(r.Context()); !v.IsZero() { + address = v + } + if strings.HasSuffix(paths.Path, "/") { paths.Path = strings.TrimRight(paths.Path, "/") + "/" // NOTE: leave one slash if there was some. } - s.serveReference(logger, paths.Address, paths.Path, w, r, false) + s.serveReference(logger, address, paths.Path, w, r, false) } func (s *Service) bzzHeadHandler(w http.ResponseWriter, r *http.Request) { @@ -314,11 +332,16 @@ func (s *Service) bzzHeadHandler(w http.ResponseWriter, r *http.Request) { return } + address := paths.Address + if v := getAddressFromContext(r.Context()); !v.IsZero() { + address = v + } + if strings.HasSuffix(paths.Path, "/") { paths.Path = strings.TrimRight(paths.Path, "/") + "/" // NOTE: leave one slash if there was some. 
} - s.serveReference(logger, paths.Address, paths.Path, w, r, true) + s.serveReference(logger, address, paths.Path, w, r, true) } func (s *Service) serveReference(logger log.Logger, address swarm.Address, pathVar string, w http.ResponseWriter, r *http.Request, headerOnly bool) { diff --git a/pkg/api/chunk.go b/pkg/api/chunk.go index a572cacdcdf..21daa0d0f57 100644 --- a/pkg/api/chunk.go +++ b/pkg/api/chunk.go @@ -30,8 +30,10 @@ func (s *Service) chunkUploadHandler(w http.ResponseWriter, r *http.Request) { logger := s.logger.WithName("post_chunk").Build() headers := struct { - BatchID []byte `map:"Swarm-Postage-Batch-Id" validate:"required"` - SwarmTag uint64 `map:"Swarm-Tag"` + BatchID []byte `map:"Swarm-Postage-Batch-Id" validate:"required"` + SwarmTag uint64 `map:"Swarm-Tag"` + Act bool `map:"Swarm-Act"` + HistoryAddress swarm.Address `map:"Swarm-Act-History-Address"` }{} if response := s.mapStructure(r.Header, &headers); response != nil { response("invalid header params", logger, w) @@ -139,6 +141,15 @@ func (s *Service) chunkUploadHandler(w http.ResponseWriter, r *http.Request) { } } + encryptedReference := chunk.Address() + if headers.Act { + encryptedReference, err = s.actEncryptionHandler(r.Context(), w, putter, chunk.Address(), headers.HistoryAddress) + if err != nil { + jsonhttp.InternalServerError(w, errActUpload) + return + } + } + err = putter.Put(r.Context(), chunk) if err != nil { logger.Debug("chunk upload: write chunk failed", "chunk_address", chunk.Address(), "error", err) @@ -165,7 +176,7 @@ func (s *Service) chunkUploadHandler(w http.ResponseWriter, r *http.Request) { } w.Header().Set("Access-Control-Expose-Headers", SwarmTagHeader) - jsonhttp.Created(w, chunkAddressResponse{Reference: chunk.Address()}) + jsonhttp.Created(w, chunkAddressResponse{Reference: encryptedReference}) } func (s *Service) chunkGetHandler(w http.ResponseWriter, r *http.Request) { @@ -192,15 +203,20 @@ func (s *Service) chunkGetHandler(w http.ResponseWriter, r *http.Request) { return } - chunk, err := s.storer.Download(cache).Get(r.Context(), paths.Address) + address := paths.Address + if v := getAddressFromContext(r.Context()); !v.IsZero() { + address = v + } + + chunk, err := s.storer.Download(cache).Get(r.Context(), address) if err != nil { if errors.Is(err, storage.ErrNotFound) { - loggerV1.Debug("chunk not found", "address", paths.Address) + loggerV1.Debug("chunk not found", "address", address) jsonhttp.NotFound(w, "chunk not found") return } - logger.Debug("read chunk failed", "chunk_address", paths.Address, "error", err) + logger.Debug("read chunk failed", "chunk_address", address, "error", err) logger.Error(nil, "read chunk failed") jsonhttp.InternalServerError(w, "read chunk failed") return diff --git a/pkg/api/chunk_address.go b/pkg/api/chunk_address.go index 6f214a0ea03..838c9ce6e8e 100644 --- a/pkg/api/chunk_address.go +++ b/pkg/api/chunk_address.go @@ -23,9 +23,14 @@ func (s *Service) hasChunkHandler(w http.ResponseWriter, r *http.Request) { return } - has, err := s.storer.ChunkStore().Has(r.Context(), paths.Address) + address := paths.Address + if v := getAddressFromContext(r.Context()); !v.IsZero() { + address = v + } + + has, err := s.storer.ChunkStore().Has(r.Context(), address) if err != nil { - logger.Debug("has chunk failed", "chunk_address", paths.Address, "error", err) + logger.Debug("has chunk failed", "chunk_address", address, "error", err) jsonhttp.BadRequest(w, err) return } diff --git a/pkg/api/dirs.go b/pkg/api/dirs.go index f54a02807c9..f187fbde01e 100644 --- 
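The bytes, bzz, chunk and chunk-address hunks above all repeat the same pattern: the ACT decryption middleware stores the resolved plaintext reference in the request context, and each GET/HEAD handler prefers that value over the path parameter. Below is a condensed, standalone restatement of that pattern for readers skimming the diff; it uses plain strings instead of swarm.Address and hypothetical names, and is not code from the patch.

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// addressKey mirrors, in spirit, the unexported context key the patch adds in dynamicaccess.go.
type addressKey struct{}

// withDecryptedAddress stands in for actDecryptionHandler: when the ACT lookup
// succeeds it rewrites the request context with the plaintext reference.
func withDecryptedAddress(next http.Handler, decrypt func(string) (string, bool)) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if plain, ok := decrypt(r.URL.Path); ok {
			r = r.WithContext(context.WithValue(r.Context(), addressKey{}, plain))
		}
		next.ServeHTTP(w, r)
	})
}

// handler shows the override step each download handler now performs.
func handler(w http.ResponseWriter, r *http.Request) {
	address := r.URL.Path // default: the address taken from the path
	if v, ok := r.Context().Value(addressKey{}).(string); ok && v != "" {
		address = v // prefer the decrypted reference set by the middleware
	}
	fmt.Fprintln(w, "serving", address)
}

func main() {
	decrypt := func(p string) (string, bool) { return "decrypted" + p, true } // stand-in for the ACT lookup
	h := withDecryptedAddress(http.HandlerFunc(handler), decrypt)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/abc", nil))
	fmt.Print(rec.Body.String()) // prints: serving decrypted/abc
}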
a/pkg/api/dirs.go +++ b/pkg/api/dirs.go @@ -47,6 +47,8 @@ func (s *Service) dirUploadHandler( encrypt bool, tag uint64, rLevel redundancy.Level, + act bool, + historyAddress swarm.Address, ) { if r.Body == http.NoBody { logger.Error(nil, "request has no body") @@ -98,6 +100,15 @@ func (s *Service) dirUploadHandler( return } + encryptedReference := reference + if act { + encryptedReference, err = s.actEncryptionHandler(r.Context(), w, putter, reference, historyAddress) + if err != nil { + jsonhttp.InternalServerError(w, errActUpload) + return + } + } + err = putter.Done(reference) if err != nil { logger.Debug("store dir failed", "error", err) @@ -113,7 +124,7 @@ func (s *Service) dirUploadHandler( } w.Header().Set("Access-Control-Expose-Headers", SwarmTagHeader) jsonhttp.Created(w, bzzUploadResponse{ - Reference: reference, + Reference: encryptedReference, }) } diff --git a/pkg/api/dynamicaccess.go b/pkg/api/dynamicaccess.go new file mode 100644 index 00000000000..f83f0cca435 --- /dev/null +++ b/pkg/api/dynamicaccess.go @@ -0,0 +1,529 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package api + +import ( + "context" + "crypto/ecdsa" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/ethersphere/bee/v2/pkg/crypto" + "github.com/ethersphere/bee/v2/pkg/file/loadsave" + "github.com/ethersphere/bee/v2/pkg/file/redundancy" + "github.com/ethersphere/bee/v2/pkg/jsonhttp" + "github.com/ethersphere/bee/v2/pkg/postage" + "github.com/ethersphere/bee/v2/pkg/storage" + "github.com/ethersphere/bee/v2/pkg/storer" + "github.com/ethersphere/bee/v2/pkg/swarm" + "github.com/gorilla/mux" +) + +type addressKey struct{} + +const granteeListEncrypt = true + +// getAddressFromContext is a helper function to extract the address from the context +func getAddressFromContext(ctx context.Context) swarm.Address { + v, ok := ctx.Value(addressKey{}).(swarm.Address) + if ok { + return v + } + return swarm.ZeroAddress +} + +// setAddressInContext sets the swarm address in the context +func setAddressInContext(ctx context.Context, address swarm.Address) context.Context { + return context.WithValue(ctx, addressKey{}, address) +} + +type GranteesPatchRequest struct { + Addlist []string `json:"add"` + Revokelist []string `json:"revoke"` +} + +type GranteesPatchResponse struct { + Reference swarm.Address `json:"ref"` + HistoryReference swarm.Address `json:"historyref"` +} + +type GranteesPostRequest struct { + GranteeList []string `json:"grantees"` +} + +type GranteesPostResponse struct { + Reference swarm.Address `json:"ref"` + HistoryReference swarm.Address `json:"historyref"` +} + +type GranteesPatch struct { + Addlist []*ecdsa.PublicKey + Revokelist []*ecdsa.PublicKey +} + +// actDecryptionHandler is a middleware that looks up and decrypts the given address, +// if the act headers are present +func (s *Service) actDecryptionHandler() func(h http.Handler) http.Handler { + return func(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + logger := s.logger.WithName("act_decryption_handler").Build() + paths := struct { + Address swarm.Address `map:"address,resolve" validate:"required"` + }{} + if response := s.mapStructure(mux.Vars(r), &paths); response != nil { + response("invalid path params", logger, w) + return + } + + headers := struct { + Timestamp *int64 
`map:"Swarm-Act-Timestamp"` + Publisher *ecdsa.PublicKey `map:"Swarm-Act-Publisher"` + HistoryAddress *swarm.Address `map:"Swarm-Act-History-Address"` + Cache *bool `map:"Swarm-Cache"` + }{} + if response := s.mapStructure(r.Header, &headers); response != nil { + response("invalid header params", logger, w) + return + } + + // Try to download the file wihtout decryption, if the act headers are not present + if headers.Publisher == nil || headers.HistoryAddress == nil { + h.ServeHTTP(w, r) + return + } + + timestamp := time.Now().Unix() + if headers.Timestamp != nil { + timestamp = *headers.Timestamp + } + + cache := true + if headers.Cache != nil { + cache = *headers.Cache + } + ctx := r.Context() + ls := loadsave.NewReadonly(s.storer.Download(cache)) + reference, err := s.dac.DownloadHandler(ctx, ls, paths.Address, headers.Publisher, *headers.HistoryAddress, timestamp) + if err != nil { + logger.Debug("act failed to decrypt reference", "error", err) + logger.Error(nil, "act failed to decrypt reference") + jsonhttp.InternalServerError(w, errActDownload) + return + } + h.ServeHTTP(w, r.WithContext(setAddressInContext(ctx, reference))) + }) + } +} + +// actEncryptionHandler is a middleware that encrypts the given address using the publisher's public key, +// uploads the encrypted reference, history and kvs to the store +func (s *Service) actEncryptionHandler( + ctx context.Context, + w http.ResponseWriter, + putter storer.PutterSession, + reference swarm.Address, + historyRootHash swarm.Address, +) (swarm.Address, error) { + logger := s.logger.WithName("act_encryption_handler").Build() + publisherPublicKey := &s.publicKey + ls := loadsave.New(s.storer.Download(true), s.storer.Cache(), requestPipelineFactory(ctx, putter, false, redundancy.NONE)) + storageReference, historyReference, encryptedReference, err := s.dac.UploadHandler(ctx, ls, reference, publisherPublicKey, historyRootHash) + if err != nil { + logger.Debug("act failed to encrypt reference", "error", err) + logger.Error(nil, "act failed to encrypt reference") + return swarm.ZeroAddress, fmt.Errorf("act failed to encrypt reference: %w", err) + } + // only need to upload history and kvs if a new history is created, + // meaning that the publisher uploaded to the history for the first time + if !historyReference.Equal(historyRootHash) { + err = putter.Done(storageReference) + if err != nil { + logger.Debug("done split keyvaluestore failed", "error", err) + logger.Error(nil, "done split keyvaluestore failed") + return swarm.ZeroAddress, fmt.Errorf("done split keyvaluestore failed: %w", err) + } + err = putter.Done(historyReference) + if err != nil { + logger.Debug("done split history failed", "error", err) + logger.Error(nil, "done split history failed") + return swarm.ZeroAddress, fmt.Errorf("done split history failed: %w", err) + } + } + + w.Header().Set(SwarmActHistoryAddressHeader, historyReference.String()) + return encryptedReference, nil +} + +// actListGranteesHandler is a middleware that decrypts the given address and returns the list of grantees, +// only the publisher is authorized to access the list +func (s *Service) actListGranteesHandler(w http.ResponseWriter, r *http.Request) { + logger := s.logger.WithName("act_list_grantees_handler").Build() + paths := struct { + GranteesAddress swarm.Address `map:"address,resolve" validate:"required"` + }{} + if response := s.mapStructure(mux.Vars(r), &paths); response != nil { + response("invalid path params", logger, w) + return + } + + headers := struct { + Cache *bool 
`map:"Swarm-Cache"` + }{} + if response := s.mapStructure(r.Header, &headers); response != nil { + response("invalid header params", logger, w) + return + } + cache := true + if headers.Cache != nil { + cache = *headers.Cache + } + publisher := &s.publicKey + ls := loadsave.NewReadonly(s.storer.Download(cache)) + grantees, err := s.dac.Get(r.Context(), ls, publisher, paths.GranteesAddress) + if err != nil { + logger.Debug("could not get grantees", "error", err) + logger.Error(nil, "could not get grantees") + jsonhttp.NotFound(w, "granteelist not found") + return + } + granteeSlice := make([]string, len(grantees)) + for i, grantee := range grantees { + granteeSlice[i] = hex.EncodeToString(crypto.EncodeSecp256k1PublicKey(grantee)) + } + jsonhttp.OK(w, granteeSlice) +} + +// actGrantRevokeHandler is a middleware that makes updates to the list of grantees, +// only the publisher is authorized to perform this action +func (s *Service) actGrantRevokeHandler(w http.ResponseWriter, r *http.Request) { + logger := s.logger.WithName("act_grant_revoke_handler").Build() + + if r.Body == http.NoBody { + logger.Error(nil, "request has no body") + jsonhttp.BadRequest(w, errInvalidRequest) + return + } + + paths := struct { + GranteesAddress swarm.Address `map:"address,resolve" validate:"required"` + }{} + if response := s.mapStructure(mux.Vars(r), &paths); response != nil { + response("invalid path params", logger, w) + return + } + + headers := struct { + BatchID []byte `map:"Swarm-Postage-Batch-Id" validate:"required"` + SwarmTag uint64 `map:"Swarm-Tag"` + Pin bool `map:"Swarm-Pin"` + Deferred *bool `map:"Swarm-Deferred-Upload"` + HistoryAddress *swarm.Address `map:"Swarm-Act-History-Address" validate:"required"` + }{} + if response := s.mapStructure(r.Header, &headers); response != nil { + response("invalid header params", logger, w) + return + } + + historyAddress := swarm.ZeroAddress + if headers.HistoryAddress != nil { + historyAddress = *headers.HistoryAddress + } + + var ( + tag uint64 + err error + deferred = defaultUploadMethod(headers.Deferred) + ) + + if deferred || headers.Pin { + tag, err = s.getOrCreateSessionID(headers.SwarmTag) + if err != nil { + logger.Debug("get or create tag failed", "error", err) + logger.Error(nil, "get or create tag failed") + switch { + case errors.Is(err, storage.ErrNotFound): + jsonhttp.NotFound(w, "tag not found") + default: + jsonhttp.InternalServerError(w, "cannot get or create tag") + } + return + } + } + + body, err := io.ReadAll(r.Body) + if err != nil { + if jsonhttp.HandleBodyReadError(err, w) { + return + } + logger.Debug("read request body failed", "error", err) + logger.Error(nil, "read request body failed") + jsonhttp.InternalServerError(w, "cannot read request") + return + } + + gpr := GranteesPatchRequest{} + if len(body) > 0 { + err = json.Unmarshal(body, &gpr) + if err != nil { + logger.Debug("unmarshal body failed", "error", err) + logger.Error(nil, "unmarshal body failed") + jsonhttp.InternalServerError(w, "error unmarshaling request body") + return + } + } + + grantees := GranteesPatch{} + parsedAddlist, err := parseKeys(gpr.Addlist) + if err != nil { + logger.Debug("add list key parse failed", "error", err) + logger.Error(nil, "add list key parse failed") + jsonhttp.InternalServerError(w, "error add list key parsing") + return + } + grantees.Addlist = append(grantees.Addlist, parsedAddlist...) 
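// Editor's aside (illustrative, not part of the patch): on the wire this handler
// accepts hex-encoded, compressed secp256k1 public keys and answers with the new
// encrypted grantee-list reference plus the updated history root, roughly:
//
//	PATCH /grantee/{reference}
//	Swarm-Act-History-Address: <current history root>
//	Swarm-Postage-Batch-Id: <usable batch id>
//
//	{"add": ["02ab12…", "03cd34…"], "revoke": ["02ef56…"]}
//
//	-> 200 {"ref": "<encrypted grantee list ref>", "historyref": "<new history ref>"}
//
// The key encoding is the one produced by crypto.EncodeSecp256k1PublicKey and
// parsed back by parseKeys at the bottom of this file.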
+ + parsedRevokelist, err := parseKeys(gpr.Revokelist) + if err != nil { + logger.Debug("revoke list key parse failed", "error", err) + logger.Error(nil, "revoke list key parse failed") + jsonhttp.InternalServerError(w, "error revoke list key parsing") + return + } + grantees.Revokelist = append(grantees.Revokelist, parsedRevokelist...) + + ctx := r.Context() + putter, err := s.newStamperPutter(ctx, putterOptions{ + BatchID: headers.BatchID, + TagID: tag, + Pin: headers.Pin, + Deferred: deferred, + }) + if err != nil { + logger.Debug("putter failed", "error", err) + logger.Error(nil, "putter failed") + switch { + case errors.Is(err, errBatchUnusable) || errors.Is(err, postage.ErrNotUsable): + jsonhttp.UnprocessableEntity(w, "batch not usable yet or does not exist") + case errors.Is(err, postage.ErrNotFound): + jsonhttp.NotFound(w, "batch with id not found") + case errors.Is(err, errInvalidPostageBatch): + jsonhttp.BadRequest(w, "invalid batch id") + case errors.Is(err, errUnsupportedDevNodeOperation): + jsonhttp.BadRequest(w, errUnsupportedDevNodeOperation) + default: + jsonhttp.BadRequest(w, nil) + } + return + } + + granteeref := paths.GranteesAddress + publisher := &s.publicKey + ls := loadsave.New(s.storer.Download(true), s.storer.Cache(), requestPipelineFactory(ctx, putter, false, redundancy.NONE)) + gls := loadsave.New(s.storer.Download(true), s.storer.Cache(), requestPipelineFactory(ctx, putter, granteeListEncrypt, redundancy.NONE)) + granteeref, encryptedglref, historyref, actref, err := s.dac.UpdateHandler(ctx, ls, gls, granteeref, historyAddress, publisher, grantees.Addlist, grantees.Revokelist) + if err != nil { + logger.Debug("failed to update grantee list", "error", err) + logger.Error(nil, "failed to update grantee list") + jsonhttp.InternalServerError(w, "failed to update grantee list") + return + } + + err = putter.Done(actref) + if err != nil { + logger.Debug("done split act failed", "error", err) + logger.Error(nil, "done split act failed") + jsonhttp.InternalServerError(w, "done split act failed") + return + } + + err = putter.Done(historyref) + if err != nil { + logger.Debug("done split history failed", "error", err) + logger.Error(nil, "done split history failed") + jsonhttp.InternalServerError(w, "done split history failed") + return + } + + err = putter.Done(granteeref) + if err != nil { + logger.Debug("done split grantees failed", "error", err) + logger.Error(nil, "done split grantees failed") + jsonhttp.InternalServerError(w, "done split grantees failed") + return + } + + jsonhttp.OK(w, GranteesPatchResponse{ + Reference: encryptedglref, + HistoryReference: historyref, + }) +} + +// actCreateGranteesHandler is a middleware that creates a new list of grantees, +// only the publisher is authorized to perform this action +func (s *Service) actCreateGranteesHandler(w http.ResponseWriter, r *http.Request) { + logger := s.logger.WithName("acthandler").Build() + + if r.Body == http.NoBody { + logger.Error(nil, "request has no body") + jsonhttp.BadRequest(w, errInvalidRequest) + return + } + + headers := struct { + BatchID []byte `map:"Swarm-Postage-Batch-Id" validate:"required"` + SwarmTag uint64 `map:"Swarm-Tag"` + Pin bool `map:"Swarm-Pin"` + Deferred *bool `map:"Swarm-Deferred-Upload"` + HistoryAddress *swarm.Address `map:"Swarm-Act-History-Address"` + }{} + if response := s.mapStructure(r.Header, &headers); response != nil { + response("invalid header params", logger, w) + return + } + + historyAddress := swarm.ZeroAddress + if headers.HistoryAddress != nil { + 
historyAddress = *headers.HistoryAddress + } + + var ( + tag uint64 + err error + deferred = defaultUploadMethod(headers.Deferred) + ) + + if deferred || headers.Pin { + tag, err = s.getOrCreateSessionID(headers.SwarmTag) + if err != nil { + logger.Debug("get or create tag failed", "error", err) + logger.Error(nil, "get or create tag failed") + switch { + case errors.Is(err, storage.ErrNotFound): + jsonhttp.NotFound(w, "tag not found") + default: + jsonhttp.InternalServerError(w, "cannot get or create tag") + } + return + } + } + + body, err := io.ReadAll(r.Body) + if err != nil { + if jsonhttp.HandleBodyReadError(err, w) { + return + } + logger.Debug("read request body failed", "error", err) + logger.Error(nil, "read request body failed") + jsonhttp.InternalServerError(w, "cannot read request") + return + } + + gpr := GranteesPostRequest{} + if len(body) > 0 { + err = json.Unmarshal(body, &gpr) + if err != nil { + logger.Debug("unmarshal body failed", "error", err) + logger.Error(nil, "unmarshal body failed") + jsonhttp.InternalServerError(w, "error unmarshaling request body") + return + } + } + + list, err := parseKeys(gpr.GranteeList) + if err != nil { + logger.Debug("create list key parse failed", "error", err) + logger.Error(nil, "create list key parse failed") + jsonhttp.InternalServerError(w, "error create list key parsing") + return + } + + ctx := r.Context() + putter, err := s.newStamperPutter(ctx, putterOptions{ + BatchID: headers.BatchID, + TagID: tag, + Pin: headers.Pin, + Deferred: deferred, + }) + if err != nil { + logger.Debug("putter failed", "error", err) + logger.Error(nil, "putter failed") + switch { + case errors.Is(err, errBatchUnusable) || errors.Is(err, postage.ErrNotUsable): + jsonhttp.UnprocessableEntity(w, "batch not usable yet or does not exist") + case errors.Is(err, postage.ErrNotFound): + jsonhttp.NotFound(w, "batch with id not found") + case errors.Is(err, errInvalidPostageBatch): + jsonhttp.BadRequest(w, "invalid batch id") + case errors.Is(err, errUnsupportedDevNodeOperation): + jsonhttp.BadRequest(w, errUnsupportedDevNodeOperation) + default: + jsonhttp.BadRequest(w, nil) + } + return + } + + publisher := &s.publicKey + ls := loadsave.New(s.storer.Download(true), s.storer.Cache(), requestPipelineFactory(ctx, putter, false, redundancy.NONE)) + gls := loadsave.New(s.storer.Download(true), s.storer.Cache(), requestPipelineFactory(ctx, putter, granteeListEncrypt, redundancy.NONE)) + granteeref, encryptedglref, historyref, actref, err := s.dac.UpdateHandler(ctx, ls, gls, swarm.ZeroAddress, historyAddress, publisher, list, nil) + if err != nil { + logger.Debug("failed to update grantee list", "error", err) + logger.Error(nil, "failed to update grantee list") + jsonhttp.InternalServerError(w, "failed to update grantee list") + return + } + + err = putter.Done(actref) + if err != nil { + logger.Debug("done split act failed", "error", err) + logger.Error(nil, "done split act failed") + jsonhttp.InternalServerError(w, "done split act failed") + return + } + + err = putter.Done(historyref) + if err != nil { + logger.Debug("done split history failed", "error", err) + logger.Error(nil, "done split history failed") + jsonhttp.InternalServerError(w, "done split history failed") + return + } + + err = putter.Done(granteeref) + if err != nil { + logger.Debug("done split grantees failed", "error", err) + logger.Error(nil, "done split grantees failed") + jsonhttp.InternalServerError(w, "done split grantees failed") + return + } + + jsonhttp.Created(w, GranteesPostResponse{ + 
Reference: encryptedglref, + HistoryReference: historyref, + }) +} + +func parseKeys(list []string) ([]*ecdsa.PublicKey, error) { + parsedList := make([]*ecdsa.PublicKey, 0, len(list)) + for _, g := range list { + h, err := hex.DecodeString(g) + if err != nil { + return []*ecdsa.PublicKey{}, err + } + k, err := btcec.ParsePubKey(h) + if err != nil { + return []*ecdsa.PublicKey{}, err + } + parsedList = append(parsedList, k.ToECDSA()) + } + + return parsedList, nil +} diff --git a/pkg/api/dynamicaccess_test.go b/pkg/api/dynamicaccess_test.go new file mode 100644 index 00000000000..79eac36327b --- /dev/null +++ b/pkg/api/dynamicaccess_test.go @@ -0,0 +1,935 @@ +// Copyright 2020 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package api_test + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "testing" + "time" + + "github.com/ethersphere/bee/v2/pkg/api" + "github.com/ethersphere/bee/v2/pkg/crypto" + "github.com/ethersphere/bee/v2/pkg/dynamicaccess" + mockdac "github.com/ethersphere/bee/v2/pkg/dynamicaccess/mock" + "github.com/ethersphere/bee/v2/pkg/file/loadsave" + "github.com/ethersphere/bee/v2/pkg/file/redundancy" + "github.com/ethersphere/bee/v2/pkg/jsonhttp" + "github.com/ethersphere/bee/v2/pkg/jsonhttp/jsonhttptest" + "github.com/ethersphere/bee/v2/pkg/log" + mockpost "github.com/ethersphere/bee/v2/pkg/postage/mock" + testingsoc "github.com/ethersphere/bee/v2/pkg/soc/testing" + mockstorer "github.com/ethersphere/bee/v2/pkg/storer/mock" + "github.com/ethersphere/bee/v2/pkg/swarm" + "gitlab.com/nolash/go-mockbytes" +) + +func prepareHistoryFixture(storer api.Storer) (dynamicaccess.History, swarm.Address) { + ctx := context.Background() + ls := loadsave.New(storer.ChunkStore(), storer.Cache(), pipelineFactory(storer.Cache(), false, redundancy.NONE)) + + h, _ := dynamicaccess.NewHistory(ls) + + testActRef1 := swarm.NewAddress([]byte("39a5ea87b141fe44aa609c3327ecd891")) + firstTime := time.Date(1994, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + _ = h.Add(ctx, testActRef1, &firstTime, nil) + + testActRef2 := swarm.NewAddress([]byte("39a5ea87b141fe44aa609c3327ecd892")) + secondTime := time.Date(2000, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + _ = h.Add(ctx, testActRef2, &secondTime, nil) + + testActRef3 := swarm.NewAddress([]byte("39a5ea87b141fe44aa609c3327ecd893")) + thirdTime := time.Date(2015, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + _ = h.Add(ctx, testActRef3, &thirdTime, nil) + + testActRef4 := swarm.NewAddress([]byte("39a5ea87b141fe44aa609c3327ecd894")) + fourthTime := time.Date(2020, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + _ = h.Add(ctx, testActRef4, &fourthTime, nil) + + testActRef5 := swarm.NewAddress([]byte("39a5ea87b141fe44aa609c3327ecd895")) + fifthTime := time.Date(2030, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + _ = h.Add(ctx, testActRef5, &fifthTime, nil) + + ref, _ := h.Store(ctx) + return h, ref +} + +// TODO: test tag, pin, deferred, stamp +// TODO: feed test +// nolint:paralleltest,tparallel +// TestDacWithoutActHeader [positive tests]: +// On each endpoint: upload w/ "Swarm-Act" header then download and check the decrypted data +func TestDacEachEndpointWithAct(t *testing.T) { + t.Parallel() + var ( + spk, _ = hex.DecodeString("a786dd84b61485de12146fd9c4c02d87e8fd95f0542765cb7fc3d2e428c0bcfa") + pk, _ = crypto.DecodeSecp256k1PrivateKey(spk) + publicKeyBytes = 
crypto.EncodeSecp256k1PublicKey(&pk.PublicKey) + publisher = hex.EncodeToString(publicKeyBytes) + testfile = "testfile1" + storerMock = mockstorer.New() + logger = log.Noop + now = time.Now().Unix() + chunk = swarm.NewChunk( + swarm.MustParseHexAddress("0025737be11979e91654dffd2be817ac1e52a2dadb08c97a7cef12f937e707bc"), + []byte{72, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 149, 179, 31, 244, 146, 247, 129, 123, 132, 248, 215, 77, 44, 47, 91, 248, 229, 215, 89, 156, 210, 243, 3, 110, 204, 74, 101, 119, 53, 53, 145, 188, 193, 153, 130, 197, 83, 152, 36, 140, 150, 209, 191, 214, 193, 4, 144, 121, 32, 45, 205, 220, 59, 227, 28, 43, 161, 51, 108, 14, 106, 180, 135, 2}, + ) + g = mockbytes.New(0, mockbytes.MockTypeStandard).WithModulus(255) + bytedata, _ = g.SequentialBytes(swarm.ChunkSize * 2) + tag, _ = storerMock.NewSession() + sch = testingsoc.GenerateMockSOCWithKey(t, []byte("foo"), pk) + dirdata = []byte("Lorem ipsum dolor sit amet") + socResource = func(owner, id, sig string) string { return fmt.Sprintf("/soc/%s/%s?sig=%s", owner, id, sig) } + ) + + tc := []struct { + name string + downurl string + upurl string + exphash string + data io.Reader + expdata []byte + contenttype string + resp struct { + Reference swarm.Address `json:"reference"` + } + }{ + { + name: "bzz", + upurl: "/bzz?name=sample.html", + downurl: "/bzz", + exphash: "a5df670544eaea29e61b19d8739faa4573b19e4426e58a173e51ed0b5e7e2ade", + resp: api.BzzUploadResponse{Reference: swarm.MustParseHexAddress("a5df670544eaea29e61b19d8739faa4573b19e4426e58a173e51ed0b5e7e2ade")}, + data: strings.NewReader(testfile), + expdata: []byte(testfile), + contenttype: "text/html; charset=utf-8", + }, + { + name: "bzz-dir", + upurl: "/bzz?name=ipsum/lorem.txt", + downurl: "/bzz", + exphash: "6561b2a744d2a8f276270585da22e092c07c56624af83ac9969d52b54e87cee6/ipsum/lorem.txt", + resp: api.BzzUploadResponse{Reference: swarm.MustParseHexAddress("6561b2a744d2a8f276270585da22e092c07c56624af83ac9969d52b54e87cee6")}, + data: tarFiles(t, []f{ + { + data: dirdata, + name: "lorem.txt", + dir: "ipsum", + header: http.Header{ + api.ContentTypeHeader: {"text/plain; charset=utf-8"}, + }, + }, + }), + expdata: dirdata, + contenttype: api.ContentTypeTar, + }, + { + name: "bytes", + upurl: "/bytes", + downurl: "/bytes", + exphash: "e30da540bb9e1901169977fcf617f28b7f8df4537de978784f6d47491619a630", + resp: api.BytesPostResponse{Reference: swarm.MustParseHexAddress("e30da540bb9e1901169977fcf617f28b7f8df4537de978784f6d47491619a630")}, + data: bytes.NewReader(bytedata), + expdata: bytedata, + contenttype: "application/octet-stream", + }, + { + name: "chunks", + upurl: "/chunks", + downurl: "/chunks", + exphash: "ca8d2d29466e017cba46d383e7e0794d99a141185ec525086037f25fc2093155", + resp: api.ChunkAddressResponse{Reference: swarm.MustParseHexAddress("ca8d2d29466e017cba46d383e7e0794d99a141185ec525086037f25fc2093155")}, + data: bytes.NewReader(chunk.Data()), + expdata: chunk.Data(), + contenttype: "binary/octet-stream", + }, + { + name: "soc", + upurl: socResource(hex.EncodeToString(sch.Owner), hex.EncodeToString(sch.ID), hex.EncodeToString(sch.Signature)), + downurl: "/chunks", + exphash: "b100d7ce487426b17b98ff779fad4f2dd471d04ab1c8949dd2a1a78fe4a1524e", + resp: api.ChunkAddressResponse{Reference: swarm.MustParseHexAddress("b100d7ce487426b17b98ff779fad4f2dd471d04ab1c8949dd2a1a78fe4a1524e")}, + data: bytes.NewReader(sch.WrappedChunk.Data()), + expdata: sch.Chunk().Data(), + contenttype: "binary/octet-stream", + }, + } + + for _, v := range tc { + upTestOpts := 
[]jsonhttptest.Option{ + jsonhttptest.WithRequestHeader(api.SwarmActHeader, "true"), + jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr), + jsonhttptest.WithRequestHeader(api.SwarmPinHeader, "true"), + jsonhttptest.WithRequestHeader(api.SwarmTagHeader, fmt.Sprintf("%d", tag.TagID)), + jsonhttptest.WithRequestBody(v.data), + jsonhttptest.WithExpectedJSONResponse(v.resp), + jsonhttptest.WithRequestHeader(api.ContentTypeHeader, v.contenttype), + } + if v.name == "soc" { + upTestOpts = append(upTestOpts, jsonhttptest.WithRequestHeader(api.SwarmPinHeader, "true")) + } else { + upTestOpts = append(upTestOpts, jsonhttptest.WithNonEmptyResponseHeader(api.SwarmTagHeader)) + } + expcontenttype := v.contenttype + if v.name == "bzz-dir" { + expcontenttype = "text/plain; charset=utf-8" + upTestOpts = append(upTestOpts, jsonhttptest.WithRequestHeader(api.SwarmCollectionHeader, "True")) + } + t.Run(v.name, func(t *testing.T) { + client, _, _, _ := newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(), + }) + header := jsonhttptest.Request(t, client, http.MethodPost, v.upurl, http.StatusCreated, + upTestOpts..., + ) + + historyRef := header.Get(api.SwarmActHistoryAddressHeader) + jsonhttptest.Request(t, client, http.MethodGet, v.downurl+"/"+v.exphash, http.StatusOK, + jsonhttptest.WithRequestHeader(api.SwarmActTimestampHeader, strconv.FormatInt(now, 10)), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, historyRef), + jsonhttptest.WithRequestHeader(api.SwarmActPublisherHeader, publisher), + jsonhttptest.WithExpectedResponse(v.expdata), + jsonhttptest.WithExpectedContentLength(len(v.expdata)), + jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, expcontenttype), + ) + + if v.name != "bzz-dir" && v.name != "soc" && v.name != "chunks" { + t.Run("head", func(t *testing.T) { + jsonhttptest.Request(t, client, http.MethodHead, v.downurl+"/"+v.exphash, http.StatusOK, + jsonhttptest.WithRequestHeader(api.SwarmActTimestampHeader, strconv.FormatInt(now, 10)), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, historyRef), + jsonhttptest.WithRequestHeader(api.SwarmActPublisherHeader, publisher), + jsonhttptest.WithRequestBody(v.data), + jsonhttptest.WithExpectedContentLength(len(v.expdata)), + jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, expcontenttype), + ) + }) + } + }) + } +} + +// TestDacWithoutActHeader [negative tests]: +// 1. upload w/ "Swarm-Act" header then try to dowload w/o the header. +// 2. upload w/o "Swarm-Act" header then try to dowload w/ the header. 
+// +//nolint:paralleltest,tparallel +func TestDacWithoutAct(t *testing.T) { + t.Parallel() + var ( + spk, _ = hex.DecodeString("a786dd84b61485de12146fd9c4c02d87e8fd95f0542765cb7fc3d2e428c0bcfa") + pk, _ = crypto.DecodeSecp256k1PrivateKey(spk) + publicKeyBytes = crypto.EncodeSecp256k1PublicKey(&pk.PublicKey) + publisher = hex.EncodeToString(publicKeyBytes) + fileUploadResource = "/bzz" + fileDownloadResource = func(addr string) string { return "/bzz/" + addr } + storerMock = mockstorer.New() + h, fixtureHref = prepareHistoryFixture(storerMock) + logger = log.Noop + fileName = "sample.html" + now = time.Now().Unix() + ) + + t.Run("upload-w/-act-then-download-w/o-act", func(t *testing.T) { + client, _, _, _ := newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(mockdac.WithHistory(h, fixtureHref.String())), + }) + var ( + testfile = "testfile1" + encryptedRef = "a5df670544eaea29e61b19d8739faa4573b19e4426e58a173e51ed0b5e7e2ade" + ) + jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource+"?name="+fileName, http.StatusCreated, + jsonhttptest.WithRequestHeader(api.SwarmActHeader, "true"), + jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr), + jsonhttptest.WithRequestBody(strings.NewReader(testfile)), + jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{ + Reference: swarm.MustParseHexAddress(encryptedRef), + }), + jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "text/html; charset=utf-8"), + jsonhttptest.WithNonEmptyResponseHeader(api.SwarmTagHeader), + jsonhttptest.WithExpectedResponseHeader(api.ETagHeader, fmt.Sprintf("%q", encryptedRef)), + ) + + jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(encryptedRef), http.StatusNotFound, + jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ + Message: "address not found or incorrect", + Code: http.StatusNotFound, + }), + jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "application/json; charset=utf-8"), + ) + }) + + t.Run("upload-w/o-act-then-download-w/-act", func(t *testing.T) { + client, _, _, _ := newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(), + }) + var ( + rootHash = "0cb947ccbc410c43139ba4409d83bf89114cb0d79556a651c06c888cf73f4d7e" + sampleHtml = ` + +
+ +My first paragraph.
+ + + ` + ) + + jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource+"?name="+fileName, http.StatusCreated, + jsonhttptest.WithRequestHeader(api.SwarmDeferredUploadHeader, "true"), + jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr), + jsonhttptest.WithRequestBody(strings.NewReader(sampleHtml)), + jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{ + Reference: swarm.MustParseHexAddress(rootHash), + }), + jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "text/html; charset=utf-8"), + jsonhttptest.WithNonEmptyResponseHeader(api.SwarmTagHeader), + jsonhttptest.WithExpectedResponseHeader(api.ETagHeader, fmt.Sprintf("%q", rootHash)), + ) + + jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(rootHash), http.StatusInternalServerError, + jsonhttptest.WithRequestHeader(api.SwarmActTimestampHeader, strconv.FormatInt(now, 10)), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, fixtureHref.String()), + jsonhttptest.WithRequestHeader(api.SwarmActPublisherHeader, publisher), + jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ + Message: api.ErrActDownload.Error(), + Code: http.StatusInternalServerError, + }), + jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "application/json; charset=utf-8"), + ) + }) +} + +// TestDacInvalidPath [negative test]: Expect Bad request when the path address is invalid. +// +//nolint:paralleltest,tparallel +func TestDacInvalidPath(t *testing.T) { + t.Parallel() + var ( + spk, _ = hex.DecodeString("a786dd84b61485de12146fd9c4c02d87e8fd95f0542765cb7fc3d2e428c0bcfa") + pk, _ = crypto.DecodeSecp256k1PrivateKey(spk) + publicKeyBytes = crypto.EncodeSecp256k1PublicKey(&pk.PublicKey) + publisher = hex.EncodeToString(publicKeyBytes) + fileDownloadResource = func(addr string) string { return "/bzz/" + addr } + storerMock = mockstorer.New() + _, fixtureHref = prepareHistoryFixture(storerMock) + logger = log.Noop + now = time.Now().Unix() + ) + + t.Run("invalid-path-params", func(t *testing.T) { + client, _, _, _ := newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(), + }) + encryptedRef := "asd" + + jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(encryptedRef), http.StatusBadRequest, + jsonhttptest.WithRequestHeader(api.SwarmActTimestampHeader, strconv.FormatInt(now, 10)), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, fixtureHref.String()), + jsonhttptest.WithRequestHeader(api.SwarmActPublisherHeader, publisher), + jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ + Code: http.StatusBadRequest, + Message: "invalid path params", + Reasons: []jsonhttp.Reason{ + { + Field: "address", + Error: api.HexInvalidByteError('s').Error(), + }, + }, + }), + jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "text/html; charset=utf-8"), + ) + }) +} + +// nolint:paralleltest,tparallel +// TestDacHistory tests: +// [positive tests] 1., 2.: uploading a file w/ and w/o history address then downloading it and checking the data. +// [negative test] 3. uploading a file then downloading it with a wrong history address. +// [negative test] 4. uploading a file to a wrong history address. +// [negative test] 5. downloading a file to w/o history address. 
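The history behaviour that the cases below pin down reads as a capture-and-replay loop: an upload without a Swarm-Act-History-Address makes the node create a fresh history and return its reference in the response header, and that same reference must accompany every later download. A client-side sketch under those assumptions (the node URL and batch ID are placeholders, error handling is kept minimal, and the JSON shape matches the BzzUploadResponse used above):

    package actclient

    import (
    	"bytes"
    	"encoding/json"
    	"io"
    	"net/http"
    	"strconv"
    	"time"
    )

    // actRoundTrip uploads body under ACT, then downloads it again by replaying
    // the history reference the upload returned.
    func actRoundTrip(nodeURL, batchID string, body []byte) ([]byte, error) {
    	up, err := http.NewRequest(http.MethodPost, nodeURL+"/bzz?name=sample.html", bytes.NewReader(body))
    	if err != nil {
    		return nil, err
    	}
    	up.Header.Set("Swarm-Act", "true")
    	up.Header.Set("Swarm-Postage-Batch-Id", batchID)
    	resp, err := http.DefaultClient.Do(up)
    	if err != nil {
    		return nil, err
    	}
    	defer resp.Body.Close()

    	var out struct {
    		Reference string `json:"reference"` // encrypted reference of the upload
    	}
    	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
    		return nil, err
    	}
    	history := resp.Header.Get("Swarm-Act-History-Address") // created by the node on first upload

    	down, err := http.NewRequest(http.MethodGet, nodeURL+"/bzz/"+out.Reference, nil)
    	if err != nil {
    		return nil, err
    	}
    	down.Header.Set("Swarm-Act-History-Address", history)
    	down.Header.Set("Swarm-Act-Publisher", "<publisher-pubkey-hex>") // the uploader's own key by default
    	down.Header.Set("Swarm-Act-Timestamp", strconv.FormatInt(time.Now().Unix(), 10))
    	dresp, err := http.DefaultClient.Do(down)
    	if err != nil {
    		return nil, err
    	}
    	defer dresp.Body.Close()
    	return io.ReadAll(dresp.Body)
    }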
+func TestDacHistory(t *testing.T) { + t.Parallel() + var ( + spk, _ = hex.DecodeString("a786dd84b61485de12146fd9c4c02d87e8fd95f0542765cb7fc3d2e428c0bcfa") + pk, _ = crypto.DecodeSecp256k1PrivateKey(spk) + publicKeyBytes = crypto.EncodeSecp256k1PublicKey(&pk.PublicKey) + publisher = hex.EncodeToString(publicKeyBytes) + fileUploadResource = "/bzz" + fileDownloadResource = func(addr string) string { return "/bzz/" + addr } + storerMock = mockstorer.New() + h, fixtureHref = prepareHistoryFixture(storerMock) + logger = log.Noop + fileName = "sample.html" + now = time.Now().Unix() + ) + + t.Run("empty-history-upload-then-download-and-check-data", func(t *testing.T) { + client, _, _, _ := newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(), + }) + var ( + testfile = "testfile1" + encryptedRef = "a5df670544eaea29e61b19d8739faa4573b19e4426e58a173e51ed0b5e7e2ade" + ) + header := jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource+"?name="+fileName, http.StatusCreated, + jsonhttptest.WithRequestHeader(api.SwarmActHeader, "true"), + jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr), + jsonhttptest.WithRequestBody(strings.NewReader(testfile)), + jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{ + Reference: swarm.MustParseHexAddress(encryptedRef), + }), + jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "text/html; charset=utf-8"), + jsonhttptest.WithNonEmptyResponseHeader(api.SwarmTagHeader), + jsonhttptest.WithExpectedResponseHeader(api.ETagHeader, fmt.Sprintf("%q", encryptedRef)), + ) + + historyRef := header.Get(api.SwarmActHistoryAddressHeader) + jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(encryptedRef), http.StatusOK, + jsonhttptest.WithRequestHeader(api.SwarmActTimestampHeader, strconv.FormatInt(now, 10)), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, historyRef), + jsonhttptest.WithRequestHeader(api.SwarmActPublisherHeader, publisher), + jsonhttptest.WithExpectedResponse([]byte(testfile)), + jsonhttptest.WithExpectedContentLength(len(testfile)), + jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "text/html; charset=utf-8"), + jsonhttptest.WithExpectedResponseHeader(api.ContentDispositionHeader, fmt.Sprintf(`inline; filename="%s"`, fileName)), + ) + }) + + t.Run("with-history-upload-then-download-and-check-data", func(t *testing.T) { + client, _, _, _ := newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(mockdac.WithHistory(h, fixtureHref.String())), + }) + var ( + encryptedRef = "c611199e1b3674d6bf89a83e518bd16896bf5315109b4a23dcb4682a02d17b97" + testfile = ` + + + +My first paragraph.
+ + + ` + ) + + jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource+"?name="+fileName, http.StatusCreated, + jsonhttptest.WithRequestHeader(api.SwarmActHeader, "true"), + jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, fixtureHref.String()), + jsonhttptest.WithRequestBody(strings.NewReader(testfile)), + jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{ + Reference: swarm.MustParseHexAddress(encryptedRef), + }), + jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "text/html; charset=utf-8"), + jsonhttptest.WithNonEmptyResponseHeader(api.SwarmTagHeader), + jsonhttptest.WithExpectedResponseHeader(api.ETagHeader, fmt.Sprintf("%q", encryptedRef)), + ) + + jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(encryptedRef), http.StatusOK, + jsonhttptest.WithRequestHeader(api.SwarmActTimestampHeader, strconv.FormatInt(now, 10)), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, fixtureHref.String()), + jsonhttptest.WithRequestHeader(api.SwarmActPublisherHeader, publisher), + jsonhttptest.WithExpectedResponse([]byte(testfile)), + jsonhttptest.WithExpectedContentLength(len(testfile)), + jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "text/html; charset=utf-8"), + jsonhttptest.WithExpectedResponseHeader(api.ContentDispositionHeader, fmt.Sprintf(`inline; filename="%s"`, fileName)), + ) + }) + + t.Run("upload-then-download-wrong-history", func(t *testing.T) { + client, _, _, _ := newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(mockdac.WithHistory(h, fixtureHref.String())), + }) + var ( + testfile = "testfile1" + encryptedRef = "a5df670544eaea29e61b19d8739faa4573b19e4426e58a173e51ed0b5e7e2ade" + ) + jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource+"?name="+fileName, http.StatusCreated, + jsonhttptest.WithRequestHeader(api.SwarmActHeader, "true"), + jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr), + jsonhttptest.WithRequestBody(strings.NewReader(testfile)), + jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{ + Reference: swarm.MustParseHexAddress(encryptedRef), + }), + jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "text/html; charset=utf-8"), + jsonhttptest.WithNonEmptyResponseHeader(api.SwarmTagHeader), + jsonhttptest.WithExpectedResponseHeader(api.ETagHeader, fmt.Sprintf("%q", encryptedRef)), + ) + + jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(encryptedRef), http.StatusInternalServerError, + jsonhttptest.WithRequestHeader(api.SwarmActTimestampHeader, strconv.FormatInt(now, 10)), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, "fc4e9fe978991257b897d987bc4ff13058b66ef45a53189a0b4fe84bb3346396"), + jsonhttptest.WithRequestHeader(api.SwarmActPublisherHeader, publisher), + jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ + Message: api.ErrActDownload.Error(), + Code: http.StatusInternalServerError, + }), + jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "application/json; charset=utf-8"), + ) + }) + + t.Run("upload-wrong-history", func(t *testing.T) { + client, _, _, _ := newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(), + }) + testfile := "testfile1" + + 
jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource+"?name="+fileName, http.StatusInternalServerError, + jsonhttptest.WithRequestHeader(api.SwarmActHeader, "true"), + jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, fixtureHref.String()), + jsonhttptest.WithRequestBody(strings.NewReader(testfile)), + jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ + Message: api.ErrActUpload.Error(), + Code: http.StatusInternalServerError, + }), + jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "text/html; charset=utf-8"), + ) + }) + + t.Run("download-w/o-history", func(t *testing.T) { + client, _, _, _ := newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(mockdac.WithHistory(h, fixtureHref.String())), + }) + encryptedRef := "a5df670544eaea29e61b19d8739faa4573b19e4426e58a173e51ed0b5e7e2ade" + + jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(encryptedRef), http.StatusNotFound, + jsonhttptest.WithRequestHeader(api.SwarmActTimestampHeader, strconv.FormatInt(now, 10)), + jsonhttptest.WithRequestHeader(api.SwarmActPublisherHeader, publisher), + jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "application/json; charset=utf-8"), + ) + }) +} + +// nolint:paralleltest,tparallel +// TestDacTimestamp +// [positive test] 1.: uploading a file w/ ACT then download it w/ timestamp and check the data. +// [negative test] 2.: try to download a file w/o timestamp. +func TestDacTimestamp(t *testing.T) { + t.Parallel() + var ( + spk, _ = hex.DecodeString("a786dd84b61485de12146fd9c4c02d87e8fd95f0542765cb7fc3d2e428c0bcfa") + pk, _ = crypto.DecodeSecp256k1PrivateKey(spk) + publicKeyBytes = crypto.EncodeSecp256k1PublicKey(&pk.PublicKey) + publisher = hex.EncodeToString(publicKeyBytes) + fileUploadResource = "/bzz" + fileDownloadResource = func(addr string) string { return "/bzz/" + addr } + storerMock = mockstorer.New() + h, fixtureHref = prepareHistoryFixture(storerMock) + logger = log.Noop + fileName = "sample.html" + ) + t.Run("upload-then-download-with-timestamp-and-check-data", func(t *testing.T) { + client, _, _, _ := newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(mockdac.WithHistory(h, fixtureHref.String())), + }) + var ( + thirdTime = time.Date(2015, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + encryptedRef = "c611199e1b3674d6bf89a83e518bd16896bf5315109b4a23dcb4682a02d17b97" + testfile = ` + + + +My first paragraph.
+ + + ` ) + + jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource+"?name="+fileName, http.StatusCreated, + jsonhttptest.WithRequestHeader(api.SwarmActHeader, "true"), + jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, fixtureHref.String()), + jsonhttptest.WithRequestBody(strings.NewReader(testfile)), + jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{ + Reference: swarm.MustParseHexAddress(encryptedRef), + }), + jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "text/html; charset=utf-8"), + jsonhttptest.WithNonEmptyResponseHeader(api.SwarmTagHeader), + jsonhttptest.WithExpectedResponseHeader(api.ETagHeader, fmt.Sprintf("%q", encryptedRef)), + ) + + jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(encryptedRef), http.StatusOK, + jsonhttptest.WithRequestHeader(api.SwarmActTimestampHeader, strconv.FormatInt(thirdTime, 10)), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, fixtureHref.String()), + jsonhttptest.WithRequestHeader(api.SwarmActPublisherHeader, publisher), + jsonhttptest.WithExpectedResponse([]byte(testfile)), + jsonhttptest.WithExpectedContentLength(len(testfile)), + jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "text/html; charset=utf-8"), + jsonhttptest.WithExpectedResponseHeader(api.ContentDispositionHeader, fmt.Sprintf(`inline; filename="%s"`, fileName)), + ) + }) + + t.Run("download-w/o-timestamp", func(t *testing.T) { + encryptedRef := "a5df670544eaea29e61b19d8739faa4573b19e4426e58a173e51ed0b5e7e2ade" + client, _, _, _ := newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(mockdac.WithHistory(h, fixtureHref.String())), + }) + + jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(encryptedRef), http.StatusNotFound, + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, fixtureHref.String()), + jsonhttptest.WithRequestHeader(api.SwarmActPublisherHeader, publisher), + jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "application/json; charset=utf-8"), + ) + }) +} + +// nolint:paralleltest,tparallel +// TestDacPublisher +// [positive test] 1.: uploading a file w/ ACT then downloading it w/ the publisher address and checking the data. +// [negative test] 2.: expect Bad request when the public key is invalid. +// [negative test] 3.: try to download a file w/ an incorrect publisher address. +// [negative test] 4.: try to download a file w/o a publisher address.
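A note on the publisher value these cases exchange: it is the hex encoding of the key produced by crypto.EncodeSecp256k1PublicKey, apparently the compressed public key form (33 bytes, 66 hex characters), which is consistent with the invalid-publickey case below rejecting a 64-character value with "invalid length: 32". A small sketch of deriving it from the same fixture key the tests use; the compressed-encoding detail is an assumption about the crypto package, not something stated in this diff:

    package main

    import (
    	"encoding/hex"
    	"fmt"

    	"github.com/ethersphere/bee/v2/pkg/crypto"
    )

    func main() {
    	// Same fixture private key as the surrounding tests; illustration only.
    	seed, err := hex.DecodeString("a786dd84b61485de12146fd9c4c02d87e8fd95f0542765cb7fc3d2e428c0bcfa")
    	if err != nil {
    		panic(err)
    	}
    	pk, err := crypto.DecodeSecp256k1PrivateKey(seed)
    	if err != nil {
    		panic(err)
    	}
    	// The value sent in the Swarm-Act-Publisher header.
    	publisher := hex.EncodeToString(crypto.EncodeSecp256k1PublicKey(&pk.PublicKey))
    	fmt.Println(len(publisher), publisher) // 66 hex characters, not 64
    }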
+func TestDacPublisher(t *testing.T) { + t.Parallel() + var ( + spk, _ = hex.DecodeString("a786dd84b61485de12146fd9c4c02d87e8fd95f0542765cb7fc3d2e428c0bcfa") + pk, _ = crypto.DecodeSecp256k1PrivateKey(spk) + publicKeyBytes = crypto.EncodeSecp256k1PublicKey(&pk.PublicKey) + publisher = hex.EncodeToString(publicKeyBytes) + fileUploadResource = "/bzz" + fileDownloadResource = func(addr string) string { return "/bzz/" + addr } + storerMock = mockstorer.New() + h, fixtureHref = prepareHistoryFixture(storerMock) + logger = log.Noop + fileName = "sample.html" + now = time.Now().Unix() + ) + + t.Run("upload-then-download-w/-publisher-and-check-data", func(t *testing.T) { + client, _, _, _ := newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(mockdac.WithHistory(h, fixtureHref.String()), mockdac.WithPublisher(publisher)), + }) + var ( + encryptedRef = "a5a26b4915d7ce1622f9ca52252092cf2445f98d359dabaf52588c05911aaf4f" + testfile = ` + + + +My first paragraph.
+ + + ` + ) + + jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource+"?name="+fileName, http.StatusCreated, + jsonhttptest.WithRequestHeader(api.SwarmActHeader, "true"), + jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, fixtureHref.String()), + jsonhttptest.WithRequestBody(strings.NewReader(testfile)), + jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{ + Reference: swarm.MustParseHexAddress(encryptedRef), + }), + jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "text/html; charset=utf-8"), + jsonhttptest.WithNonEmptyResponseHeader(api.SwarmTagHeader), + jsonhttptest.WithExpectedResponseHeader(api.ETagHeader, fmt.Sprintf("%q", encryptedRef)), + ) + + jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(encryptedRef), http.StatusOK, + jsonhttptest.WithRequestHeader(api.SwarmActTimestampHeader, strconv.FormatInt(now, 10)), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, fixtureHref.String()), + jsonhttptest.WithRequestHeader(api.SwarmActPublisherHeader, publisher), + jsonhttptest.WithExpectedResponse([]byte(testfile)), + jsonhttptest.WithExpectedContentLength(len(testfile)), + jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "text/html; charset=utf-8"), + jsonhttptest.WithExpectedResponseHeader(api.ContentDispositionHeader, fmt.Sprintf(`inline; filename="%s"`, fileName)), + ) + }) + + t.Run("upload-then-download-invalid-publickey", func(t *testing.T) { + client, _, _, _ := newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(mockdac.WithPublisher(publisher)), + }) + var ( + publickey = "b786dd84b61485de12146fd9c4c02d87e8fd95f0542765cb7fc3d2e428c0bcfb" + encryptedRef = "a5a26b4915d7ce1622f9ca52252092cf2445f98d359dabaf52588c05911aaf4f" + testfile = ` + + + +My first paragraph.
+ + + ` + ) + + header := jsonhttptest.Request(t, client, http.MethodPost, fileUploadResource+"?name="+fileName, http.StatusCreated, + jsonhttptest.WithRequestHeader(api.SwarmActHeader, "true"), + jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr), + jsonhttptest.WithRequestBody(strings.NewReader(testfile)), + jsonhttptest.WithExpectedJSONResponse(api.BzzUploadResponse{ + Reference: swarm.MustParseHexAddress(encryptedRef), + }), + jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "text/html; charset=utf-8"), + jsonhttptest.WithNonEmptyResponseHeader(api.SwarmTagHeader), + jsonhttptest.WithExpectedResponseHeader(api.ETagHeader, fmt.Sprintf("%q", encryptedRef)), + ) + + historyRef := header.Get(api.SwarmActHistoryAddressHeader) + jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(encryptedRef), http.StatusBadRequest, + jsonhttptest.WithRequestHeader(api.SwarmActTimestampHeader, strconv.FormatInt(now, 10)), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, historyRef), + jsonhttptest.WithRequestHeader(api.SwarmActPublisherHeader, publickey), + jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ + Code: http.StatusBadRequest, + Message: "invalid header params", + Reasons: []jsonhttp.Reason{ + { + Field: "Swarm-Act-Publisher", + Error: "malformed public key: invalid length: 32", + }, + }, + }), + jsonhttptest.WithRequestHeader(api.ContentTypeHeader, "text/html; charset=utf-8"), + ) + }) + + t.Run("download-w/-wrong-publisher", func(t *testing.T) { + var ( + downloader = "03c712a7e29bc792ac8d8ae49793d28d5bda27ed70f0d90697b2fb456c0a168bd2" + encryptedRef = "a5df670544eaea29e61b19d8739faa4573b19e4426e58a173e51ed0b5e7e2ade" + ) + client, _, _, _ := newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(mockdac.WithHistory(h, fixtureHref.String()), mockdac.WithPublisher(publisher)), + }) + + jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(encryptedRef), http.StatusInternalServerError, + jsonhttptest.WithRequestHeader(api.SwarmActTimestampHeader, strconv.FormatInt(now, 10)), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, fixtureHref.String()), + jsonhttptest.WithRequestHeader(api.SwarmActPublisherHeader, downloader), + jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ + Message: api.ErrActDownload.Error(), + Code: http.StatusInternalServerError, + }), + jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "application/json; charset=utf-8"), + ) + }) + + t.Run("download-w/o-publisher", func(t *testing.T) { + encryptedRef := "a5df670544eaea29e61b19d8739faa4573b19e4426e58a173e51ed0b5e7e2ade" + client, _, _, _ := newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(mockdac.WithHistory(h, fixtureHref.String()), mockdac.WithPublisher(publisher)), + }) + + jsonhttptest.Request(t, client, http.MethodGet, fileDownloadResource(encryptedRef), http.StatusNotFound, + jsonhttptest.WithRequestHeader(api.SwarmActTimestampHeader, strconv.FormatInt(now, 10)), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, fixtureHref.String()), + jsonhttptest.WithRequestHeader(api.SwarmActPublisherHeader, publisher), + jsonhttptest.WithExpectedResponseHeader(api.ContentTypeHeader, "application/json; charset=utf-8"), + ) + }) +} + +func TestDacGrantees(t 
*testing.T) { + t.Parallel() + var ( + spk, _ = hex.DecodeString("a786dd84b61485de12146fd9c4c02d87e8fd95f0542765cb7fc3d2e428c0bcfa") + pk, _ = crypto.DecodeSecp256k1PrivateKey(spk) + storerMock = mockstorer.New() + h, fixtureHref = prepareHistoryFixture(storerMock) + logger = log.Noop + addr = swarm.RandAddress(t) + client, _, _, _ = newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(mockdac.WithHistory(h, fixtureHref.String())), + }) + ) + t.Run("get-grantees", func(t *testing.T) { + var ( + publicKeyBytes = crypto.EncodeSecp256k1PublicKey(&pk.PublicKey) + publisher = hex.EncodeToString(publicKeyBytes) + ) + clientwihtpublisher, _, _, _ := newTestServer(t, testServerOptions{ + Storer: storerMock, + Logger: logger, + Post: mockpost.New(mockpost.WithAcceptAll()), + PublicKey: pk.PublicKey, + Dac: mockdac.New(mockdac.WithHistory(h, fixtureHref.String()), mockdac.WithPublisher(publisher)), + }) + expected := []string{ + "03d7660772cc3142f8a7a2dfac46ce34d12eac1718720cef0e3d94347902aa96a2", + "03c712a7e29bc792ac8d8ae49793d28d5bda27ed70f0d90697b2fb456c0a168bd2", + "032541acf966823bae26c2c16a7102e728ade3e2e29c11a8a17b29d8eb2bd19302", + } + jsonhttptest.Request(t, clientwihtpublisher, http.MethodGet, "/grantee/"+addr.String(), http.StatusOK, + jsonhttptest.WithExpectedJSONResponse(expected), + ) + }) + + t.Run("get-grantees-unauthorized", func(t *testing.T) { + jsonhttptest.Request(t, client, http.MethodGet, "/grantee/fc4e9fe978991257b897d987bc4ff13058b66ef45a53189a0b4fe84bb3346396", http.StatusNotFound, + jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ + Message: "granteelist not found", + Code: http.StatusNotFound, + }), + ) + }) + t.Run("get-grantees-invalid-address", func(t *testing.T) { + jsonhttptest.Request(t, client, http.MethodGet, "/grantee/asd", http.StatusBadRequest, + jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ + Code: http.StatusBadRequest, + Message: "invalid path params", + Reasons: []jsonhttp.Reason{ + { + Field: "address", + Error: api.HexInvalidByteError('s').Error(), + }, + }, + }), + ) + }) + t.Run("add-revoke-grantees", func(t *testing.T) { + body := api.GranteesPatchRequest{ + Addlist: []string{"02ab7473879005929d10ce7d4f626412dad9fe56b0a6622038931d26bd79abf0a4"}, + Revokelist: []string{"02ab7473879005929d10ce7d4f626412dad9fe56b0a6622038931d26bd79abf0a4"}, + } + jsonhttptest.Request(t, client, http.MethodPatch, "/grantee/"+addr.String(), http.StatusOK, + jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, addr.String()), + jsonhttptest.WithJSONRequestBody(body), + ) + }) + t.Run("add-revoke-grantees-empty-body", func(t *testing.T) { + jsonhttptest.Request(t, client, http.MethodPatch, "/grantee/"+addr.String(), http.StatusBadRequest, + jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr), + jsonhttptest.WithRequestBody(bytes.NewReader(nil)), + jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ + Message: "could not validate request", + Code: http.StatusBadRequest, + }), + ) + }) + t.Run("add-grantee-with-history", func(t *testing.T) { + body := api.GranteesPatchRequest{ + Addlist: []string{"02ab7473879005929d10ce7d4f626412dad9fe56b0a6622038931d26bd79abf0a4"}, + } + jsonhttptest.Request(t, client, http.MethodPatch, "/grantee/"+addr.String(), http.StatusOK, + 
jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr), + jsonhttptest.WithRequestHeader(api.SwarmActHistoryAddressHeader, addr.String()), + jsonhttptest.WithJSONRequestBody(body), + ) + }) + t.Run("add-grantee-without-history", func(t *testing.T) { + body := api.GranteesPatchRequest{ + Addlist: []string{"02ab7473879005929d10ce7d4f626412dad9fe56b0a6622038931d26bd79abf0a4"}, + } + jsonhttptest.Request(t, client, http.MethodPatch, "/grantee/"+addr.String(), http.StatusBadRequest, + jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr), + jsonhttptest.WithJSONRequestBody(body), + ) + }) + + t.Run("create-granteelist", func(t *testing.T) { + body := api.GranteesPostRequest{ + GranteeList: []string{ + "02ab7473879005929d10ce7d4f626412dad9fe56b0a6622038931d26bd79abf0a4", + "03d7660772cc3142f8a7a2dfac46ce34d12eac1718720cef0e3d94347902aa96a2", + }, + } + jsonhttptest.Request(t, client, http.MethodPost, "/grantee", http.StatusCreated, + jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr), + jsonhttptest.WithJSONRequestBody(body), + ) + }) + t.Run("create-granteelist-without-stamp", func(t *testing.T) { + body := api.GranteesPostRequest{ + GranteeList: []string{ + "03d7660772cc3142f8a7a2dfac46ce34d12eac1718720cef0e3d94347902aa96a2", + }, + } + jsonhttptest.Request(t, client, http.MethodPost, "/grantee", http.StatusBadRequest, + jsonhttptest.WithJSONRequestBody(body), + ) + }) + t.Run("create-granteelist-empty-body", func(t *testing.T) { + jsonhttptest.Request(t, client, http.MethodPost, "/grantee", http.StatusBadRequest, + jsonhttptest.WithRequestHeader(api.SwarmPostageBatchIdHeader, batchOkStr), + jsonhttptest.WithRequestBody(bytes.NewReader(nil)), + jsonhttptest.WithExpectedJSONResponse(jsonhttp.StatusResponse{ + Message: "could not validate request", + Code: http.StatusBadRequest, + }), + ) + }) +} diff --git a/pkg/api/export_test.go b/pkg/api/export_test.go index 812ce522ba0..fd78ae6974f 100644 --- a/pkg/api/export_test.go +++ b/pkg/api/export_test.go @@ -38,6 +38,8 @@ var ( ErrInvalidNameOrAddress = errInvalidNameOrAddress ErrUnsupportedDevNodeOperation = errUnsupportedDevNodeOperation ErrOperationSupportedOnlyInFullMode = errOperationSupportedOnlyInFullMode + ErrActDownload = errActDownload + ErrActUpload = errActUpload ) var ( diff --git a/pkg/api/feed.go b/pkg/api/feed.go index 09fdf6515ec..552cd03ffa4 100644 --- a/pkg/api/feed.go +++ b/pkg/api/feed.go @@ -21,8 +21,8 @@ import ( "github.com/ethersphere/bee/v2/pkg/manifest/simple" "github.com/ethersphere/bee/v2/pkg/postage" "github.com/ethersphere/bee/v2/pkg/soc" - storage "github.com/ethersphere/bee/v2/pkg/storage" - storer "github.com/ethersphere/bee/v2/pkg/storer" + "github.com/ethersphere/bee/v2/pkg/storage" + "github.com/ethersphere/bee/v2/pkg/storer" "github.com/ethersphere/bee/v2/pkg/swarm" "github.com/gorilla/mux" ) @@ -137,9 +137,11 @@ func (s *Service) feedPostHandler(w http.ResponseWriter, r *http.Request) { } headers := struct { - BatchID []byte `map:"Swarm-Postage-Batch-Id" validate:"required"` - Pin bool `map:"Swarm-Pin"` - Deferred *bool `map:"Swarm-Deferred-Upload"` + BatchID []byte `map:"Swarm-Postage-Batch-Id" validate:"required"` + Pin bool `map:"Swarm-Pin"` + Deferred *bool `map:"Swarm-Deferred-Upload"` + Act bool `map:"Swarm-Act"` + HistoryAddress swarm.Address `map:"Swarm-Act-History-Address"` }{} if response := s.mapStructure(r.Header, &headers); response != nil { response("invalid header params", logger, w) @@ -244,6 +246,15 @@ func (s *Service) 
feedPostHandler(w http.ResponseWriter, r *http.Request) { } return } + // TODO: do we want to allow feed act upload/ download? + encryptedReference := ref + if headers.Act { + encryptedReference, err = s.actEncryptionHandler(r.Context(), w, putter, ref, headers.HistoryAddress) + if err != nil { + jsonhttp.InternalServerError(w, errActUpload) + return + } + } err = putter.Done(ref) if err != nil { @@ -253,7 +264,7 @@ func (s *Service) feedPostHandler(w http.ResponseWriter, r *http.Request) { return } - jsonhttp.Created(w, feedReferenceResponse{Reference: ref}) + jsonhttp.Created(w, feedReferenceResponse{Reference: encryptedReference}) } func parseFeedUpdate(ch swarm.Chunk) (swarm.Address, int64, error) { diff --git a/pkg/api/router.go b/pkg/api/router.go index c0c6d02782c..609ffff573e 100644 --- a/pkg/api/router.go +++ b/pkg/api/router.go @@ -214,10 +214,12 @@ func (s *Service) mountAPI() { "GET": web.ChainHandlers( s.contentLengthMetricMiddleware(), s.newTracingHandler("bytes-download"), + s.actDecryptionHandler(), web.FinalHandlerFunc(s.bytesGetHandler), ), "HEAD": web.ChainHandlers( s.newTracingHandler("bytes-head"), + s.actDecryptionHandler(), web.FinalHandlerFunc(s.bytesHeadHandler), ), }) @@ -235,8 +237,14 @@ func (s *Service) mountAPI() { )) handle("/chunks/{address}", jsonhttp.MethodHandler{ - "GET": http.HandlerFunc(s.chunkGetHandler), - "HEAD": http.HandlerFunc(s.hasChunkHandler), + "GET": web.ChainHandlers( + s.actDecryptionHandler(), + web.FinalHandlerFunc(s.chunkGetHandler), + ), + "HEAD": web.ChainHandlers( + s.actDecryptionHandler(), + web.FinalHandlerFunc(s.hasChunkHandler), + ), }) handle("/soc/{owner}/{id}", jsonhttp.MethodHandler{ @@ -262,6 +270,21 @@ func (s *Service) mountAPI() { ), }) + handle("/grantee", jsonhttp.MethodHandler{ + "POST": web.ChainHandlers( + web.FinalHandlerFunc(s.actCreateGranteesHandler), + ), + }) + + handle("/grantee/{address}", jsonhttp.MethodHandler{ + "GET": web.ChainHandlers( + web.FinalHandlerFunc(s.actListGranteesHandler), + ), + "PATCH": web.ChainHandlers( + web.FinalHandlerFunc(s.actGrantRevokeHandler), + ), + }) + handle("/bzz/{address}", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { u := r.URL u.Path += "/" @@ -272,9 +295,11 @@ func (s *Service) mountAPI() { "GET": web.ChainHandlers( s.contentLengthMetricMiddleware(), s.newTracingHandler("bzz-download"), + s.actDecryptionHandler(), web.FinalHandlerFunc(s.bzzDownloadHandler), ), "HEAD": web.ChainHandlers( + s.actDecryptionHandler(), web.FinalHandlerFunc(s.bzzHeadHandler), ), }) @@ -414,7 +439,10 @@ func (s *Service) mountBusinessDebug() { }) handle("/chunks/{address}", jsonhttp.MethodHandler{ - "GET": http.HandlerFunc(s.hasChunkHandler), + "GET": web.ChainHandlers( + s.actDecryptionHandler(), + web.FinalHandlerFunc(s.hasChunkHandler), + ), }) handle("/topology", jsonhttp.MethodHandler{ diff --git a/pkg/api/soc.go b/pkg/api/soc.go index 0abf338deb9..29777066b10 100644 --- a/pkg/api/soc.go +++ b/pkg/api/soc.go @@ -43,8 +43,10 @@ func (s *Service) socUploadHandler(w http.ResponseWriter, r *http.Request) { } headers := struct { - BatchID []byte `map:"Swarm-Postage-Batch-Id" validate:"required"` - Pin bool `map:"Swarm-Pin"` + BatchID []byte `map:"Swarm-Postage-Batch-Id" validate:"required"` + Pin bool `map:"Swarm-Pin"` + Act bool `map:"Swarm-Act"` + HistoryAddress swarm.Address `map:"Swarm-Act-History-Address"` }{} if response := s.mapStructure(r.Header, &headers); response != nil { response("invalid header params", logger, w) @@ -155,6 +157,15 @@ func (s *Service) 
socUploadHandler(w http.ResponseWriter, r *http.Request) { return } + encryptedReference := sch.Address() + if headers.Act { + encryptedReference, err = s.actEncryptionHandler(r.Context(), w, putter, sch.Address(), headers.HistoryAddress) + if err != nil { + jsonhttp.InternalServerError(w, errActUpload) + return + } + } + err = putter.Put(r.Context(), sch) if err != nil { logger.Debug("write chunk failed", "chunk_address", sch.Address(), "error", err) @@ -171,5 +182,5 @@ func (s *Service) socUploadHandler(w http.ResponseWriter, r *http.Request) { return } - jsonhttp.Created(w, chunkAddressResponse{Reference: sch.Address()}) + jsonhttp.Created(w, socPostResponse{Reference: encryptedReference}) } diff --git a/pkg/dynamicaccess/accesslogic.go b/pkg/dynamicaccess/accesslogic.go new file mode 100644 index 00000000000..671537486e9 --- /dev/null +++ b/pkg/dynamicaccess/accesslogic.go @@ -0,0 +1,157 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dynamicaccess + +import ( + "context" + "crypto/ecdsa" + "fmt" + + "github.com/ethersphere/bee/v2/pkg/encryption" + "github.com/ethersphere/bee/v2/pkg/kvs" + "github.com/ethersphere/bee/v2/pkg/swarm" + "golang.org/x/crypto/sha3" +) + +//nolint:gochecknoglobals +var ( + hashFunc = sha3.NewLegacyKeccak256 + oneByteArray = []byte{1} + zeroByteArray = []byte{0} +) + +// Decryptor is a read-only interface for the ACT. +type Decryptor interface { + // DecryptRef will return a decrypted reference, for given encrypted reference and grantee + DecryptRef(ctx context.Context, storage kvs.KeyValueStore, encryptedRef swarm.Address, publisher *ecdsa.PublicKey) (swarm.Address, error) + Session +} + +// Control interface for the ACT (does write operations). +type Control interface { + Decryptor + // AddGrantee adds a new grantee to the ACT + AddGrantee(ctx context.Context, storage kvs.KeyValueStore, publisherPubKey, granteePubKey *ecdsa.PublicKey) error + // EncryptRef encrypts a Swarm reference for a given grantee + EncryptRef(ctx context.Context, storage kvs.KeyValueStore, grantee *ecdsa.PublicKey, ref swarm.Address) (swarm.Address, error) +} + +type ActLogic struct { + Session +} + +var _ Control = (*ActLogic)(nil) + +// EncryptRef encrypts a SWARM reference for a publisher. +func (al ActLogic) EncryptRef(ctx context.Context, storage kvs.KeyValueStore, publisherPubKey *ecdsa.PublicKey, ref swarm.Address) (swarm.Address, error) { + accessKey, err := al.getAccessKey(ctx, storage, publisherPubKey) + if err != nil { + return swarm.ZeroAddress, err + } + refCipher := encryption.New(accessKey, 0, uint32(0), hashFunc) + encryptedRef, err := refCipher.Encrypt(ref.Bytes()) + if err != nil { + return swarm.ZeroAddress, fmt.Errorf("failed to encrypt reference: %w", err) + } + + return swarm.NewAddress(encryptedRef), nil +} + +// AddGrantee adds a new grantee to the ACT. 
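Before the implementation below, it may help to see how these pieces are meant to be combined end to end. The following sketch mirrors the access-logic tests later in this diff: the publisher grants itself access (which creates the access key), grants a second party, encrypts a reference, and the grantee then resolves it with its own session plus the publisher's public key. The in-memory kvs mock stands in for a persisted ACT, key generation follows the P256 usage of those tests, and error handling is elided:

    package main

    import (
    	"context"
    	"crypto/ecdsa"
    	"crypto/elliptic"
    	"crypto/rand"
    	"encoding/hex"
    	"fmt"

    	"github.com/ethersphere/bee/v2/pkg/dynamicaccess"
    	kvsmock "github.com/ethersphere/bee/v2/pkg/kvs/mock"
    	"github.com/ethersphere/bee/v2/pkg/swarm"
    )

    func main() {
    	ctx := context.Background()
    	store := kvsmock.New() // in-memory stand-in for the ACT key-value store

    	publisherKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    	granteeKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

    	publisherAL := dynamicaccess.NewLogic(dynamicaccess.NewDefaultSession(publisherKey))
    	// The first grant, to the publisher itself, creates the access key...
    	_ = publisherAL.AddGrantee(ctx, store, &publisherKey.PublicKey, &publisherKey.PublicKey)
    	// ...subsequent grants re-encrypt that key under each grantee's lookup key.
    	_ = publisherAL.AddGrantee(ctx, store, &publisherKey.PublicKey, &granteeKey.PublicKey)

    	b, _ := hex.DecodeString("39a5ea87b141fe44aa609c3327ecd896c0e2122897f5f4bbacf74db1033c5559")
    	ref := swarm.NewAddress(b)
    	encRef, _ := publisherAL.EncryptRef(ctx, store, &publisherKey.PublicKey, ref)

    	granteeAL := dynamicaccess.NewLogic(dynamicaccess.NewDefaultSession(granteeKey))
    	got, _ := granteeAL.DecryptRef(ctx, store, encRef, &publisherKey.PublicKey)
    	fmt.Println(ref.Equal(got)) // true
    }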
+func (al ActLogic) AddGrantee(ctx context.Context, storage kvs.KeyValueStore, publisherPubKey, granteePubKey *ecdsa.PublicKey) error { + var ( + accessKey encryption.Key + err error + ) + + // Create new access key because grantee is the publisher + if publisherPubKey.Equal(granteePubKey) { + accessKey = encryption.GenerateRandomKey(encryption.KeyLength) + } else { + // Get previously generated access key + accessKey, err = al.getAccessKey(ctx, storage, publisherPubKey) + if err != nil { + return err + } + } + + // Derive the lookup key and access key decryption key for the new grantee + lookupKey, accessKeyDecryptionKey, err := al.getKeys(granteePubKey) + if err != nil { + return err + } + + // Encrypt the access key for the new Grantee + cipher := encryption.New(encryption.Key(accessKeyDecryptionKey), 0, uint32(0), hashFunc) + granteeEncryptedAccessKey, err := cipher.Encrypt(accessKey) + if err != nil { + return fmt.Errorf("failed to encrypt access key: %w", err) + } + + // Add the new encrypted access key to the Act + return storage.Put(ctx, lookupKey, granteeEncryptedAccessKey) +} + +// getAccessKey returns the access key for a publisher (public key). +func (al *ActLogic) getAccessKey(ctx context.Context, storage kvs.KeyValueStore, publisherPubKey *ecdsa.PublicKey) ([]byte, error) { + publisherLookupKey, publisherAKDecryptionKey, err := al.getKeys(publisherPubKey) + if err != nil { + return nil, err + } + // no need for constructor call if value not found in act + accessKeyDecryptionCipher := encryption.New(encryption.Key(publisherAKDecryptionKey), 0, uint32(0), hashFunc) + encryptedAK, err := storage.Get(ctx, publisherLookupKey) + if err != nil { + return nil, fmt.Errorf("failed to get value from KVS: %w", err) + } + + return accessKeyDecryptionCipher.Decrypt(encryptedAK) +} + +// Generate lookup key and access key decryption key for a given public key +func (al *ActLogic) getKeys(publicKey *ecdsa.PublicKey) ([]byte, []byte, error) { + nonces := [][]byte{zeroByteArray, oneByteArray} + keys, err := al.Session.Key(publicKey, nonces) + if keys == nil { + return nil, nil, err + } + return keys[0], keys[1], err +} + +// DecryptRef will return a decrypted reference, for given encrypted reference and publisher +func (al ActLogic) DecryptRef(ctx context.Context, storage kvs.KeyValueStore, encryptedRef swarm.Address, publisher *ecdsa.PublicKey) (swarm.Address, error) { + lookupKey, accessKeyDecryptionKey, err := al.getKeys(publisher) + if err != nil { + return swarm.ZeroAddress, err + } + + // Lookup encrypted access key from the ACT manifest + encryptedAccessKey, err := storage.Get(ctx, lookupKey) + if err != nil { + return swarm.ZeroAddress, fmt.Errorf("failed to get access key from KVS: %w", err) + } + + // Decrypt access key + accessKeyCipher := encryption.New(encryption.Key(accessKeyDecryptionKey), 0, uint32(0), hashFunc) + accessKey, err := accessKeyCipher.Decrypt(encryptedAccessKey) + if err != nil { + return swarm.ZeroAddress, err + } + + // Decrypt reference + refCipher := encryption.New(accessKey, 0, uint32(0), hashFunc) + ref, err := refCipher.Decrypt(encryptedRef.Bytes()) + if err != nil { + return swarm.ZeroAddress, err + } + + return swarm.NewAddress(ref), nil +} + +func NewLogic(s Session) ActLogic { + return ActLogic{ + Session: s, + } +} diff --git a/pkg/dynamicaccess/accesslogic_test.go b/pkg/dynamicaccess/accesslogic_test.go new file mode 100644 index 00000000000..b8cab2f8fbf --- /dev/null +++ b/pkg/dynamicaccess/accesslogic_test.go @@ -0,0 +1,224 @@ +// Copyright 2024 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dynamicaccess_test + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "encoding/hex" + "testing" + + "github.com/ethersphere/bee/v2/pkg/crypto" + "github.com/ethersphere/bee/v2/pkg/dynamicaccess" + kvsmock "github.com/ethersphere/bee/v2/pkg/kvs/mock" + "github.com/ethersphere/bee/v2/pkg/swarm" + "github.com/stretchr/testify/assert" +) + +// Generates a new test environment with a fix private key +func setupAccessLogic() dynamicaccess.ActLogic { + privateKey := getPrivKey(1) + diffieHellman := dynamicaccess.NewDefaultSession(privateKey) + al := dynamicaccess.NewLogic(diffieHellman) + + return al +} + +func getPrivKey(keyNumber int) *ecdsa.PrivateKey { + var keyHex string + + switch keyNumber { + case 0: + keyHex = "a786dd84b61485de12146fd9c4c02d87e8fd95f0542765cb7fc3d2e428c0bcfa" + case 1: + keyHex = "b786dd84b61485de12146fd9c4c02d87e8fd95f0542765cb7fc3d2e428c0bcfb" + case 2: + keyHex = "c786dd84b61485de12146fd9c4c02d87e8fd95f0542765cb7fc3d2e428c0bcfc" + default: + panic("Invalid key number") + } + + data, err := hex.DecodeString(keyHex) + if err != nil { + panic(err) + } + + privKey, err := crypto.DecodeSecp256k1PrivateKey(data) + if err != nil { + panic(err) + } + + return privKey +} + +func TestDecryptRef_Success(t *testing.T) { + t.Parallel() + ctx := context.Background() + id1 := getPrivKey(1) + s := kvsmock.New() + al := setupAccessLogic() + err := al.AddGrantee(ctx, s, &id1.PublicKey, &id1.PublicKey) + if err != nil { + t.Fatalf("AddGrantee: expected no error, got %v", err) + } + + byteRef, _ := hex.DecodeString("39a5ea87b141fe44aa609c3327ecd896c0e2122897f5f4bbacf74db1033c5559") + expectedRef := swarm.NewAddress(byteRef) + encryptedRef, err := al.EncryptRef(ctx, s, &id1.PublicKey, expectedRef) + if err != nil { + t.Fatalf("There was an error while calling EncryptRef: %v", err) + } + + actualRef, err := al.DecryptRef(ctx, s, encryptedRef, &id1.PublicKey) + if err != nil { + t.Fatalf("There was an error while calling Get: %v", err) + } + + if !expectedRef.Equal(actualRef) { + t.Fatalf("DecryptRef gave back wrong Swarm reference! 
Expedted: %v, actual: %v", expectedRef, actualRef) + } +} + +func TestDecryptRefWithGrantee_Success(t *testing.T) { + t.Parallel() + ctx := context.Background() + id0, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + diffieHellman := dynamicaccess.NewDefaultSession(id0) + al := dynamicaccess.NewLogic(diffieHellman) + + s := kvsmock.New() + err := al.AddGrantee(ctx, s, &id0.PublicKey, &id0.PublicKey) + if err != nil { + t.Fatalf("AddGrantee: expected no error, got %v", err) + } + + id1, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + err = al.AddGrantee(ctx, s, &id0.PublicKey, &id1.PublicKey) + if err != nil { + t.Fatalf("AddNewGrantee: expected no error, got %v", err) + } + + byteRef, _ := hex.DecodeString("39a5ea87b141fe44aa609c3327ecd896c0e2122897f5f4bbacf74db1033c5559") + + expectedRef := swarm.NewAddress(byteRef) + + encryptedRef, err := al.EncryptRef(ctx, s, &id0.PublicKey, expectedRef) + if err != nil { + t.Fatalf("There was an error while calling EncryptRef: %v", err) + } + + diffieHellman2 := dynamicaccess.NewDefaultSession(id1) + granteeAccessLogic := dynamicaccess.NewLogic(diffieHellman2) + actualRef, err := granteeAccessLogic.DecryptRef(ctx, s, encryptedRef, &id0.PublicKey) + if err != nil { + t.Fatalf("There was an error while calling Get: %v", err) + } + + if !expectedRef.Equal(actualRef) { + t.Fatalf("DecryptRef gave back wrong Swarm reference! Expedted: %v, actual: %v", expectedRef, actualRef) + } +} + +func TestDecryptRef_Error(t *testing.T) { + t.Parallel() + id0 := getPrivKey(0) + + ctx := context.Background() + s := kvsmock.New() + al := setupAccessLogic() + err := al.AddGrantee(ctx, s, &id0.PublicKey, &id0.PublicKey) + assert.NoError(t, err) + + expectedRef := "39a5ea87b141fe44aa609c3327ecd896c0e2122897f5f4bbacf74db1033c5559" + + encryptedRef, _ := al.EncryptRef(ctx, s, &id0.PublicKey, swarm.NewAddress([]byte(expectedRef))) + + r, err := al.DecryptRef(ctx, s, encryptedRef, nil) + if err == nil { + t.Logf("r: %s", r.String()) + t.Fatalf("Get should return encrypted access key not found error!") + } +} + +func TestAddPublisher(t *testing.T) { + t.Parallel() + id0 := getPrivKey(0) + savedLookupKey := "b6ee086390c280eeb9824c331a4427596f0c8510d5564bc1b6168d0059a46e2b" + s := kvsmock.New() + ctx := context.Background() + + al := setupAccessLogic() + err := al.AddGrantee(ctx, s, &id0.PublicKey, &id0.PublicKey) + assert.NoError(t, err) + + decodedSavedLookupKey, err := hex.DecodeString(savedLookupKey) + assert.NoError(t, err) + + encryptedAccessKey, err := s.Get(ctx, decodedSavedLookupKey) + assert.NoError(t, err) + + decodedEncryptedAccessKey := hex.EncodeToString(encryptedAccessKey) + + // A random value is returned, so it is only possible to check the length of the returned value + // We know the lookup key because the generated private key is fixed + if len(decodedEncryptedAccessKey) != 64 { + t.Fatalf("AddGrantee: expected encrypted access key length 64, got %d", len(decodedEncryptedAccessKey)) + } + if s == nil { + t.Fatalf("AddGrantee: expected act, got nil") + } +} + +func TestAddNewGranteeToContent(t *testing.T) { + t.Parallel() + id0 := getPrivKey(0) + id1 := getPrivKey(1) + id2 := getPrivKey(2) + ctx := context.Background() + + publisherLookupKey := "b6ee086390c280eeb9824c331a4427596f0c8510d5564bc1b6168d0059a46e2b" + firstAddedGranteeLookupKey := "a13678e81f9d939b9401a3ad7e548d2ceb81c50f8c76424296e83a1ad79c0df0" + secondAddedGranteeLookupKey := "d5e9a6499ca74f5b8b958a4b89b7338045b2baa9420e115443a8050e26986564" + + s := kvsmock.New() + al := 
setupAccessLogic() + err := al.AddGrantee(ctx, s, &id0.PublicKey, &id0.PublicKey) + assert.NoError(t, err) + + err = al.AddGrantee(ctx, s, &id0.PublicKey, &id1.PublicKey) + assert.NoError(t, err) + + err = al.AddGrantee(ctx, s, &id0.PublicKey, &id2.PublicKey) + assert.NoError(t, err) + + lookupKeyAsByte, err := hex.DecodeString(publisherLookupKey) + assert.NoError(t, err) + + result, _ := s.Get(ctx, lookupKeyAsByte) + hexEncodedEncryptedAK := hex.EncodeToString(result) + if len(hexEncodedEncryptedAK) != 64 { + t.Fatalf("AddNewGrantee: expected encrypted access key length 64, got %d", len(hexEncodedEncryptedAK)) + } + + lookupKeyAsByte, err = hex.DecodeString(firstAddedGranteeLookupKey) + assert.NoError(t, err) + + result, _ = s.Get(ctx, lookupKeyAsByte) + hexEncodedEncryptedAK = hex.EncodeToString(result) + if len(hexEncodedEncryptedAK) != 64 { + t.Fatalf("AddNewGrantee: expected encrypted access key length 64, got %d", len(hexEncodedEncryptedAK)) + } + + lookupKeyAsByte, err = hex.DecodeString(secondAddedGranteeLookupKey) + assert.NoError(t, err) + + result, _ = s.Get(ctx, lookupKeyAsByte) + hexEncodedEncryptedAK = hex.EncodeToString(result) + if len(hexEncodedEncryptedAK) != 64 { + t.Fatalf("AddNewGrantee: expected encrypted access key length 64, got %d", len(hexEncodedEncryptedAK)) + } +} diff --git a/pkg/dynamicaccess/controller.go b/pkg/dynamicaccess/controller.go new file mode 100644 index 00000000000..dffe4bbdf64 --- /dev/null +++ b/pkg/dynamicaccess/controller.go @@ -0,0 +1,283 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dynamicaccess + +import ( + "context" + "crypto/ecdsa" + "io" + "time" + + "github.com/ethersphere/bee/v2/pkg/encryption" + "github.com/ethersphere/bee/v2/pkg/file" + "github.com/ethersphere/bee/v2/pkg/kvs" + "github.com/ethersphere/bee/v2/pkg/swarm" +) + +type Grantees interface { + // UpdateHandler manages the grantees for the given publisher, updating the list based on provided public keys to add or remove. + // Only the publisher can make changes to the grantee list. + UpdateHandler(ctx context.Context, ls file.LoadSaver, gls file.LoadSaver, granteeRef swarm.Address, historyRef swarm.Address, publisher *ecdsa.PublicKey, addList, removeList []*ecdsa.PublicKey) (swarm.Address, swarm.Address, swarm.Address, swarm.Address, error) + // Get returns the list of grantees for the given publisher. + // The list is accessible only by the publisher. + Get(ctx context.Context, ls file.LoadSaver, publisher *ecdsa.PublicKey, encryptedglRef swarm.Address) ([]*ecdsa.PublicKey, error) +} + +type Controller interface { + Grantees + // DownloadHandler decrypts the encryptedRef using the lookupkey based on the history and timestamp. + DownloadHandler(ctx context.Context, ls file.LoadSaver, encryptedRef swarm.Address, publisher *ecdsa.PublicKey, historyRef swarm.Address, timestamp int64) (swarm.Address, error) + // UploadHandler encrypts the reference and stores it in the history as the latest update. 
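Taken together, the two handlers declared in this interface give the API layer a simple round trip: UploadHandler returns the ACT reference, the history reference and the encrypted content reference, and DownloadHandler reverses the encryption given the publisher key, the history reference and a timestamp. A sketch of that flow, modelled on the controller tests further down in this diff; the file.LoadSaver is assumed to be built with loadsave.New as in those tests:

    package actexample

    import (
    	"context"
    	"crypto/ecdsa"
    	"time"

    	"github.com/ethersphere/bee/v2/pkg/dynamicaccess"
    	"github.com/ethersphere/bee/v2/pkg/file"
    	"github.com/ethersphere/bee/v2/pkg/swarm"
    )

    // roundTrip encrypts ref for the publisher and immediately resolves it again.
    // ls is assumed to be a loadsave-backed file.LoadSaver as in the controller tests.
    func roundTrip(ctx context.Context, ls file.LoadSaver, publisher *ecdsa.PrivateKey, ref swarm.Address) (swarm.Address, error) {
    	ctrl := dynamicaccess.NewController(dynamicaccess.NewLogic(dynamicaccess.NewDefaultSession(publisher)))

    	// A zero history address makes the controller create a new history and ACT.
    	_, historyRef, encryptedRef, err := ctrl.UploadHandler(ctx, ls, ref, &publisher.PublicKey, swarm.ZeroAddress)
    	if err != nil {
    		return swarm.ZeroAddress, err
    	}

    	// The downloader (here the publisher itself) presents the publisher key,
    	// the history reference and a timestamp used to pick the matching ACT entry.
    	return ctrl.DownloadHandler(ctx, ls, encryptedRef, &publisher.PublicKey, historyRef, time.Now().Unix())
    }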
+ UploadHandler(ctx context.Context, ls file.LoadSaver, reference swarm.Address, publisher *ecdsa.PublicKey, historyRef swarm.Address) (swarm.Address, swarm.Address, swarm.Address, error) + io.Closer +} + +type ControllerStruct struct { + accessLogic ActLogic +} + +var _ Controller = (*ControllerStruct)(nil) + +func NewController(accessLogic ActLogic) *ControllerStruct { + return &ControllerStruct{ + accessLogic: accessLogic, + } +} + +func (c *ControllerStruct) DownloadHandler( + ctx context.Context, + ls file.LoadSaver, + encryptedRef swarm.Address, + publisher *ecdsa.PublicKey, + historyRef swarm.Address, + timestamp int64, +) (swarm.Address, error) { + _, act, err := c.getHistoryAndAct(ctx, ls, historyRef, publisher, timestamp) + if err != nil { + return swarm.ZeroAddress, err + } + + return c.accessLogic.DecryptRef(ctx, act, encryptedRef, publisher) +} + +func (c *ControllerStruct) UploadHandler( + ctx context.Context, + ls file.LoadSaver, + reference swarm.Address, + publisher *ecdsa.PublicKey, + historyRef swarm.Address, +) (swarm.Address, swarm.Address, swarm.Address, error) { + history, act, err := c.getHistoryAndAct(ctx, ls, historyRef, publisher, time.Now().Unix()) + if err != nil { + return swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, err + } + actRef := swarm.ZeroAddress + newHistoryRef := historyRef + if historyRef.IsZero() { + newHistoryRef, actRef, err = c.saveHistoryAndAct(ctx, history, nil, act) + if err != nil { + return swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, err + } + } + + encryptedRef, err := c.accessLogic.EncryptRef(ctx, act, publisher, reference) + return actRef, newHistoryRef, encryptedRef, err +} + +// Limitation: If an upadate is called again within a second from the latest upload/update then mantaray save fails with ErrInvalidInput, +// because the key (timestamp) is already present, hence a new fork is not created +func (c *ControllerStruct) UpdateHandler( + ctx context.Context, + ls file.LoadSaver, + gls file.LoadSaver, + encryptedglRef swarm.Address, + historyRef swarm.Address, + publisher *ecdsa.PublicKey, + addList []*ecdsa.PublicKey, + removeList []*ecdsa.PublicKey, +) (swarm.Address, swarm.Address, swarm.Address, swarm.Address, error) { + history, act, err := c.getHistoryAndAct(ctx, ls, historyRef, publisher, time.Now().Unix()) + if err != nil { + return swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, err + } + + gl, err := c.getGranteeList(ctx, gls, encryptedglRef, publisher) + if err != nil { + return swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, err + } + if len(addList) != 0 { + err = gl.Add(addList) + if err != nil { + return swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, err + } + } + granteesToAdd := addList + if len(removeList) != 0 { + err = gl.Remove(removeList) + if err != nil { + return swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, err + } + // generate new access key and new act, only if history was not newly created + if !historyRef.IsZero() { + act, err = c.newActWithPublisher(ctx, ls, publisher) + if err != nil { + return swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, err + } + } + granteesToAdd = gl.Get() + } + + for _, grantee := range granteesToAdd { + err := c.accessLogic.AddGrantee(ctx, act, publisher, grantee) + if err != nil { + return swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, err + } + } + + granteeRef, err := gl.Save(ctx) + 
if err != nil { + return swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, err + } + + egranteeRef, err := c.encryptRefForPublisher(publisher, granteeRef) + if err != nil { + return swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, err + } + // need to re-initialize history, because Lookup loads the forks causing the manifest save to skip the root node + if !historyRef.IsZero() { + history, err = NewHistoryReference(ls, historyRef) + if err != nil { + return swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, err + } + } + + mtdt := map[string]string{"encryptedglref": egranteeRef.String()} + hRef, actRef, err := c.saveHistoryAndAct(ctx, history, &mtdt, act) + if err != nil { + return swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, err + } + + return granteeRef, egranteeRef, hRef, actRef, nil +} + +func (c *ControllerStruct) Get(ctx context.Context, ls file.LoadSaver, publisher *ecdsa.PublicKey, encryptedglRef swarm.Address) ([]*ecdsa.PublicKey, error) { + gl, err := c.getGranteeList(ctx, ls, encryptedglRef, publisher) + if err != nil { + return nil, err + } + return gl.Get(), nil +} + +func (c *ControllerStruct) newActWithPublisher(ctx context.Context, ls file.LoadSaver, publisher *ecdsa.PublicKey) (kvs.KeyValueStore, error) { + act, err := kvs.New(ls) + if err != nil { + return nil, err + } + err = c.accessLogic.AddGrantee(ctx, act, publisher, publisher) + if err != nil { + return nil, err + } + + return act, nil +} + +func (c *ControllerStruct) getHistoryAndAct(ctx context.Context, ls file.LoadSaver, historyRef swarm.Address, publisher *ecdsa.PublicKey, timestamp int64) (history History, act kvs.KeyValueStore, err error) { + if historyRef.IsZero() { + history, err = NewHistory(ls) + if err != nil { + return nil, nil, err + } + act, err = c.newActWithPublisher(ctx, ls, publisher) + if err != nil { + return nil, nil, err + } + } else { + history, err = NewHistoryReference(ls, historyRef) + if err != nil { + return nil, nil, err + } + entry, err := history.Lookup(ctx, timestamp) + if err != nil { + return nil, nil, err + } + act, err = kvs.NewReference(ls, entry.Reference()) + if err != nil { + return nil, nil, err + } + } + + return history, act, nil +} + +func (c *ControllerStruct) saveHistoryAndAct(ctx context.Context, history History, mtdt *map[string]string, act kvs.KeyValueStore) (swarm.Address, swarm.Address, error) { + actRef, err := act.Save(ctx) + if err != nil { + return swarm.ZeroAddress, swarm.ZeroAddress, err + } + err = history.Add(ctx, actRef, nil, mtdt) + if err != nil { + return swarm.ZeroAddress, swarm.ZeroAddress, err + } + historyRef, err := history.Store(ctx) + if err != nil { + return swarm.ZeroAddress, swarm.ZeroAddress, err + } + + return historyRef, actRef, nil +} + +func (c *ControllerStruct) getGranteeList(ctx context.Context, ls file.LoadSaver, encryptedglRef swarm.Address, publisher *ecdsa.PublicKey) (gl GranteeList, err error) { + if encryptedglRef.IsZero() { + gl, err = NewGranteeList(ls) + if err != nil { + return nil, err + } + } else { + granteeref, err := c.decryptRefForPublisher(publisher, encryptedglRef) + if err != nil { + return nil, err + } + + gl, err = NewGranteeListReference(ctx, ls, granteeref) + if err != nil { + return nil, err + } + } + + return gl, nil +} + +func (c *ControllerStruct) encryptRefForPublisher(publisherPubKey *ecdsa.PublicKey, ref swarm.Address) (swarm.Address, error) { + keys, err := c.accessLogic.Session.Key(publisherPubKey, 
[][]byte{oneByteArray}) + if err != nil { + return swarm.ZeroAddress, err + } + refCipher := encryption.New(keys[0], 0, uint32(0), hashFunc) + encryptedRef, err := refCipher.Encrypt(ref.Bytes()) + if err != nil { + return swarm.ZeroAddress, err + } + + return swarm.NewAddress(encryptedRef), nil +} + +func (c *ControllerStruct) decryptRefForPublisher(publisherPubKey *ecdsa.PublicKey, encryptedRef swarm.Address) (swarm.Address, error) { + keys, err := c.accessLogic.Session.Key(publisherPubKey, [][]byte{oneByteArray}) + if err != nil { + return swarm.ZeroAddress, err + } + refCipher := encryption.New(keys[0], 0, uint32(0), hashFunc) + ref, err := refCipher.Decrypt(encryptedRef.Bytes()) + if err != nil { + return swarm.ZeroAddress, err + } + + return swarm.NewAddress(ref), nil +} + +// TODO: what to do in close ? +func (c *ControllerStruct) Close() error { + return nil +} diff --git a/pkg/dynamicaccess/controller_test.go b/pkg/dynamicaccess/controller_test.go new file mode 100644 index 00000000000..c25d8079bfa --- /dev/null +++ b/pkg/dynamicaccess/controller_test.go @@ -0,0 +1,303 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dynamicaccess_test + +import ( + "context" + "crypto/ecdsa" + "reflect" + "testing" + "time" + + "github.com/ethersphere/bee/v2/pkg/dynamicaccess" + encryption "github.com/ethersphere/bee/v2/pkg/encryption" + "github.com/ethersphere/bee/v2/pkg/file" + "github.com/ethersphere/bee/v2/pkg/file/loadsave" + "github.com/ethersphere/bee/v2/pkg/file/redundancy" + "github.com/ethersphere/bee/v2/pkg/kvs" + "github.com/ethersphere/bee/v2/pkg/swarm" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/sha3" +) + +//nolint:errcheck,gosec,wrapcheck +func getHistoryFixture(ctx context.Context, ls file.LoadSaver, al dynamicaccess.ActLogic, publisher *ecdsa.PublicKey) (swarm.Address, error) { + h, err := dynamicaccess.NewHistory(ls) + if err != nil { + return swarm.ZeroAddress, err + } + pk1 := getPrivKey(1) + pk2 := getPrivKey(2) + + kvs0, _ := kvs.New(ls) + al.AddGrantee(ctx, kvs0, publisher, publisher) + kvs0Ref, _ := kvs0.Save(ctx) + kvs1, _ := kvs.New(ls) + al.AddGrantee(ctx, kvs1, publisher, publisher) + al.AddGrantee(ctx, kvs1, publisher, &pk1.PublicKey) + kvs1Ref, _ := kvs1.Save(ctx) + kvs2, _ := kvs.New(ls) + al.AddGrantee(ctx, kvs2, publisher, publisher) + al.AddGrantee(ctx, kvs2, publisher, &pk2.PublicKey) + kvs2Ref, _ := kvs2.Save(ctx) + firstTime := time.Date(1994, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + secondTime := time.Date(2000, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + thirdTime := time.Date(2015, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + + h.Add(ctx, kvs0Ref, &thirdTime, nil) + h.Add(ctx, kvs1Ref, &firstTime, nil) + h.Add(ctx, kvs2Ref, &secondTime, nil) + return h.Store(ctx) +} + +func TestController_UploadHandler(t *testing.T) { + t.Parallel() + ctx := context.Background() + publisher := getPrivKey(0) + diffieHellman := dynamicaccess.NewDefaultSession(publisher) + al := dynamicaccess.NewLogic(diffieHellman) + c := dynamicaccess.NewController(al) + ls := createLs() + + t.Run("New upload", func(t *testing.T) { + ref := swarm.RandAddress(t) + _, hRef, encRef, err := c.UploadHandler(ctx, ls, ref, &publisher.PublicKey, swarm.ZeroAddress) + assert.NoError(t, err) + + h, _ := dynamicaccess.NewHistoryReference(ls, hRef) + entry, _ := h.Lookup(ctx, time.Now().Unix()) + actRef := 
entry.Reference() + act, _ := kvs.NewReference(ls, actRef) + expRef, err := al.EncryptRef(ctx, act, &publisher.PublicKey, ref) + + assert.NoError(t, err) + assert.Equal(t, encRef, expRef) + assert.NotEqual(t, hRef, swarm.ZeroAddress) + }) + + t.Run("Upload to same history", func(t *testing.T) { + ref := swarm.RandAddress(t) + _, hRef1, _, err := c.UploadHandler(ctx, ls, ref, &publisher.PublicKey, swarm.ZeroAddress) + assert.NoError(t, err) + _, hRef2, encRef, err := c.UploadHandler(ctx, ls, ref, &publisher.PublicKey, hRef1) + assert.NoError(t, err) + h, err := dynamicaccess.NewHistoryReference(ls, hRef2) + assert.NoError(t, err) + hRef2, err = h.Store(ctx) + assert.NoError(t, err) + assert.True(t, hRef1.Equal(hRef2)) + + h, _ = dynamicaccess.NewHistoryReference(ls, hRef2) + entry, _ := h.Lookup(ctx, time.Now().Unix()) + actRef := entry.Reference() + act, _ := kvs.NewReference(ls, actRef) + expRef, err := al.EncryptRef(ctx, act, &publisher.PublicKey, ref) + + assert.NoError(t, err) + assert.Equal(t, encRef, expRef) + assert.NotEqual(t, hRef2, swarm.ZeroAddress) + }) +} + +func TestController_PublisherDownload(t *testing.T) { + t.Parallel() + ctx := context.Background() + publisher := getPrivKey(0) + diffieHellman := dynamicaccess.NewDefaultSession(publisher) + al := dynamicaccess.NewLogic(diffieHellman) + c := dynamicaccess.NewController(al) + ls := createLs() + ref := swarm.RandAddress(t) + href, _ := getHistoryFixture(ctx, ls, al, &publisher.PublicKey) + h, _ := dynamicaccess.NewHistoryReference(ls, href) + entry, _ := h.Lookup(ctx, time.Now().Unix()) + actRef := entry.Reference() + act, _ := kvs.NewReference(ls, actRef) + encRef, err := al.EncryptRef(ctx, act, &publisher.PublicKey, ref) + + assert.NoError(t, err) + dref, err := c.DownloadHandler(ctx, ls, encRef, &publisher.PublicKey, href, time.Now().Unix()) + assert.NoError(t, err) + assert.Equal(t, ref, dref) +} + +func TestController_GranteeDownload(t *testing.T) { + t.Parallel() + ctx := context.Background() + publisher := getPrivKey(0) + grantee := getPrivKey(2) + publisherDH := dynamicaccess.NewDefaultSession(publisher) + publisherAL := dynamicaccess.NewLogic(publisherDH) + + diffieHellman := dynamicaccess.NewDefaultSession(grantee) + al := dynamicaccess.NewLogic(diffieHellman) + ls := createLs() + c := dynamicaccess.NewController(al) + ref := swarm.RandAddress(t) + href, _ := getHistoryFixture(ctx, ls, publisherAL, &publisher.PublicKey) + h, _ := dynamicaccess.NewHistoryReference(ls, href) + ts := time.Date(2001, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + entry, _ := h.Lookup(ctx, ts) + actRef := entry.Reference() + act, _ := kvs.NewReference(ls, actRef) + encRef, err := publisherAL.EncryptRef(ctx, act, &publisher.PublicKey, ref) + + assert.NoError(t, err) + dref, err := c.DownloadHandler(ctx, ls, encRef, &publisher.PublicKey, href, ts) + assert.NoError(t, err) + assert.Equal(t, ref, dref) +} + +func TestController_UpdateHandler(t *testing.T) { + t.Parallel() + ctx := context.Background() + publisher := getPrivKey(1) + diffieHellman := dynamicaccess.NewDefaultSession(publisher) + al := dynamicaccess.NewLogic(diffieHellman) + keys, _ := al.Session.Key(&publisher.PublicKey, [][]byte{{1}}) + refCipher := encryption.New(keys[0], 0, uint32(0), sha3.NewLegacyKeccak256) + ls := createLs() + gls := loadsave.New(mockStorer.ChunkStore(), mockStorer.Cache(), requestPipelineFactory(context.Background(), mockStorer.Cache(), true, redundancy.NONE)) + c := dynamicaccess.NewController(al) + href, _ := getHistoryFixture(ctx, ls, al, 
&publisher.PublicKey) + + grantee1 := getPrivKey(0) + grantee := getPrivKey(2) + + t.Run("add to new list", func(t *testing.T) { + addList := []*ecdsa.PublicKey{&grantee.PublicKey} + granteeRef, _, _, _, err := c.UpdateHandler(ctx, ls, ls, swarm.ZeroAddress, swarm.ZeroAddress, &publisher.PublicKey, addList, nil) + assert.NoError(t, err) + + gl, err := dynamicaccess.NewGranteeListReference(ctx, ls, granteeRef) + + assert.NoError(t, err) + assert.Len(t, gl.Get(), 1) + }) + t.Run("add to existing list", func(t *testing.T) { + addList := []*ecdsa.PublicKey{&grantee.PublicKey} + granteeRef, eglref, _, _, err := c.UpdateHandler(ctx, ls, gls, swarm.ZeroAddress, href, &publisher.PublicKey, addList, nil) + assert.NoError(t, err) + + gl, err := dynamicaccess.NewGranteeListReference(ctx, ls, granteeRef) + + assert.NoError(t, err) + assert.Len(t, gl.Get(), 1) + + addList = []*ecdsa.PublicKey{&getPrivKey(0).PublicKey} + granteeRef, _, _, _, _ = c.UpdateHandler(ctx, ls, ls, eglref, href, &publisher.PublicKey, addList, nil) + gl, err = dynamicaccess.NewGranteeListReference(ctx, ls, granteeRef) + assert.NoError(t, err) + assert.Len(t, gl.Get(), 2) + }) + t.Run("add and revoke", func(t *testing.T) { + addList := []*ecdsa.PublicKey{&grantee.PublicKey} + revokeList := []*ecdsa.PublicKey{&grantee1.PublicKey} + gl, _ := dynamicaccess.NewGranteeList(ls) + _ = gl.Add([]*ecdsa.PublicKey{&publisher.PublicKey, &grantee1.PublicKey}) + granteeRef, _ := gl.Save(ctx) + eglref, _ := refCipher.Encrypt(granteeRef.Bytes()) + + granteeRef, _, _, _, _ = c.UpdateHandler(ctx, ls, gls, swarm.NewAddress(eglref), href, &publisher.PublicKey, addList, revokeList) + gl, err := dynamicaccess.NewGranteeListReference(ctx, ls, granteeRef) + + assert.NoError(t, err) + assert.Len(t, gl.Get(), 2) + }) + t.Run("add and revoke then get from history", func(t *testing.T) { + addRevokeList := []*ecdsa.PublicKey{&grantee.PublicKey} + ref := swarm.RandAddress(t) + _, hRef, encRef, err := c.UploadHandler(ctx, ls, ref, &publisher.PublicKey, swarm.ZeroAddress) + require.NoError(t, err) + + // Need to wait a second before each update call so that a new history mantaray fork is created for the new key(timestamp) entry + time.Sleep(1 * time.Second) + beforeRevokeTS := time.Now().Unix() + _, egranteeRef, hrefUpdate1, _, err := c.UpdateHandler(ctx, ls, gls, swarm.ZeroAddress, hRef, &publisher.PublicKey, addRevokeList, nil) + require.NoError(t, err) + + time.Sleep(1 * time.Second) + granteeRef, _, hrefUpdate2, _, err := c.UpdateHandler(ctx, ls, gls, egranteeRef, hrefUpdate1, &publisher.PublicKey, nil, addRevokeList) + require.NoError(t, err) + + gl, err := dynamicaccess.NewGranteeListReference(ctx, ls, granteeRef) + require.NoError(t, err) + assert.Empty(t, gl.Get()) + // expect history reference to be different after grantee list update + assert.NotEqual(t, hrefUpdate1, hrefUpdate2) + + granteeDH := dynamicaccess.NewDefaultSession(grantee) + granteeAl := dynamicaccess.NewLogic(granteeDH) + granteeCtrl := dynamicaccess.NewController(granteeAl) + // download with grantee shall still work with the timestamp before the revoke + decRef, err := granteeCtrl.DownloadHandler(ctx, ls, encRef, &publisher.PublicKey, hrefUpdate2, beforeRevokeTS) + require.NoError(t, err) + assert.Equal(t, ref, decRef) + + // download with grantee shall NOT work with the latest timestamp + decRef, err = granteeCtrl.DownloadHandler(ctx, ls, encRef, &publisher.PublicKey, hrefUpdate2, time.Now().Unix()) + require.Error(t, err) + assert.Equal(t, swarm.ZeroAddress, decRef) + + // publisher 
shall still be able to download with the timestamp before the revoke + decRef, err = c.DownloadHandler(ctx, ls, encRef, &publisher.PublicKey, hrefUpdate2, beforeRevokeTS) + require.NoError(t, err) + assert.Equal(t, ref, decRef) + }) + t.Run("add twice", func(t *testing.T) { + addList := []*ecdsa.PublicKey{&grantee.PublicKey, &grantee.PublicKey} + //nolint:ineffassign,staticcheck,wastedassign + granteeRef, eglref, _, _, err := c.UpdateHandler(ctx, ls, gls, swarm.ZeroAddress, href, &publisher.PublicKey, addList, nil) + granteeRef, _, _, _, _ = c.UpdateHandler(ctx, ls, ls, eglref, href, &publisher.PublicKey, addList, nil) + gl, err := dynamicaccess.NewGranteeListReference(ctx, ls, granteeRef) + + assert.NoError(t, err) + assert.Len(t, gl.Get(), 1) + }) + t.Run("revoke non-existing", func(t *testing.T) { + addList := []*ecdsa.PublicKey{&grantee.PublicKey} + granteeRef, _, _, _, _ := c.UpdateHandler(ctx, ls, ls, swarm.ZeroAddress, href, &publisher.PublicKey, addList, nil) + gl, err := dynamicaccess.NewGranteeListReference(ctx, ls, granteeRef) + + assert.NoError(t, err) + assert.Len(t, gl.Get(), 1) + }) +} + +func TestController_Get(t *testing.T) { + t.Parallel() + ctx := context.Background() + publisher := getPrivKey(1) + caller := getPrivKey(0) + grantee := getPrivKey(2) + diffieHellman1 := dynamicaccess.NewDefaultSession(publisher) + diffieHellman2 := dynamicaccess.NewDefaultSession(caller) + al1 := dynamicaccess.NewLogic(diffieHellman1) + al2 := dynamicaccess.NewLogic(diffieHellman2) + ls := createLs() + gls := loadsave.New(mockStorer.ChunkStore(), mockStorer.Cache(), requestPipelineFactory(context.Background(), mockStorer.Cache(), true, redundancy.NONE)) + c1 := dynamicaccess.NewController(al1) + c2 := dynamicaccess.NewController(al2) + + t.Run("get by publisher", func(t *testing.T) { + addList := []*ecdsa.PublicKey{&grantee.PublicKey} + granteeRef, eglRef, _, _, _ := c1.UpdateHandler(ctx, ls, gls, swarm.ZeroAddress, swarm.ZeroAddress, &publisher.PublicKey, addList, nil) + + grantees, err := c1.Get(ctx, ls, &publisher.PublicKey, eglRef) + assert.NoError(t, err) + assert.True(t, reflect.DeepEqual(grantees, addList)) + + gl, _ := dynamicaccess.NewGranteeListReference(ctx, ls, granteeRef) + assert.True(t, reflect.DeepEqual(gl.Get(), addList)) + }) + t.Run("get by non-publisher", func(t *testing.T) { + addList := []*ecdsa.PublicKey{&grantee.PublicKey} + _, eglRef, _, _, _ := c1.UpdateHandler(ctx, ls, gls, swarm.ZeroAddress, swarm.ZeroAddress, &publisher.PublicKey, addList, nil) + grantees, err := c2.Get(ctx, ls, &publisher.PublicKey, eglRef) + assert.Error(t, err) + assert.Nil(t, grantees) + }) +} diff --git a/pkg/dynamicaccess/grantee.go b/pkg/dynamicaccess/grantee.go new file mode 100644 index 00000000000..44832032754 --- /dev/null +++ b/pkg/dynamicaccess/grantee.go @@ -0,0 +1,166 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dynamicaccess + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "errors" + "fmt" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/ethersphere/bee/v2/pkg/file" + "github.com/ethersphere/bee/v2/pkg/swarm" +) + +const ( + publicKeyLen = 65 +) + +// GranteeList manages a list of public keys. +type GranteeList interface { + // Add adds a list of public keys to the grantee list. It filters out duplicates. + Add(addList []*ecdsa.PublicKey) error + // Remove removes a list of public keys from the grantee list, if there is any. 
+ Remove(removeList []*ecdsa.PublicKey) error + // Get simply returns the list of public keys. + Get() []*ecdsa.PublicKey + // Save saves the grantee list to the underlying storage and returns the reference. + Save(ctx context.Context) (swarm.Address, error) +} + +type GranteeListStruct struct { + grantees []*ecdsa.PublicKey + loadSave file.LoadSaver +} + +var _ GranteeList = (*GranteeListStruct)(nil) + +func (g *GranteeListStruct) Get() []*ecdsa.PublicKey { + return g.grantees +} + +func (g *GranteeListStruct) Add(addList []*ecdsa.PublicKey) error { + if len(addList) == 0 { + return fmt.Errorf("no public key provided") + } + filteredList := make([]*ecdsa.PublicKey, 0, len(addList)) + for _, addkey := range addList { + add := true + for _, granteekey := range g.grantees { + if granteekey.Equal(addkey) { + add = false + break + } + } + for _, filteredkey := range filteredList { + if filteredkey.Equal(addkey) { + add = false + break + } + } + if add { + filteredList = append(filteredList, addkey) + } + } + g.grantees = append(g.grantees, filteredList...) + + return nil +} + +func (g *GranteeListStruct) Save(ctx context.Context) (swarm.Address, error) { + data := serialize(g.grantees) + refBytes, err := g.loadSave.Save(ctx, data) + if err != nil { + return swarm.ZeroAddress, fmt.Errorf("grantee save error: %w", err) + } + + return swarm.NewAddress(refBytes), nil +} + +var ( + ErrNothingToRemove = errors.New("nothing to remove") + ErrNoGranteeFound = errors.New("no grantee found") +) + +func (g *GranteeListStruct) Remove(keysToRemove []*ecdsa.PublicKey) error { + if len(keysToRemove) == 0 { + return ErrNothingToRemove + } + + if len(g.grantees) == 0 { + return ErrNoGranteeFound + } + grantees := g.grantees + + for _, remove := range keysToRemove { + for i := 0; i < len(grantees); i++ { + if grantees[i].Equal(remove) { + grantees[i] = grantees[len(grantees)-1] + grantees = grantees[:len(grantees)-1] + } + } + } + g.grantees = grantees + + return nil +} + +func NewGranteeList(ls file.LoadSaver) (*GranteeListStruct, error) { // Why is the error necessary? + return &GranteeListStruct{ + grantees: []*ecdsa.PublicKey{}, + loadSave: ls, + }, nil +} + +func NewGranteeListReference(ctx context.Context, ls file.LoadSaver, reference swarm.Address) (*GranteeListStruct, error) { + data, err := ls.Load(ctx, reference.Bytes()) + if err != nil { + return nil, fmt.Errorf("unable to load reference, %w", err) + } + grantees := deserialize(data) + + return &GranteeListStruct{ + grantees: grantees, + loadSave: ls, + }, nil +} + +func serialize(publicKeys []*ecdsa.PublicKey) []byte { + b := make([]byte, 0, len(publicKeys)*publicKeyLen) + for _, key := range publicKeys { + b = append(b, serializePublicKey(key)...) 
+ } + return b +} + +func serializePublicKey(pub *ecdsa.PublicKey) []byte { + return elliptic.Marshal(pub.Curve, pub.X, pub.Y) +} + +func deserialize(data []byte) []*ecdsa.PublicKey { + if len(data) == 0 { + return []*ecdsa.PublicKey{} + } + + p := make([]*ecdsa.PublicKey, 0, len(data)/publicKeyLen) + for i := 0; i < len(data); i += publicKeyLen { + pubKey := deserializeBytes(data[i : i+publicKeyLen]) + if pubKey == nil { + return []*ecdsa.PublicKey{} + } + p = append(p, pubKey) + } + return p +} + +func deserializeBytes(data []byte) *ecdsa.PublicKey { + key, err := btcec.ParsePubKey(data) + if err != nil { + return nil + } + return key.ToECDSA() +} diff --git a/pkg/dynamicaccess/grantee_test.go b/pkg/dynamicaccess/grantee_test.go new file mode 100644 index 00000000000..f9a9b0904f8 --- /dev/null +++ b/pkg/dynamicaccess/grantee_test.go @@ -0,0 +1,236 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dynamicaccess_test + +import ( + "context" + "crypto/ecdsa" + "crypto/rand" + "testing" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/ethersphere/bee/v2/pkg/dynamicaccess" + "github.com/ethersphere/bee/v2/pkg/file" + "github.com/ethersphere/bee/v2/pkg/file/loadsave" + "github.com/ethersphere/bee/v2/pkg/file/pipeline" + "github.com/ethersphere/bee/v2/pkg/file/pipeline/builder" + "github.com/ethersphere/bee/v2/pkg/file/redundancy" + "github.com/ethersphere/bee/v2/pkg/storage" + mockstorer "github.com/ethersphere/bee/v2/pkg/storer/mock" + "github.com/ethersphere/bee/v2/pkg/swarm" + "github.com/stretchr/testify/assert" +) + +var mockStorer = mockstorer.New() + +func requestPipelineFactory(ctx context.Context, s storage.Putter, encrypt bool, rLevel redundancy.Level) func() pipeline.Interface { + return func() pipeline.Interface { + return builder.NewPipelineBuilder(ctx, s, encrypt, rLevel) + } +} + +func createLs() file.LoadSaver { + return loadsave.New(mockStorer.ChunkStore(), mockStorer.Cache(), requestPipelineFactory(context.Background(), mockStorer.Cache(), false, redundancy.NONE)) +} + +func generateKeyListFixture() ([]*ecdsa.PublicKey, error) { + key1, err := ecdsa.GenerateKey(btcec.S256(), rand.Reader) + if err != nil { + return nil, err + } + key2, err := ecdsa.GenerateKey(btcec.S256(), rand.Reader) + if err != nil { + return nil, err + } + key3, err := ecdsa.GenerateKey(btcec.S256(), rand.Reader) + if err != nil { + return nil, err + } + return []*ecdsa.PublicKey{&key1.PublicKey, &key2.PublicKey, &key3.PublicKey}, nil +} + +func TestGranteeAddGet(t *testing.T) { + t.Parallel() + gl, _ := dynamicaccess.NewGranteeList(createLs()) + keys, err := generateKeyListFixture() + if err != nil { + t.Errorf("key generation error: %v", err) + } + + t.Run("Get empty grantee list should return error", func(t *testing.T) { + val := gl.Get() + assert.Empty(t, val) + }) + + t.Run("Get should return value equal to put value", func(t *testing.T) { + var ( + keys2, _ = generateKeyListFixture() + addList1 []*ecdsa.PublicKey = []*ecdsa.PublicKey{keys[0]} + addList2 []*ecdsa.PublicKey = []*ecdsa.PublicKey{keys[1], keys[2]} + addList3 []*ecdsa.PublicKey = keys2 + ) + testCases := []struct { + name string + list []*ecdsa.PublicKey + }{ + { + name: "Test list = 1", + list: addList1, + }, + { + name: "Test list = duplicate1", + list: addList1, + }, + { + name: "Test list = 2", + list: addList2, + }, + { + name: "Test list = 3", + list: addList3, + }, + { + name: "Test empty add list", 
+ list: nil, + }, + } + + expList := []*ecdsa.PublicKey{} + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := gl.Add(tc.list) + if tc.list == nil { + assert.Error(t, err) + } else { + assert.NoError(t, err) + if tc.name != "Test list = duplicate1" { + expList = append(expList, tc.list...) + } + retVal := gl.Get() + assert.Equal(t, expList, retVal) + } + }) + } + }) +} + +func TestGranteeRemove(t *testing.T) { + t.Parallel() + gl, _ := dynamicaccess.NewGranteeList(createLs()) + keys, err := generateKeyListFixture() + if err != nil { + t.Errorf("key generation error: %v", err) + } + + t.Run("Add should NOT return error", func(t *testing.T) { + err := gl.Add(keys) + assert.NoError(t, err) + retVal := gl.Get() + assert.Equal(t, keys, retVal) + }) + removeList1 := []*ecdsa.PublicKey{keys[0]} + removeList2 := []*ecdsa.PublicKey{keys[2], keys[1]} + t.Run("Remove the first item should return NO error", func(t *testing.T) { + err := gl.Remove(removeList1) + assert.NoError(t, err) + retVal := gl.Get() + assert.Equal(t, removeList2, retVal) + }) + t.Run("Remove non-existent item should return NO error", func(t *testing.T) { + err := gl.Remove(removeList1) + assert.NoError(t, err) + retVal := gl.Get() + assert.Equal(t, removeList2, retVal) + }) + t.Run("Remove second and third item should return NO error", func(t *testing.T) { + err := gl.Remove(removeList2) + assert.NoError(t, err) + retVal := gl.Get() + assert.Empty(t, retVal) + }) + t.Run("Remove from empty grantee list should return error", func(t *testing.T) { + err := gl.Remove(removeList1) + assert.Error(t, err) + retVal := gl.Get() + assert.Empty(t, retVal) + }) + t.Run("Remove empty remove list should return error", func(t *testing.T) { + err := gl.Remove(nil) + assert.Error(t, err) + retVal := gl.Get() + assert.Empty(t, retVal) + }) +} + +func TestGranteeSave(t *testing.T) { + t.Parallel() + ctx := context.Background() + keys, err := generateKeyListFixture() + if err != nil { + t.Errorf("key generation error: %v", err) + } + t.Run("Create grantee list with invalid reference, expect error", func(t *testing.T) { + gl, err := dynamicaccess.NewGranteeListReference(ctx, createLs(), swarm.RandAddress(t)) + assert.Error(t, err) + assert.Nil(t, gl) + }) + t.Run("Save empty grantee list return NO error", func(t *testing.T) { + gl, _ := dynamicaccess.NewGranteeList(createLs()) + _, err := gl.Save(ctx) + assert.NoError(t, err) + }) + t.Run("Save not empty grantee list return valid swarm address", func(t *testing.T) { + gl, _ := dynamicaccess.NewGranteeList(createLs()) + err = gl.Add(keys) + ref, err := gl.Save(ctx) + assert.NoError(t, err) + assert.True(t, ref.IsValidNonEmpty()) + }) + t.Run("Save grantee list with one item, no error, pre-save value exist", func(t *testing.T) { + ls := createLs() + gl1, _ := dynamicaccess.NewGranteeList(ls) + + err := gl1.Add(keys) + assert.NoError(t, err) + + ref, err := gl1.Save(ctx) + assert.NoError(t, err) + + gl2, _ := dynamicaccess.NewGranteeListReference(ctx, ls, ref) + val := gl2.Get() + assert.NoError(t, err) + assert.Equal(t, keys, val) + }) + t.Run("Save grantee list and add one item, no error, after-save value exist", func(t *testing.T) { + ls := createLs() + keys2, _ := generateKeyListFixture() + + gl1, _ := dynamicaccess.NewGranteeList(ls) + + err := gl1.Add(keys) + assert.NoError(t, err) + ref, err := gl1.Save(ctx) + assert.NoError(t, err) + + gl2, _ := dynamicaccess.NewGranteeListReference(ctx, ls, ref) + err = gl2.Add(keys2) + assert.NoError(t, err) + + val := gl2.Get() + 
assert.Equal(t, append(keys, keys2...), val) + }) +} + +func TestGranteeRemoveTwo(t *testing.T) { + gl, _ := dynamicaccess.NewGranteeList(createLs()) + keys, err := generateKeyListFixture() + if err != nil { + t.Errorf("key generation error: %v", err) + } + _ = gl.Add([]*ecdsa.PublicKey{keys[0]}) + _ = gl.Add([]*ecdsa.PublicKey{keys[0]}) + err = gl.Remove([]*ecdsa.PublicKey{keys[0]}) + assert.NoError(t, err) +} diff --git a/pkg/dynamicaccess/history.go b/pkg/dynamicaccess/history.go new file mode 100644 index 00000000000..27485e95f95 --- /dev/null +++ b/pkg/dynamicaccess/history.go @@ -0,0 +1,172 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dynamicaccess + +import ( + "context" + "errors" + "fmt" + "math" + "strconv" + "time" + + "github.com/ethersphere/bee/v2/pkg/file" + "github.com/ethersphere/bee/v2/pkg/manifest" + "github.com/ethersphere/bee/v2/pkg/manifest/mantaray" + "github.com/ethersphere/bee/v2/pkg/swarm" +) + +type History interface { + Add(ctx context.Context, ref swarm.Address, timestamp *int64, metadata *map[string]string) error + Lookup(ctx context.Context, timestamp int64) (manifest.Entry, error) + Store(ctx context.Context) (swarm.Address, error) +} + +var _ History = (*HistoryStruct)(nil) + +var ( + ErrEndIteration = errors.New("end iteration") + ErrUnexpectedType = errors.New("unexpected type") +) + +type HistoryStruct struct { + manifest *manifest.MantarayManifest + ls file.LoadSaver +} + +func NewHistory(ls file.LoadSaver) (*HistoryStruct, error) { + m, err := manifest.NewDefaultManifest(ls, false) + if err != nil { + return nil, fmt.Errorf("failed to create default manifest: %w", err) + } + + mm, ok := m.(*manifest.MantarayManifest) + if !ok { + return nil, fmt.Errorf("%w: expected MantarayManifest, got %T", ErrUnexpectedType, m) + } + + return &HistoryStruct{manifest: mm, ls: ls}, nil +} + +func NewHistoryReference(ls file.LoadSaver, ref swarm.Address) (*HistoryStruct, error) { + m, err := manifest.NewDefaultManifestReference(ref, ls) + if err != nil { + return nil, fmt.Errorf("failed to create default manifest: %w", err) + } + + mm, ok := m.(*manifest.MantarayManifest) + if !ok { + return nil, fmt.Errorf("%w: expected MantarayManifest, got %T", ErrUnexpectedType, m) + } + + return &HistoryStruct{manifest: mm, ls: ls}, nil +} + +func (h *HistoryStruct) Add(ctx context.Context, ref swarm.Address, timestamp *int64, metadata *map[string]string) error { + mtdt := map[string]string{} + if metadata != nil { + mtdt = *metadata + } + // add timestamps transformed so that the latests timestamp becomes the smallest key + var unixTime int64 + if timestamp != nil { + unixTime = *timestamp + } else { + unixTime = time.Now().Unix() + } + + key := strconv.FormatInt(math.MaxInt64-unixTime, 10) + return h.manifest.Add(ctx, key, manifest.NewEntry(ref, mtdt)) +} + +var ErrInvalidTimestamp = errors.New("invalid timestamp") + +// Lookup finds the entry for a path or returns error if not found +func (h *HistoryStruct) Lookup(ctx context.Context, timestamp int64) (manifest.Entry, error) { + if timestamp <= 0 { + return manifest.NewEntry(swarm.ZeroAddress, map[string]string{}), ErrInvalidTimestamp + } + + reversedTimestamp := math.MaxInt64 - timestamp + node, err := h.lookupNode(ctx, reversedTimestamp) + if err != nil { + return manifest.NewEntry(swarm.ZeroAddress, map[string]string{}), err + } + + if node != nil { + return 
manifest.NewEntry(swarm.NewAddress(node.Entry()), node.Metadata()), nil + } + + return manifest.NewEntry(swarm.ZeroAddress, map[string]string{}), nil +} + +func (h *HistoryStruct) lookupNode(ctx context.Context, searchedTimestamp int64) (*mantaray.Node, error) { + // before node's timestamp is the closest one that is less than or equal to the searched timestamp + // for instance: 2030, 2020, 1994 -> search for 2021 -> before is 2020 + var beforeNode *mantaray.Node + // after node's timestamp is after the latest + // for instance: 2030, 2020, 1994 -> search for 1980 -> after is 1994 + var afterNode *mantaray.Node + + walker := func(pathTimestamp []byte, currNode *mantaray.Node, err error) error { + if err != nil { + return err + } + + if currNode.IsValueType() && len(currNode.Entry()) > 0 { + afterNode = currNode + + match, err := isBeforeMatch(pathTimestamp, searchedTimestamp) + if match { + beforeNode = currNode + // return error to stop the walk, this is how WalkNode works... + return ErrEndIteration + } + + return err + } + + return nil + } + + rootNode := h.manifest.Root() + err := rootNode.WalkNode(ctx, []byte{}, h.ls, walker) + + if err != nil && !errors.Is(err, ErrEndIteration) { + return nil, fmt.Errorf("history lookup node error: %w", err) + } + + if beforeNode != nil { + return beforeNode, nil + } + if afterNode != nil { + return afterNode, nil + } + return nil, nil +} + +func (h *HistoryStruct) Store(ctx context.Context) (swarm.Address, error) { + return h.manifest.Store(ctx) +} + +func bytesToInt64(b []byte) (int64, error) { + num, err := strconv.ParseInt(string(b), 10, 64) + if err != nil { + return -1, err + } + + return num, nil +} + +func isBeforeMatch(pathTimestamp []byte, searchedTimestamp int64) (bool, error) { + targetTimestamp, err := bytesToInt64(pathTimestamp) + if err != nil { + return false, err + } + if targetTimestamp == 0 { + return false, nil + } + return searchedTimestamp <= targetTimestamp, nil +} diff --git a/pkg/dynamicaccess/history_test.go b/pkg/dynamicaccess/history_test.go new file mode 100644 index 00000000000..871b5241eab --- /dev/null +++ b/pkg/dynamicaccess/history_test.go @@ -0,0 +1,155 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
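// Illustrative sketch (not part of the changeset above): a minimal History round
// trip showing the reverse-timestamp keying used by HistoryStruct.Add, where the
// mantaray key is strconv.FormatInt(math.MaxInt64-unixTime, 10) so the newest
// entry sorts first. Assumes the dynamicaccess, swarm, time and file imports used
// by the tests in this diff; the helper name is hypothetical and errors are elided.
func historyLookupSketch(ctx context.Context, ls file.LoadSaver) swarm.Address {
	h, _ := dynamicaccess.NewHistory(ls)

	// ACT references added out of chronological order.
	first := time.Date(1994, time.April, 1, 0, 0, 0, 0, time.UTC).Unix()
	second := time.Date(2000, time.April, 1, 0, 0, 0, 0, time.UTC).Unix()
	_ = h.Add(ctx, swarm.NewAddress([]byte("39a5ea87b141fe44aa609c3327ecd891")), &first, nil)
	_ = h.Add(ctx, swarm.NewAddress([]byte("39a5ea87b141fe44aa609c3327ecd892")), &second, nil)
	_ = h.Add(ctx, swarm.NewAddress([]byte("39a5ea87b141fe44aa609c3327ecd893")), nil, nil) // nil timestamp defaults to time.Now()

	ref, _ := h.Store(ctx)

	// A lookup between 1994 and 2000 resolves to the closest entry at or before
	// the searched timestamp, i.e. the 1994 ACT reference.
	h2, _ := dynamicaccess.NewHistoryReference(ls, ref)
	entry, _ := h2.Lookup(ctx, time.Date(1999, time.January, 1, 0, 0, 0, 0, time.UTC).Unix())
	_ = entry.Reference() // ...ecd891

	return ref
}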
+ +package dynamicaccess_test + +import ( + "context" + "reflect" + "testing" + "time" + + "github.com/ethersphere/bee/v2/pkg/dynamicaccess" + "github.com/ethersphere/bee/v2/pkg/file/loadsave" + "github.com/ethersphere/bee/v2/pkg/file/pipeline" + "github.com/ethersphere/bee/v2/pkg/file/pipeline/builder" + "github.com/ethersphere/bee/v2/pkg/storage" + mockstorer "github.com/ethersphere/bee/v2/pkg/storer/mock" + "github.com/ethersphere/bee/v2/pkg/swarm" + "github.com/stretchr/testify/assert" +) + +func TestHistoryAdd(t *testing.T) { + t.Parallel() + h, err := dynamicaccess.NewHistory(nil) + assert.NoError(t, err) + + addr := swarm.NewAddress([]byte("addr")) + + ctx := context.Background() + + err = h.Add(ctx, addr, nil, nil) + assert.NoError(t, err) +} + +func TestSingleNodeHistoryLookup(t *testing.T) { + t.Parallel() + storer := mockstorer.New() + ctx := context.Background() + ls := loadsave.New(storer.ChunkStore(), storer.Cache(), pipelineFactory(storer.Cache(), false)) + + h, err := dynamicaccess.NewHistory(ls) + assert.NoError(t, err) + + testActRef := swarm.RandAddress(t) + err = h.Add(ctx, testActRef, nil, nil) + assert.NoError(t, err) + + _, err = h.Store(ctx) + assert.NoError(t, err) + + searchedTime := time.Now().Unix() + entry, err := h.Lookup(ctx, searchedTime) + actRef := entry.Reference() + assert.NoError(t, err) + assert.True(t, actRef.Equal(testActRef)) + assert.Nil(t, entry.Metadata()) +} + +func TestMultiNodeHistoryLookup(t *testing.T) { + t.Parallel() + storer := mockstorer.New() + ctx := context.Background() + ls := loadsave.New(storer.ChunkStore(), storer.Cache(), pipelineFactory(storer.Cache(), false)) + + h, _ := dynamicaccess.NewHistory(ls) + + testActRef1 := swarm.NewAddress([]byte("39a5ea87b141fe44aa609c3327ecd891")) + firstTime := time.Date(1994, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + mtdt1 := map[string]string{"firstTime": "1994-04-01"} + _ = h.Add(ctx, testActRef1, &firstTime, &mtdt1) + + testActRef2 := swarm.NewAddress([]byte("39a5ea87b141fe44aa609c3327ecd892")) + secondTime := time.Date(2000, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + mtdt2 := map[string]string{"secondTime": "2000-04-01"} + _ = h.Add(ctx, testActRef2, &secondTime, &mtdt2) + + testActRef3 := swarm.NewAddress([]byte("39a5ea87b141fe44aa609c3327ecd893")) + thirdTime := time.Date(2015, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + mtdt3 := map[string]string{"thirdTime": "2015-04-01"} + _ = h.Add(ctx, testActRef3, &thirdTime, &mtdt3) + + testActRef4 := swarm.NewAddress([]byte("39a5ea87b141fe44aa609c3327ecd894")) + fourthTime := time.Date(2020, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + mtdt4 := map[string]string{"fourthTime": "2020-04-01"} + _ = h.Add(ctx, testActRef4, &fourthTime, &mtdt4) + + testActRef5 := swarm.NewAddress([]byte("39a5ea87b141fe44aa609c3327ecd895")) + fifthTime := time.Date(2030, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + mtdt5 := map[string]string{"fifthTime": "2030-04-01"} + _ = h.Add(ctx, testActRef5, &fifthTime, &mtdt5) + + // latest + searchedTime := time.Date(1980, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + entry, err := h.Lookup(ctx, searchedTime) + actRef := entry.Reference() + assert.NoError(t, err) + assert.True(t, actRef.Equal(testActRef1)) + assert.True(t, reflect.DeepEqual(mtdt1, entry.Metadata())) + + // before first time + searchedTime = time.Date(2021, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + entry, err = h.Lookup(ctx, searchedTime) + actRef = entry.Reference() + assert.NoError(t, err) + assert.True(t, actRef.Equal(testActRef4)) + 
assert.True(t, reflect.DeepEqual(mtdt4, entry.Metadata())) + + // same time + searchedTime = time.Date(2000, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + entry, err = h.Lookup(ctx, searchedTime) + actRef = entry.Reference() + assert.NoError(t, err) + assert.True(t, actRef.Equal(testActRef2)) + assert.True(t, reflect.DeepEqual(mtdt2, entry.Metadata())) + + // after time + searchedTime = time.Date(2045, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + entry, err = h.Lookup(ctx, searchedTime) + actRef = entry.Reference() + assert.NoError(t, err) + assert.True(t, actRef.Equal(testActRef5)) + assert.True(t, reflect.DeepEqual(mtdt5, entry.Metadata())) +} + +func TestHistoryStore(t *testing.T) { + t.Parallel() + storer := mockstorer.New() + ctx := context.Background() + ls := loadsave.New(storer.ChunkStore(), storer.Cache(), pipelineFactory(storer.Cache(), false)) + + h1, _ := dynamicaccess.NewHistory(ls) + + testActRef1 := swarm.NewAddress([]byte("39a5ea87b141fe44aa609c3327ecd891")) + firstTime := time.Date(1994, time.April, 1, 0, 0, 0, 0, time.UTC).Unix() + mtdt1 := map[string]string{"firstTime": "1994-04-01"} + _ = h1.Add(ctx, testActRef1, &firstTime, &mtdt1) + + href1, err := h1.Store(ctx) + assert.NoError(t, err) + + h2, err := dynamicaccess.NewHistoryReference(ls, href1) + assert.NoError(t, err) + + entry1, _ := h2.Lookup(ctx, firstTime) + actRef1 := entry1.Reference() + assert.True(t, actRef1.Equal(testActRef1)) + assert.True(t, reflect.DeepEqual(mtdt1, entry1.Metadata())) +} + +func pipelineFactory(s storage.Putter, encrypt bool) func() pipeline.Interface { + return func() pipeline.Interface { + return builder.NewPipelineBuilder(context.Background(), s, encrypt, 0) + } +} diff --git a/pkg/dynamicaccess/mock/accesslogic.go b/pkg/dynamicaccess/mock/accesslogic.go new file mode 100644 index 00000000000..6b48c824eb9 --- /dev/null +++ b/pkg/dynamicaccess/mock/accesslogic.go @@ -0,0 +1,15 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package mock + +type AccessLogicMock struct { + GetFunc func(string, string, string) (string, error) +} + +func (ma *AccessLogicMock) Get(encrypted_ref string, publisher string, tag string) (string, error) { + if ma.GetFunc == nil { + return "", nil + } + return ma.GetFunc(encrypted_ref, publisher, tag) +} diff --git a/pkg/dynamicaccess/mock/controller.go b/pkg/dynamicaccess/mock/controller.go new file mode 100644 index 00000000000..30f617bfccb --- /dev/null +++ b/pkg/dynamicaccess/mock/controller.go @@ -0,0 +1,183 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
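// Illustrative sketch (not part of the changeset): how the Controller introduced
// earlier in this diff ties Session, ActLogic, History and the ACT key-value
// store together for an upload, a grant and a grantee download. Wiring mirrors
// controller_test.go; the function name is hypothetical, errors are elided, and,
// as in the tests, consecutive history updates are assumed to land on different
// timestamp keys (the tests sleep for a second between calls).
func actRoundTripSketch(ctx context.Context, ls file.LoadSaver, publisher, grantee *ecdsa.PrivateKey) {
	pubCtrl := dynamicaccess.NewController(dynamicaccess.NewLogic(dynamicaccess.NewDefaultSession(publisher)))

	// First upload with a zero history root creates a fresh History and ACT.
	ref := swarm.NewAddress([]byte("39a5ea87b141fe44aa609c3327ecd899"))
	_, historyRef, encryptedRef, _ := pubCtrl.UploadHandler(ctx, ls, ref, &publisher.PublicKey, swarm.ZeroAddress)

	// Grant access: a new grantee list, ACT version and history root are produced.
	_, _, historyRef, _, _ = pubCtrl.UpdateHandler(ctx, ls, ls, swarm.ZeroAddress, historyRef,
		&publisher.PublicKey, []*ecdsa.PublicKey{&grantee.PublicKey}, nil)

	// Grantee side: its own Session is enough to resolve the encrypted reference
	// for any timestamp at or after the grant.
	granteeCtrl := dynamicaccess.NewController(dynamicaccess.NewLogic(dynamicaccess.NewDefaultSession(grantee)))
	plainRef, _ := granteeCtrl.DownloadHandler(ctx, ls, encryptedRef, &publisher.PublicKey, historyRef, time.Now().Unix())
	_ = plainRef // equals ref
}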
+ +package mock + +import ( + "context" + "crypto/ecdsa" + "encoding/hex" + "fmt" + "time" + + "github.com/ethersphere/bee/v2/pkg/crypto" + "github.com/ethersphere/bee/v2/pkg/dynamicaccess" + "github.com/ethersphere/bee/v2/pkg/encryption" + "github.com/ethersphere/bee/v2/pkg/file" + "github.com/ethersphere/bee/v2/pkg/file/loadsave" + "github.com/ethersphere/bee/v2/pkg/file/pipeline" + "github.com/ethersphere/bee/v2/pkg/file/pipeline/builder" + "github.com/ethersphere/bee/v2/pkg/file/redundancy" + "github.com/ethersphere/bee/v2/pkg/storage" + mockstorer "github.com/ethersphere/bee/v2/pkg/storer/mock" + "github.com/ethersphere/bee/v2/pkg/swarm" + "golang.org/x/crypto/sha3" +) + +type mockController struct { + historyMap map[string]dynamicaccess.History + refMap map[string]swarm.Address + acceptAll bool + publisher string + encrypter encryption.Interface + ls file.LoadSaver +} + +type optionFunc func(*mockController) + +// Option is an option passed to a mock dynamicaccess Service. +type Option interface { + apply(*mockController) +} + +func (f optionFunc) apply(r *mockController) { f(r) } + +// New creates a new mock dynamicaccess service. +func New(o ...Option) dynamicaccess.Controller { + storer := mockstorer.New() + m := &mockController{ + historyMap: make(map[string]dynamicaccess.History), + refMap: make(map[string]swarm.Address), + publisher: "", + encrypter: encryption.New(encryption.Key("b6ee086390c280eeb9824c331a4427596f0c8510d5564bc1b6168d0059a46e2b"), 0, uint32(0), sha3.NewLegacyKeccak256), + ls: loadsave.New(storer.ChunkStore(), storer.Cache(), requestPipelineFactory(context.Background(), storer.Cache(), false, redundancy.NONE)), + } + for _, v := range o { + v.apply(m) + } + + return m +} + +// WithAcceptAll sets the mock to return fixed references on every call to DownloadHandler. 
+func WithAcceptAll() Option { + return optionFunc(func(m *mockController) { m.acceptAll = true }) +} + +func WithHistory(h dynamicaccess.History, ref string) Option { + return optionFunc(func(m *mockController) { + m.historyMap = map[string]dynamicaccess.History{ref: h} + }) +} + +func WithPublisher(ref string) Option { + return optionFunc(func(m *mockController) { + m.publisher = ref + m.encrypter = encryption.New(encryption.Key(ref), 0, uint32(0), sha3.NewLegacyKeccak256) + }) +} + +func (m *mockController) DownloadHandler(ctx context.Context, ls file.LoadSaver, encryptedRef swarm.Address, publisher *ecdsa.PublicKey, historyRootHash swarm.Address, timestamp int64) (swarm.Address, error) { + if m.acceptAll { + return swarm.ParseHexAddress("36e6c1bbdfee6ac21485d5f970479fd1df458d36df9ef4e8179708ed46da557f") + } + + publicKeyBytes := crypto.EncodeSecp256k1PublicKey(publisher) + p := hex.EncodeToString(publicKeyBytes) + if m.publisher != "" && m.publisher != p { + return swarm.ZeroAddress, fmt.Errorf("incorrect publisher") + } + + h, exists := m.historyMap[historyRootHash.String()] + if !exists { + return swarm.ZeroAddress, fmt.Errorf("history not found") + } + entry, err := h.Lookup(ctx, timestamp) + kvsRef := entry.Reference() + if kvsRef.IsZero() || err != nil { + return swarm.ZeroAddress, fmt.Errorf("kvs not found") + } + return m.refMap[encryptedRef.String()], nil +} + +func (m *mockController) UploadHandler(ctx context.Context, ls file.LoadSaver, reference swarm.Address, publisher *ecdsa.PublicKey, historyRootHash swarm.Address) (swarm.Address, swarm.Address, swarm.Address, error) { + historyRef, _ := swarm.ParseHexAddress("67bdf80a9bbea8eca9c8480e43fdceb485d2d74d5708e45144b8c4adacd13d9c") + kvsRef, _ := swarm.ParseHexAddress("3339613565613837623134316665343461613630396333333237656364383934") + if m.acceptAll { + encryptedRef, _ := swarm.ParseHexAddress("fc4e9fe978991257b897d987bc4ff13058b66ef45a53189a0b4fe84bb3346396") + return kvsRef, historyRef, encryptedRef, nil + } + var ( + h dynamicaccess.History + exists bool + ) + now := time.Now().Unix() + if !historyRootHash.IsZero() { + historyRef = historyRootHash + h, exists = m.historyMap[historyRef.String()] + if !exists { + return swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, fmt.Errorf("history not found") + } + entry, _ := h.Lookup(ctx, now) + kvsRef := entry.Reference() + if kvsRef.IsZero() { + return swarm.ZeroAddress, swarm.ZeroAddress, swarm.ZeroAddress, fmt.Errorf("kvs not found") + } + } else { + h, _ = dynamicaccess.NewHistory(m.ls) + _ = h.Add(ctx, kvsRef, &now, nil) + historyRef, _ = h.Store(ctx) + m.historyMap[historyRef.String()] = h + } + + encryptedRef, _ := m.encrypter.Encrypt(reference.Bytes()) + m.refMap[(hex.EncodeToString(encryptedRef))] = reference + return kvsRef, historyRef, swarm.NewAddress(encryptedRef), nil +} + +func (m *mockController) Close() error { + return nil +} + +func (m *mockController) UpdateHandler(_ context.Context, ls file.LoadSaver, gls file.LoadSaver, encryptedglref swarm.Address, historyref swarm.Address, publisher *ecdsa.PublicKey, addList []*ecdsa.PublicKey, removeList []*ecdsa.PublicKey) (swarm.Address, swarm.Address, swarm.Address, swarm.Address, error) { + historyRef, _ := swarm.ParseHexAddress("67bdf80a9bbea8eca9c8480e43fdceb485d2d74d5708e45144b8c4adacd13d9c") + glRef, _ := swarm.ParseHexAddress("3339613565613837623134316665343461613630396333333237656364383934") + eglRef, _ := swarm.ParseHexAddress("fc4e9fe978991257b897d987bc4ff13058b66ef45a53189a0b4fe84bb3346396") + actref, _ 
:= swarm.ParseHexAddress("39a5ea87b141fe44aa609c3327ecd896c0e2122897f5f4bbacf74db1033c5559") + return glRef, eglRef, historyRef, actref, nil +} + +func (m *mockController) Get(ctx context.Context, ls file.LoadSaver, publisher *ecdsa.PublicKey, encryptedglref swarm.Address) ([]*ecdsa.PublicKey, error) { + if m.publisher == "" { + return nil, fmt.Errorf("granteelist not found") + } + keys := []string{ + "a786dd84b61485de12146fd9c4c02d87e8fd95f0542765cb7fc3d2e428c0bcfa", + "b786dd84b61485de12146fd9c4c02d87e8fd95f0542765cb7fc3d2e428c0bcfb", + "c786dd84b61485de12146fd9c4c02d87e8fd95f0542765cb7fc3d2e428c0bcfc", + } + pubkeys := make([]*ecdsa.PublicKey, 0, len(keys)) + for i := range keys { + data, err := hex.DecodeString(keys[i]) + if err != nil { + panic(err) + } + + privKey, err := crypto.DecodeSecp256k1PrivateKey(data) + pubKey := privKey.PublicKey + if err != nil { + panic(err) + } + pubkeys = append(pubkeys, &pubKey) + } + return pubkeys, nil +} + +func requestPipelineFactory(ctx context.Context, s storage.Putter, encrypt bool, rLevel redundancy.Level) func() pipeline.Interface { + return func() pipeline.Interface { + return builder.NewPipelineBuilder(ctx, s, encrypt, rLevel) + } +} + +var _ dynamicaccess.Controller = (*mockController)(nil) diff --git a/pkg/dynamicaccess/mock/grantee.go b/pkg/dynamicaccess/mock/grantee.go new file mode 100644 index 00000000000..6a8f01c87cb --- /dev/null +++ b/pkg/dynamicaccess/mock/grantee.go @@ -0,0 +1,55 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mock + +import ( + "crypto/ecdsa" + + "github.com/ethersphere/bee/v2/pkg/swarm" +) + +type GranteeListMock interface { + Add(publicKeys []*ecdsa.PublicKey) error + Remove(removeList []*ecdsa.PublicKey) error + Get() []*ecdsa.PublicKey + Save() (swarm.Address, error) +} + +type GranteeListStructMock struct { + grantees []*ecdsa.PublicKey +} + +func (g *GranteeListStructMock) Get() []*ecdsa.PublicKey { + grantees := g.grantees + keys := make([]*ecdsa.PublicKey, len(grantees)) + copy(keys, grantees) + return keys +} + +func (g *GranteeListStructMock) Add(addList []*ecdsa.PublicKey) error { + g.grantees = append(g.grantees, addList...) + return nil +} + +func (g *GranteeListStructMock) Remove(removeList []*ecdsa.PublicKey) error { + for _, remove := range removeList { + for i, grantee := range g.grantees { + if *grantee == *remove { + g.grantees[i] = g.grantees[len(g.grantees)-1] + g.grantees = g.grantees[:len(g.grantees)-1] + } + } + } + + return nil +} + +func (g *GranteeListStructMock) Save() (swarm.Address, error) { + return swarm.EmptyAddress, nil +} + +func NewGranteeList() *GranteeListStructMock { + return &GranteeListStructMock{grantees: []*ecdsa.PublicKey{}} +} diff --git a/pkg/dynamicaccess/mock/session.go b/pkg/dynamicaccess/mock/session.go new file mode 100644 index 00000000000..6e5d43a9576 --- /dev/null +++ b/pkg/dynamicaccess/mock/session.go @@ -0,0 +1,44 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
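// Illustrative sketch (not part of the changeset): stubbing the Controller in a
// test with the mock above. WithAcceptAll makes the handlers return fixed
// references regardless of input, while WithHistory/WithPublisher prime the
// lookup maps checked by the non-accepting paths. Assumes the mock package
// import "github.com/ethersphere/bee/v2/pkg/dynamicaccess/mock" alongside the
// imports used by the tests in this diff; the function name is hypothetical and
// errors are elided.
func mockControllerSketch(ctx context.Context, ls file.LoadSaver, publisher *ecdsa.PublicKey) {
	ctrl := mock.New(mock.WithAcceptAll())

	// Every download resolves to the fixed address baked into the mock.
	ref, _ := ctrl.DownloadHandler(ctx, ls, swarm.ZeroAddress, publisher, swarm.ZeroAddress, time.Now().Unix())
	_ = ref // 36e6c1bbdfee6ac21485d5f970479fd1df458d36df9ef4e8179708ed46da557f

	// Without WithAcceptAll, the mock validates the publisher and history root.
	h, _ := dynamicaccess.NewHistory(ls)
	_ = h.Add(ctx, swarm.NewAddress([]byte("39a5ea87b141fe44aa609c3327ecd890")), nil, nil)
	historyRef, _ := h.Store(ctx)
	_ = mock.New(
		mock.WithHistory(h, historyRef.String()),
		mock.WithPublisher(hex.EncodeToString(crypto.EncodeSecp256k1PublicKey(publisher))),
	)
}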
+ +package mock + +import ( + "crypto/ecdsa" + + "github.com/ethersphere/bee/v2/pkg/crypto" + "github.com/ethersphere/bee/v2/pkg/keystore" +) + +type SessionMock struct { + KeyFunc func(publicKey *ecdsa.PublicKey, nonces [][]byte) ([][]byte, error) + key *ecdsa.PrivateKey +} + +func (s *SessionMock) Key(publicKey *ecdsa.PublicKey, nonces [][]byte) ([][]byte, error) { + if s.KeyFunc == nil { + return nil, nil + } + return s.KeyFunc(publicKey, nonces) +} + +func NewSessionMock(key *ecdsa.PrivateKey) *SessionMock { + return &SessionMock{key: key} +} + +func NewFromKeystore( + ks keystore.Service, + tag, + password string, + keyFunc func(publicKey *ecdsa.PublicKey, nonces [][]byte) ([][]byte, error), +) *SessionMock { + key, created, err := ks.Key(tag, password, crypto.EDGSecp256_K1) + if !created || err != nil { + return nil + } + return &SessionMock{ + key: key, + KeyFunc: keyFunc, + } +} diff --git a/pkg/dynamicaccess/session.go b/pkg/dynamicaccess/session.go new file mode 100644 index 00000000000..fb1f44241be --- /dev/null +++ b/pkg/dynamicaccess/session.go @@ -0,0 +1,67 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dynamicaccess + +import ( + "crypto/ecdsa" + "errors" + "fmt" + + "github.com/ethersphere/bee/v2/pkg/crypto" + "github.com/ethersphere/bee/v2/pkg/keystore" +) + +// Session represents an interface for a Diffie-Helmann key derivation +type Session interface { + // Key returns a derived key for each nonce + Key(publicKey *ecdsa.PublicKey, nonces [][]byte) ([][]byte, error) +} + +var _ Session = (*SessionStruct)(nil) + +type SessionStruct struct { + key *ecdsa.PrivateKey +} + +var ( + ErrInvalidPublicKey = errors.New("invalid public key") + ErrSecretKeyInfinity = errors.New("shared secret is point at infinity") +) + +func (s *SessionStruct) Key(publicKey *ecdsa.PublicKey, nonces [][]byte) ([][]byte, error) { + if publicKey == nil { + return nil, ErrInvalidPublicKey + } + x, y := publicKey.Curve.ScalarMult(publicKey.X, publicKey.Y, s.key.D.Bytes()) + if x == nil || y == nil { + return nil, ErrSecretKeyInfinity + } + + if len(nonces) == 0 { + return [][]byte{(*x).Bytes()}, nil + } + + keys := make([][]byte, 0, len(nonces)) + for _, nonce := range nonces { + key, err := crypto.LegacyKeccak256(append(x.Bytes(), nonce...)) + if err != nil { + return nil, fmt.Errorf("failed to get Keccak256 hash: %w", err) + } + keys = append(keys, key) + } + + return keys, nil +} + +func NewDefaultSession(key *ecdsa.PrivateKey) *SessionStruct { + return &SessionStruct{ + key: key, + } +} + +// Currently implemented only in mock/session.go +func NewFromKeystore(keystore.Service, string, string) Session { + return nil +} diff --git a/pkg/dynamicaccess/session_test.go b/pkg/dynamicaccess/session_test.go new file mode 100644 index 00000000000..701384b93bb --- /dev/null +++ b/pkg/dynamicaccess/session_test.go @@ -0,0 +1,177 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
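// Illustrative sketch (not part of the changeset): what SessionStruct.Key above
// computes. The shared secret is the X coordinate of the ECDH point priv·Pub;
// with nonces each derived key is Keccak256(x || nonce), so both parties obtain
// the same keys from their own private key and the peer's public key. Assumes
// the crypto, dynamicaccess and bytes imports used elsewhere in this diff; the
// function name is hypothetical and errors are elided.
func sessionKeySketch() {
	a, _ := crypto.GenerateSecp256k1Key()
	b, _ := crypto.GenerateSecp256k1Key()
	nonce := []byte{1}

	keys, _ := dynamicaccess.NewDefaultSession(a).Key(&b.PublicKey, [][]byte{nonce})

	// Manual derivation on the peer's side, mirroring session.go.
	x, _ := b.PublicKey.Curve.ScalarMult(a.PublicKey.X, a.PublicKey.Y, b.D.Bytes())
	manual, _ := crypto.LegacyKeccak256(append(x.Bytes(), nonce...))

	_ = bytes.Equal(keys[0], manual) // true
}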
+ +package dynamicaccess_test + +import ( + "bytes" + "crypto/ecdsa" + "crypto/rand" + "encoding/hex" + "io" + "testing" + + "github.com/ethersphere/bee/v2/pkg/crypto" + "github.com/ethersphere/bee/v2/pkg/dynamicaccess" + "github.com/ethersphere/bee/v2/pkg/dynamicaccess/mock" + memkeystore "github.com/ethersphere/bee/v2/pkg/keystore/mem" +) + +func mockKeyFunc(*ecdsa.PublicKey, [][]byte) ([][]byte, error) { + return [][]byte{{1}}, nil +} + +func TestSessionNewDefaultSession(t *testing.T) { + pk, err := crypto.GenerateSecp256k1Key() + if err != nil { + t.Fatalf("Error generating private key: %v", err) + } + si := dynamicaccess.NewDefaultSession(pk) + if si == nil { + t.Fatal("Session instance is nil") + } +} + +func TestSessionNewFromKeystore(t *testing.T) { + ks := memkeystore.New() + si := mock.NewFromKeystore(ks, "tag", "password", mockKeyFunc) + if si == nil { + t.Fatal("Session instance is nil") + } +} + +func TestSessionKey(t *testing.T) { + t.Parallel() + + key1, err := crypto.GenerateSecp256k1Key() + if err != nil { + t.Fatal(err) + } + si1 := dynamicaccess.NewDefaultSession(key1) + + key2, err := crypto.GenerateSecp256k1Key() + if err != nil { + t.Fatal(err) + } + si2 := dynamicaccess.NewDefaultSession(key2) + + nonces := make([][]byte, 2) + for i := range nonces { + if _, err := io.ReadFull(rand.Reader, nonces[i]); err != nil { + t.Fatal(err) + } + } + + keys1, err := si1.Key(&key2.PublicKey, nonces) + if err != nil { + t.Fatal(err) + } + keys2, err := si2.Key(&key1.PublicKey, nonces) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(keys1[0], keys2[0]) { + t.Fatalf("shared secrets do not match %s, %s", hex.EncodeToString(keys1[0]), hex.EncodeToString(keys2[0])) + } + if !bytes.Equal(keys1[1], keys2[1]) { + t.Fatalf("shared secrets do not match %s, %s", hex.EncodeToString(keys1[0]), hex.EncodeToString(keys2[0])) + } +} + +func TestSessionKeyWithoutNonces(t *testing.T) { + t.Parallel() + + key1, err := crypto.GenerateSecp256k1Key() + if err != nil { + t.Fatal(err) + } + si1 := dynamicaccess.NewDefaultSession(key1) + + key2, err := crypto.GenerateSecp256k1Key() + if err != nil { + t.Fatal(err) + } + si2 := dynamicaccess.NewDefaultSession(key2) + + keys1, err := si1.Key(&key2.PublicKey, nil) + if err != nil { + t.Fatal(err) + } + keys2, err := si2.Key(&key1.PublicKey, nil) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(keys1[0], keys2[0]) { + t.Fatalf("shared secrets do not match %s, %s", hex.EncodeToString(keys1[0]), hex.EncodeToString(keys2[0])) + } +} + +func TestSessionKeyFromKeystore(t *testing.T) { + t.Parallel() + + ks := memkeystore.New() + tag1 := "tag1" + tag2 := "tag2" + password1 := "password1" + password2 := "password2" + + si1 := mock.NewFromKeystore(ks, tag1, password1, mockKeyFunc) + // si1 := dynamicaccess.NewFromKeystore(ks, tag1, password1) + exists, err := ks.Exists(tag1) + if err != nil { + t.Fatal(err) + } + if !exists { + t.Fatal("Key1 should exist") + } + key1, created, err := ks.Key(tag1, password1, crypto.EDGSecp256_K1) + if err != nil { + t.Fatal(err) + } + if created { + t.Fatal("Key1 should not be created") + } + + si2 := mock.NewFromKeystore(ks, tag2, password2, mockKeyFunc) + // si2 := dynamicaccess.NewFromKeystore(ks, tag2, password2) + exists, err = ks.Exists(tag2) + if err != nil { + t.Fatal(err) + } + if !exists { + t.Fatal("Key2 should exist") + } + key2, created, err := ks.Key(tag2, password2, crypto.EDGSecp256_K1) + if err != nil { + t.Fatal(err) + } + if created { + t.Fatal("Key2 should not be created") + } + + nonces := 
make([][]byte, 1) + for i := range nonces { + if _, err := io.ReadFull(rand.Reader, nonces[i]); err != nil { + t.Fatal(err) + } + } + + keys1, err := si1.Key(&key2.PublicKey, nonces) + if err != nil { + t.Fatal(err) + } + keys2, err := si2.Key(&key1.PublicKey, nonces) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(keys1[0], keys2[0]) { + t.Fatalf("shared secrets do not match %s, %s", hex.EncodeToString(keys1[0]), hex.EncodeToString(keys2[0])) + } + // if !bytes.Equal(keys1[1], keys2[1]) { + // t.Fatalf("shared secrets do not match %s, %s", hex.EncodeToString(keys1[0]), hex.EncodeToString(keys2[0])) + // } +} diff --git a/pkg/kvs/kvs.go b/pkg/kvs/kvs.go new file mode 100644 index 00000000000..0c7eae48c08 --- /dev/null +++ b/pkg/kvs/kvs.go @@ -0,0 +1,80 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package kvs + +import ( + "context" + "encoding/hex" + "errors" + + "github.com/ethersphere/bee/v2/pkg/file" + "github.com/ethersphere/bee/v2/pkg/manifest" + "github.com/ethersphere/bee/v2/pkg/swarm" +) + +type KeyValueStore interface { + Get(ctx context.Context, key []byte) ([]byte, error) + Put(ctx context.Context, key, value []byte) error + Save(ctx context.Context) (swarm.Address, error) +} + +type keyValueStore struct { + manifest manifest.Interface + putCnt int +} + +var _ KeyValueStore = (*keyValueStore)(nil) + +func (s *keyValueStore) Get(ctx context.Context, key []byte) ([]byte, error) { + entry, err := s.manifest.Lookup(ctx, hex.EncodeToString(key)) + if err != nil { + return nil, err + } + ref := entry.Reference() + return ref.Bytes(), nil +} + +func (s *keyValueStore) Put(ctx context.Context, key []byte, value []byte) error { + err := s.manifest.Add(ctx, hex.EncodeToString(key), manifest.NewEntry(swarm.NewAddress(value), map[string]string{})) + if err != nil { + return err + } + s.putCnt++ + return nil +} + +func (s *keyValueStore) Save(ctx context.Context) (swarm.Address, error) { + if s.putCnt == 0 { + return swarm.ZeroAddress, errors.New("nothing to save") + } + ref, err := s.manifest.Store(ctx) + if err != nil { + return swarm.ZeroAddress, err + } + s.putCnt = 0 + return ref, nil +} + +func New(ls file.LoadSaver) (KeyValueStore, error) { + m, err := manifest.NewSimpleManifest(ls) + if err != nil { + return nil, err + } + + return &keyValueStore{ + manifest: m, + }, nil +} + +func NewReference(ls file.LoadSaver, ref swarm.Address) (KeyValueStore, error) { + m, err := manifest.NewSimpleManifestReference(ref, ls) + if err != nil { + return nil, err + } + + return &keyValueStore{ + manifest: m, + }, nil +} diff --git a/pkg/kvs/kvs_test.go b/pkg/kvs/kvs_test.go new file mode 100644 index 00000000000..2f9af999a95 --- /dev/null +++ b/pkg/kvs/kvs_test.go @@ -0,0 +1,177 @@ +// Copyright 2024 The Swarm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
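// Illustrative sketch (not part of the changeset): the KeyValueStore contract
// from kvs.go above. Keys become hex-encoded manifest paths and values are
// wrapped in the entry reference, so Get returns exactly the bytes passed to
// Put, and Save fails with "nothing to save" until at least one Put happened.
// Assumes the kvs, file and bytes imports used elsewhere in this diff; the
// function name is hypothetical and errors are elided.
func kvsSketch(ctx context.Context, ls file.LoadSaver) {
	s, _ := kvs.New(ls)

	key, val := []byte{1}, []byte{11}
	_ = s.Put(ctx, key, val) // stored under the manifest path hex.EncodeToString(key)
	ref, _ := s.Save(ctx)

	// Reopen from the saved root and read the value back.
	s2, _ := kvs.NewReference(ls, ref)
	got, _ := s2.Get(ctx, key)
	_ = bytes.Equal(got, val) // true
}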
+
+package kvs_test
+
+import (
+    "context"
+    "testing"
+
+    "github.com/ethersphere/bee/v2/pkg/file"
+    "github.com/ethersphere/bee/v2/pkg/file/loadsave"
+    "github.com/ethersphere/bee/v2/pkg/file/pipeline"
+    "github.com/ethersphere/bee/v2/pkg/file/pipeline/builder"
+    "github.com/ethersphere/bee/v2/pkg/file/redundancy"
+    "github.com/ethersphere/bee/v2/pkg/kvs"
+    "github.com/ethersphere/bee/v2/pkg/storage"
+    mockstorer "github.com/ethersphere/bee/v2/pkg/storer/mock"
+    "github.com/ethersphere/bee/v2/pkg/swarm"
+    "github.com/stretchr/testify/assert"
+)
+
+var mockStorer = mockstorer.New()
+
+func requestPipelineFactory(ctx context.Context, s storage.Putter, encrypt bool, rLevel redundancy.Level) func() pipeline.Interface {
+    return func() pipeline.Interface {
+        return builder.NewPipelineBuilder(ctx, s, encrypt, rLevel)
+    }
+}
+
+func createLs() file.LoadSaver {
+    return loadsave.New(mockStorer.ChunkStore(), mockStorer.Cache(), requestPipelineFactory(context.Background(), mockStorer.Cache(), false, redundancy.NONE))
+}
+
+func keyValuePair(t *testing.T) ([]byte, []byte) {
+    t.Helper()
+    return swarm.RandAddress(t).Bytes(), swarm.RandAddress(t).Bytes()
+}
+
+func TestKvs(t *testing.T) {
+    t.Parallel()
+    s, err := kvs.New(createLs())
+    assert.NoError(t, err)
+
+    key, val := keyValuePair(t)
+    ctx := context.Background()
+
+    t.Run("Get non-existent key should return error", func(t *testing.T) {
+        _, err := s.Get(ctx, []byte{1})
+        assert.Error(t, err)
+    })
+
+    t.Run("Multiple Get with same key, no error", func(t *testing.T) {
+        err := s.Put(ctx, key, val)
+        assert.NoError(t, err)
+
+        // get #1
+        v, err := s.Get(ctx, key)
+        assert.NoError(t, err)
+        assert.Equal(t, val, v)
+        // get #2
+        v, err = s.Get(ctx, key)
+        assert.NoError(t, err)
+        assert.Equal(t, val, v)
+    })
+
+    t.Run("Get should return value equal to put value", func(t *testing.T) {
+        var (
+            key1 []byte = []byte{1}
+            key2 []byte = []byte{2}
+            key3 []byte = []byte{3}
+        )
+        testCases := []struct {
+            name string
+            key  []byte
+            val  []byte
+        }{
+            {
+                name: "Test key = 1",
+                key:  key1,
+                val:  []byte{11},
+            },
+            {
+                name: "Test key = 2",
+                key:  key2,
+                val:  []byte{22},
+            },
+            {
+                name: "Test overwrite key = 1",
+                key:  key1,
+                val:  []byte{111},
+            },
+            {
+                name: "Test key = 3",
+                key:  key3,
+                val:  []byte{33},
+            },
+            {
+                name: "Test key = 3 with same value",
+                key:  key3,
+                val:  []byte{33},
+            },
+            {
+                name: "Test key = 3 with value for key1",
+                key:  key3,
+                val:  []byte{11},
+            },
+        }
+
+        for _, tc := range testCases {
+            t.Run(tc.name, func(t *testing.T) {
+                err := s.Put(ctx, tc.key, tc.val)
+                assert.NoError(t, err)
+                retVal, err := s.Get(ctx, tc.key)
+                assert.NoError(t, err)
+                assert.Equal(t, tc.val, retVal)
+            })
+        }
+    })
+}
+
+func TestKvs_Save(t *testing.T) {
+    t.Parallel()
+    ctx := context.Background()
+
+    key1, val1 := keyValuePair(t)
+    key2, val2 := keyValuePair(t)
+    t.Run("Save empty KVS return error", func(t *testing.T) {
+        s, _ := kvs.New(createLs())
+        _, err := s.Save(ctx)
+        assert.Error(t, err)
+    })
+    t.Run("Save not empty KVS return valid swarm address", func(t *testing.T) {
+        s, _ := kvs.New(createLs())
+        _ = s.Put(ctx, key1, val1)
+        ref, err := s.Save(ctx)
+        assert.NoError(t, err)
+        assert.True(t, ref.IsValidNonEmpty())
+    })
+    t.Run("Save KVS with one item, no error, pre-save value exist", func(t *testing.T) {
+        ls := createLs()
+        s1, _ := kvs.New(ls)
+
+        err := s1.Put(ctx, key1, val1)
+        assert.NoError(t, err)
+
+        ref, err := s1.Save(ctx)
+        assert.NoError(t, err)
+
+        s2, err := kvs.NewReference(ls, ref)
+        assert.NoError(t, err)
+
+        val, err := s2.Get(ctx, key1)
+        assert.NoError(t, err)
+        assert.Equal(t, val1, val)
+    })
+    t.Run("Save KVS and add one item, no error, after-save value exist", func(t *testing.T) {
+        ls := createLs()
+
+        kvs1, _ := kvs.New(ls)
+
+        err := kvs1.Put(ctx, key1, val1)
+        assert.NoError(t, err)
+        ref, err := kvs1.Save(ctx)
+        assert.NoError(t, err)
+
+        // New KVS
+        kvs2, err := kvs.NewReference(ls, ref)
+        assert.NoError(t, err)
+        err = kvs2.Put(ctx, key2, val2)
+        assert.NoError(t, err)
+
+        val, err := kvs2.Get(ctx, key2)
+        assert.NoError(t, err)
+        assert.Equal(t, val2, val)
+    })
+}
diff --git a/pkg/kvs/mock/kvs.go b/pkg/kvs/mock/kvs.go
new file mode 100644
index 00000000000..f5818972004
--- /dev/null
+++ b/pkg/kvs/mock/kvs.go
@@ -0,0 +1,78 @@
+// Copyright 2024 The Swarm Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mock
+
+import (
+    "context"
+    "encoding/hex"
+    "sync"
+
+    "github.com/ethersphere/bee/v2/pkg/kvs"
+    "github.com/ethersphere/bee/v2/pkg/swarm"
+)
+
+var lock = &sync.Mutex{}
+
+type single struct {
+    // TODO string -> []byte ?
+    memoryMock map[string]map[string][]byte
+}
+
+var singleInMemorySwarm *single
+
+func getInMemorySwarm() *single {
+    if singleInMemorySwarm == nil {
+        lock.Lock()
+        defer lock.Unlock()
+        if singleInMemorySwarm == nil {
+            singleInMemorySwarm = &single{
+                memoryMock: make(map[string]map[string][]byte),
+            }
+        }
+    }
+    return singleInMemorySwarm
+}
+
+func getMemory() map[string]map[string][]byte {
+    ch := make(chan *single)
+    go func() {
+        ch <- getInMemorySwarm()
+    }()
+    mem := <-ch
+    return mem.memoryMock
+}
+
+type mockKeyValueStore struct {
+    address swarm.Address
+}
+
+var _ kvs.KeyValueStore = (*mockKeyValueStore)(nil)
+
+func (m *mockKeyValueStore) Get(_ context.Context, key []byte) ([]byte, error) {
+    mem := getMemory()
+    val := mem[m.address.String()][hex.EncodeToString(key)]
+    return val, nil
+}
+
+func (m *mockKeyValueStore) Put(_ context.Context, key []byte, value []byte) error {
+    mem := getMemory()
+    if _, ok := mem[m.address.String()]; !ok {
+        mem[m.address.String()] = make(map[string][]byte)
+    }
+    mem[m.address.String()][hex.EncodeToString(key)] = value
+    return nil
+}
+
+func (m *mockKeyValueStore) Save(ctx context.Context) (swarm.Address, error) {
+    return m.address, nil
+}
+
+func New() kvs.KeyValueStore {
+    return &mockKeyValueStore{address: swarm.EmptyAddress}
+}
+
+func NewReference(address swarm.Address) kvs.KeyValueStore {
+    return &mockKeyValueStore{address: address}
+}
diff --git a/pkg/manifest/mantaray.go b/pkg/manifest/mantaray.go
index f3b86e06b66..009e3eab055 100644
--- a/pkg/manifest/mantaray.go
+++ b/pkg/manifest/mantaray.go
@@ -20,7 +20,7 @@ const (
     ManifestMantarayContentType = "application/bzz-manifest-mantaray+octet-stream"
 )
 
-type mantarayManifest struct {
+type MantarayManifest struct {
     trie *mantaray.Node
 
     ls file.LoadSaver
@@ -31,7 +31,7 @@ func NewMantarayManifest(
     ls file.LoadSaver,
     encrypted bool,
 ) (Interface, error) {
-    mm := &mantarayManifest{
+    mm := &MantarayManifest{
         trie: mantaray.New(),
         ls:   ls,
     }
@@ -48,24 +48,28 @@ func NewMantarayManifestReference(
     reference swarm.Address,
     ls file.LoadSaver,
 ) (Interface, error) {
-    return &mantarayManifest{
+    return &MantarayManifest{
         trie: mantaray.NewNodeRef(reference.Bytes()),
         ls:   ls,
     }, nil
 }
 
-func (m *mantarayManifest) Type() string {
+func (m *MantarayManifest) Root() *mantaray.Node {
+    return m.trie
+}
+
+func (m *MantarayManifest) Type() string {
     return ManifestMantarayContentType
 }
 
-func (m *mantarayManifest) Add(ctx context.Context, path string, entry Entry) error {
+func (m *MantarayManifest) Add(ctx context.Context, path string, entry Entry) error {
     p := []byte(path)
     e := entry.Reference().Bytes()
 
     return m.trie.Add(ctx, p, e, entry.Metadata(), m.ls)
 }
 
-func (m *mantarayManifest) Remove(ctx context.Context, path string) error {
+func (m *MantarayManifest) Remove(ctx context.Context, path string) error {
     p := []byte(path)
 
     err := m.trie.Remove(ctx, p, m.ls)
@@ -79,7 +83,7 @@ func (m *mantarayManifest) Remove(ctx context.Context, path string) error {
     return nil
 }
 
-func (m *mantarayManifest) Lookup(ctx context.Context, path string) (Entry, error) {
+func (m *MantarayManifest) Lookup(ctx context.Context, path string) (Entry, error) {
     p := []byte(path)
 
     node, err := m.trie.LookupNode(ctx, p, m.ls)
@@ -100,13 +104,13 @@ func (m *mantarayManifest) Lookup(ctx context.Context, path string) (Entry, erro
     return entry, nil
 }
 
-func (m *mantarayManifest) HasPrefix(ctx context.Context, prefix string) (bool, error) {
+func (m *MantarayManifest) HasPrefix(ctx context.Context, prefix string) (bool, error) {
     p := []byte(prefix)
     return m.trie.HasPrefix(ctx, p, m.ls)
 }
 
-func (m *mantarayManifest) Store(ctx context.Context, storeSizeFn ...StoreSizeFunc) (swarm.Address, error) {
+func (m *MantarayManifest) Store(ctx context.Context, storeSizeFn ...StoreSizeFunc) (swarm.Address, error) {
     var ls mantaray.LoadSaver
 
     if len(storeSizeFn) > 0 {
         ls = &mantarayLoadSaver{
@@ -127,7 +131,7 @@ func (m *mantarayManifest) Store(ctx context.Context, storeSizeFn ...StoreSizeFu
     return address, nil
 }
 
-func (m *mantarayManifest) IterateAddresses(ctx context.Context, fn swarm.AddressIterFunc) error {
+func (m *MantarayManifest) IterateAddresses(ctx context.Context, fn swarm.AddressIterFunc) error {
     reference := swarm.NewAddress(m.trie.Reference())
 
     if swarm.ZeroAddress.Equal(reference) {
diff --git a/pkg/manifest/mantaray/node.go b/pkg/manifest/mantaray/node.go
index 5a59c010d52..9561c299db5 100644
--- a/pkg/manifest/mantaray/node.go
+++ b/pkg/manifest/mantaray/node.go
@@ -16,13 +16,8 @@ const (
 )
 
 var (
-    ZeroObfuscationKey []byte
-)
-
-// nolint:gochecknoinits
-func init() {
     ZeroObfuscationKey = make([]byte, 32)
-}
+)
 
 // Error used when lookup path does not match
 var (
diff --git a/pkg/node/devnode.go b/pkg/node/devnode.go
index 1b090654295..389dc2337a5 100644
--- a/pkg/node/devnode.go
+++ b/pkg/node/devnode.go
@@ -22,6 +22,7 @@ import (
     "github.com/ethersphere/bee/v2/pkg/auth"
     "github.com/ethersphere/bee/v2/pkg/bzz"
     "github.com/ethersphere/bee/v2/pkg/crypto"
+    "github.com/ethersphere/bee/v2/pkg/dynamicaccess"
     "github.com/ethersphere/bee/v2/pkg/feeds/factory"
     "github.com/ethersphere/bee/v2/pkg/log"
     mockP2P "github.com/ethersphere/bee/v2/pkg/p2p/mock"
@@ -66,6 +67,7 @@ type DevBee struct {
     localstoreCloser io.Closer
     apiCloser        io.Closer
     pssCloser        io.Closer
+    dacCloser        io.Closer
     errorLogWriter   io.Writer
     apiServer        *http.Server
     debugAPIServer   *http.Server
@@ -234,6 +236,11 @@ func NewDevBee(logger log.Logger, o *DevOptions) (b *DevBee, err error) {
     }
     b.localstoreCloser = localStore
 
+    session := dynamicaccess.NewDefaultSession(mockKey)
+    actLogic := dynamicaccess.NewLogic(session)
+    dac := dynamicaccess.NewController(actLogic)
+    b.dacCloser = dac
+
     pssService := pss.New(mockKey, logger)
     b.pssCloser = pssService
 
@@ -383,6 +390,7 @@ func NewDevBee(logger log.Logger, o *DevOptions) (b *DevBee, err error) {
         Pss:             pssService,
         FeedFactory:     mockFeeds,
         Post:            post,
+        Dac:             dac,
         PostageContract: postageContract,
         Staking:         mockStaking,
         Steward:         mockSteward,
@@ -491,6 +499,7 @@ func (b *DevBee) Shutdown() error {
     }
 
     tryClose(b.pssCloser, "pss")
+    tryClose(b.dacCloser, "dac")
     tryClose(b.tracerCloser, "tracer")
     tryClose(b.stateStoreCloser, "statestore")
     tryClose(b.localstoreCloser, "localstore")
diff --git a/pkg/node/node.go b/pkg/node/node.go
index fa3f3f861b5..4ab881bc8ab 100644
--- a/pkg/node/node.go
+++ b/pkg/node/node.go
@@ -30,6 +30,7 @@ import (
     "github.com/ethersphere/bee/v2/pkg/auth"
     "github.com/ethersphere/bee/v2/pkg/config"
     "github.com/ethersphere/bee/v2/pkg/crypto"
+    "github.com/ethersphere/bee/v2/pkg/dynamicaccess"
     "github.com/ethersphere/bee/v2/pkg/feeds/factory"
     "github.com/ethersphere/bee/v2/pkg/hive"
     "github.com/ethersphere/bee/v2/pkg/log"
@@ -117,6 +118,7 @@ type Bee struct {
     shutdownInProgress     bool
     shutdownMutex          sync.Mutex
     syncingStopped         *syncutil.Signaler
+    dacCloser              io.Closer
 }
 
 type Options struct {
@@ -202,6 +204,7 @@ func NewBee(
     logger log.Logger,
     libp2pPrivateKey,
     pssPrivateKey *ecdsa.PrivateKey,
+    session dynamicaccess.Session,
     o *Options,
 ) (b *Bee, err error) {
     tracer, tracerCloser, err := tracing.NewTracer(&tracing.Options{
@@ -778,6 +781,10 @@ func NewBee(
     b.localstoreCloser = localStore
     evictFn = func(id []byte) error { return localStore.EvictBatch(context.Background(), id) }
 
+    actLogic := dynamicaccess.NewLogic(session)
+    dac := dynamicaccess.NewController(actLogic)
+    b.dacCloser = dac
+
     var (
         syncErr    atomic.Value
         syncStatus atomic.Value
@@ -1093,6 +1100,7 @@ func NewBee(
         Pss:             pssService,
         FeedFactory:     feedFactory,
         Post:            post,
+        Dac:             dac,
         PostageContract: postageStampContractService,
         Staking:         stakingContract,
         Steward:         steward,
@@ -1355,6 +1363,7 @@ func (b *Bee) Shutdown() error {
         c()
     }
 
+    tryClose(b.dacCloser, "dac")
     tryClose(b.tracerCloser, "tracer")
     tryClose(b.topologyCloser, "topology driver")
     tryClose(b.storageIncetivesCloser, "storage incentives agent")
diff --git a/pkg/soc/testing/soc.go b/pkg/soc/testing/soc.go
index e5325f464b1..9516a721d83 100644
--- a/pkg/soc/testing/soc.go
+++ b/pkg/soc/testing/soc.go
@@ -5,6 +5,7 @@
 package testing
 
 import (
+    "crypto/ecdsa"
     "testing"
 
     "github.com/ethersphere/bee/v2/pkg/cac"
@@ -70,3 +71,38 @@ func GenerateMockSOC(t *testing.T, data []byte) *MockSOC {
         WrappedChunk: ch,
     }
 }
+
+// GenerateMockSOCWithKey generates a valid mocked SOC from given data and key.
+func GenerateMockSOCWithKey(t *testing.T, data []byte, privKey *ecdsa.PrivateKey) *MockSOC {
+    t.Helper()
+
+    signer := crypto.NewDefaultSigner(privKey)
+    owner, err := signer.EthereumAddress()
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    ch, err := cac.New(data)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    id := make([]byte, swarm.HashSize)
+    hasher := swarm.NewHasher()
+    _, err = hasher.Write(append(id, ch.Address().Bytes()...))
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    signature, err := signer.Sign(hasher.Sum(nil))
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    return &MockSOC{
+        ID:           id,
+        Owner:        owner.Bytes(),
+        Signature:    signature,
+        WrappedChunk: ch,
+    }
+}
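
For orientation, a minimal usage sketch (not part of the diff) of the kvs package introduced above, following the calls exercised by kvs_test.go: kvs.New over a LoadSaver, Put, Save to obtain a swarm reference, and kvs.NewReference plus Get to reopen the store. The LoadSaver wiring mirrors the createLs test helper; the mock storer and the literal key/value are illustrative only.

```go
package main

import (
	"context"
	"fmt"

	"github.com/ethersphere/bee/v2/pkg/file/loadsave"
	"github.com/ethersphere/bee/v2/pkg/file/pipeline"
	"github.com/ethersphere/bee/v2/pkg/file/pipeline/builder"
	"github.com/ethersphere/bee/v2/pkg/file/redundancy"
	"github.com/ethersphere/bee/v2/pkg/kvs"
	mockstorer "github.com/ethersphere/bee/v2/pkg/storer/mock"
)

func main() {
	ctx := context.Background()
	storer := mockstorer.New()

	// Same LoadSaver wiring as createLs() in kvs_test.go.
	ls := loadsave.New(storer.ChunkStore(), storer.Cache(), func() pipeline.Interface {
		return builder.NewPipelineBuilder(ctx, storer.Cache(), false, redundancy.NONE)
	})

	// New builds an empty store; Put stages a key/value pair.
	s, err := kvs.New(ls)
	if err != nil {
		panic(err)
	}
	if err := s.Put(ctx, []byte("key"), []byte("value")); err != nil {
		panic(err)
	}

	// Save persists the store and returns a swarm reference;
	// NewReference reopens the same store from that reference.
	ref, err := s.Save(ctx)
	if err != nil {
		panic(err)
	}
	reopened, err := kvs.NewReference(ls, ref)
	if err != nil {
		panic(err)
	}

	val, err := reopened.Get(ctx, []byte("key"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("key -> %s (saved at %s)\n", val, ref)
}
```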
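A second sketch, also not part of the diff, isolates the session-to-logic-to-controller chain that the devnode.go and node.go hunks now perform during node construction. The dynamicaccess constructors and the fact that the controller is closed at shutdown come from the hunks above; generating a throwaway key with crypto.GenerateSecp256k1Key and running this in a standalone main are assumptions made purely for illustration.

```go
package main

import (
	"log"

	"github.com/ethersphere/bee/v2/pkg/crypto"
	"github.com/ethersphere/bee/v2/pkg/dynamicaccess"
)

func main() {
	// In the node the key comes from the keystore; here we simply generate
	// one so the sketch is self-contained.
	privKey, err := crypto.GenerateSecp256k1Key()
	if err != nil {
		log.Fatal(err)
	}

	// One Session per node identity, derived from the node's private key,
	// mirroring the wiring in NewDevBee and NewBee.
	session := dynamicaccess.NewDefaultSession(privKey)

	// The ACT logic sits on the session and the controller on the logic;
	// the controller is what the API layer receives as Dac and what
	// Shutdown later closes via dacCloser.
	actLogic := dynamicaccess.NewLogic(session)
	dac := dynamicaccess.NewController(actLogic)
	defer dac.Close()

	log.Printf("dynamic access controller ready: %T", dac)
}
```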
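Finally, a hypothetical test (not part of the diff) showing how the new GenerateMockSOCWithKey helper can be used to pin the SOC owner to a caller-supplied key. Only the helper itself, the MockSOC fields, and the signer calls are taken from the pkg/soc/testing hunk; the package name, key generation, and assertion are illustrative.

```go
package example_test

import (
	"bytes"
	"testing"

	"github.com/ethersphere/bee/v2/pkg/crypto"
	soctesting "github.com/ethersphere/bee/v2/pkg/soc/testing"
)

func TestMockSOCOwnerMatchesKey(t *testing.T) {
	privKey, err := crypto.GenerateSecp256k1Key()
	if err != nil {
		t.Fatal(err)
	}

	// The helper derives the SOC identifier and signature from the supplied
	// key rather than a package-internal one.
	s := soctesting.GenerateMockSOCWithKey(t, []byte("payload"), privKey)

	// The SOC owner should be the Ethereum address of the supplied key.
	owner, err := crypto.NewDefaultSigner(privKey).EthereumAddress()
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(s.Owner, owner.Bytes()) {
		t.Fatalf("unexpected owner: got %x, want %x", s.Owner, owner.Bytes())
	}
}
```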