diff --git a/Makefile b/Makefile index b3e039ca8..06a439867 100644 --- a/Makefile +++ b/Makefile @@ -752,13 +752,13 @@ acc_balance_query: ## Query the balance of the account specified (make acc_balan .PHONY: acc_balance_query_modules acc_balance_query_modules: ## Query the balance of the network level module accounts - @echo "### Application ###" + @echo "### Application Module ###\n" make acc_balance_query ACC=$(APPLICATION_MODULE_ADDRESS) - @echo "### Supplier ###" + @echo "### Supplier Module ###\n" make acc_balance_query ACC=$(SUPPLIER_MODULE_ADDRESS) - @echo "### Gateway ###" + @echo "### Gateway Module ###\n" make acc_balance_query ACC=$(GATEWAY_MODULE_ADDRESS) - @echo "### Service ###" + @echo "### Service Module ###\n" make acc_balance_query ACC=$(SERVICE_MODULE_ADDRESS) .PHONY: acc_balance_query_app1 diff --git a/api/poktroll/shared/service.pulsar.go b/api/poktroll/shared/service.pulsar.go index 67d0bb8a0..6c54446e8 100644 --- a/api/poktroll/shared/service.pulsar.go +++ b/api/poktroll/shared/service.pulsar.go @@ -2819,7 +2819,7 @@ type Service struct { // For example, what if we want to request a session for a certain service but with some additional configs that identify it? Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // Unique identifier for the service - // TODO_BETA: Name is currently unused but acts as a reminder that an optional onchain representation of the service is necessary + // TODO_MAINNET: Remove this. Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // (Optional) Semantic human readable name for the service // The cost of a single relay for this service in terms of compute units. // Must be used alongside the global 'compute_units_to_tokens_multipler' to calculate the cost of a relay for this service. 
@@ -2886,6 +2886,7 @@ type ApplicationServiceConfig struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // TODO_MAINNET: Avoid embedding the full Service because we just need the ID. Service *Service `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` // The Service for which the application is configured } @@ -2922,6 +2923,7 @@ type SupplierServiceConfig struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // TODO_MAINNET: Avoid embedding the full Service because we just need the ID. Service *Service `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` // The Service for which the supplier is configured Endpoints []*SupplierEndpoint `protobuf:"bytes,2,rep,name=endpoints,proto3" json:"endpoints,omitempty"` // List of endpoints for the service } diff --git a/app/app.go b/app/app.go index d9d527e65..24d38ef77 100644 --- a/app/app.go +++ b/app/app.go @@ -164,7 +164,7 @@ func New( // // STAKING // - // For provinding a different validator and consensus address codec, add it below. + // For providing a different validator and consensus address codec, add it below. // By default the staking module uses the bech32 prefix provided in the auth config, // and appends "valoper" and "valcons" for validator and consensus addresses respectively. // When providing a custom address codec in auth, custom address codecs must be provided here as well. 
diff --git a/config.yml b/config.yml index 43f98d0e1..8f8e4429f 100644 --- a/config.yml +++ b/config.yml @@ -51,6 +51,14 @@ accounts: mnemonic: "elder spatial erosion soap athlete tide subject recipe also awkward head pattern cart version beach usual oxygen confirm erupt diamond maze smooth census garment" coins: - 300000000upokt + - name: source_owner_anvil + mnemonic: "burden effort glue note honey erupt fiscal vote gold addict toy flag spare wrap chest table bomb sort arena phone sadness sustain urge wink" + coins: + - 6900000000000upokt + - name: source_owner_ollama + mnemonic: "initial scorpion soccer decrease sorry convince donor canoe bid pill monster today cycle slot judge bulb dismiss reject hurt mesh glare fork sustain wash" + coins: + - 6900000000000upokt - name: unauthorized mnemonic: "abuse tumble whip pioneer immense pipe method note upon glory switch rail metal camp gasp top require rain party total struggle glance between fossil" coins: @@ -87,9 +95,21 @@ validators: # We can persist arbitrary genesis values via 1 to 1 mapping to genesis.json genesis: app_state: + # https://docs.cosmos.network/main/build/modules/mint mint: params: mint_denom: upokt + # Note that in Pocket Network, the majority of the inflation/deflation + # comes from the utility of network, not just the validators that + # secure it. Therefore, the inflation params of x/mint are set to 0. + # See x/tokenomics for all details related to token inflation. 
+ inflation_rate_change: "0.0" + inflation_max: "0.0" + inflation_min: "0.0" + # These parameters are included for posterity but commented out for clarity + # goal_bonded: "NA" + # blocks_per_year: "NA" + # max_supply: "NA" staking: params: bond_denom: upokt @@ -140,12 +160,8 @@ genesis: service_configs: - service: id: anvil - name: "" - compute_units_per_relay: 1 - service: id: ollama - name: "" - compute_units_per_relay: 1 stake: # NB: This value should be exactly 1upokt smaller than the value in # `supplier1_stake_config.yaml` so that the stake command causes a state change. @@ -155,22 +171,18 @@ genesis: supplierList: - address: pokt19a3t4yunp0dlpfjrp7qwnzwlrzd5fzs2gjaaaj services: - - endpoints: + - service: + id: anvil + endpoints: - configs: [] rpc_type: JSON_RPC url: http://relayminer1:8545 - service: - compute_units_per_relay: 1 - id: anvil - name: "" - - endpoints: + - service: + id: ollama + endpoints: - configs: [] rpc_type: REST url: http://relayminer1:8545 - service: - compute_units_per_relay: 1 - id: ollama - name: "" stake: # NB: This value should be exactly 1upokt smaller than the value in # `application1_stake_config.yaml` so that the stake command causes a state change. 
@@ -189,7 +201,13 @@ genesis: add_service_fee: "1000000000" serviceList: - id: anvil - name: "" + name: "anvil" + compute_units_per_relay: 1 + owner_address: pokt1cwnu460557x0z78jv3hhc7356hhkrgc86c87q5 + - id: ollama + name: "ollama" + compute_units_per_relay: 1 + owner_address: pokt1mx0klkkrj6v3dw8gs4nzlq0cq8lsktmx35t03e proof: params: proof_request_probability: "0.25" diff --git a/e2e/tests/init_test.go b/e2e/tests/init_test.go index 201e666c3..841d65b8e 100644 --- a/e2e/tests/init_test.go +++ b/e2e/tests/init_test.go @@ -36,6 +36,7 @@ import ( "github.com/pokt-network/poktroll/testutil/yaml" apptypes "github.com/pokt-network/poktroll/x/application/types" prooftypes "github.com/pokt-network/poktroll/x/proof/types" + servicetypes "github.com/pokt-network/poktroll/x/service/types" sessiontypes "github.com/pokt-network/poktroll/x/session/types" shared "github.com/pokt-network/poktroll/x/shared" sharedtypes "github.com/pokt-network/poktroll/x/shared/types" @@ -359,15 +360,20 @@ func (s *suite) TheServiceRegisteredForApplicationHasAComputeUnitsPerRelayOf(ser app, ok := accNameToAppMap[appName] require.True(s, ok, "application %s not found", appName) + // Check if the application is registered for the service + isRegistered := false for _, serviceConfig := range app.ServiceConfigs { if serviceConfig.Service.Id == serviceId { - cupr, err := strconv.ParseUint(cuprStr, 10, 64) - require.NoError(s, err) - require.Equal(s, cupr, serviceConfig.Service.ComputeUnitsPerRelay) - return + isRegistered = true + break } } - s.Fatalf("ERROR: service %s is not registered for application %s", serviceId, appName) + require.True(s, isRegistered, "application %s is not registered for service %s", appName, serviceId) + + cuprActual := s.getServiceComputeUnitsPerRelay(serviceId) + cuprExpected, err := strconv.ParseUint(cuprStr, 10, 64) + require.NoError(s, err) + require.Equal(s, cuprExpected, cuprActual, "compute units per relay for service %s is not %d", serviceId, cuprExpected) } func 
(s *suite) TheUserVerifiesTheForAccountIsNotStaked(actorType, accName string) { @@ -682,6 +688,25 @@ func (s *suite) getSupplierUnbondingHeight(accName string) int64 { return unbondingHeight } +// getServiceComputeUnitsPerRelay returns the compute units per relay for a given service ID +func (s *suite) getServiceComputeUnitsPerRelay(serviceId string) uint64 { + args := []string{ + "query", + "service", + "show-service", + serviceId, + "--output=json", + } + + res, err := s.pocketd.RunCommandOnHostWithRetry("", numQueryRetries, args...) + require.NoError(s, err, "error getting service %s", serviceId) + + var resp servicetypes.QueryGetServiceResponse + responseBz := []byte(strings.TrimSpace(res.Stdout)) + s.cdc.MustUnmarshalJSON(responseBz, &resp) + return resp.Service.ComputeUnitsPerRelay +} + // accBalanceKey is a helper function to create a key to store the balance // for accName in the context of a scenario state. func accBalanceKey(accName string) string { diff --git a/e2e/tests/relay.feature b/e2e/tests/relay.feature index 78584d474..e93081ef7 100644 --- a/e2e/tests/relay.feature +++ b/e2e/tests/relay.feature @@ -8,13 +8,14 @@ Feature: Relay Namespace And the session for application "app1" and service "anvil" contains the supplier "supplier1" Then the application "app1" sends the supplier "supplier1" a successful request for service "anvil" with path "" and data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' - Scenario: App can send a REST relay to Supplier - Given the user has the pocketd binary installed - And the application "app1" is staked for service "ollama" - And the supplier "supplier1" is staked for service "ollama" - And the session for application "app1" and service "ollama" contains the supplier "supplier1" - When the application "app1" sends the supplier "supplier1" a successful request for service "ollama" with path "/api/chat" and data '{"model": "qwen:0.5b", "stream": false, "messages": [{"role": "user", "content":"count from 
1 to 10"}]}' - And a "tokenomics" module "ClaimSettled" end block event is broadcast + # TODO(#727): Add this test back. + # Scenario: App can send a REST relay to Supplier + # Given the user has the pocketd binary installed + # And the application "app1" is staked for service "ollama" + # And the supplier "supplier1" is staked for service "ollama" + # And the session for application "app1" and service "ollama" contains the supplier "supplier1" + # When the application "app1" sends the supplier "supplier1" a successful request for service "ollama" with path "/api/chat" and data '{"model": "qwen:0.5b", "stream": false, "messages": [{"role": "user", "content":"count from 1 to 10"}]}' + # And a "tokenomics" module "ClaimSettled" end block event is broadcast # TODO_TEST(@Olshansk): # - Successful relay through applicat's sovereign appgate server diff --git a/go.mod b/go.mod index 3b5943e3c..69ce37df6 100644 --- a/go.mod +++ b/go.mod @@ -125,6 +125,7 @@ require ( github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/cometbft/cometbft-db v0.9.1 // indirect + github.com/containerd/continuity v0.4.2 // indirect github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect github.com/cosmos/btcutil v1.0.5 // indirect github.com/cosmos/go-bip39 v1.0.0 // indirect @@ -263,7 +264,7 @@ require ( github.com/vbatts/tar-split v0.11.5 // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-go v0.14.3 // indirect - go.etcd.io/bbolt v1.3.8 // indirect + go.etcd.io/bbolt v1.3.10 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect diff --git a/go.sum b/go.sum index 08f9cae0a..fa3214946 100644 --- a/go.sum +++ b/go.sum @@ -364,8 +364,8 @@ github.com/cometbft/cometbft v0.38.7 
h1:ULhIOJ9+LgSy6nLekhq9ae3juX3NnQUMMPyVdhZV github.com/cometbft/cometbft v0.38.7/go.mod h1:HIyf811dFMI73IE0F7RrnY/Fr+d1+HuJAgtkEpQjCMY= github.com/cometbft/cometbft-db v0.9.1 h1:MIhVX5ja5bXNHF8EYrThkG9F7r9kSfv8BX4LWaxWJ4M= github.com/cometbft/cometbft-db v0.9.1/go.mod h1:iliyWaoV0mRwBJoizElCwwRA9Tf7jZJOURcRZF9m60U= -github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= -github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= +github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= +github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU= @@ -1160,8 +1160,8 @@ github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWp github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= -go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= +go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= diff --git a/localnet/grafana-dashboards/claim_proof_logs.json 
b/localnet/grafana-dashboards/claim_proof_logs.json index c7e962bdd..5a8ba0eb0 100644 --- a/localnet/grafana-dashboards/claim_proof_logs.json +++ b/localnet/grafana-dashboards/claim_proof_logs.json @@ -124,7 +124,7 @@ "uid": "P8E80F9AEF21F6940" }, "editorMode": "builder", - "expr": "{container=\"poktrolld-validator\"} | json | method = `SettleSessionAccounting`", + "expr": "{container=\"poktrolld-validator\"} | json | method = `ProcessTokenLogicModules`", "queryType": "range", "refId": "Claim Settlement" } diff --git a/pkg/crypto/protocol/hasher.go b/pkg/crypto/protocol/hasher.go index e5f008c1a..918c576bf 100644 --- a/pkg/crypto/protocol/hasher.go +++ b/pkg/crypto/protocol/hasher.go @@ -6,7 +6,9 @@ const ( RelayHasherSize = sha256.Size TrieHasherSize = sha256.Size TrieRootSize = TrieHasherSize + trieRootMetadataSize + TrieRootSumSize = 8 // TODO_CONSIDERATION: Export this from the SMT package. trieRootMetadataSize = 16 // TODO_CONSIDERATION: Export this from the SMT package. + ) var ( diff --git a/pkg/relayer/miner/miner.go b/pkg/relayer/miner/miner.go index c5f7a5605..96dcffa50 100644 --- a/pkg/relayer/miner/miner.go +++ b/pkg/relayer/miner/miner.go @@ -27,9 +27,10 @@ type miner struct { // relay_difficulty is the target hash which a relay hash must be less than to be volume/reward applicable. // - // TODO_MAINNET(#543): This is populated by querying the corresponding on-chain parameter during construction. - // If this parameter is updated on-chain the relayminer will need to be restarted to query the new value. - // TODO_FOLLOWUP(@olshansk, #690): This needs to be maintained (and updated) on a per service level. + // TODO_BETA(#705): This is populated by querying the corresponding on-chain parameter during construction. + // If this parameter is updated on-chain the relayminer will need to be restarted to query the new value. + // TODO_BETA(#705): This needs to be maintained (and updated) on a per service level. 
+ // Make sure to update the `smst.Update` call in `relayer/session` alongside it. relayDifficultyTargetHash []byte } diff --git a/pkg/relayer/session/session.go b/pkg/relayer/session/session.go index a38c947a6..961e772b7 100644 --- a/pkg/relayer/session/session.go +++ b/pkg/relayer/session/session.go @@ -419,6 +419,8 @@ func (rs *relayerSessionsManager) mapAddMinedRelayToSessionTree( With("application", smst.GetSessionHeader().GetApplicationAddress()). With("supplier_address", smst.GetSupplierAddress().String()) + // TODO_BETA(#705): Make sure to update the weight of each relay to the value + // associated with `relayDifficultyTargetHash` in the `miner/miner.go`. if err := smst.Update(relay.Hash, relay.Bytes, 1); err != nil { // TODO_IMPROVE: log additional info? logger.Error().Err(err).Msg("failed to update smt") diff --git a/proto/poktroll/shared/service.proto b/proto/poktroll/shared/service.proto index eda3d59d3..cd07c6086 100644 --- a/proto/poktroll/shared/service.proto +++ b/proto/poktroll/shared/service.proto @@ -13,7 +13,7 @@ message Service { // For example, what if we want to request a session for a certain service but with some additional configs that identify it? string id = 1; // Unique identifier for the service - // TODO_BETA: Name is currently unused but acts as a reminder that an optional onchain representation of the service is necessary + // TODO_MAINNET: Remove this. string name = 2; // (Optional) Semantic human readable name for the service // The cost of a single relay for this service in terms of compute units. @@ -30,6 +30,7 @@ message Service { // ApplicationServiceConfig holds the service configuration the application stakes for message ApplicationServiceConfig { + // TODO_MAINNET: Avoid embedding the full Service because we just need the ID. 
Service service = 1; // The Service for which the application is configured // TODO_MAINNET: There is an opportunity for applications to advertise the max @@ -39,6 +40,7 @@ message ApplicationServiceConfig { // SupplierServiceConfig holds the service configuration the supplier stakes for message SupplierServiceConfig { + // TODO_MAINNET: Avoid embedding the full Service because we just need the ID. Service service = 1; // The Service for which the supplier is configured repeated SupplierEndpoint endpoints = 2; // List of endpoints for the service // TODO_MAINNET: There is an opportunity for supplier to advertise the min diff --git a/tests/integration/tokenomics/relay_mining_difficulty_test.go b/tests/integration/tokenomics/relay_mining_difficulty_test.go index 3bc9a82c5..7ba982537 100644 --- a/tests/integration/tokenomics/relay_mining_difficulty_test.go +++ b/tests/integration/tokenomics/relay_mining_difficulty_test.go @@ -30,6 +30,7 @@ func TestUpdateRelayMiningDifficulty_NewServiceSeenForTheFirstTime(t *testing.T) // Create a new integration app integrationApp := integration.NewCompleteIntegrationApp(t) + sdkCtx := integrationApp.GetSdkCtx() // Move forward a few blocks to move away from the genesis block integrationApp.NextBlocks(t, 3) @@ -39,7 +40,7 @@ func TestUpdateRelayMiningDifficulty_NewServiceSeenForTheFirstTime(t *testing.T) sharedParams := getSharedParams(t, integrationApp) // Prepare the trie with a single mined relay - trie := prepareSMST(t, integrationApp.GetSdkCtx(), integrationApp, session) + trie := prepareSMST(t, sdkCtx, integrationApp, session) // Compute the number of blocks to wait between different events // TODO_BLOCKER(@bryanchriswhite): See this comment: https://github.com/pokt-network/poktroll/pull/610#discussion_r1645777322 @@ -59,7 +60,7 @@ func TestUpdateRelayMiningDifficulty_NewServiceSeenForTheFirstTime(t *testing.T) proofWindowCloseHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight) // Wait until the earliest 
claim commit height. - currentBlockHeight := integrationApp.GetSdkCtx().BlockHeight() + currentBlockHeight := sdkCtx.BlockHeight() numBlocksUntilClaimWindowOpenHeight := earliestSupplierClaimCommitHeight - currentBlockHeight require.Greater(t, numBlocksUntilClaimWindowOpenHeight, int64(0), "unexpected non-positive number of blocks until the earliest claim commit height") integrationApp.NextBlocks(t, int(numBlocksUntilClaimWindowOpenHeight)) @@ -78,7 +79,7 @@ func TestUpdateRelayMiningDifficulty_NewServiceSeenForTheFirstTime(t *testing.T) require.NotNil(t, result, "unexpected nil result when submitting a MsgCreateClaim tx") // Wait until the proof window is open - currentBlockHeight = integrationApp.GetSdkCtx().BlockHeight() + currentBlockHeight = sdkCtx.BlockHeight() numBlocksUntilProofWindowOpenHeight := earliestSupplierProofCommitHeight - currentBlockHeight require.Greater(t, numBlocksUntilProofWindowOpenHeight, int64(0), "unexpected non-positive number of blocks until the earliest proof commit height") integrationApp.NextBlocks(t, int(numBlocksUntilProofWindowOpenHeight)) @@ -97,28 +98,25 @@ func TestUpdateRelayMiningDifficulty_NewServiceSeenForTheFirstTime(t *testing.T) require.NotNil(t, result, "unexpected nil result when submitting a MsgSubmitProof tx") // Wait until the proof window is closed - currentBlockHeight = integrationApp.GetSdkCtx().BlockHeight() + currentBlockHeight = sdkCtx.BlockHeight() numBlocksUntilProofWindowCloseHeight := proofWindowCloseHeight - currentBlockHeight require.Greater(t, numBlocksUntilProofWindowOpenHeight, int64(0), "unexpected non-positive number of blocks until the earliest proof commit height") - // TODO_TECHDEBT(@bryanchriswhite): Olshansky is unsure why the +1 is necessary here - // but it was required to pass the test. - integrationApp.NextBlocks(t, int(numBlocksUntilProofWindowCloseHeight)+1) - // The number 14 was determined empirically by running the tests and will need - // to be updated if they are changed. 
- expectedNumEvents := 15 - // Check the number of events is consistent. - events := integrationApp.GetSdkCtx().EventManager().Events() - require.Equalf(t, expectedNumEvents, len(events), "unexpected number of total events") + // TODO_TECHDEBT(@bryanchriswhite): Olshansky is unsure why the +1 is necessary here but it was required to pass the test. + integrationApp.NextBlocks(t, int(numBlocksUntilProofWindowCloseHeight)+1) + // Check that the expected events are emitted + events := sdkCtx.EventManager().Events() relayMiningEvents := testutilevents.FilterEvents[*tokenomicstypes.EventRelayMiningDifficultyUpdated](t, events, "poktroll.tokenomics.EventRelayMiningDifficultyUpdated") require.Len(t, relayMiningEvents, 1, "unexpected number of relay mining difficulty updated events") relayMiningEvent := relayMiningEvents[0] require.Equal(t, "svc1", relayMiningEvent.ServiceId) - // The default difficulty) + + // The default difficulty require.Equal(t, prooftypes.DefaultRelayDifficultyTargetHashHex, relayMiningEvent.PrevTargetHashHexEncoded) require.Equal(t, prooftypes.DefaultRelayDifficultyTargetHashHex, relayMiningEvent.NewTargetHashHexEncoded) + // The previous EMA is the same as the current one if the service is new require.Equal(t, uint64(1), relayMiningEvent.PrevNumRelaysEma) require.Equal(t, uint64(1), relayMiningEvent.NewNumRelaysEma) @@ -136,10 +134,12 @@ func UpdateRelayMiningDifficulty_UpdateServiceIsDecreasing(t *testing.T) {} func getSharedParams(t *testing.T, integrationApp *testutil.App) sharedtypes.Params { t.Helper() + sdkCtx := integrationApp.GetSdkCtx() + sharedQueryClient := sharedtypes.NewQueryClient(integrationApp.QueryHelper()) sharedParamsReq := sharedtypes.QueryParamsRequest{} - sharedQueryRes, err := sharedQueryClient.Params(integrationApp.GetSdkCtx(), &sharedParamsReq) + sharedQueryRes, err := sharedQueryClient.Params(sdkCtx, &sharedParamsReq) require.NoError(t, err) return sharedQueryRes.Params @@ -149,14 +149,16 @@ func getSharedParams(t 
*testing.T, integrationApp *testutil.App) sharedtypes.Par func getSession(t *testing.T, integrationApp *testutil.App) *sessiontypes.Session { t.Helper() + sdkCtx := integrationApp.GetSdkCtx() + sessionQueryClient := sessiontypes.NewQueryClient(integrationApp.QueryHelper()) getSessionReq := sessiontypes.QueryGetSessionRequest{ ApplicationAddress: integrationApp.DefaultApplication.Address, Service: integrationApp.DefaultService, - BlockHeight: integrationApp.GetSdkCtx().BlockHeight(), + BlockHeight: sdkCtx.BlockHeight(), } - getSessionRes, err := sessionQueryClient.GetSession(integrationApp.GetSdkCtx(), &getSessionReq) + getSessionRes, err := sessionQueryClient.GetSession(sdkCtx, &getSessionReq) require.NoError(t, err) require.NotNil(t, getSessionRes, "unexpected nil queryResponse") return getSessionRes.Session diff --git a/tests/integration/tokenomics/tokenomics_example_test.go b/tests/integration/tokenomics/tokenomics_example_test.go index 778941261..d135c978c 100644 --- a/tests/integration/tokenomics/tokenomics_example_test.go +++ b/tests/integration/tokenomics/tokenomics_example_test.go @@ -95,7 +95,7 @@ func TestTokenomicsIntegrationExample(t *testing.T) { createClaimMsg := prooftypes.MsgCreateClaim{ SupplierAddress: integrationApp.DefaultSupplier.GetAddress(), SessionHeader: session.GetHeader(), - RootHash: testutilproof.SmstRootWithSum(uint64(1)), + RootHash: testutilproof.SmstRootWithSumAndCount(1, 1), } // Run the message to create the claim diff --git a/testutil/integration/app.go b/testutil/integration/app.go index a26349fb1..e83e56233 100644 --- a/testutil/integration/app.go +++ b/testutil/integration/app.go @@ -42,6 +42,7 @@ import ( "github.com/pokt-network/poktroll/pkg/crypto/rings" "github.com/pokt-network/poktroll/pkg/polylog/polyzero" testutilevents "github.com/pokt-network/poktroll/testutil/events" + "github.com/pokt-network/poktroll/testutil/sample" "github.com/pokt-network/poktroll/testutil/testkeyring" appkeeper 
"github.com/pokt-network/poktroll/x/application/keeper" application "github.com/pokt-network/poktroll/x/application/module" @@ -131,6 +132,13 @@ func NewIntegrationApp( WithIsCheckTx(true). WithEventManager(cosmostypes.NewEventManager()) + // Add a block proposer address to the context + valAddr, err := cosmostypes.ValAddressFromBech32(sample.ConsAddress()) + require.NoError(t, err) + consensusAddr := cosmostypes.ConsAddress(valAddr) + sdkCtx = sdkCtx.WithProposer(consensusAddr) + + // Create the base application txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) bApp := baseapp.NewBaseApp(appName, logger, db, txConfig.TxDecoder(), baseapp.SetChainID(appName)) bApp.MountKVStores(keys) @@ -156,7 +164,7 @@ func NewIntegrationApp( msgRouter.SetInterfaceRegistry(registry) bApp.SetMsgServiceRouter(msgRouter) - err := bApp.LoadLatestVersion() + err = bApp.LoadLatestVersion() require.NoError(t, err, "failed to load latest version") _, err = bApp.InitChain(&cmtabcitypes.RequestInitChain{ChainId: appName}) @@ -267,7 +275,8 @@ func NewCompleteIntegrationApp(t *testing.T) *App { accountKeeper, blockedAddresses, authority.String(), - logger) + logger, + ) // Prepare the shared keeper and module sharedKeeper := sharedkeeper.NewKeeper( @@ -280,6 +289,7 @@ func NewCompleteIntegrationApp(t *testing.T) *App { cdc, sharedKeeper, accountKeeper, + bankKeeper, ) @@ -289,6 +299,7 @@ func NewCompleteIntegrationApp(t *testing.T) *App { runtime.NewKVStoreService(storeKeys[servicetypes.StoreKey]), logger, authority.String(), + bankKeeper, ) serviceModule := service.NewAppModule( @@ -304,6 +315,7 @@ func NewCompleteIntegrationApp(t *testing.T) *App { runtime.NewKVStoreService(storeKeys[gatewaytypes.StoreKey]), logger, authority.String(), + bankKeeper, ) gatewayModule := gateway.NewAppModule( @@ -319,6 +331,7 @@ func NewCompleteIntegrationApp(t *testing.T) *App { runtime.NewKVStoreService(storeKeys[apptypes.StoreKey]), logger, authority.String(), + bankKeeper, accountKeeper, 
gatewayKeeper, @@ -337,6 +350,7 @@ func NewCompleteIntegrationApp(t *testing.T) *App { runtime.NewKVStoreService(storeKeys[suppliertypes.StoreKey]), logger, authority.String(), + bankKeeper, sharedKeeper, serviceKeeper, @@ -355,6 +369,7 @@ func NewCompleteIntegrationApp(t *testing.T) *App { runtime.NewKVStoreService(storeKeys[sessiontypes.StoreKey]), logger, authority.String(), + accountKeeper, bankKeeper, applicationKeeper, @@ -374,6 +389,7 @@ func NewCompleteIntegrationApp(t *testing.T) *App { runtime.NewKVStoreService(storeKeys[prooftypes.StoreKey]), logger, authority.String(), + sessionKeeper, applicationKeeper, accountKeeper, @@ -391,9 +407,11 @@ func NewCompleteIntegrationApp(t *testing.T) *App { runtime.NewKVStoreService(storeKeys[tokenomicstypes.StoreKey]), logger, authority.String(), + bankKeeper, accountKeeper, applicationKeeper, + supplierKeeper, proofKeeper, sharedKeeper, sessionKeeper, @@ -490,8 +508,9 @@ func NewCompleteIntegrationApp(t *testing.T) *App { // Prepare a new default service defaultService := sharedtypes.Service{ - Id: "svc1", - Name: "svcName1", + Id: "svc1", + Name: "svcName1", + OwnerAddress: sample.AccAddress(), } serviceKeeper.SetService(integrationApp.sdkCtx, defaultService) integrationApp.DefaultService = &defaultService diff --git a/testutil/keeper/supplier.go b/testutil/keeper/supplier.go index b914308e7..cce5bfe73 100644 --- a/testutil/keeper/supplier.go +++ b/testutil/keeper/supplier.go @@ -101,6 +101,3 @@ func SupplierKeeper(t testing.TB) (SupplierModuleKeepers, context.Context) { return supplierModuleKeepers, ctx } - -// TODO_OPTIMIZE: Index suppliers by service so we can easily query k.GetAllSuppliers(ctx, Service) -// func (k Keeper) GetAllSuppliers(ctx, sdkContext, serviceId string) (suppliers []sharedtypes.Supplier) {} diff --git a/testutil/keeper/tokenomics.go b/testutil/keeper/tokenomics.go index 851044d6f..a86f20c27 100644 --- a/testutil/keeper/tokenomics.go +++ b/testutil/keeper/tokenomics.go @@ -17,6 +17,7 @@ import 
( cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" "github.com/cosmos/cosmos-sdk/runtime" "github.com/cosmos/cosmos-sdk/testutil/integration" + cosmostypes "github.com/cosmos/cosmos-sdk/types" sdk "github.com/cosmos/cosmos-sdk/types" authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" @@ -74,7 +75,7 @@ type TokenomicsModuleKeepersOpt func(context.Context, *TokenomicsModuleKeepers) func TokenomicsKeeper(t testing.TB) (tokenomicsKeeper tokenomicskeeper.Keeper, ctx context.Context) { t.Helper() - k, ctx, _, _ := TokenomicsKeeperWithActorAddrs(t, nil) + k, ctx, _, _, _ := TokenomicsKeeperWithActorAddrs(t) return k, ctx } @@ -83,18 +84,23 @@ func TokenomicsKeeper(t testing.TB) (tokenomicsKeeper tokenomicskeeper.Keeper, c // a result of the evolution of the testutil package. // TODO_REFACTOR(@Olshansk): Rather than making `service`, `appAddr` and `supplierAddr` // explicit params, make them passable by the caller as options. -func TokenomicsKeeperWithActorAddrs( - t testing.TB, - service *sharedtypes.Service, -) ( +func TokenomicsKeeperWithActorAddrs(t testing.TB) ( tokenomicsKeeper tokenomicskeeper.Keeper, ctx context.Context, appAddr string, supplierAddr string, + service *sharedtypes.Service, ) { t.Helper() storeKey := storetypes.NewKVStoreKey(tokenomicstypes.StoreKey) + service = &sharedtypes.Service{ + Id: "svc1", + Name: "svcName1", + ComputeUnitsPerRelay: 1, + OwnerAddress: sample.AccAddress(), + } + // Initialize the in-memory database. db := dbm.NewMemDB() stateStore := store.NewCommitMultiStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) @@ -110,8 +116,9 @@ func TokenomicsKeeperWithActorAddrs( // Prepare the test application. 
application := apptypes.Application{ - Address: sample.AccAddress(), - Stake: &sdk.Coin{Denom: "upokt", Amount: math.NewInt(100000)}, + Address: sample.AccAddress(), + Stake: &sdk.Coin{Denom: "upokt", Amount: math.NewInt(100000)}, + ServiceConfigs: []*sharedtypes.ApplicationServiceConfig{{Service: service}}, } // Prepare the test supplier. @@ -124,24 +131,34 @@ func TokenomicsKeeperWithActorAddrs( // Mock the application keeper. mockApplicationKeeper := mocks.NewMockApplicationKeeper(ctrl) - // Get test application if the address matches. mockApplicationKeeper.EXPECT(). GetApplication(gomock.Any(), gomock.Eq(application.Address)). Return(application, true). AnyTimes() - // Get zero-value application if the address does not match. mockApplicationKeeper.EXPECT(). GetApplication(gomock.Any(), gomock.Not(application.Address)). Return(apptypes.Application{}, false). AnyTimes() - // Mock SetApplication. mockApplicationKeeper.EXPECT(). SetApplication(gomock.Any(), gomock.Any()). AnyTimes() + // Mock the supplier keeper. + mockSupplierKeeper := mocks.NewMockSupplierKeeper(ctrl) + // Mock SetSupplier. + mockSupplierKeeper.EXPECT(). + SetSupplier(gomock.Any(), gomock.Any()). + AnyTimes() + + // Get test supplier if the address matches. + mockSupplierKeeper.EXPECT(). + GetSupplier(gomock.Any(), gomock.Eq(supplier.Address)). + Return(supplier, true). + AnyTimes() + // Mock the bank keeper. mockBankKeeper := mocks.NewMockBankKeeper(ctrl) mockBankKeeper.EXPECT(). @@ -153,6 +170,9 @@ func TokenomicsKeeperWithActorAddrs( mockBankKeeper.EXPECT(). SendCoinsFromModuleToAccount(gomock.Any(), suppliertypes.ModuleName, gomock.Any(), gomock.Any()). AnyTimes() + mockBankKeeper.EXPECT(). + SendCoinsFromModuleToAccount(gomock.Any(), tokenomicstypes.ModuleName, gomock.Any(), gomock.Any()). 
+ AnyTimes() // Mock the account keeper mockAccountKeeper := mocks.NewMockAccountKeeper(ctrl) @@ -172,15 +192,10 @@ func TokenomicsKeeperWithActorAddrs( // Mock the service keeper mockServiceKeeper := mocks.NewMockServiceKeeper(ctrl) - if service != nil { - // Get service if the ID matches. - mockServiceKeeper.EXPECT(). - GetService(gomock.Any(), gomock.Eq(service.Id)). - Return(*service, true). - AnyTimes() - } - - // Get zero-value service if the id does not match. + mockServiceKeeper.EXPECT(). + GetService(gomock.Any(), gomock.Eq(service.Id)). + Return(*service, true). + AnyTimes() mockServiceKeeper.EXPECT(). GetService(gomock.Any(), gomock.Any()). Return(sharedtypes.Service{}, false). @@ -194,6 +209,7 @@ func TokenomicsKeeperWithActorAddrs( mockBankKeeper, mockAccountKeeper, mockApplicationKeeper, + mockSupplierKeeper, mockProofKeeper, mockSharedKeeper, mockSessionKeeper, @@ -202,10 +218,16 @@ func TokenomicsKeeperWithActorAddrs( sdkCtx := sdk.NewContext(stateStore, cmtproto.Header{}, false, log.NewNopLogger()) + // Add a block proposer address to the context + valAddr, err := cosmostypes.ValAddressFromBech32(sample.ConsAddress()) + require.NoError(t, err) + consensusAddr := cosmostypes.ConsAddress(valAddr) + sdkCtx = sdkCtx.WithProposer(consensusAddr) + // Initialize params require.NoError(t, k.SetParams(sdkCtx, tokenomicstypes.DefaultParams())) - return k, sdkCtx, application.Address, supplier.Address + return k, sdkCtx, application.Address, supplier.Address, service } // NewTokenomicsModuleKeepers is a helper function to create a tokenomics keeper @@ -241,6 +263,13 @@ func NewTokenomicsModuleKeepers( // Prepare the context ctx = sdk.NewContext(stateStore, cmtproto.Header{}, false, logger) + sdkCtx := sdk.UnwrapSDKContext(ctx) + + // Add a block proposer address to the context + valAddr, err := cosmostypes.ValAddressFromBech32(sample.ConsAddress()) + require.NoError(t, err) + consensusAddr := cosmostypes.ConsAddress(valAddr) + sdkCtx = 
sdkCtx.WithProposer(consensusAddr) // ctx.SetAccount // Prepare the account keeper. @@ -261,9 +290,10 @@ func NewTokenomicsModuleKeepers( // These module accounts are necessary in order to settle balances // during claim expiration. map[string][]string{ - minttypes.ModuleName: {authtypes.Minter}, - suppliertypes.ModuleName: {authtypes.Minter, authtypes.Burner}, - apptypes.ModuleName: {authtypes.Minter, authtypes.Burner}, + minttypes.ModuleName: {authtypes.Minter}, + suppliertypes.ModuleName: {authtypes.Minter, authtypes.Burner}, + apptypes.ModuleName: {authtypes.Minter, authtypes.Burner}, + tokenomicstypes.ModuleName: {authtypes.Minter, authtypes.Burner}, }, addrCodec, app.AccountAddressPrefix, @@ -279,12 +309,12 @@ func NewTokenomicsModuleKeepers( authority.String(), logger, ) - require.NoError(t, bankKeeper.SetParams(ctx, banktypes.DefaultParams())) + require.NoError(t, bankKeeper.SetParams(sdkCtx, banktypes.DefaultParams())) // Provide some initial funds to the suppliers & applications module accounts. - err := bankKeeper.MintCoins(ctx, suppliertypes.ModuleName, sdk.NewCoins(sdk.NewCoin("upokt", math.NewInt(1000000000000)))) + err = bankKeeper.MintCoins(sdkCtx, suppliertypes.ModuleName, sdk.NewCoins(sdk.NewCoin("upokt", math.NewInt(1000000000000)))) require.NoError(t, err) - err = bankKeeper.MintCoins(ctx, apptypes.ModuleName, sdk.NewCoins(sdk.NewCoin("upokt", math.NewInt(1000000000000)))) + err = bankKeeper.MintCoins(sdkCtx, apptypes.ModuleName, sdk.NewCoins(sdk.NewCoin("upokt", math.NewInt(1000000000000)))) require.NoError(t, err) // Construct a real shared keeper. @@ -294,7 +324,7 @@ func NewTokenomicsModuleKeepers( logger, authority.String(), ) - require.NoError(t, sharedKeeper.SetParams(ctx, sharedtypes.DefaultParams())) + require.NoError(t, sharedKeeper.SetParams(sdkCtx, sharedtypes.DefaultParams())) // Construct gateway keeper with a mocked bank keeper. 
gatewayKeeper := gatewaykeeper.NewKeeper( @@ -304,7 +334,7 @@ func NewTokenomicsModuleKeepers( authority.String(), bankKeeper, ) - require.NoError(t, gatewayKeeper.SetParams(ctx, gatewaytypes.DefaultParams())) + require.NoError(t, gatewayKeeper.SetParams(sdkCtx, gatewaytypes.DefaultParams())) // Construct an application keeper to add apps to sessions. appKeeper := appkeeper.NewKeeper( @@ -317,7 +347,7 @@ func NewTokenomicsModuleKeepers( gatewayKeeper, sharedKeeper, ) - require.NoError(t, appKeeper.SetParams(ctx, apptypes.DefaultParams())) + require.NoError(t, appKeeper.SetParams(sdkCtx, apptypes.DefaultParams())) // Construct a service keeper needed by the supplier keeper. serviceKeeper := servicekeeper.NewKeeper( @@ -338,7 +368,7 @@ func NewTokenomicsModuleKeepers( sharedKeeper, serviceKeeper, ) - require.NoError(t, supplierKeeper.SetParams(ctx, suppliertypes.DefaultParams())) + require.NoError(t, supplierKeeper.SetParams(sdkCtx, suppliertypes.DefaultParams())) // Construct a real session keeper so that sessions can be queried. sessionKeeper := sessionkeeper.NewKeeper( @@ -352,7 +382,7 @@ func NewTokenomicsModuleKeepers( supplierKeeper, sharedKeeper, ) - require.NoError(t, sessionKeeper.SetParams(ctx, sessiontypes.DefaultParams())) + require.NoError(t, sessionKeeper.SetParams(sdkCtx, sessiontypes.DefaultParams())) // Construct a real proof keeper so that claims & proofs can be created. proofKeeper := proofkeeper.NewKeeper( @@ -365,7 +395,7 @@ func NewTokenomicsModuleKeepers( accountKeeper, sharedKeeper, ) - require.NoError(t, proofKeeper.SetParams(ctx, prooftypes.DefaultParams())) + require.NoError(t, proofKeeper.SetParams(sdkCtx, prooftypes.DefaultParams())) // Construct a real tokenomics keeper so that claims & tokenomics can be created. 
tokenomicsKeeper := tokenomicskeeper.NewKeeper( @@ -376,13 +406,14 @@ func NewTokenomicsModuleKeepers( bankKeeper, accountKeeper, appKeeper, + supplierKeeper, proofKeeper, sharedKeeper, sessionKeeper, serviceKeeper, ) - require.NoError(t, tokenomicsKeeper.SetParams(ctx, tokenomicstypes.DefaultParams())) + require.NoError(t, tokenomicsKeeper.SetParams(sdkCtx, tokenomicstypes.DefaultParams())) keepers := TokenomicsModuleKeepers{ Keeper: &tokenomicsKeeper, @@ -399,6 +430,7 @@ func NewTokenomicsModuleKeepers( } // Apply any options to update the keepers or context prior to returning them. + ctx = sdkCtx for _, opt := range opts { ctx = opt(ctx, &keepers) } diff --git a/testutil/proof/fixture_generators.go b/testutil/proof/fixture_generators.go index f6e6aedbb..6d4f59cd0 100644 --- a/testutil/proof/fixture_generators.go +++ b/testutil/proof/fixture_generators.go @@ -1,15 +1,15 @@ package proof import ( + "crypto/rand" "encoding/binary" - "math/rand" "testing" - "github.com/stretchr/testify/require" - "github.com/pokt-network/smt" + "github.com/stretchr/testify/require" "github.com/pokt-network/poktroll/pkg/crypto/protocol" + "github.com/pokt-network/poktroll/testutil/sample" testsession "github.com/pokt-network/poktroll/testutil/session" prooftypes "github.com/pokt-network/poktroll/x/proof/types" sessiontypes "github.com/pokt-network/poktroll/x/session/types" @@ -21,73 +21,73 @@ const ( DefaultTestServiceID = "svc1" ) -// BaseClaim returns a base (default, example, etc..) claim with the given app -// address, supplier address, sum, and serviceID that can be used for testing. -func BaseClaim(appAddr, supplierAddr string, sum uint64, serviceId string) prooftypes.Claim { +// BaseClaim returns a base (default, example, etc..) claim with the given +// service ID, app address, supplier address and num relays that can be used for testing. 
+func BaseClaim(serviceId, appAddr, supplierAddr string, numRelays uint64) prooftypes.Claim { + computeUnitsPerRelay := uint64(1) + sum := numRelays * computeUnitsPerRelay return prooftypes.Claim{ SupplierAddress: supplierAddr, SessionHeader: &sessiontypes.SessionHeader{ ApplicationAddress: appAddr, Service: &sharedtypes.Service{ - Id: serviceId, + Id: serviceId, + ComputeUnitsPerRelay: computeUnitsPerRelay, + OwnerAddress: sample.AccAddress(), // This may need to be an input param in the future. }, SessionId: "session_id", SessionStartBlockHeight: 1, SessionEndBlockHeight: testsession.GetSessionEndHeightWithDefaultParams(1), }, - RootHash: SmstRootWithSum(sum), + // + RootHash: SmstRootWithSumAndCount(sum, numRelays), } } // ClaimWithRandomHash returns a claim with a random SMST root hash with the given -// app address, supplier address, and sum that can be used for testing. Each claim -// generated this way will have a random chance to require a proof via probabilistic -// selection. -func ClaimWithRandomHash(t *testing.T, appAddr, supplierAddr string, sum uint64) prooftypes.Claim { - claim := BaseClaim(appAddr, supplierAddr, sum, DefaultTestServiceID) - claim.RootHash = RandSmstRootWithSum(t, sum) +// app address, supplier address, and num relays that can be used for testing. +// Each claim generated this way will have a random chance to require a proof via +// probabilistic selection. +func ClaimWithRandomHash(t *testing.T, appAddr, supplierAddr string, numRelays uint64) prooftypes.Claim { + claim := BaseClaim(DefaultTestServiceID, appAddr, supplierAddr, numRelays) + claim.RootHash = RandSmstRootWithSumAndCount(t, numRelays, numRelays) return claim } -// SmstRootWithSum returns a SMST root with the given sum and a default -// hard-coded count of 1. -// TODO_POTENTIAL_TECHDEBT: Note that the count is meant to represent the number -// of non-empty leaves in the tree, and may need become a parameter depending on -// how the tests evolve. 
-// TODO_MAINNET: Revisit if the SMT should be big or little Endian. Refs: -// https://github.com/pokt-network/smt/pull/46#discussion_r1636975124 -// https://github.com/pokt-network/smt/blob/ea585c6c3bc31c804b6bafa83e985e473b275580/smst.go#L23C10-L23C76 -func SmstRootWithSum(sum uint64) smt.MerkleSumRoot { +// SmstRootWithSumAndCount returns a SMST root with the given sum and relay count. +func SmstRootWithSumAndCount(sum, count uint64) smt.MerkleSumRoot { root := [protocol.TrieRootSize]byte{} - return encodeSum(root, sum) + return encodeSmstRoot(root, sum, count) } -// RandSmstRootWithSum returns a randomized SMST root with the given sum that -// can be used for testing. Randomizing the root is a simple way to randomize -// test claim hashes for testing proof requirement cases. -func RandSmstRootWithSum(t *testing.T, sum uint64) smt.MerkleSumRoot { +// RandSmstRootWithSumAndCount returns a randomized SMST root with the given sum +// and count that can be used for testing. Randomizing the root is a simple way to +// randomize test claim hashes for testing proof requirement cases. +func RandSmstRootWithSumAndCount(t *testing.T, sum, count uint64) smt.MerkleSumRoot { t.Helper() root := [protocol.TrieRootSize]byte{} - // Only populate the first 32 bytes with random data, leave the last 8 bytes for the sum. - _, err := rand.Read(root[:protocol.TrieHasherSize]) //nolint:staticcheck // We need a deterministic pseudo-random source. + + // Only populate the first 32 bytes with random data, leaving the rest to the sum and relay count. + _, err := rand.Read(root[:protocol.TrieHasherSize]) // TODO_IMPROVE: We need a deterministic pseudo-random source. require.NoError(t, err) - return encodeSum(root, sum) + return encodeSmstRoot(root, sum, count) } -// encodeSum returns a copy of the given root, binary encodes the given sum, -// and stores the encoded sum in the root copy. 
-func encodeSum(r [protocol.TrieRootSize]byte, sum uint64) smt.MerkleSumRoot { - root := make([]byte, protocol.TrieRootSize) - copy(root, r[:]) +// encodeSmstRoot returns a copy of the given root with the sum and count binary +// encoded and appended to the end. +// TODO_MAINNET: Revisit if the SMT should be big or little Endian. Refs: +// https://github.com/pokt-network/smt/pull/46#discussion_r1636975124 +// https://github.com/pokt-network/smt/blob/ea585c6c3bc31c804b6bafa83e985e473b275580/smst.go#L23C10-L23C76 +func encodeSmstRoot(root [protocol.TrieRootSize]byte, sum, count uint64) smt.MerkleSumRoot { + encodedRoot := make([]byte, protocol.TrieRootSize) + copy(encodedRoot, root[:]) // Insert the sum into the root hash - binary.BigEndian.PutUint64(root[protocol.TrieHasherSize:], sum) + binary.BigEndian.PutUint64(encodedRoot[protocol.TrieHasherSize:], sum) // Insert the count into the root hash - // TODO_TECHDEBT: This is a hard-coded count of 1, but could be a parameter. - // TODO_TECHDEBT: We are assuming the sum takes up 8 bytes. - binary.BigEndian.PutUint64(root[protocol.TrieHasherSize+8:], 1) + binary.BigEndian.PutUint64(encodedRoot[protocol.TrieHasherSize+protocol.TrieRootSumSize:], count) - return root + return encodedRoot } diff --git a/testutil/sample/sample.go b/testutil/sample/sample.go index 6b299717f..7bf7a7d3c 100644 --- a/testutil/sample/sample.go +++ b/testutil/sample/sample.go @@ -1,6 +1,7 @@ package sample import ( + "github.com/cometbft/cometbft/crypto/tmhash" "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" @@ -16,11 +17,22 @@ func AccAddressAndPubKey() (string, cryptotypes.PubKey) { // AccAddress returns a sample account address func AccAddress() string { + // TODO_BETA(@olshansk): Change this to secp256k1 because that's what we'll + // use in production for all real accounts. 
pk := ed25519.GenPrivKey().PubKey() addr := pk.Address() return sdk.AccAddress(addr).String() } +// ConsAddress returns a sample consensus address, which has the prefix +// of validators (i.e. consensus nodes) when converted to bech32. +func ConsAddress() string { + pk := ed25519.GenPrivKey().PubKey() + consensusAddress := tmhash.SumTruncated(pk.Address()) + valAddress := sdk.ValAddress(consensusAddress) + return valAddress.String() +} + // AccAddressAndPubKeyEdd2519 returns a sample account address and public key func AccAddressAndPubKeyEdd2519() (string, cryptotypes.PubKey) { pk := ed25519.GenPrivKey().PubKey() diff --git a/testutil/testtree/tree.go b/testutil/testtree/tree.go index e56be9a40..0a8d2caac 100644 --- a/testutil/testtree/tree.go +++ b/testutil/testtree/tree.go @@ -22,7 +22,7 @@ import ( // relay is signed by the supplier and application respectively. func NewFilledSessionTree( ctx context.Context, t *testing.T, - numRelays uint, + numRelays, computeUnitsPerRelay uint64, supplierKeyUid, supplierAddr string, sessionTreeHeader, reqHeader, resHeader *sessiontypes.SessionHeader, keyRing keyring.Keyring, @@ -36,7 +36,8 @@ func NewFilledSessionTree( // Add numRelays of relays to the session tree. FillSessionTree( ctx, t, - sessionTree, numRelays, + sessionTree, + numRelays, computeUnitsPerRelay, supplierKeyUid, supplierAddr, reqHeader, resHeader, keyRing, @@ -82,7 +83,7 @@ func NewEmptySessionTree( func FillSessionTree( ctx context.Context, t *testing.T, sessionTree relayer.SessionTree, - numRelays uint, + numRelays, computeUnitsPerRelay uint64, supplierKeyUid, supplierAddr string, reqHeader, resHeader *sessiontypes.SessionHeader, keyRing keyring.Keyring, @@ -104,23 +105,11 @@ func FillSessionTree( relayKey, err := relay.GetHash() require.NoError(t, err) - // See FillSessionTreeExpectedComputeUnits below for explanation. 
- relayWeight := uint64(i) - - err = sessionTree.Update(relayKey[:], relayBz, relayWeight) + err = sessionTree.Update(relayKey[:], relayBz, computeUnitsPerRelay) require.NoError(t, err) } } -// FillSessionTreeExpectedComputeUnits returns the number of expected compute units -// to covert numRelays (in a test scenario) whereby every subsequent relay costs -// an addition compute unit. -// This is basic random approach selected for testing purposes. Don't think too -// deeply about it. -func FillSessionTreeExpectedComputeUnits(numRelays uint) uint64 { - return uint64(numRelays * (numRelays - 1) / 2) -} - // NewProof creates a new proof structure. func NewProof( t *testing.T, diff --git a/x/proof/keeper/msg_server_create_claim.go b/x/proof/keeper/msg_server_create_claim.go index 1e9dc8b52..eccf4a324 100644 --- a/x/proof/keeper/msg_server_create_claim.go +++ b/x/proof/keeper/msg_server_create_claim.go @@ -83,6 +83,12 @@ func (k msgServer) CreateClaim( } _, isExistingClaim = k.Keeper.GetClaim(ctx, claim.GetSessionHeader().GetSessionId(), claim.GetSupplierAddress()) + // TODO_UPNEXT(#705): Check (and test) that numClaimComputeUnits is equal + // to num_relays * the_compute_units_per_relay for this_service. + // Add a comment that for now, we expect it to be the case because every + // relay for a specific service is worth the same, but may change in the + // future. 
+ // Upsert the claim k.Keeper.UpsertClaim(ctx, claim) logger.Info("successfully upserted the claim") diff --git a/x/proof/keeper/msg_server_create_claim_test.go b/x/proof/keeper/msg_server_create_claim_test.go index 69515cc64..fb3f7f4bd 100644 --- a/x/proof/keeper/msg_server_create_claim_test.go +++ b/x/proof/keeper/msg_server_create_claim_test.go @@ -23,11 +23,12 @@ import ( ) const ( - expectedNumComputeUnits = 10 - expectedNumRelays = 1 + expectedNumRelays = 10 + computeUnitsPerRelay = 1 + expectedNumComputeUnits = expectedNumRelays * computeUnitsPerRelay ) -var defaultMerkleRoot = testproof.SmstRootWithSum(expectedNumComputeUnits) +var defaultMerkleRoot = testproof.SmstRootWithSumAndCount(expectedNumComputeUnits, expectedNumRelays) func TestMsgServer_CreateClaim_Success(t *testing.T) { var claimWindowOpenBlockHash []byte @@ -72,7 +73,11 @@ func TestMsgServer_CreateClaim_Success(t *testing.T) { // The base session start height used for testing sessionStartHeight := blockHeight - service := &sharedtypes.Service{Id: testServiceId} + service := &sharedtypes.Service{ + Id: testServiceId, + ComputeUnitsPerRelay: computeUnitsPerRelay, + OwnerAddress: sample.AccAddress(), + } appAddr := sample.AccAddress() keepers.SetSupplier(ctx, sharedtypes.Supplier{ @@ -167,7 +172,11 @@ func TestMsgServer_CreateClaim_Error_OutsideOfWindow(t *testing.T) { // The base session start height used for testing sessionStartHeight := int64(1) - service := &sharedtypes.Service{Id: testServiceId} + service := &sharedtypes.Service{ + Id: testServiceId, + ComputeUnitsPerRelay: computeUnitsPerRelay, + OwnerAddress: sample.AccAddress(), + } supplierAddr := sample.AccAddress() appAddr := sample.AccAddress() @@ -285,7 +294,11 @@ func TestMsgServer_CreateClaim_Error(t *testing.T) { // The base session start height used for testing sessionStartHeight := int64(1) // service is the only service for which a session should exist. 
- service := &sharedtypes.Service{Id: testServiceId} + service := &sharedtypes.Service{ + Id: testServiceId, + ComputeUnitsPerRelay: computeUnitsPerRelay, + OwnerAddress: sample.AccAddress(), + } // supplierAddr is staked for "svc1" such that it is expected to be in the session. supplierAddr := sample.AccAddress() // wrongSupplierAddr is staked for "nosvc1" such that it is *not* expected to be in the session. @@ -315,7 +328,13 @@ func TestMsgServer_CreateClaim_Error(t *testing.T) { supplierKeeper.SetSupplier(ctx, sharedtypes.Supplier{ Address: wrongSupplierAddr, Services: []*sharedtypes.SupplierServiceConfig{ - {Service: &sharedtypes.Service{Id: "nosvc1"}}, + { + Service: &sharedtypes.Service{ + Id: "nosvc1", + ComputeUnitsPerRelay: computeUnitsPerRelay, + OwnerAddress: sample.AccAddress(), + }, + }, }, }) @@ -331,7 +350,13 @@ func TestMsgServer_CreateClaim_Error(t *testing.T) { appKeeper.SetApplication(ctx, apptypes.Application{ Address: wrongAppAddr, ServiceConfigs: []*sharedtypes.ApplicationServiceConfig{ - {Service: &sharedtypes.Service{Id: "nosvc1"}}, + { + Service: &sharedtypes.Service{ + Id: "nosvc1", + ComputeUnitsPerRelay: computeUnitsPerRelay, + OwnerAddress: sample.AccAddress(), + }, + }, }, }) diff --git a/x/proof/keeper/msg_server_submit_proof_test.go b/x/proof/keeper/msg_server_submit_proof_test.go index a9a8064f7..090fdc19e 100644 --- a/x/proof/keeper/msg_server_submit_proof_test.go +++ b/x/proof/keeper/msg_server_submit_proof_test.go @@ -17,6 +17,7 @@ import ( "github.com/pokt-network/poktroll/pkg/relayer" testutilevents "github.com/pokt-network/poktroll/testutil/events" keepertest "github.com/pokt-network/poktroll/testutil/keeper" + "github.com/pokt-network/poktroll/testutil/sample" "github.com/pokt-network/poktroll/testutil/testkeyring" "github.com/pokt-network/poktroll/testutil/testtree" "github.com/pokt-network/poktroll/x/proof/keeper" @@ -117,7 +118,11 @@ func TestMsgServer_SubmitProof_Success(t *testing.T) { preGeneratedAccts, ).String() - 
service := &sharedtypes.Service{Id: testServiceId} + service := &sharedtypes.Service{ + Id: testServiceId, + ComputeUnitsPerRelay: computeUnitsPerRelay, + OwnerAddress: sample.AccAddress(), + } // Add a supplier and application pair that are expected to be in the session. keepers.AddServiceActors(ctx, t, service, supplierAddr, appAddr) @@ -141,10 +146,11 @@ func TestMsgServer_SubmitProof_Success(t *testing.T) { require.NoError(t, err) // Submit the corresponding proof. - expectedNumRelays := uint(5) + numRelays := uint64(5) + numComputeUnits := numRelays * service.ComputeUnitsPerRelay sessionTree := testtree.NewFilledSessionTree( ctx, t, - expectedNumRelays, + numRelays, service.ComputeUnitsPerRelay, supplierUid, supplierAddr, sessionHeader, sessionHeader, sessionHeader, keyRing, @@ -221,8 +227,8 @@ func TestMsgServer_SubmitProof_Success(t *testing.T) { require.EqualValues(t, claim, proofSubmittedEvent.GetClaim()) require.EqualValues(t, &proofs[0], proofSubmittedEvent.GetProof()) - require.Equal(t, uint64(expectedNumComputeUnits), proofSubmittedEvent.GetNumComputeUnits()) - require.Equal(t, uint64(expectedNumRelays), proofSubmittedEvent.GetNumRelays()) + require.Equal(t, uint64(numRelays), proofSubmittedEvent.GetNumRelays()) + require.Equal(t, uint64(numComputeUnits), proofSubmittedEvent.GetNumComputeUnits()) }) } } @@ -264,7 +270,11 @@ func TestMsgServer_SubmitProof_Error_OutsideOfWindow(t *testing.T) { preGeneratedAccts, ).String() - service := &sharedtypes.Service{Id: testServiceId} + service := &sharedtypes.Service{ + Id: testServiceId, + ComputeUnitsPerRelay: computeUnitsPerRelay, + OwnerAddress: sample.AccAddress(), + } // Add a supplier and application pair that are expected to be in the session. keepers.AddServiceActors(ctx, t, service, supplierAddr, appAddr) @@ -288,10 +298,10 @@ func TestMsgServer_SubmitProof_Error_OutsideOfWindow(t *testing.T) { require.NoError(t, err) // Submit the corresponding proof. 
- numRelays := uint(5) + numRelays := uint64(5) sessionTree := testtree.NewFilledSessionTree( ctx, t, - numRelays, + numRelays, service.ComputeUnitsPerRelay, supplierUid, supplierAddr, sessionHeader, sessionHeader, sessionHeader, keyRing, @@ -443,8 +453,16 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { preGeneratedAccts, ).String() - service := &sharedtypes.Service{Id: testServiceId} - wrongService := &sharedtypes.Service{Id: "wrong_svc"} + service := &sharedtypes.Service{ + Id: testServiceId, + ComputeUnitsPerRelay: computeUnitsPerRelay, + OwnerAddress: sample.AccAddress(), + } + wrongService := &sharedtypes.Service{ + Id: "wrong_svc", + ComputeUnitsPerRelay: computeUnitsPerRelay, + OwnerAddress: sample.AccAddress(), + } // Add a supplier and application pair that are expected to be in the session. keepers.AddServiceActors(ctx, t, service, supplierAddr, appAddr) @@ -474,10 +492,10 @@ func TestMsgServer_SubmitProof_Error(t *testing.T) { require.NoError(t, err) // Construct a valid session tree with 5 relays. 
- numRelays := uint(5) + numRelays := uint64(5) validSessionTree := testtree.NewFilledSessionTree( ctx, t, - numRelays, + numRelays, service.ComputeUnitsPerRelay, supplierUid, supplierAddr, validSessionHeader, validSessionHeader, validSessionHeader, keyRing, diff --git a/x/proof/keeper/proof_validation_test.go b/x/proof/keeper/proof_validation_test.go index 1a4707ec5..4c1655723 100644 --- a/x/proof/keeper/proof_validation_test.go +++ b/x/proof/keeper/proof_validation_test.go @@ -17,6 +17,7 @@ import ( "github.com/pokt-network/poktroll/pkg/polylog/polyzero" "github.com/pokt-network/poktroll/pkg/relayer" keepertest "github.com/pokt-network/poktroll/testutil/keeper" + "github.com/pokt-network/poktroll/testutil/sample" "github.com/pokt-network/poktroll/testutil/testkeyring" "github.com/pokt-network/poktroll/testutil/testrelayer" "github.com/pokt-network/poktroll/testutil/testtree" @@ -78,7 +79,11 @@ func TestEnsureValidProof_Error(t *testing.T) { preGeneratedAccts, ).String() - service := &sharedtypes.Service{Id: testServiceId} + service := &sharedtypes.Service{ + Id: testServiceId, + ComputeUnitsPerRelay: 1, + OwnerAddress: sample.AccAddress(), + } wrongService := &sharedtypes.Service{Id: "wrong_svc"} // Add a supplier and application pair that are expected to be in the session. @@ -113,10 +118,10 @@ func TestEnsureValidProof_Error(t *testing.T) { require.NoError(t, err) // Construct a valid session tree with 5 relays. - numRelays := uint(5) + numRelays := uint64(5) validSessionTree := testtree.NewFilledSessionTree( ctx, t, - numRelays, + numRelays, service.ComputeUnitsPerRelay, supplierUid, supplierAddr, validSessionHeader, validSessionHeader, validSessionHeader, keyRing, @@ -335,10 +340,10 @@ func TestEnsureValidProof_Error(t *testing.T) { newProof: func(t *testing.T) *prooftypes.Proof { // Construct a session tree with 1 relay with a session header containing // a session ID that doesn't match the proof session ID. 
- numRelays := uint(1) + numRelays := uint64(1) wrongRequestSessionIdSessionTree := testtree.NewFilledSessionTree( ctx, t, - numRelays, + numRelays, service.ComputeUnitsPerRelay, supplierUid, supplierAddr, validSessionHeader, &wrongSessionIdHeader, validSessionHeader, keyRing, @@ -384,10 +389,10 @@ func TestEnsureValidProof_Error(t *testing.T) { newProof: func(t *testing.T) *prooftypes.Proof { // Construct a session tree with 1 relay with a session header containing // a session ID that doesn't match the expected session ID. - numRelays := uint(1) + numRelays := uint64(1) wrongResponseSessionIdSessionTree := testtree.NewFilledSessionTree( ctx, t, - numRelays, + numRelays, service.ComputeUnitsPerRelay, supplierUid, supplierAddr, validSessionHeader, validSessionHeader, &wrongSessionIdHeader, keyRing, @@ -550,10 +555,10 @@ func TestEnsureValidProof_Error(t *testing.T) { newProof: func(t *testing.T) *prooftypes.Proof { // Construct a new valid session tree for this test case because once the // closest proof has already been generated, the path cannot be changed. - numRelays := uint(5) + numRelays := uint64(5) wrongPathSessionTree := testtree.NewFilledSessionTree( ctx, t, - numRelays, + numRelays, service.ComputeUnitsPerRelay, supplierUid, supplierAddr, validSessionHeader, validSessionHeader, validSessionHeader, keyRing, @@ -621,10 +626,10 @@ func TestEnsureValidProof_Error(t *testing.T) { desc: "claim must exist for proof message", newProof: func(t *testing.T) *prooftypes.Proof { // Construct a new session tree corresponding to the unclaimed session. 
- numRelays := uint(5) + numRelays := uint64(5) unclaimedSessionTree := testtree.NewFilledSessionTree( ctx, t, - numRelays, + numRelays, service.ComputeUnitsPerRelay, "wrong_supplier", wrongSupplierAddr, unclaimedSessionHeader, unclaimedSessionHeader, unclaimedSessionHeader, keyRing, @@ -660,10 +665,10 @@ func TestEnsureValidProof_Error(t *testing.T) { { desc: "Valid proof cannot validate claim with an incorrect root", newProof: func(t *testing.T) *prooftypes.Proof { - numRelays := uint(10) + numRelays := uint64(10) wrongMerkleRootSessionTree := testtree.NewFilledSessionTree( ctx, t, - numRelays, + numRelays, service.ComputeUnitsPerRelay, supplierUid, supplierAddr, validSessionHeader, validSessionHeader, validSessionHeader, keyRing, @@ -685,10 +690,11 @@ func TestEnsureValidProof_Error(t *testing.T) { keepers.UpsertClaim(claimCtx, *claim) require.NoError(t, err) - // Construct a valid session tree with 5 relays. + // Construct a valid session tree. + numRelays = uint64(5) validSessionTree := testtree.NewFilledSessionTree( ctx, t, - uint(5), + numRelays, service.ComputeUnitsPerRelay, supplierUid, supplierAddr, validSessionHeader, validSessionHeader, validSessionHeader, keyRing, diff --git a/x/service/module/autocli.go b/x/service/module/autocli.go index 287cbbcbc..9ecac312c 100644 --- a/x/service/module/autocli.go +++ b/x/service/module/autocli.go @@ -10,7 +10,7 @@ import ( func (am AppModule) AutoCLIOptions() *autocliv1.ModuleOptions { return &autocliv1.ModuleOptions{ Query: &autocliv1.ServiceCommandDescriptor{ - Service: modulev1.Query_ServiceDesc.ServiceName, + Service: modulev1.Query_ServiceDesc.ServiceName, RpcCommandOptions: []*autocliv1.RpcCommandOptions{ // { // RpcMethod: "Params", @@ -22,12 +22,13 @@ func (am AppModule) AutoCLIOptions() *autocliv1.ModuleOptions { // Use: "list-service", // Short: "List all service", // }, - // { - // RpcMethod: "Service", - // Use: "show-service [id]", - // Short: "Shows a service", - // PositionalArgs: 
[]*autocliv1.PositionalArgDescriptor{{ProtoField: "index"}}, - // }, + { + RpcMethod: "Service", + Use: "show-service [id]", + Short: "Shows a service", + Long: "Retrieve the service details by its id.", + PositionalArgs: []*autocliv1.PositionalArgDescriptor{{ProtoField: "id"}}, + }, // this line is used by ignite scaffolding # autocli/query }, }, diff --git a/x/session/keeper/session_hydrator.go b/x/session/keeper/session_hydrator.go index c7c379328..e0711dd3a 100644 --- a/x/session/keeper/session_hydrator.go +++ b/x/session/keeper/session_hydrator.go @@ -135,8 +135,8 @@ func (k Keeper) hydrateSessionID(ctx context.Context, sh *sessionHydrator) error // hydrateSessionApplication hydrates the full Application actor based on the address provided func (k Keeper) hydrateSessionApplication(ctx context.Context, sh *sessionHydrator) error { - foundApp, appIsFound := k.applicationKeeper.GetApplication(ctx, sh.sessionHeader.ApplicationAddress) - if !appIsFound { + foundApp, isAppFound := k.applicationKeeper.GetApplication(ctx, sh.sessionHeader.ApplicationAddress) + if !isAppFound { return types.ErrSessionAppNotFound.Wrapf( "could not find app with address %q at height %d", sh.sessionHeader.ApplicationAddress, diff --git a/x/shared/types/service.pb.go b/x/shared/types/service.pb.go index 4c364e196..acfd8144a 100644 --- a/x/shared/types/service.pb.go +++ b/x/shared/types/service.pb.go @@ -92,7 +92,7 @@ func (ConfigOptions) EnumDescriptor() ([]byte, []int) { type Service struct { // For example, what if we want to request a session for a certain service but with some additional configs that identify it? Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // TODO_BETA: Name is currently unused but acts as a reminder that an optional onchain representation of the service is necessary + // TODO_MAINNET: Remove this. 
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // The cost of a single relay for this service in terms of compute units. // Must be used alongside the global 'compute_units_to_tokens_multipler' to calculate the cost of a relay for this service. @@ -168,6 +168,7 @@ func (m *Service) GetOwnerAddress() string { // ApplicationServiceConfig holds the service configuration the application stakes for type ApplicationServiceConfig struct { + // TODO_MAINNET: Avoid embedding the full Service because we just need the ID. Service *Service `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` } @@ -213,6 +214,7 @@ func (m *ApplicationServiceConfig) GetService() *Service { // SupplierServiceConfig holds the service configuration the supplier stakes for type SupplierServiceConfig struct { + // TODO_MAINNET: Avoid embedding the full Service because we just need the ID. Service *Service `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` Endpoints []*SupplierEndpoint `protobuf:"bytes,2,rep,name=endpoints,proto3" json:"endpoints,omitempty"` } diff --git a/x/supplier/keeper/supplier.go b/x/supplier/keeper/supplier.go index 3dfc2e0e5..f890b7a96 100644 --- a/x/supplier/keeper/supplier.go +++ b/x/supplier/keeper/supplier.go @@ -62,5 +62,5 @@ func (k Keeper) GetAllSuppliers(ctx context.Context) (suppliers []sharedtypes.Su return } -// TODO_MAINNET: Index suppliers by service so we can easily query k.GetAllSuppliers(ctx, Service) +// TODO_OPTIMIZE: Index suppliers by service ID // func (k Keeper) GetAllSuppliers(ctx, sdkContext, serviceId string) (suppliers []sharedtypes.Supplier) {} diff --git a/x/tokenomics/keeper/keeper.go b/x/tokenomics/keeper/keeper.go index a903c38fe..c21be050f 100644 --- a/x/tokenomics/keeper/keeper.go +++ b/x/tokenomics/keeper/keeper.go @@ -26,6 +26,7 @@ type Keeper struct { bankKeeper types.BankKeeper accountKeeper types.AccountKeeper applicationKeeper types.ApplicationKeeper + supplierKeeper 
types.SupplierKeeper proofKeeper types.ProofKeeper sharedKeeper types.SharedKeeper sessionKeeper types.SessionKeeper @@ -43,6 +44,7 @@ func NewKeeper( bankKeeper types.BankKeeper, accountKeeper types.AccountKeeper, applicationKeeper types.ApplicationKeeper, + supplierKeeper types.SupplierKeeper, proofKeeper types.ProofKeeper, sharedKeeper types.SharedKeeper, sessionKeeper types.SessionKeeper, @@ -63,6 +65,7 @@ func NewKeeper( bankKeeper: bankKeeper, accountKeeper: accountKeeper, applicationKeeper: applicationKeeper, + supplierKeeper: supplierKeeper, proofKeeper: proofKeeper, sharedKeeper: sharedKeeper, sessionKeeper: sessionKeeper, diff --git a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go index 42b5fc0e5..9375d3870 100644 --- a/x/tokenomics/keeper/keeper_settle_pending_claims_test.go +++ b/x/tokenomics/keeper/keeper_settle_pending_claims_test.go @@ -19,6 +19,7 @@ import ( testutilevents "github.com/pokt-network/poktroll/testutil/events" keepertest "github.com/pokt-network/poktroll/testutil/keeper" testutilproof "github.com/pokt-network/poktroll/testutil/proof" + "github.com/pokt-network/poktroll/testutil/sample" "github.com/pokt-network/poktroll/testutil/testkeyring" "github.com/pokt-network/poktroll/testutil/testtree" apptypes "github.com/pokt-network/poktroll/x/application/types" @@ -35,16 +36,16 @@ func init() { cmd.InitSDKConfig() } +// TODO_TECHDEBT(@olshansk): Consolidate the setup for all tests that use TokenomicsModuleKeepers type TestSuite struct { suite.Suite - sdkCtx cosmostypes.Context ctx context.Context keepers keepertest.TokenomicsModuleKeepers claim prooftypes.Claim proof prooftypes.Proof - expectedComputeUnits uint64 + numRelays uint64 } // SetupTest creates the following and stores them in the suite: @@ -57,8 +58,13 @@ func (s *TestSuite) SetupTest() { t := s.T() s.keepers, s.ctx = keepertest.NewTokenomicsModuleKeepers(s.T(), nil) - s.sdkCtx = 
cosmostypes.UnwrapSDKContext(s.ctx).WithBlockHeight(1) - s.ctx = s.sdkCtx + sdkCtx := cosmostypes.UnwrapSDKContext(s.ctx).WithBlockHeight(1) + + // Add a block proposer address to the context + valAddr, err := cosmostypes.ValAddressFromBech32(sample.ConsAddress()) + require.NoError(t, err) + consensusAddr := cosmostypes.ConsAddress(valAddr) + sdkCtx = sdkCtx.WithProposer(consensusAddr) // Construct a keyring to hold the keypairs for the accounts used in the test. keyRing := keyring.NewInMemory(s.keepers.Codec) @@ -69,21 +75,25 @@ func (s *TestSuite) SetupTest() { // Create accounts in the account keeper with corresponding keys in the keyring // // for the applications and suppliers used in the tests. supplierAddr := testkeyring.CreateOnChainAccount( - s.ctx, t, + sdkCtx, t, "supplier", keyRing, s.keepers.AccountKeeper, preGeneratedAccts, ).String() appAddr := testkeyring.CreateOnChainAccount( - s.ctx, t, + sdkCtx, t, "app", keyRing, s.keepers.AccountKeeper, preGeneratedAccts, ).String() - service := sharedtypes.Service{Id: testServiceId} + service := sharedtypes.Service{ + Id: testServiceId, + ComputeUnitsPerRelay: 1, + OwnerAddress: sample.AccAddress(), + } s.keepers.SetService(s.ctx, service) supplierStake := types.NewCoin("upokt", math.NewInt(1000000)) @@ -109,7 +119,7 @@ func (s *TestSuite) SetupTest() { Service: &service, BlockHeight: 1, } - sessionRes, err := s.keepers.GetSession(s.sdkCtx, sessionReq) + sessionRes, err := s.keepers.GetSession(sdkCtx, sessionReq) require.NoError(t, err) sessionHeader := sessionRes.Session.Header @@ -124,30 +134,29 @@ func (s *TestSuite) SetupTest() { require.NoError(t, err) // Construct a valid session tree with 10 relays. 
- numRelays := uint(10) + s.numRelays = uint64(10) sessionTree := testtree.NewFilledSessionTree( - s.ctx, t, - numRelays, + sdkCtx, t, + s.numRelays, service.ComputeUnitsPerRelay, "supplier", supplierAddr, sessionHeader, sessionHeader, sessionHeader, keyRing, ringClient, ) - s.expectedComputeUnits = testtree.FillSessionTreeExpectedComputeUnits(numRelays) blockHeaderHash := make([]byte, 0) expectedMerkleProofPath := protocol.GetPathForProof(blockHeaderHash, sessionHeader.SessionId) // Advance the block height to the earliest claim commit height. - sharedParams := s.keepers.SharedKeeper.GetParams(s.ctx) + sharedParams := s.keepers.SharedKeeper.GetParams(sdkCtx) claimMsgHeight := shared.GetEarliestSupplierClaimCommitHeight( &sharedParams, sessionHeader.GetSessionEndBlockHeight(), blockHeaderHash, supplierAddr, ) - s.sdkCtx = cosmostypes.UnwrapSDKContext(s.ctx).WithBlockHeight(claimMsgHeight).WithHeaderHash(blockHeaderHash) - s.ctx = s.sdkCtx + sdkCtx = sdkCtx.WithBlockHeight(claimMsgHeight).WithHeaderHash(blockHeaderHash) + s.ctx = sdkCtx merkleRootBz, err := sessionTree.Flush() require.NoError(t, err) @@ -168,19 +177,15 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimPendingBeforeSettlement() { // Retrieve default values t := s.T() ctx := s.ctx - sdkCtx := cosmostypes.UnwrapSDKContext(ctx) sharedParams := s.keepers.SharedKeeper.GetParams(ctx) - // 0. Add the claim & verify it exists - claim := s.claim - s.keepers.UpsertClaim(ctx, claim) - claims := s.keepers.GetAllClaims(ctx) - s.Require().Len(claims, 1) + // Upsert the claim only + s.keepers.UpsertClaim(ctx, s.claim) - // 1. Settle pending claims while the session is still active. + // Settle pending claims while the session is still active. 
// Expectations: No claims should be settled because the session is still ongoing - blockHeight := claim.SessionHeader.SessionEndBlockHeight - 2 // session is still active - sdkCtx = sdkCtx.WithBlockHeight(blockHeight) + blockHeight := s.claim.SessionHeader.SessionEndBlockHeight - 2 // session is still active + sdkCtx := cosmostypes.UnwrapSDKContext(ctx).WithBlockHeight(blockHeight) settledResult, expiredResult, err := s.keepers.SettlePendingClaims(sdkCtx) require.NoError(t, err) @@ -189,15 +194,15 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimPendingBeforeSettlement() { require.Equal(t, uint64(0), expiredResult.NumClaims) // Validate that one claim still remains. - claims = s.keepers.GetAllClaims(ctx) + claims := s.keepers.GetAllClaims(ctx) require.Len(t, claims, 1) // Calculate a block height which is within the proof window. proofWindowOpenHeight := shared.GetProofWindowOpenHeight( - &sharedParams, claim.SessionHeader.SessionEndBlockHeight, + &sharedParams, s.claim.SessionHeader.SessionEndBlockHeight, ) proofWindowCloseHeight := shared.GetProofWindowCloseHeight( - &sharedParams, claim.SessionHeader.SessionEndBlockHeight, + &sharedParams, s.claim.SessionHeader.SessionEndBlockHeight, ) blockHeight = (proofWindowCloseHeight - proofWindowOpenHeight) / 2 @@ -220,163 +225,174 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequiredAndNotProv // Retrieve default values t := s.T() ctx := s.ctx - sdkCtx := cosmostypes.UnwrapSDKContext(ctx) sharedParams := s.keepers.SharedKeeper.GetParams(ctx) - // Create a claim that requires a proof - claim := s.claim + // Retrieve the number of compute units in the claim + numComputeUnits, err := s.claim.GetNumComputeUnits() + require.NoError(t, err) - // 0. 
Add the claim & verify it exists - s.keepers.UpsertClaim(ctx, claim) - claims := s.keepers.GetAllClaims(ctx) - s.Require().Len(claims, 1) + // Set the proof parameters such that s.claim requires a proof because: + // - proof_request_probability is 0% + // - proof_requirement_threshold is below the claim (i.e. claim is above threshold) + err = s.keepers.ProofKeeper.SetParams(ctx, prooftypes.Params{ + ProofRequestProbability: 0, + ProofRequirementThreshold: uint64(numComputeUnits - 1), // -1 to push threshold below s.claim's compute units + }) + require.NoError(t, err) + + // Upsert the claim ONLY + s.keepers.UpsertClaim(ctx, s.claim) // Settle pending claims after proof window closes // Expectation: All (1) claims should be expired. // NB: proofs should be rejected when the current height equals the proof window close height. - sessionEndHeight := claim.SessionHeader.SessionEndBlockHeight + sessionEndHeight := s.claim.SessionHeader.SessionEndBlockHeight blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight) - sdkCtx = sdkCtx.WithBlockHeight(blockHeight) + sdkCtx := cosmostypes.UnwrapSDKContext(ctx).WithBlockHeight(blockHeight) settledResult, expiredResult, err := s.keepers.SettlePendingClaims(sdkCtx) require.NoError(t, err) - // Check that no claims were settled. - require.Equal(t, uint64(0), settledResult.NumClaims) - // Validate that exactly one claims expired - require.Equal(t, uint64(1), expiredResult.NumClaims) + // Validate claim settlement results + require.Equal(t, uint64(0), settledResult.NumClaims) // 0 claims settled + require.Equal(t, uint64(1), expiredResult.NumClaims) // 1 claim expired // Validate that no claims remain. - claims = s.keepers.GetAllClaims(ctx) + claims := s.keepers.GetAllClaims(ctx) require.Len(t, claims, 0) // Confirm an expiration event was emitted events := sdkCtx.EventManager().Events() require.Len(t, events, 5) // minting, burning, settling, etc.. 
- expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimExpired](t, - events, "poktroll.tokenomics.EventClaimExpired") + expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimExpired](t, events, "poktroll.tokenomics.EventClaimExpired") require.Len(t, expectedEvents, 1) // Validate the event expectedEvent := expectedEvents[0] require.Equal(t, tokenomicstypes.ClaimExpirationReason_PROOF_MISSING, expectedEvent.GetExpirationReason()) - require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits()) + require.Equal(t, s.numRelays, expectedEvent.GetNumRelays()) } -func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequired_InvalidOneProvided() { +func (s *TestSuite) TestSettlePendingClaims_ClaimSettled_ProofRequiredAndProvided_ViaThreshold() { // Retrieve default values t := s.T() ctx := s.ctx - sdkCtx := cosmostypes.UnwrapSDKContext(ctx) sharedParams := s.keepers.SharedKeeper.GetParams(ctx) - // Create a claim that requires a proof and an invalid proof - claim := s.claim - proof := s.proof - proof.ClosestMerkleProof = []byte("invalid_proof") + // Retrieve the number of compute units in the claim + numComputeUnits, err := s.claim.GetNumComputeUnits() + require.NoError(t, err) - // Upsert the proof & claim - s.keepers.UpsertClaim(ctx, claim) - s.keepers.UpsertProof(ctx, proof) + // Set the proof parameters such that s.claim requires a proof because: + // - proof_request_probability is 0% + // - proof_requirement_threshold is below the claim (i.e. claim is above threshold) + err = s.keepers.ProofKeeper.SetParams(ctx, prooftypes.Params{ + ProofRequestProbability: 0, + ProofRequirementThreshold: uint64(numComputeUnits - 1), // -1 to push threshold below s.claim's compute units + }) + require.NoError(t, err) + + // Upsert the claim & proof + s.keepers.UpsertClaim(ctx, s.claim) + s.keepers.UpsertProof(ctx, s.proof) // Settle pending claims after proof window closes - // Expectation: All (1) claims should be expired. 
+ // Expectation: All (1) claims should be claimed. // NB: proofs should be rejected when the current height equals the proof window close height. - sessionEndHeight := claim.SessionHeader.SessionEndBlockHeight + sessionEndHeight := s.claim.SessionHeader.SessionEndBlockHeight blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight) - sdkCtx = sdkCtx.WithBlockHeight(blockHeight) + sdkCtx := cosmostypes.UnwrapSDKContext(ctx).WithBlockHeight(blockHeight) settledResult, expiredResult, err := s.keepers.SettlePendingClaims(sdkCtx) require.NoError(t, err) - // Check that no claims were settled. - require.Equal(t, uint64(0), settledResult.NumClaims) - // Validate that exactly one claims expired - require.Equal(t, uint64(1), expiredResult.NumClaims) + // Validate claim settlement results + require.Equal(t, uint64(1), settledResult.NumClaims) // 1 claim settled + require.Equal(t, uint64(0), expiredResult.NumClaims) // 0 claims expired // Validate that no claims remain. claims := s.keepers.GetAllClaims(ctx) require.Len(t, claims, 0) - // Validate that no proofs remain. - proofs := s.keepers.GetAllProofs(ctx) - require.Len(t, proofs, 0) - - // Confirm an expiration event was emitted + // Confirm an settlement event was emitted events := sdkCtx.EventManager().Events() - require.Len(t, events, 5) // minting, burning, settling, etc.. 
- expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimExpired](t, - events, "poktroll.tokenomics.EventClaimExpired") + expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimSettled](t, events, "poktroll.tokenomics.EventClaimSettled") require.Len(t, expectedEvents, 1) // Validate the event expectedEvent := expectedEvents[0] - require.Equal(t, tokenomicstypes.ClaimExpirationReason_PROOF_INVALID, expectedEvent.GetExpirationReason()) - require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits()) + require.Equal(t, prooftypes.ProofRequirementReason_THRESHOLD, expectedEvent.GetProofRequirement()) + require.Equal(t, s.numRelays, expectedEvent.GetNumRelays()) } -func (s *TestSuite) TestSettlePendingClaims_ClaimSettled_ProofRequiredAndProvided_ViaThreshold() { +func (s *TestSuite) TestSettlePendingClaims_ClaimExpired_ProofRequired_InvalidOneProvided() { // Retrieve default values t := s.T() ctx := s.ctx - sdkCtx := cosmostypes.UnwrapSDKContext(ctx) sharedParams := s.keepers.SharedKeeper.GetParams(ctx) - // Create a claim that requires a proof - claim := s.claim + // Set the proof parameters such that s.claim DOES NOT require a proof because: + // - proof_request_probability is 100% + err := s.keepers.ProofKeeper.SetParams(ctx, prooftypes.Params{ + ProofRequestProbability: 1, + }) + require.NoError(t, err) - // Add the claim & verify it exists - s.keepers.UpsertClaim(ctx, claim) - claims := s.keepers.GetAllClaims(ctx) - s.Require().Len(claims, 1) + // Create a claim that requires a proof and an invalid proof + proof := s.proof + proof.ClosestMerkleProof = []byte("invalid_proof") - // Upsert the proof - s.keepers.UpsertProof(ctx, s.proof) + // Upsert the proof & claim + s.keepers.UpsertClaim(ctx, s.claim) + s.keepers.UpsertProof(ctx, proof) // Settle pending claims after proof window closes - // Expectation: All (1) claims should be claimed. + // Expectation: All (1) claims should be expired. 
// NB: proofs should be rejected when the current height equals the proof window close height. - sessionEndHeight := claim.SessionHeader.SessionEndBlockHeight + sessionEndHeight := s.claim.SessionHeader.SessionEndBlockHeight blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight) - sdkCtx = sdkCtx.WithBlockHeight(blockHeight) + sdkCtx := cosmostypes.UnwrapSDKContext(ctx).WithBlockHeight(blockHeight) settledResult, expiredResult, err := s.keepers.SettlePendingClaims(sdkCtx) require.NoError(t, err) - // Check that one claim was settled. - require.Equal(t, uint64(1), settledResult.NumClaims) - - // Validate that no claims expired. - require.Equal(t, uint64(0), expiredResult.NumClaims) + // Validate claim settlement results + require.Equal(t, uint64(0), settledResult.NumClaims) // 0 claims settled + require.Equal(t, uint64(1), expiredResult.NumClaims) // 1 claim expired // Validate that no claims remain. - claims = s.keepers.GetAllClaims(ctx) + claims := s.keepers.GetAllClaims(ctx) require.Len(t, claims, 0) - // Confirm an settlement event was emitted + // Validate that no proofs remain. + proofs := s.keepers.GetAllProofs(ctx) + require.Len(t, proofs, 0) + + // Confirm an expiration event was emitted events := sdkCtx.EventManager().Events() - expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimSettled](t, - events, "poktroll.tokenomics.EventClaimSettled") + require.Len(t, events, 5) // minting, burning, settling, etc.. 
+ expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimExpired](t, events, "poktroll.tokenomics.EventClaimExpired") require.Len(t, expectedEvents, 1) // Validate the event expectedEvent := expectedEvents[0] - require.Equal(t, prooftypes.ProofRequirementReason_THRESHOLD, expectedEvent.GetProofRequirement()) - require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits()) + require.Equal(t, tokenomicstypes.ClaimExpirationReason_PROOF_INVALID, expectedEvent.GetExpirationReason()) + require.Equal(t, s.numRelays, expectedEvent.GetNumRelays()) } func (s *TestSuite) TestClaimSettlement_ClaimSettled_ProofRequiredAndProvided_ViaProbability() { // Retrieve default values t := s.T() ctx := s.ctx - sdkCtx := cosmostypes.UnwrapSDKContext(ctx) sharedParams := s.keepers.SharedKeeper.GetParams(ctx) - // Set the proof parameters such that s.claim requires a proof because the - // proof_request_probability is 100%. This is accomplished by setting the - // proof_requirement_threshold to exceed s.expectedComputeUnits, which - // matches s.claim. 
- err := s.keepers.ProofKeeper.SetParams(ctx, prooftypes.Params{ - ProofRequestProbability: 1, - // +1 to push the requirement threshold ABOVE s.claim's compute units - ProofRequirementThreshold: s.expectedComputeUnits + 1, + // Retrieve the number of compute units in the claim + numComputeUnits, err := s.claim.GetNumComputeUnits() + require.NoError(t, err) + + // Set the proof parameters such that s.claim requires a proof because: + // - proof_request_probability is 100% + // - proof_requirement_threshold is 0, should not matter + err = s.keepers.ProofKeeper.SetParams(ctx, prooftypes.Params{ + ProofRequestProbability: 1, + ProofRequirementThreshold: numComputeUnits + 1, // +1 so its not required via probability }) require.NoError(t, err) @@ -389,14 +405,13 @@ func (s *TestSuite) TestClaimSettlement_ClaimSettled_ProofRequiredAndProvided_Vi // NB: proof window has definitely closed at this point sessionEndHeight := s.claim.SessionHeader.SessionEndBlockHeight blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight) - sdkCtx = sdkCtx.WithBlockHeight(blockHeight) + sdkCtx := cosmostypes.UnwrapSDKContext(ctx).WithBlockHeight(blockHeight) settledResult, expiredResult, err := s.keepers.SettlePendingClaims(sdkCtx) require.NoError(t, err) - // Check that one claim was settled. - require.Equal(t, uint64(1), settledResult.NumClaims) - // Validate that no claims expired. - require.Equal(t, uint64(0), expiredResult.NumClaims) + // Validate claim settlement results + require.Equal(t, uint64(1), settledResult.NumClaims) // 1 claim settled + require.Equal(t, uint64(0), expiredResult.NumClaims) // 0 claims expired // Validate that no claims remain. 
claims := s.keepers.GetAllClaims(ctx) @@ -404,67 +419,63 @@ func (s *TestSuite) TestClaimSettlement_ClaimSettled_ProofRequiredAndProvided_Vi // Confirm an settlement event was emitted events := sdkCtx.EventManager().Events() - expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimSettled](t, - events, "poktroll.tokenomics.EventClaimSettled") + expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimSettled](t, events, "poktroll.tokenomics.EventClaimSettled") require.Len(t, expectedEvents, 1) + + // Validate the settlement event expectedEvent := expectedEvents[0] require.Equal(t, prooftypes.ProofRequirementReason_PROBABILISTIC, expectedEvent.GetProofRequirement()) - require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits()) + require.Equal(t, s.numRelays, expectedEvent.GetNumRelays()) } func (s *TestSuite) TestSettlePendingClaims_Settles_WhenAProofIsNotRequired() { // Retrieve default values t := s.T() ctx := s.ctx - sdkCtx := cosmostypes.UnwrapSDKContext(ctx) sharedParams := s.keepers.SharedKeeper.GetParams(ctx) - // Create a claim that does not require a proof - claim := s.claim + // Retrieve the number of compute units in the claim + numComputeUnits, err := s.claim.GetNumComputeUnits() + require.NoError(t, err) - // Set the proof parameters such that s.claim DOES NOT require a proof because - // the proof_request_probability is 0% AND because the proof_requirement_threshold - // exceeds s.expectedComputeUnits, which matches s.claim. 
- err := s.keepers.ProofKeeper.SetParams(ctx, prooftypes.Params{ - ProofRequestProbability: 0, - // +1 to push the threshold above s.claim's compute units - ProofRequirementThreshold: s.expectedComputeUnits + 1, + // Set the proof parameters such that s.claim DOES NOT require a proof because: + // - proof_request_probability is 0% AND + // - proof_requirement_threshold exceeds s.claim's compute units + err = s.keepers.ProofKeeper.SetParams(ctx, prooftypes.Params{ + ProofRequestProbability: 0, + ProofRequirementThreshold: numComputeUnits + 1, // +1 to push threshold above s.claim's compute units }) require.NoError(t, err) - // Add the claim & verify it exists - s.keepers.UpsertClaim(ctx, claim) - claims := s.keepers.GetAllClaims(ctx) - s.Require().Len(claims, 1) + // Upsert the claim only (not the proof) + s.keepers.UpsertClaim(ctx, s.claim) // Settle pending claims after proof window closes // Expectation: All (1) claims should be claimed. // NB: proofs should be rejected when the current height equals the proof window close height. - sessionEndHeight := claim.SessionHeader.SessionEndBlockHeight + sessionEndHeight := s.claim.SessionHeader.SessionEndBlockHeight blockHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionEndHeight) - sdkCtx = sdkCtx.WithBlockHeight(blockHeight) + sdkCtx := cosmostypes.UnwrapSDKContext(ctx).WithBlockHeight(blockHeight) settledResult, expiredResult, err := s.keepers.SettlePendingClaims(sdkCtx) require.NoError(t, err) // Check that one claim was settled. - require.Equal(t, uint64(1), settledResult.NumClaims) - // Validate that no claims expired. - require.Equal(t, uint64(0), expiredResult.NumClaims) + require.Equal(t, uint64(1), settledResult.NumClaims) // 1 claim settled + require.Equal(t, uint64(0), expiredResult.NumClaims) // 0 claims expired // Validate that no claims remain. 
- claims = s.keepers.GetAllClaims(ctx) + claims := s.keepers.GetAllClaims(ctx) require.Len(t, claims, 0) - // Confirm an expiration event was emitted + // Confirm a settlement event was emitted events := sdkCtx.EventManager().Events() - expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimSettled](t, - events, "poktroll.tokenomics.EventClaimSettled") + expectedEvents := testutilevents.FilterEvents[*tokenomicstypes.EventClaimSettled](t, events, "poktroll.tokenomics.EventClaimSettled") require.Len(t, expectedEvents, 1) - // Validate the event + // Validate the settlement event expectedEvent := expectedEvents[0] require.Equal(t, prooftypes.ProofRequirementReason_NOT_REQUIRED.String(), expectedEvent.GetProofRequirement().String()) - require.Equal(t, s.expectedComputeUnits, expectedEvent.GetNumComputeUnits()) + require.Equal(t, s.numRelays, expectedEvent.GetNumRelays()) } func (s *TestSuite) TestSettlePendingClaims_DoesNotSettle_BeforeProofWindowCloses() { @@ -490,13 +501,16 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimPendingAfterSettlement() { sdkCtx := cosmostypes.UnwrapSDKContext(ctx) sharedParams := s.keepers.SharedKeeper.GetParams(ctx) + // Retrieve the number of compute units in the claim + numComputeUnits, err := s.claim.GetNumComputeUnits() + require.NoError(t, err) + // Set the proof parameters such that s.claim DOES NOT require a proof // because the proof_request_probability is 0% and the proof_request_threshold // is greater than the claims' compute units. 
- err := s.keepers.ProofKeeper.SetParams(ctx, prooftypes.Params{ - ProofRequestProbability: 0, - // +1 to push the threshold above s.claim's compute units - ProofRequirementThreshold: s.expectedComputeUnits + 1, + err = s.keepers.ProofKeeper.SetParams(ctx, prooftypes.Params{ + ProofRequestProbability: 0, + ProofRequirementThreshold: numComputeUnits + 1, // +1 to push threshold above s.claim's compute units }) require.NoError(t, err) @@ -508,10 +522,10 @@ func (s *TestSuite) TestSettlePendingClaims_ClaimPendingAfterSettlement() { // Add a second claim with a session header corresponding to the next session. sessionTwoClaim := testutilproof.BaseClaim( + sessionOneClaim.GetSessionHeader().GetService().Id, sessionOneClaim.GetSessionHeader().GetApplicationAddress(), sessionOneClaim.GetSupplierAddress(), - s.expectedComputeUnits, - sessionOneClaim.GetSessionHeader().GetService().Id, + s.numRelays, ) sessionOneProofWindowCloseHeight := shared.GetProofWindowCloseHeight(&sharedParams, sessionOneEndHeight) diff --git a/x/tokenomics/keeper/msg_server_test.go b/x/tokenomics/keeper/msg_server_test.go index d64987d84..3db23cda2 100644 --- a/x/tokenomics/keeper/msg_server_test.go +++ b/x/tokenomics/keeper/msg_server_test.go @@ -14,7 +14,7 @@ import ( func setupMsgServer(t testing.TB) (keeper.Keeper, types.MsgServer, context.Context) { t.Helper() - k, ctx, _, _ := testkeeper.TokenomicsKeeperWithActorAddrs(t, nil) + k, ctx, _, _, _ := testkeeper.TokenomicsKeeperWithActorAddrs(t) return k, keeper.NewMsgServerImpl(k), ctx } diff --git a/x/tokenomics/keeper/msg_update_params_test.go b/x/tokenomics/keeper/msg_update_params_test.go index 850895087..5f510265c 100644 --- a/x/tokenomics/keeper/msg_update_params_test.go +++ b/x/tokenomics/keeper/msg_update_params_test.go @@ -92,7 +92,7 @@ func TestMsgUpdateParams(t *testing.T) { } func TestUpdateParams_ComputeUnitsToTokensMultiplier(t *testing.T) { - tokenomicsKeeper, ctx, _, _ := testkeeper.TokenomicsKeeperWithActorAddrs(t, nil) + 
tokenomicsKeeper, ctx, _, _, _ := testkeeper.TokenomicsKeeperWithActorAddrs(t) srv := keeper.NewMsgServerImpl(tokenomicsKeeper) // Set the default params diff --git a/x/tokenomics/keeper/query_params_test.go b/x/tokenomics/keeper/query_params_test.go index 2b62d826d..48e685fda 100644 --- a/x/tokenomics/keeper/query_params_test.go +++ b/x/tokenomics/keeper/query_params_test.go @@ -10,7 +10,7 @@ import ( ) func TestGetParams(t *testing.T) { - k, ctx, _, _ := testkeeper.TokenomicsKeeperWithActorAddrs(t, nil) + k, ctx, _, _, _ := testkeeper.TokenomicsKeeperWithActorAddrs(t) // TODO_TECHDEBT(@bryanchriswhite, #394): Params tests don't assert initial state. params := types.DefaultParams() @@ -20,7 +20,7 @@ func TestGetParams(t *testing.T) { } func TestParamsQuery(t *testing.T) { - keeper, ctx, _, _ := testkeeper.TokenomicsKeeperWithActorAddrs(t, nil) + keeper, ctx, _, _, _ := testkeeper.TokenomicsKeeperWithActorAddrs(t) params := types.DefaultParams() require.NoError(t, keeper.SetParams(ctx, params)) diff --git a/x/tokenomics/keeper/settle_pending_claims.go b/x/tokenomics/keeper/settle_pending_claims.go index e04dc65b5..1cafa3f4b 100644 --- a/x/tokenomics/keeper/settle_pending_claims.go +++ b/x/tokenomics/keeper/settle_pending_claims.go @@ -43,9 +43,9 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( logger.Debug("settling expiring claims") for _, claim := range expiringClaims { var ( - numClaimComputeUnits uint64 - numRelaysInSessionTree uint64 - proofRequirement prooftypes.ProofRequirementReason + numClaimComputeUnits uint64 + numClaimRelays uint64 + proofRequirement prooftypes.ProofRequirementReason ) // NB: Note that not every (Req, Res) pair in the session is inserted in @@ -57,7 +57,7 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( return settledResult, expiredResult, err } - numRelaysInSessionTree, err = claim.GetNumRelays() + numClaimRelays, err = claim.GetNumRelays() if err != nil { return settledResult, expiredResult, err } @@ -76,7 +76,7 @@ 
func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( "session_id", sessionId, "supplier_address", claim.SupplierAddress, "num_claim_compute_units", numClaimComputeUnits, - "num_relays_in_session_tree", numRelaysInSessionTree, + "num_relays_in_session_tree", numClaimRelays, "proof_requirement", proofRequirement, ) @@ -100,7 +100,7 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( claimExpiredEvent := types.EventClaimExpired{ Claim: &claim, NumComputeUnits: numClaimComputeUnits, - NumRelays: numRelaysInSessionTree, + NumRelays: numClaimRelays, ExpirationReason: expirationReason, // TODO_CONSIDERATION: Add the error to the event if the proof was invalid. } @@ -118,7 +118,7 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( } expiredResult.NumClaims++ - expiredResult.NumRelays += numRelaysInSessionTree + expiredResult.NumRelays += numClaimRelays expiredResult.NumComputeUnits += numClaimComputeUnits continue } @@ -129,14 +129,14 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( // 2. The claim requires a proof and a valid proof was found. // Manage the mint & burn accounting for the claim. 
- if err = k.SettleSessionAccounting(ctx, &claim); err != nil { - logger.Error(fmt.Sprintf("error settling session accounting for claim %q: %v", claim.SessionHeader.SessionId, err)) + if err = k.ProcessTokenLogicModules(ctx, &claim); err != nil { + logger.Error(fmt.Sprintf("error processing token logic modules for claim %q: %v", claim.SessionHeader.SessionId, err)) return settledResult, expiredResult, err } claimSettledEvent := types.EventClaimSettled{ Claim: &claim, - NumRelays: numRelaysInSessionTree, + NumRelays: numClaimRelays, NumComputeUnits: numClaimComputeUnits, ProofRequirement: proofRequirement, } @@ -168,9 +168,9 @@ func (k Keeper) SettlePendingClaims(ctx sdk.Context) ( } settledResult.NumClaims++ - settledResult.NumRelays += numRelaysInSessionTree + settledResult.NumRelays += numClaimRelays settledResult.NumComputeUnits += numClaimComputeUnits - settledResult.RelaysPerServiceMap[claim.SessionHeader.Service.Id] += numRelaysInSessionTree + settledResult.RelaysPerServiceMap[claim.SessionHeader.Service.Id] += numClaimRelays logger.Info(fmt.Sprintf("Successfully settled claim for session ID %q at block height %d", claim.SessionHeader.SessionId, blockHeight)) } diff --git a/x/tokenomics/keeper/token_logic_modules.go b/x/tokenomics/keeper/token_logic_modules.go new file mode 100644 index 000000000..c0a10ebbe --- /dev/null +++ b/x/tokenomics/keeper/token_logic_modules.go @@ -0,0 +1,482 @@ +package keeper + +// References: +// - https://docs.pokt.network/pokt-protocol/the-shannon-upgrade/proposed-tokenomics/token-logic-modules +// - https://github.com/pokt-network/shannon-tokenomics-static-tests + +import ( + "context" + "fmt" + "math/big" + + "cosmossdk.io/math" + cosmostypes "github.com/cosmos/cosmos-sdk/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/pokt-network/smt" + + "github.com/pokt-network/poktroll/app/volatile" + "github.com/pokt-network/poktroll/pkg/crypto/protocol" + "github.com/pokt-network/poktroll/telemetry" + apptypes 
"github.com/pokt-network/poktroll/x/application/types" + prooftypes "github.com/pokt-network/poktroll/x/proof/types" + sharedtypes "github.com/pokt-network/poktroll/x/shared/types" + suppliertypes "github.com/pokt-network/poktroll/x/supplier/types" + tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types" + tokenomictypes "github.com/pokt-network/poktroll/x/tokenomics/types" +) + +const ( + // TODO_UPNEXT(@olshansk): Make all of the governance params + MintAllocationDAO = 0.1 + MintAllocationProposer = 0.05 + MintAllocationSupplier = 0.7 + MintAllocationSourceOwner = 0.15 + MintAllocationApplication = 0.0 + // TODO_UPNEXT(@olshansk): Remove this. An ephemeral placeholder before + // real values are introduced. When this is changed to a governance param, + // make sure to also add the necessary unit tests. + MintGlobalAllocation = 0.0000000 +) + +type TokenLogicModule int + +const ( + TLMRelayBurnEqualsMint TokenLogicModule = iota + TLMGlobalMint + // TODO_UPNEXT(@olshansk): Add more TLMs +) + +var tokenLogicModuleStrings = [...]string{ + "TLMRelayBurnEqualsMint", + "TLMGlobalMint", +} + +func (tlm TokenLogicModule) String() string { + return tokenLogicModuleStrings[tlm] +} + +func (tlm TokenLogicModule) EnumIndex() int { + return int(tlm) +} + +// TokenLogicModuleProcessor is the method signature that all token logic modules +// are expected to implement. +// IMPORTANT SIDE EFFECTS: Please note that TLMS may update the application and supplier +// objects, which is why they are passed in as pointers. However, this IS NOT persisted. +// The persistence to the keeper is currently done by ProcessTokenLogicModules only. +// This may be an interim state of the implementation and may change in the future. 
+type TokenLogicModuleProcessor func( + Keeper, + context.Context, + *sharedtypes.Service, + *apptypes.Application, + *sharedtypes.Supplier, + cosmostypes.Coin, + *tokenomictypes.RelayMiningDifficulty, +) error + +// tokenLogicModuleProcessorMap is a map of token logic modules to their respective processors. +var tokenLogicModuleProcessorMap = map[TokenLogicModule]TokenLogicModuleProcessor{ + TLMRelayBurnEqualsMint: Keeper.TokenLogicModuleRelayBurnEqualsMint, + TLMGlobalMint: Keeper.TokenLogicModuleGlobalMint, +} + +func init() { + // Ensure 100% of minted rewards are allocated + if 1.0 != MintAllocationDAO+MintAllocationProposer+MintAllocationSupplier+MintAllocationSourceOwner+MintAllocationApplication { + panic("mint allocation percentages do not add to 1.0") + } +} + +// ProcessTokenLogicModules is responsible for calling all of the token logic +// modules (i.e. post session claim accounting) necessary to burn, mint or transfer +// tokens as a result of the amount of work (i.e. compute units) done. 
+func (k Keeper) ProcessTokenLogicModules( + ctx context.Context, + claim *prooftypes.Claim, // IMPORTANT: It is assumed the proof for the claim has been validated BEFORE calling this function +) (err error) { + logger := k.Logger().With("method", "ProcessTokenLogicModules") + + // Declaring variables that will be emitted by telemetry + settlementCoin := cosmostypes.NewCoin("upokt", math.NewInt(0)) + isSuccessful := false + + // This is emitted only when the function returns (successful or not) + defer telemetry.EventSuccessCounter( + "process_token_logic_modules", + func() float32 { + if settlementCoin.Amount.BigInt() == nil { + return 0 + } + return float32(settlementCoin.Amount.Int64()) + }, + func() bool { return isSuccessful }, + ) + + // Ensure the claim is not nil + if claim == nil { + logger.Error("received a nil claim") + return tokenomicstypes.ErrTokenomicsClaimNil + } + + // Retrieve & validate the session header + sessionHeader := claim.GetSessionHeader() + if sessionHeader == nil { + logger.Error("received a nil session header") + return tokenomicstypes.ErrTokenomicsSessionHeaderNil + } + if err = sessionHeader.ValidateBasic(); err != nil { + logger.Error("received an invalid session header", "error", err) + return tokenomicstypes.ErrTokenomicsSessionHeaderInvalid + } + + // Retrieve the supplier address that will be getting rewarded; providing services + supplierAddr, err := cosmostypes.AccAddressFromBech32(claim.GetSupplierAddress()) + if err != nil || supplierAddr == nil { + return tokenomicstypes.ErrTokenomicsSupplierAddressInvalid + } + + // Retrieve the application address that is being charged; getting services + applicationAddress, err := cosmostypes.AccAddressFromBech32(sessionHeader.GetApplicationAddress()) + if err != nil || applicationAddress == nil { + return tokenomicstypes.ErrTokenomicsApplicationAddressInvalid + } + + // Retrieve the root of the claim to determine the amount of work done + root := 
(smt.MerkleSumRoot)(claim.GetRootHash()) + + // Ensure the root hash is valid + if !root.HasDigestSize(protocol.TrieHasherSize) { + return tokenomicstypes.ErrTokenomicsRootHashInvalid.Wrapf( + "root hash has invalid digest size (%d), expected (%d)", + root.DigestSize(), protocol.TrieHasherSize, + ) + } + + // Retrieve the on-chain staked application record + application, isAppFound := k.applicationKeeper.GetApplication(ctx, applicationAddress.String()) + if !isAppFound { + logger.Warn(fmt.Sprintf("application for claim with address %q not found", applicationAddress)) + return tokenomicstypes.ErrTokenomicsApplicationNotFound + } + + // Retrieve the on-chain staked supplier record + supplier, isSupplierFound := k.supplierKeeper.GetSupplier(ctx, supplierAddr.String()) + if !isSupplierFound { + logger.Warn(fmt.Sprintf("supplier for claim with address %q not found", supplierAddr)) + return tokenomicstypes.ErrTokenomicsSupplierNotFound + } + + service, isServiceFound := k.serviceKeeper.GetService(ctx, sessionHeader.Service.Id) + if !isServiceFound { + return tokenomicstypes.ErrTokenomicsServiceNotFound.Wrapf("service with ID %q not found", sessionHeader.Service.Id) + } + + // Retrieve the count (i.e. number of relays) to determine the amount of work done + numRelays, err := root.Count() + if err != nil { + return tokenomicstypes.ErrTokenomicsRootHashInvalid.Wrapf("%v", err) + } + // TODO_POST_MAINNET: Because of how things have evolved, we are now using + // root.Count (numRelays) instead of root.Sum (numComputeUnits) to determine + // the amount of work done. This is because the compute_units_per_relay is + /// a service specific (not request specific) parameter that will be maintained + // by the service owner to capture the average amount of resources (i.e. + // compute, storage, bandwidth, electricity, etc...) per request. Modifying + // this on a per request basis has been deemed too complex and not a mainnet + // blocker. 
+ + // Determine the total number of tokens that'll be used for settling the session. + // When the network achieves equilibrium, this will be the mint & burn. + settlementCoin, err = k.numRelaysToCoin(ctx, numRelays, &service) + if err != nil { + return err + } + + // Retrieving the relay mining difficulty for the service at hand + relayMiningDifficulty, found := k.GetRelayMiningDifficulty(ctx, service.Id) + if !found { + if err != nil { + return err + } + logger.Warn(fmt.Sprintf("relay mining difficulty for service %q not found. Using default difficulty", service.Id)) + relayMiningDifficulty = tokenomicstypes.RelayMiningDifficulty{ + ServiceId: service.Id, + BlockHeight: sdk.UnwrapSDKContext(ctx).BlockHeight(), + NumRelaysEma: numRelays, + TargetHash: prooftypes.DefaultRelayDifficultyTargetHash, + } + } + + // Helpers for logging the same metadata throughout this function calls + logger = logger.With( + "num_relays", numRelays, + "num_settlement_upokt", settlementCoin.Amount, + "session_id", sessionHeader.GetSessionId(), + "service_id", sessionHeader.GetService().Id, + "supplier", supplier.Address, + "application", application.Address, + ) + logger.Info(fmt.Sprintf("About to start processing TLMs for (%d) relays equaling to (%s) coins", numRelays, settlementCoin)) + + // Execute all the token logic modules processors + for tlm, tlmProcessor := range tokenLogicModuleProcessorMap { + logger.Info(fmt.Sprintf("Starting to execute TLM %q", tlm)) + if err := tlmProcessor(k, ctx, &service, &application, &supplier, settlementCoin, &relayMiningDifficulty); err != nil { + return err + } + logger.Info(fmt.Sprintf("Finished executing TLM %q", tlm)) + } + + // Update the application's on-chain record + k.applicationKeeper.SetApplication(ctx, application) + logger.Info(fmt.Sprintf("updated on-chain application record with address %q", application.Address)) + + // Update the suppliers's on-chain record + k.supplierKeeper.SetSupplier(ctx, supplier) + 
logger.Info(fmt.Sprintf("updated on-chain supplier record with address %q", supplier.Address)) + + // Update isSuccessful to true for telemetry + isSuccessful = true + return nil +} + +// TokenLogicModuleRelayBurnEqualsMint processes the business logic for the RelayBurnEqualsMint TLM. +func (k Keeper) TokenLogicModuleRelayBurnEqualsMint( + ctx context.Context, + service *sharedtypes.Service, + application *apptypes.Application, + supplier *sharedtypes.Supplier, + settlementCoins cosmostypes.Coin, + relayMiningDifficulty *tokenomictypes.RelayMiningDifficulty, +) error { + logger := k.Logger().With("method", "TokenLogicModuleRelayBurnEqualsMint") + + supplierAddr, err := cosmostypes.AccAddressFromBech32(supplier.Address) + if err != nil { + return err + } + + // NB: We are doing a mint & burn + transfer, instead of a simple transfer + // of funds from the supplier to the application in order to enable second + // order economic effects with more optionality. This could include funds + // going to pnf, delegators, enabling bonuses/rebates, etc... + + // Mint new uPOKT to the supplier module account. + // These funds will be transferred to the supplier below. + if err = k.bankKeeper.MintCoins( + ctx, suppliertypes.ModuleName, sdk.NewCoins(settlementCoins), + ); err != nil { + return tokenomicstypes.ErrTokenomicsSupplierModuleSendFailed.Wrapf( + "minting %s to the supplier module account: %v", + settlementCoins, + err, + ) + } + logger.Info(fmt.Sprintf("minted (%v) coins in the supplier module", settlementCoins)) + + // Send the newley minted uPOKT from the supplier module account + // to the supplier's account. 
+ if err = k.bankKeeper.SendCoinsFromModuleToAccount( + ctx, suppliertypes.ModuleName, supplierAddr, sdk.NewCoins(settlementCoins), + ); err != nil { + return tokenomicstypes.ErrTokenomicsSupplierModuleSendFailed.Wrapf( + "sending (%s) to supplier with address %s: %v", + settlementCoins, + supplier.Address, + err, + ) + } + logger.Info(fmt.Sprintf("sent (%v) from the supplier module to the supplier account with address %q", settlementCoins, supplier.Address)) + + // TODO_MAINNET: Decide on the behaviour here when an app is over serviced. + // If an app has 10 POKT staked, but the supplier earned 20 POKT. We still + // end up minting 20 POKT but only burn 10 POKT from the app. There are + // questions and nuance here that needs to be addressed. + + // Verify that the application has enough uPOKT to pay for the services it consumed + if application.GetStake().IsLT(settlementCoins) { + settlementCoins, err = k.handleOverservicedApplication(ctx, application, settlementCoins) + if err != nil { + return err + } + } + + // Burn uPOKT from the application module account which was held in escrow + // on behalf of the application account. 
+ if err = k.bankKeeper.BurnCoins( + ctx, apptypes.ModuleName, sdk.NewCoins(settlementCoins), + ); err != nil { + return tokenomicstypes.ErrTokenomicsApplicationModuleBurn.Wrapf("burning %s from the application module account: %v", settlementCoins, err) + } + logger.Info(fmt.Sprintf("burned (%v) from the application module account", settlementCoins)) + + // Update the application's on-chain stake + newAppStake, err := application.Stake.SafeSub(settlementCoins) + if err != nil { + return tokenomicstypes.ErrTokenomicsApplicationNewStakeInvalid.Wrapf("application %q stake cannot be reduced to a negative amount %v", application.Address, newAppStake) + } + application.Stake = &newAppStake + logger.Info(fmt.Sprintf("updated application %q stake to %v", application.Address, newAppStake)) + + return nil +} + +// TokenLogicModuleGlobalMint processes the business logic for the GlobalMint TLM. +// TODO_UPNEXT(@olshansk): Delete this in favor of a real TLM that mints tokens +// and distributes them to the appropriate accounts via boosts. +func (k Keeper) TokenLogicModuleGlobalMint( + ctx context.Context, + service *sharedtypes.Service, + application *apptypes.Application, + supplier *sharedtypes.Supplier, + settlementCoins cosmostypes.Coin, + relayMiningDifficulty *tokenomictypes.RelayMiningDifficulty, +) error { + logger := k.Logger().With("method", "TokenLogicModuleGlobalMint") + + // Determine how much new uPOKT to mint based on global inflation + // TODO_MAINNET: Consider using fixed point arithmetic for deterministic results. 
+ settlementAmtFloat := new(big.Float).SetUint64(settlementCoins.Amount.Uint64()) + newMintAmtFloat := new(big.Float).Mul(settlementAmtFloat, big.NewFloat(MintGlobalAllocation)) + newMintAmtInt, _ := newMintAmtFloat.Int64() + newMintCoins := sdk.NewCoins(cosmostypes.NewCoin(volatile.DenomuPOKT, math.NewInt(newMintAmtInt))) + + // Mint new uPOKT to the tokenomics module account + if err := k.bankKeeper.MintCoins(ctx, tokenomictypes.ModuleName, newMintCoins); err != nil { + return tokenomicstypes.ErrTokenomicsModuleMintFailed.Wrapf( + "minting %s to the tokenomics module account: %v", newMintCoins, err) + } + logger.Info(fmt.Sprintf("minted (%v) coins in the tokenomics module", newMintCoins)) + + // Send a portion of the rewards to the application + appCoins, err := k.sendRewardsToAccount(ctx, application.Address, newMintAmtFloat, MintAllocationApplication) + if err != nil { + return tokenomictypes.ErrTokenomicsSendingMindRewards.Wrapf("sending rewards to application: %v", err) + } + logger.Debug(fmt.Sprintf("sent (%v) newley minted coins from the tokenomics module to the application with address %q", appCoins, application.Address)) + + // Send a portion of the rewards to the supplier + supplierCoins, err := k.sendRewardsToAccount(ctx, supplier.Address, newMintAmtFloat, MintAllocationSupplier) + if err != nil { + return tokenomictypes.ErrTokenomicsSendingMindRewards.Wrapf("sending rewards to supplier: %v", err) + } + logger.Debug(fmt.Sprintf("sent (%v) newley minted coins from the tokenomics module to the supplier with address %q", supplierCoins, supplier.Address)) + + // Send a portion of the rewards to the DAO + daoCoins, err := k.sendRewardsToAccount(ctx, k.GetAuthority(), newMintAmtFloat, MintAllocationDAO) + if err != nil { + return tokenomictypes.ErrTokenomicsSendingMindRewards.Wrapf("sending rewards to DAO: %v", err) + } + logger.Debug(fmt.Sprintf("sent (%v) newley minted coins from the tokenomics module to the DAO with address %q", daoCoins, 
k.GetAuthority())) + + // Send a portion of the rewards to the source owner + serviceCoins, err := k.sendRewardsToAccount(ctx, service.OwnerAddress, newMintAmtFloat, MintAllocationSourceOwner) + if err != nil { + return tokenomictypes.ErrTokenomicsSendingMindRewards.Wrapf("sending rewards to source owner: %v", err) + } + logger.Debug(fmt.Sprintf("sent (%v) newley minted coins from the tokenomics module to the source owner with address %q", serviceCoins, service.OwnerAddress)) + + // Send a portion of the rewards to the block proposer + proposerAddr := cosmostypes.AccAddress(sdk.UnwrapSDKContext(ctx).BlockHeader().ProposerAddress).String() + proposerCoins, err := k.sendRewardsToAccount(ctx, proposerAddr, newMintAmtFloat, MintAllocationProposer) + if err != nil { + return tokenomictypes.ErrTokenomicsSendingMindRewards.Wrapf("sending rewards to proposer: %v", err) + } + logger.Debug(fmt.Sprintf("sent (%v) newley minted coins from the tokenomics module to the proposer with address %q", proposerCoins, proposerAddr)) + + // TODO_MAINNET: Verify that the total distributed coins equals the settlement coins which could happen due to float rounding + totalDistributedCoins := appCoins.Add(*supplierCoins).Add(*daoCoins).Add(*serviceCoins).Add(*proposerCoins) + if totalDistributedCoins.Amount.BigInt().Cmp(settlementCoins.Amount.BigInt()) != 0 { + logger.Error(fmt.Sprintf("TODO_MAINNET: The total distributed coins (%v) does not equal the settlement coins (%v)", totalDistributedCoins, settlementCoins.Amount.BigInt())) + } + logger.Info(fmt.Sprintf("distributed (%v) coins to the application, supplier, DAO, source owner, and proposer", totalDistributedCoins)) + + return nil +} + +// sendRewardsToAccount sends (settlementAmtFloat * allocation) tokens from the +// tokenomics module account to the specified address. 
+func (k Keeper) sendRewardsToAccount( + ctx context.Context, + addr string, + settlementAmtFloat *big.Float, + allocation float64, +) (*sdk.Coin, error) { + logger := k.Logger().With("method", "mintRewardsToAccount") + + accountAddr, err := cosmostypes.AccAddressFromBech32(addr) + if err != nil { + return nil, err + } + + coinsToAccAmt, _ := new(big.Float).Mul(settlementAmtFloat, big.NewFloat(allocation)).Int64() + coinToAcc := cosmostypes.NewCoin(volatile.DenomuPOKT, math.NewInt(coinsToAccAmt)) + if err := k.bankKeeper.SendCoinsFromModuleToAccount( + ctx, suppliertypes.ModuleName, accountAddr, sdk.NewCoins(coinToAcc), + ); err != nil { + return nil, err + } + logger.Info(fmt.Sprintf("sent (%v) coins from the tokenomics module to the account with address %q", coinToAcc, addr)) + + return &coinToAcc, nil +} + +func (k Keeper) handleOverservicedApplication( + ctx context.Context, + application *apptypes.Application, + settlementCoins cosmostypes.Coin, +) ( + newSettlementCoins cosmostypes.Coin, + err error, +) { + logger := k.Logger().With("method", "handleOverservicedApplication") + // over-serviced application + logger.Warn(fmt.Sprintf( + "THIS SHOULD NEVER HAPPEN. Application with address %s needs to be charged more than it has staked: %v > %v", + application.Address, + settlementCoins, + application.Stake, + )) + + // TODO_MAINNET(@Olshansk, @RawthiL): The application was over-serviced in the last session so it basically + // goes "into debt". Need to design a way to handle this when we implement + // probabilistic proofs and add all the parameter logic. Do we touch the application balance? + // Do we just let it go into debt? Do we penalize the application? Do we unstake it? Etc... 
+ // See this document from @red-0ne and @bryanchriswhite for more context: notion.so/buildwithgrove/Off-chain-Application-Stake-Tracking-6a8bebb107db4f7f9dc62cbe7ba555f7 + expectedBurn := settlementCoins + + applicationOverservicedEvent := &tokenomicstypes.EventApplicationOverserviced{ + ApplicationAddr: application.Address, + ExpectedBurn: &expectedBurn, + EffectiveBurn: application.GetStake(), + } + eventManager := cosmostypes.UnwrapSDKContext(ctx).EventManager() + if err := eventManager.EmitTypedEvent(applicationOverservicedEvent); err != nil { + return cosmostypes.Coin{}, tokenomicstypes.ErrTokenomicsApplicationOverserviced.Wrapf( + "application address: %s; expected burn %s; effective burn: %s", + application.GetAddress(), + expectedBurn.String(), + application.GetStake().String(), + ) + } + return *application.Stake, nil +} + +// numRelaysToCoin calculates the amount of uPOKT to mint based on the number of compute units. +func (k Keeper) numRelaysToCoin( + ctx context.Context, + numRelays uint64, // numRelays is a session specific parameter + service *sharedtypes.Service, +) (cosmostypes.Coin, error) { + // CUTTM is a GLOBAL network wide parameter + computeUnitsToTokensMultiplier := k.GetParams(ctx).ComputeUnitsToTokensMultiplier + // CUPR is a LOCAL service specific parameter + computeUnitsPerRelay := service.ComputeUnitsPerRelay + upoktAmount := math.NewInt(int64(numRelays * computeUnitsPerRelay * computeUnitsToTokensMultiplier)) + if upoktAmount.IsNegative() { + return cosmostypes.Coin{}, tokenomicstypes.ErrTokenomicsRootHashInvalid.Wrap("sum * compute_units_to_tokens_multiplier is negative") + } + + return cosmostypes.NewCoin(volatile.DenomuPOKT, upoktAmount), nil +} diff --git a/x/tokenomics/keeper/settle_session_accounting_test.go b/x/tokenomics/keeper/token_logic_modules_test.go similarity index 79% rename from x/tokenomics/keeper/settle_session_accounting_test.go rename to x/tokenomics/keeper/token_logic_modules_test.go index 82837d859..291a4888f 
100644 --- a/x/tokenomics/keeper/settle_session_accounting_test.go +++ b/x/tokenomics/keeper/token_logic_modules_test.go @@ -14,6 +14,7 @@ import ( "github.com/pokt-network/smt" "github.com/stretchr/testify/require" + "github.com/pokt-network/poktroll/cmd/poktrolld/cmd" "github.com/pokt-network/poktroll/pkg/crypto/protocol" testkeeper "github.com/pokt-network/poktroll/testutil/keeper" testproof "github.com/pokt-network/poktroll/testutil/proof" @@ -27,9 +28,11 @@ import ( tokenomicstypes "github.com/pokt-network/poktroll/x/tokenomics/types" ) -func TestSettleSessionAccounting_HandleAppGoingIntoDebt(t *testing.T) { - t.Skip("TODO_MAINNET: Add coverage of the design choice made for how to handle this scenario.") +func init() { + cmd.InitSDKConfig() +} +func TestProcessTokenLogicModules_HandleAppGoingIntoDebt(t *testing.T) { keepers, ctx := testkeeper.NewTokenomicsModuleKeepers(t, nil) // Create a service that can be registered in the application and used in the claims @@ -39,17 +42,14 @@ func TestSettleSessionAccounting_HandleAppGoingIntoDebt(t *testing.T) { ComputeUnitsPerRelay: 1, OwnerAddress: sample.AccAddress(), } + keepers.SetService(ctx, *service) // Add a new application appStake := cosmostypes.NewCoin("upokt", math.NewInt(1000000)) app := apptypes.Application{ - Address: sample.AccAddress(), - Stake: &appStake, - ServiceConfigs: []*sharedtypes.ApplicationServiceConfig{ - { - Service: service, - }, - }, + Address: sample.AccAddress(), + Stake: &appStake, + ServiceConfigs: []*sharedtypes.ApplicationServiceConfig{{Service: service}}, } keepers.SetApplication(ctx, app) @@ -62,34 +62,36 @@ func TestSettleSessionAccounting_HandleAppGoingIntoDebt(t *testing.T) { keepers.SetSupplier(ctx, supplier) // The base claim whose root will be customized for testing purposes + numRelays := appStake.Amount.Uint64() + 1 // More than the app stake + numComputeUnits := numRelays * service.ComputeUnitsPerRelay claim := prooftypes.Claim{ SupplierAddress: supplier.Address, 
SessionHeader: &sessiontypes.SessionHeader{ - ApplicationAddress: app.Address, - Service: &sharedtypes.Service{ - Id: service.Id, - }, + ApplicationAddress: app.Address, + Service: service, SessionId: "session_id", SessionStartBlockHeight: 1, SessionEndBlockHeight: testsession.GetSessionEndHeightWithDefaultParams(1), }, - RootHash: testproof.SmstRootWithSum(appStake.Amount.Uint64() + 1), // More than the app stake + RootHash: testproof.SmstRootWithSumAndCount(numComputeUnits, numRelays), } - err := keepers.SettleSessionAccounting(ctx, &claim) + err := keepers.ProcessTokenLogicModules(ctx, &claim) require.NoError(t, err) } func TestSettleSessionAccounting_ValidAccounting(t *testing.T) { // Create a service that can be registered in the application and used in the claims - service := sharedtypes.Service{ + service := &sharedtypes.Service{ Id: "svc1", Name: "svcName1", ComputeUnitsPerRelay: 1, OwnerAddress: sample.AccAddress(), } - keepers, ctx := testkeeper.NewTokenomicsModuleKeepers(t, nil, testkeeper.WithService(service)) + keepers, ctx := testkeeper.NewTokenomicsModuleKeepers(t, nil, testkeeper.WithService(*service)) + keepers.SetService(ctx, *service) + appModuleAddress := authtypes.NewModuleAddress(apptypes.ModuleName).String() supplierModuleAddress := authtypes.NewModuleAddress(suppliertypes.ModuleName).String() @@ -99,23 +101,17 @@ func TestSettleSessionAccounting_ValidAccounting(t *testing.T) { }) require.NoError(t, err) - // Add a new application + // Add a new application with non-zero app stake end balance to assert against. appStake := cosmostypes.NewCoin("upokt", math.NewInt(1000000)) - // NB: Ensure a non-zero app stake end balance to assert against. 
expectedAppEndStakeAmount := cosmostypes.NewCoin("upokt", math.NewInt(420)) expectedAppBurn := appStake.Sub(expectedAppEndStakeAmount) app := apptypes.Application{ - Address: sample.AccAddress(), - Stake: &appStake, + Address: sample.AccAddress(), + Stake: &appStake, + ServiceConfigs: []*sharedtypes.ApplicationServiceConfig{{Service: service}}, } keepers.SetApplication(ctx, app) - // Query application balance prior to the accounting. - appStartBalance := getBalance(t, ctx, keepers, app.GetAddress()) - - // Query application module balance prior to the accounting. - appModuleStartBalance := getBalance(t, ctx, keepers, appModuleAddress) - // Add a new supplier. supplierStake := cosmostypes.NewCoin("upokt", math.NewInt(1000000)) supplier := sharedtypes.Supplier{ @@ -124,28 +120,34 @@ func TestSettleSessionAccounting_ValidAccounting(t *testing.T) { } keepers.SetSupplier(ctx, supplier) + // Query application balance prior to the accounting. + appStartBalance := getBalance(t, ctx, keepers, app.GetAddress()) + // Query application module balance prior to the accounting. + appModuleStartBalance := getBalance(t, ctx, keepers, appModuleAddress) + // Query supplier balance prior to the accounting. supplierStartBalance := getBalance(t, ctx, keepers, supplier.GetAddress()) - // Query supplier module balance prior to the accounting. 
supplierModuleStartBalance := getBalance(t, ctx, keepers, supplierModuleAddress) + // Assumes ComputeUnitToTokenMultiplier is 1 + numComputeUnits := expectedAppBurn.Amount.Uint64() + numRelays := numComputeUnits / service.ComputeUnitsPerRelay // The base claim whose root will be customized for testing purposes claim := prooftypes.Claim{ SupplierAddress: supplier.Address, SessionHeader: &sessiontypes.SessionHeader{ - ApplicationAddress: app.Address, - Service: &sharedtypes.Service{ - Id: service.Id, - }, + ApplicationAddress: app.Address, + Service: service, SessionId: "session_id", SessionStartBlockHeight: 1, SessionEndBlockHeight: testsession.GetSessionEndHeightWithDefaultParams(1), }, - RootHash: testproof.SmstRootWithSum(expectedAppBurn.Amount.Uint64()), + RootHash: testproof.SmstRootWithSumAndCount(numComputeUnits, numRelays), } - err = keepers.SettleSessionAccounting(ctx, &claim) + // Process the token logic modules + err = keepers.ProcessTokenLogicModules(ctx, &claim) require.NoError(t, err) // Assert that `applicationAddress` account balance is *unchanged* @@ -184,14 +186,16 @@ func TestSettleSessionAccounting_ValidAccounting(t *testing.T) { func TestSettleSessionAccounting_AppStakeTooLow(t *testing.T) { // Create a service that can be registered in the application and used in the claims - service := sharedtypes.Service{ + service := &sharedtypes.Service{ Id: "svc1", Name: "svcName1", ComputeUnitsPerRelay: 1, OwnerAddress: sample.AccAddress(), } - keepers, ctx := testkeeper.NewTokenomicsModuleKeepers(t, nil, testkeeper.WithService(service)) + keepers, ctx := testkeeper.NewTokenomicsModuleKeepers(t, nil, testkeeper.WithService(*service)) + keepers.SetService(ctx, *service) + appModuleAddress := authtypes.NewModuleAddress(apptypes.ModuleName).String() supplierModuleAddress := authtypes.NewModuleAddress(suppliertypes.ModuleName).String() @@ -206,14 +210,14 @@ func TestSettleSessionAccounting_AppStakeTooLow(t *testing.T) { expectedAppEndStakeZeroAmount := 
cosmostypes.NewCoin("upokt", math.NewInt(0)) expectedAppBurn := appStake.AddAmount(math.NewInt(2000)) app := apptypes.Application{ - Address: sample.AccAddress(), - Stake: &appStake, + Address: sample.AccAddress(), + Stake: &appStake, + ServiceConfigs: []*sharedtypes.ApplicationServiceConfig{{Service: service}}, } keepers.SetApplication(ctx, app) // Query application balance prior to the accounting. appStartBalance := getBalance(t, ctx, keepers, app.GetAddress()) - // Query application module balance prior to the accounting. appModuleStartBalance := getBalance(t, ctx, keepers, appModuleAddress) @@ -227,26 +231,29 @@ func TestSettleSessionAccounting_AppStakeTooLow(t *testing.T) { // Query supplier balance prior to the accounting. supplierStartBalance := getBalance(t, ctx, keepers, supplier.GetAddress()) - // Query supplier module balance prior to the accounting. supplierModuleStartBalance := getBalance(t, ctx, keepers, supplierModuleAddress) + // Determine the number of relays to use up the application's entire stake + sharedParams := keepers.Keeper.GetParams(ctx) + numComputeUnits := expectedAppBurn.Amount.Uint64() / sharedParams.ComputeUnitsToTokensMultiplier + numRelays := numComputeUnits / service.ComputeUnitsPerRelay + // The base claim whose root will be customized for testing purposes claim := prooftypes.Claim{ SupplierAddress: supplier.Address, SessionHeader: &sessiontypes.SessionHeader{ - ApplicationAddress: app.Address, - Service: &sharedtypes.Service{ - Id: service.Id, - }, + ApplicationAddress: app.Address, + Service: service, SessionId: "session_id", SessionStartBlockHeight: 1, SessionEndBlockHeight: testsession.GetSessionEndHeightWithDefaultParams(1), }, - RootHash: testproof.SmstRootWithSum(expectedAppBurn.Amount.Uint64()), + RootHash: testproof.SmstRootWithSumAndCount(numComputeUnits, numRelays), } - err = keepers.SettleSessionAccounting(ctx, &claim) + // Process the token logic modules + err = keepers.ProcessTokenLogicModules(ctx, &claim) 
require.NoError(t, err) // Assert that `applicationAddress` account balance is *unchanged* @@ -280,9 +287,8 @@ func TestSettleSessionAccounting_AppStakeTooLow(t *testing.T) { supplierModuleEndBalance := getBalance(t, ctx, keepers, supplierModuleAddress) require.EqualValues(t, supplierModuleStartBalance, supplierModuleEndBalance) - sdkCtx := cosmostypes.UnwrapSDKContext(ctx) - events := sdkCtx.EventManager().Events() - + // Check that the expected burn >> effective burn because application is overserviced + events := cosmostypes.UnwrapSDKContext(ctx).EventManager().Events() appAddrAttribute, _ := events.GetAttributes("application_addr") expectedBurnAttribute, _ := events.GetAttributes("expected_burn") effectiveBurnAttribute, _ := events.GetAttributes("effective_burn") @@ -300,41 +306,35 @@ func TestSettleSessionAccounting_AppStakeTooLow(t *testing.T) { require.Greater(t, expectedBurnEventCoin.Amount.Uint64(), effectiveBurnEventCoin.Amount.Uint64()) } -func TestSettleSessionAccounting_AppNotFound(t *testing.T) { - - service := &sharedtypes.Service{ - Id: "svc1", - Name: "svcName1", - ComputeUnitsPerRelay: 1, - OwnerAddress: sample.AccAddress(), - } - - keeper, ctx, _, supplierAddr := testkeeper.TokenomicsKeeperWithActorAddrs(t, service) +func TestProcessTokenLogicModules_AppNotFound(t *testing.T) { + keeper, ctx, _, supplierAddr, service := testkeeper.TokenomicsKeeperWithActorAddrs(t) // The base claim whose root will be customized for testing purposes + numRelays := uint64(42) + numComputeUnits := numRelays * service.ComputeUnitsPerRelay claim := prooftypes.Claim{ SupplierAddress: supplierAddr, SessionHeader: &sessiontypes.SessionHeader{ - ApplicationAddress: sample.AccAddress(), // Random address - Service: &sharedtypes.Service{ - Id: service.Id, - }, + ApplicationAddress: sample.AccAddress(), // Random address + Service: service, SessionId: "session_id", SessionStartBlockHeight: 1, SessionEndBlockHeight: testsession.GetSessionEndHeightWithDefaultParams(1), }, - 
RootHash: testproof.SmstRootWithSum(42), + RootHash: testproof.SmstRootWithSumAndCount(numComputeUnits, numRelays), } - err := keeper.SettleSessionAccounting(ctx, &claim) + // Process the token logic modules + err := keeper.ProcessTokenLogicModules(ctx, &claim) require.Error(t, err) require.ErrorIs(t, err, tokenomicstypes.ErrTokenomicsApplicationNotFound) } func TestSettleSessionAccounting_ServiceNotFound(t *testing.T) { + keeper, ctx, appAddr, supplierAddr, service := testkeeper.TokenomicsKeeperWithActorAddrs(t) - keeper, ctx, appAddr, supplierAddr := testkeeper.TokenomicsKeeperWithActorAddrs(t, nil) - + numRelays := uint64(42) + numComputeUnits := numRelays * service.ComputeUnitsPerRelay claim := prooftypes.Claim{ SupplierAddress: supplierAddr, SessionHeader: &sessiontypes.SessionHeader{ @@ -346,7 +346,7 @@ func TestSettleSessionAccounting_ServiceNotFound(t *testing.T) { SessionStartBlockHeight: 1, SessionEndBlockHeight: testsession.GetSessionEndHeightWithDefaultParams(1), }, - RootHash: testproof.SmstRootWithSum(42), + RootHash: testproof.SmstRootWithSumAndCount(numComputeUnits, numRelays), } // Execute test function @@ -356,17 +356,9 @@ func TestSettleSessionAccounting_ServiceNotFound(t *testing.T) { require.ErrorIs(t, err, tokenomicstypes.ErrTokenomicsServiceNotFound) } -func TestSettleSessionAccounting_InvalidRoot(t *testing.T) { - - // Create a service that can be registered in the application and used in the claims - service := &sharedtypes.Service{ - Id: "svc1", - Name: "svcName1", - ComputeUnitsPerRelay: 1, - OwnerAddress: sample.AccAddress(), - } - - keeper, ctx, appAddr, supplierAddr := testkeeper.TokenomicsKeeperWithActorAddrs(t, service) +func TestProcessTokenLogicModules_InvalidRoot(t *testing.T) { + keeper, ctx, appAddr, supplierAddr, service := testkeeper.TokenomicsKeeperWithActorAddrs(t) + numRelays := uint64(42) // Define test cases tests := []struct { @@ -407,7 +399,7 @@ func TestSettleSessionAccounting_InvalidRoot(t *testing.T) { { desc: 
"correct size and a valid value", root: func() []byte { - root := testproof.SmstRootWithSum(42) + root := testproof.SmstRootWithSumAndCount(numRelays, numRelays) return root[:] }(), errExpected: false, @@ -418,11 +410,11 @@ func TestSettleSessionAccounting_InvalidRoot(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { // Setup claim by copying the testproof.BaseClaim and updating the root - claim := testproof.BaseClaim(appAddr, supplierAddr, 0, service.Id) + claim := testproof.BaseClaim(service.Id, appAddr, supplierAddr, 0) claim.RootHash = smt.MerkleRoot(test.root[:]) // Execute test function - err := keeper.SettleSessionAccounting(ctx, &claim) + err := keeper.ProcessTokenLogicModules(ctx, &claim) // Assert the error if test.errExpected { @@ -434,16 +426,9 @@ func TestSettleSessionAccounting_InvalidRoot(t *testing.T) { } } -func TestSettleSessionAccounting_InvalidClaim(t *testing.T) { - // Create a service that can be registered in the application and used in the claims - service := &sharedtypes.Service{ - Id: "svc1", - Name: "svcName1", - ComputeUnitsPerRelay: 1, - OwnerAddress: sample.AccAddress(), - } - - keeper, ctx, appAddr, supplierAddr := testkeeper.TokenomicsKeeperWithActorAddrs(t, service) +func TestProcessTokenLogicModules_InvalidClaim(t *testing.T) { + keeper, ctx, appAddr, supplierAddr, service := testkeeper.TokenomicsKeeperWithActorAddrs(t) + numRelays := uint64(42) // Define test cases tests := []struct { @@ -456,7 +441,7 @@ func TestSettleSessionAccounting_InvalidClaim(t *testing.T) { { desc: "Valid Claim", claim: func() *prooftypes.Claim { - claim := testproof.BaseClaim(appAddr, supplierAddr, 42, service.Id) + claim := testproof.BaseClaim(service.Id, appAddr, supplierAddr, numRelays) return &claim }(), errExpected: false, @@ -470,7 +455,7 @@ func TestSettleSessionAccounting_InvalidClaim(t *testing.T) { { desc: "Claim with nil session header", claim: func() *prooftypes.Claim { - claim := testproof.BaseClaim(appAddr, 
supplierAddr, 42, service.Id) + claim := testproof.BaseClaim(service.Id, appAddr, supplierAddr, numRelays) claim.SessionHeader = nil return &claim }(), @@ -480,7 +465,7 @@ func TestSettleSessionAccounting_InvalidClaim(t *testing.T) { { desc: "Claim with invalid session id", claim: func() *prooftypes.Claim { - claim := testproof.BaseClaim(appAddr, supplierAddr, 42, service.Id) + claim := testproof.BaseClaim(service.Id, appAddr, supplierAddr, numRelays) claim.SessionHeader.SessionId = "" return &claim }(), @@ -490,7 +475,7 @@ func TestSettleSessionAccounting_InvalidClaim(t *testing.T) { { desc: "Claim with invalid application address", claim: func() *prooftypes.Claim { - claim := testproof.BaseClaim(appAddr, supplierAddr, 42, service.Id) + claim := testproof.BaseClaim(service.Id, appAddr, supplierAddr, numRelays) claim.SessionHeader.ApplicationAddress = "invalid address" return &claim }(), @@ -500,7 +485,7 @@ func TestSettleSessionAccounting_InvalidClaim(t *testing.T) { { desc: "Claim with invalid supplier address", claim: func() *prooftypes.Claim { - claim := testproof.BaseClaim(appAddr, supplierAddr, 42, service.Id) + claim := testproof.BaseClaim(service.Id, appAddr, supplierAddr, numRelays) claim.SupplierAddress = "invalid address" return &claim }(), @@ -519,7 +504,7 @@ func TestSettleSessionAccounting_InvalidClaim(t *testing.T) { err = fmt.Errorf("panic occurred: %v", r) } }() - return keeper.SettleSessionAccounting(ctx, test.claim) + return keeper.ProcessTokenLogicModules(ctx, test.claim) }() // Assert the error @@ -539,7 +524,6 @@ func getBalance( bankKeeper tokenomicstypes.BankKeeper, accountAddr string, ) *cosmostypes.Coin { - appBalanceRes, err := bankKeeper.Balance(ctx, &banktypes.QueryBalanceRequest{ Address: accountAddr, Denom: "upokt", diff --git a/x/tokenomics/keeper/update_relay_mining_difficulty.go b/x/tokenomics/keeper/update_relay_mining_difficulty.go index b222b4f37..71f2b50fc 100644 --- a/x/tokenomics/keeper/update_relay_mining_difficulty.go +++ 
b/x/tokenomics/keeper/update_relay_mining_difficulty.go @@ -31,6 +31,9 @@ const TargetNumRelays = uint64(10e4) // TODO_MAINNET: Use a language agnostic float implementation or arithmetic library // to ensure deterministic results across different language implementations of the // protocol. +// +// TODO_MAINNET(@olshansk, @ramiro): Play around with the value N for EMA to +// capture what the memory should be. var emaSmoothingFactor = new(big.Float).SetFloat64(0.1) // UpdateRelayMiningDifficulty updates the on-chain relay mining difficulty @@ -50,9 +53,7 @@ func (k Keeper) UpdateRelayMiningDifficulty( "No previous relay mining difficulty found for service %s. Initializing with default difficulty %v", serviceId, prevDifficulty.TargetHash, ).Error()) - - // If a previous difficulty for the service is not found, we initialize - // it with a default. + // If a previous difficulty for the service is not found, we initialize a default. prevDifficulty = types.RelayMiningDifficulty{ ServiceId: serviceId, BlockHeight: sdkCtx.BlockHeight(), diff --git a/x/tokenomics/module/genesis_test.go b/x/tokenomics/module/genesis_test.go index 9b156df28..d9d206ef5 100644 --- a/x/tokenomics/module/genesis_test.go +++ b/x/tokenomics/module/genesis_test.go @@ -26,7 +26,7 @@ func TestGenesis(t *testing.T) { // this line is used by starport scaffolding # genesis/test/state } - k, ctx, _, _ := keepertest.TokenomicsKeeperWithActorAddrs(t, nil) + k, ctx, _, _, _ := keepertest.TokenomicsKeeperWithActorAddrs(t) tokenomics.InitGenesis(ctx, k, genesisState) got := tokenomics.ExportGenesis(ctx, k) require.NotNil(t, got) diff --git a/x/tokenomics/module/module.go b/x/tokenomics/module/module.go index efe1b35f4..e6ef6c7b5 100644 --- a/x/tokenomics/module/module.go +++ b/x/tokenomics/module/module.go @@ -180,6 +180,7 @@ type ModuleInputs struct { AccountKeeper types.AccountKeeper BankKeeper types.BankKeeper ApplicationKeeper types.ApplicationKeeper + SupplierKeeper types.SupplierKeeper ProofKeeper 
types.ProofKeeper SharedKeeper types.SharedKeeper SessionKeeper types.SessionKeeper @@ -204,9 +205,11 @@ func ProvideModule(in ModuleInputs) ModuleOutputs { in.StoreService, in.Logger, authority.String(), + in.BankKeeper, in.AccountKeeper, in.ApplicationKeeper, + in.SupplierKeeper, in.ProofKeeper, in.SharedKeeper, in.SessionKeeper, diff --git a/x/tokenomics/types/errors.go b/x/tokenomics/types/errors.go index d4fa8568f..4f02c5d44 100644 --- a/x/tokenomics/types/errors.go +++ b/x/tokenomics/types/errors.go @@ -11,20 +11,23 @@ var ( ErrTokenomicsClaimNil = sdkerrors.Register(ModuleName, 1102, "provided claim is nil") ErrTokenomicsSessionHeaderNil = sdkerrors.Register(ModuleName, 1103, "provided claim's session header is nil") ErrTokenomicsSessionHeaderInvalid = sdkerrors.Register(ModuleName, 1104, "provided claim's session header is invalid") - ErrTokenomicsSupplierModuleMintFailed = sdkerrors.Register(ModuleName, 1105, "failed to mint uPOKT to supplier module account") - ErrTokenomicsSupplierRewardFailed = sdkerrors.Register(ModuleName, 1106, "failed to send uPOKT from supplier module account to supplier") - ErrTokenomicsSupplierAddressInvalid = sdkerrors.Register(ModuleName, 1107, "the supplier address in the claim is not a valid bech32 address") + ErrTokenomicsSupplierModuleSendFailed = sdkerrors.Register(ModuleName, 1105, "failed to send uPOKT to supplier module account") + ErrTokenomicsSupplierAddressInvalid = sdkerrors.Register(ModuleName, 1106, "the supplier address in the claim is not a valid bech32 address") + ErrTokenomicsSupplierNotFound = sdkerrors.Register(ModuleName, 1107, "supplier not found") ErrTokenomicsApplicationNotFound = sdkerrors.Register(ModuleName, 1108, "application not found") ErrTokenomicsApplicationModuleBurn = sdkerrors.Register(ModuleName, 1109, "failed to burn uPOKT from application module account") - ErrTokenomicsApplicationAddressInvalid = sdkerrors.Register(ModuleName, 1112, "the application address in the claim is not a valid 
bech32 address") - ErrTokenomicsParamsInvalid = sdkerrors.Register(ModuleName, 1113, "provided params are invalid") - ErrTokenomicsRootHashInvalid = sdkerrors.Register(ModuleName, 1114, "the root hash in the claim is invalid") - ErrTokenomicsApplicationNewStakeInvalid = sdkerrors.Register(ModuleName, 1115, "application stake cannot be reduced to a -ve amount") - ErrTokenomicsParamNameInvalid = sdkerrors.Register(ModuleName, 1116, "the provided param name is invalid") - ErrTokenomicsParamInvalid = sdkerrors.Register(ModuleName, 1117, "the provided param is invalid") - ErrTokenomicsUnmarshalInvalid = sdkerrors.Register(ModuleName, 1118, "failed to unmarshal the provided bytes") - ErrTokenomicsDuplicateIndex = sdkerrors.Register(ModuleName, 1119, "cannot have a duplicate index") - ErrTokenomicsMissingRelayMiningDifficulty = sdkerrors.Register(ModuleName, 1120, "missing relay mining difficulty") - ErrTokenomicsApplicationOverserviced = sdkerrors.Register(ModuleName, 1121, "application was overserviced") - ErrTokenomicsServiceNotFound = sdkerrors.Register(ModuleName, 1122, "service not found") + ErrTokenomicsApplicationAddressInvalid = sdkerrors.Register(ModuleName, 1110, "the application address in the claim is not a valid bech32 address") + ErrTokenomicsParamsInvalid = sdkerrors.Register(ModuleName, 1111, "provided params are invalid") + ErrTokenomicsRootHashInvalid = sdkerrors.Register(ModuleName, 1112, "the root hash in the claim is invalid") + ErrTokenomicsApplicationNewStakeInvalid = sdkerrors.Register(ModuleName, 1113, "application stake cannot be reduced to a -ve amount") + ErrTokenomicsParamNameInvalid = sdkerrors.Register(ModuleName, 1114, "the provided param name is invalid") + ErrTokenomicsParamInvalid = sdkerrors.Register(ModuleName, 1115, "the provided param is invalid") + ErrTokenomicsUnmarshalInvalid = sdkerrors.Register(ModuleName, 1116, "failed to unmarshal the provided bytes") + ErrTokenomicsDuplicateIndex = sdkerrors.Register(ModuleName, 1117, 
"cannot have a duplicate index") + ErrTokenomicsMissingRelayMiningDifficulty = sdkerrors.Register(ModuleName, 1118, "missing relay mining difficulty") + ErrTokenomicsApplicationOverserviced = sdkerrors.Register(ModuleName, 1119, "application was overserviced") + ErrTokenomicsServiceNotFound = sdkerrors.Register(ModuleName, 1120, "service not found") + ErrTokenomicsModuleMintFailed = sdkerrors.Register(ModuleName, 1121, "failed to mint uPOKT to tokenomics module account") + ErrTokenomicsSendingMindRewards = sdkerrors.Register(ModuleName, 1122, "failed to send minted rewards") + ErrTokenomicsSupplierModuleMintFailed = sdkerrors.Register(ModuleName, 1123, "failed to mint uPOKT to supplier module account") ) diff --git a/x/tokenomics/types/expected_keepers.go b/x/tokenomics/types/expected_keepers.go index b1520d1eb..724b91cdd 100644 --- a/x/tokenomics/types/expected_keepers.go +++ b/x/tokenomics/types/expected_keepers.go @@ -1,4 +1,4 @@ -//go:generate mockgen -destination ../../../testutil/tokenomics/mocks/expected_keepers_mock.go -package mocks . AccountKeeper,BankKeeper,ApplicationKeeper,ProofKeeper,SharedKeeper,SessionKeeper,ServiceKeeper +//go:generate mockgen -destination ../../../testutil/tokenomics/mocks/expected_keepers_mock.go -package mocks . AccountKeeper,BankKeeper,ApplicationKeeper,SupplierKeeper,ProofKeeper,SharedKeeper,SessionKeeper,ServiceKeeper package types @@ -76,12 +76,12 @@ type SessionKeeper interface { type SupplierKeeper interface { GetSupplier(ctx context.Context, supplierAddr string) (supplier sharedtypes.Supplier, found bool) + GetAllSuppliers(ctx context.Context) (suppliers []sharedtypes.Supplier) SetSupplier(ctx context.Context, supplier sharedtypes.Supplier) } type ServiceKeeper interface { GetService(ctx context.Context, serviceID string) (sharedtypes.Service, bool) - // NOTE: SetService is not used by the tokenomics keeper. - // It is only defined here to make it easier to add services to the service module in the tests. 
+ // Only used for testing & simulation SetService(ctx context.Context, service sharedtypes.Service) }