diff --git a/api/poktroll/application/tx.pulsar.go b/api/poktroll/application/tx.pulsar.go
index 4c1f5fe84..866f5acf4 100644
--- a/api/poktroll/application/tx.pulsar.go
+++ b/api/poktroll/application/tx.pulsar.go
@@ -5,11 +5,11 @@ import (
     _ "cosmossdk.io/api/amino"
     v1beta1 "cosmossdk.io/api/cosmos/base/v1beta1"
     _ "cosmossdk.io/api/cosmos/msg/v1"
+    shared "github.com/pokt-network/poktroll/api/poktroll/shared"
     fmt "fmt"
     _ "github.com/cosmos/cosmos-proto"
     runtime "github.com/cosmos/cosmos-proto/runtime"
     _ "github.com/cosmos/gogoproto/gogoproto"
-    shared "github.com/pokt-network/poktroll/api/poktroll/shared"
     protoreflect "google.golang.org/protobuf/reflect/protoreflect"
     protoiface "google.golang.org/protobuf/runtime/protoiface"
     protoimpl "google.golang.org/protobuf/runtime/protoimpl"
diff --git a/pkg/crypto/protocol/relay_difficulty.go b/pkg/crypto/protocol/relay_difficulty.go
index e1cd64b5e..ff15bc6f2 100644
--- a/pkg/crypto/protocol/relay_difficulty.go
+++ b/pkg/crypto/protocol/relay_difficulty.go
@@ -33,64 +33,51 @@ func IsRelayVolumeApplicable(relayHash, targetHash []byte) bool {
 // on the target number of relays we want the network to mine and the new EMA of
 // the number of relays.
 func ComputeNewDifficultyTargetHash(prevTargetHash []byte, targetNumRelays, newRelaysEma uint64) []byte {
-    // TODO_MAINNET(@red-0ne): Use a language agnostic float implementation to ensure
-    // deterministic results and avoid loss of precision. Specifically, we need to
-    // use big.Rat, delay any computation.
+    // If targetNumRelays == newRelaysEma -> do not scale -> keep the same difficulty to mine relays
+    // TODO_IMPROVE: Figure out if there's a range (e.g. 5%) within which it is reasonable
+    // to keep the same difficulty.
+    if targetNumRelays == newRelaysEma {
+        return prevTargetHash
+    }
 
     // Calculate the proportion of target relays relative to the EMA of actual volume applicable relays
-    difficultyScalingRatio := new(big.Float).Quo(
-        new(big.Float).SetUint64(targetNumRelays),
-        new(big.Float).SetUint64(newRelaysEma),
-    )
     // If difficultyScalingRatio < 1 -> scale down -> increase difficulty to mine relays
     // If difficultyScalingRatio > 1 -> scale up -> decrease difficulty to mine relays
-    if difficultyScalingRatio.Cmp(big.NewFloat(1)) == 0 {
-        return prevTargetHash
-    }
-
-    // You can't scale the base relay difficulty hash below BaseRelayDifficultyHashBz
-    isDecreasingDifficulty := difficultyScalingRatio.Cmp(big.NewFloat(1)) == 1
-    if isDecreasingDifficulty && bytes.Equal(prevTargetHash, BaseRelayDifficultyHashBz) {
+    difficultyScalingRatio := big.NewRat(int64(targetNumRelays), int64(newRelaysEma))
+    scaledDifficultyHashBz := ScaleRelayDifficultyHash(prevTargetHash, difficultyScalingRatio)
+    // Trim all the leftmost zeros from the big endian representation of the scaled
+    // hash to ensure that it only contains the meaningful bytes.
+    scaledDifficultyHashBz = bytes.TrimLeft(scaledDifficultyHashBz, "\x00")
+    // TODO_IMPROVE: Make it so scaling up -> increase difficulty while scaling down -> decrease difficulty.
+    // If scaledDifficultyHash is longer than BaseRelayDifficultyHashBz, then use
+    // BaseRelayDifficultyHashBz as we should not have a bigger hash than the base.
+    if len(scaledDifficultyHashBz) > len(BaseRelayDifficultyHashBz) {
         return BaseRelayDifficultyHashBz
     }
 
-    scaledDifficultyHash := ScaleRelayDifficultyHash(prevTargetHash, difficultyScalingRatio)
-    if len(scaledDifficultyHash) > len(BaseRelayDifficultyHashBz) {
-        return BaseRelayDifficultyHashBz
+    // Ensure the scaled hash is padded to (at least) the same length as the provided hash.
+    if len(scaledDifficultyHashBz) < len(prevTargetHash) {
+        return padBytesToLength(scaledDifficultyHashBz, len(prevTargetHash))
     }
 
-    // Compute the new target hash by scaling the previous target hash based on the ratio
-    return ScaleRelayDifficultyHash(prevTargetHash, difficultyScalingRatio)
+    return scaledDifficultyHashBz
 }
 
 // ScaleRelayDifficultyHash scales the provided hash based on the given ratio.
 // If the ratio is less than 1, the hash will be scaled down.
 // DEV_NOTE: Only exposed publicly for testing purposes.
-func ScaleRelayDifficultyHash(difficultyHashBz []byte, ratio *big.Float) []byte {
-    // Convert difficultyHashBz to a big.Float to minimize precision loss.
-    // TODO_MAINNET(@red-one): Use a language agnostic float implementation or arithmetic library
-    // to ensure deterministic results across different language implementations of the
-    // protocol.
-    prevHashInt := bytesToBigInt(difficultyHashBz)
-    prevHashFloat := new(big.Float).SetInt(prevHashInt)
+func ScaleRelayDifficultyHash(
+    initialDifficultyHashBz []byte,
+    difficultyScalingRatio *big.Rat,
+) []byte {
+    difficultyHashInt := bytesToBigInt(initialDifficultyHashBz)
+    difficultyHashRat := new(big.Rat).SetInt(difficultyHashInt)
 
     // Scale the current by multiplying it by the ratio.
-    // TODO(@red-0ne): Ensure that the precision lost here doesn't cause any
-    // major issues by using big.Rat.
-    scaledHashFloat := new(big.Float).Mul(prevHashFloat, ratio)
-    scaledHashInt, _ := scaledHashFloat.Int(nil)
-    // scaledHashBz := make([]byte, len(BaseRelayDifficultyHashBz))
-    // scaledHashInt.FillBytes(scaledHashBz)
-    scaledHashBz := scaledHashInt.Bytes()
-
-    // Ensure the scaled hash is padded to (at least) the same length as the provided hash.
-    if len(scaledHashBz) < len(difficultyHashBz) {
-        paddedHash := make([]byte, len(difficultyHashBz))
-        copy(paddedHash[len(paddedHash)-len(scaledHashBz):], scaledHashBz)
-        return paddedHash
-    }
-
-    return scaledHashBz
+    scaledHashRat := new(big.Rat).Mul(difficultyHashRat, difficultyScalingRatio)
+    scaledHashInt := new(big.Int).Div(scaledHashRat.Num(), scaledHashRat.Denom())
+    // Convert the scaled hash to a byte slice.
+    return scaledHashInt.Bytes()
 }
 
 // GetRelayDifficultyProbability returns a fraction that determines the probability that a
@@ -127,3 +114,16 @@ func GetRelayDifficultyMultiplierToFloat32(relayDifficultyHash []byte) float32 {
 func bytesToBigInt(b []byte) *big.Int {
     return new(big.Int).SetBytes(b)
 }
+
+// padBytesToLength returns a zero padded representation of the given value.
+// If the value is longer than the desired length, it is returned as is.
+func padBytesToLength(valueToPad []byte, length int) []byte {
+    paddingOffset := length - len(valueToPad)
+    if paddingOffset <= 0 {
+        return valueToPad
+    }
+
+    paddedScaledDifficultyHash := make([]byte, length)
+    copy(paddedScaledDifficultyHash[paddingOffset:], valueToPad)
+    return paddedScaledDifficultyHash
+}
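For context on the change above, here is a minimal standalone sketch (not part of this PR) of the big.Rat flow that ComputeNewDifficultyTargetHash now follows: treat the hash as a big-endian integer, multiply it by the target/EMA ratio with integer (floored) division, trim the non-meaningful leading zeros, then pad back to the previous hash length. The names scaleHash and padToLength are illustrative stand-ins, not the repository's functions.

```go
package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"math/big"
)

// scaleHash mirrors ScaleRelayDifficultyHash: interpret the hash as a big-endian
// integer, multiply by targetNumRelays/newRelaysEma, and floor the result.
func scaleHash(hashBz []byte, targetNumRelays, newRelaysEma int64) []byte {
	ratio := big.NewRat(targetNumRelays, newRelaysEma)
	hashRat := new(big.Rat).SetInt(new(big.Int).SetBytes(hashBz))
	scaled := new(big.Rat).Mul(hashRat, ratio)
	scaledInt := new(big.Int).Div(scaled.Num(), scaled.Denom())
	return scaledInt.Bytes()
}

// padToLength left-pads with zeros, analogous to padBytesToLength above.
func padToLength(value []byte, length int) []byte {
	if len(value) >= length {
		return value
	}
	padded := make([]byte, length)
	copy(padded[length-len(value):], value)
	return padded
}

func main() {
	prev, _ := hex.DecodeString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")

	// A 1/2 target-to-EMA ratio halves the target hash (i.e. increases difficulty).
	scaled := scaleHash(prev, 1, 2)
	scaled = bytes.TrimLeft(scaled, "\x00") // drop non-meaningful leading zeros
	scaled = padToLength(scaled, len(prev)) // pad back to the previous hash length
	fmt.Println(hex.EncodeToString(scaled)) // 7fff...ff, matching the "Scale by 0.5" test case
}
```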
diff --git a/pkg/crypto/protocol/relay_difficulty_test.go b/pkg/crypto/protocol/relay_difficulty_test.go
index aeddd0951..de7c4ebb0 100644
--- a/pkg/crypto/protocol/relay_difficulty_test.go
+++ b/pkg/crypto/protocol/relay_difficulty_test.go
@@ -197,7 +197,7 @@ func TestRelayDifficulty_ScaleDifficultyTargetHash(t *testing.T) {
     tests := []struct {
         desc                  string
         prevDifficultyHashHex string
-        scalingRatio          float64
+        scalingRatio          big.Rat
 
         expectedScaledDifficultyHashHex string // scaled but unbounded
         expectedNewDifficultyHashHex    string // uses the scaled result but bounded
     }{
@@ -205,7 +205,7 @@ func TestRelayDifficulty_ScaleDifficultyTargetHash(t *testing.T) {
         {
             desc:                  "Scale by 1 (same number of relays)",
             prevDifficultyHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
-            scalingRatio:          1,
+            scalingRatio:          *big.NewRat(1, 1),
 
             // Scaled hash == expected hash
             expectedScaledDifficultyHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
             expectedNewDifficultyHashHex:    "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
         },
@@ -216,7 +216,7 @@ func TestRelayDifficulty_ScaleDifficultyTargetHash(t *testing.T) {
         {
             desc:                  "Scale by 0.5 (allow less relays)",
             prevDifficultyHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
-            scalingRatio:          0.5,
+            scalingRatio:          *big.NewRat(1, 2),
 
             // Scaled hash == expected hash
             expectedScaledDifficultyHashHex: "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
@@ -225,7 +225,7 @@ func TestRelayDifficulty_ScaleDifficultyTargetHash(t *testing.T) {
         {
             desc:                  "Scale by 2 (allow more relays)",
             prevDifficultyHashHex: "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
-            scalingRatio:          2,
+            scalingRatio:          *big.NewRat(2, 1),
 
             // Scaled hash == expected hash
             expectedScaledDifficultyHashHex: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe",
             expectedNewDifficultyHashHex:    "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe",
         },
@@ -236,7 +236,7 @@ func TestRelayDifficulty_ScaleDifficultyTargetHash(t *testing.T) {
         {
             desc:                  "Scale by 0.25 (allow less relays)",
             prevDifficultyHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
-            scalingRatio:          0.25,
+            scalingRatio:          *big.NewRat(1, 4),
 
             // Scaled hash == expected hash
             expectedScaledDifficultyHashHex: "3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
@@ -245,7 +245,7 @@ func TestRelayDifficulty_ScaleDifficultyTargetHash(t *testing.T) {
         {
             desc:                  "Scale by 4 (allow more relays)",
             prevDifficultyHashHex: "3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
-            scalingRatio:          4,
+            scalingRatio:          *big.NewRat(4, 1),
 
             // Scaled hash == expected hash
             expectedScaledDifficultyHashHex: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc",
             expectedNewDifficultyHashHex:    "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc",
         },
@@ -256,42 +256,61 @@ func TestRelayDifficulty_ScaleDifficultyTargetHash(t *testing.T) {
         {
             desc:                  "Scale by 0.1 (allow less relays)",
             prevDifficultyHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
-            scalingRatio:          0.1,
+            scalingRatio:          *big.NewRat(1, 10),
 
-            // Scaled hash != expected hash
-            expectedScaledDifficultyHashHex: "19999999999999ffffffffffffffffffffffffffffffffffffffffffffffffff",
-            expectedNewDifficultyHashHex:    "19999999999999999fffffffffffffffffffffffffffffffffffffffffffffff",
+            // Scaled hash == expected hash
+            // Scaling down 0xff..ff by 10 leads to a non-integer result (...987.5 rounded down to ...987),
+            // making a scaling up of the result (0x19..99) by 10 not equal to the original hash.
+            expectedScaledDifficultyHashHex: "1999999999999999999999999999999999999999999999999999999999999999",
+            expectedNewDifficultyHashHex:    "1999999999999999999999999999999999999999999999999999999999999999",
         },
         {
             desc:                  "Scale by 10 (allow more relays)",
             prevDifficultyHashHex: "1999999999999999999999999999999999999999999999999999999999999999",
-            scalingRatio:          10,
+            scalingRatio:          *big.NewRat(10, 1),
 
             // Scaled hash == expected hash
-            expectedScaledDifficultyHashHex: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8",
-            expectedNewDifficultyHashHex:    "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8",
+            expectedScaledDifficultyHashHex: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa",
+            expectedNewDifficultyHashHex:    "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa",
         },
         // Scale down and up by 10e-12 and 10e12
         {
             desc:                  "Scale by 10e-12 (allow less relays)",
             prevDifficultyHashHex: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
-            scalingRatio:          10e-12,
+            scalingRatio:          *big.NewRat(1, 10e12),
 
             // Scaled hash != expected hash
-            expectedScaledDifficultyHashHex: "000000000afebff0bcb24a7fffffffffffffffffffffffffffffffffffffffff",
-            expectedNewDifficultyHashHex:    "000000000afebff0bcb24aafefffffffffffffffffffffffffffffffffffffff",
+            expectedScaledDifficultyHashHex: "1c25c268497681c2650cb4be40d60df7311e9872477f201c409ec0",
+            expectedNewDifficultyHashHex:    "00000000001c25c268497681c2650cb4be40d60df7311e9872477f201c409ec0",
+        },
+        {
+            desc:                  "Scale by 10e12 (allow more relays)",
+            prevDifficultyHashHex: "00000000001c25c268497681c2650cb4be40d60df7311e9872477f201c409ec0",
+            scalingRatio:          *big.NewRat(10e12, 1),
+
+            // Scaled hash == expected hash
+            expectedScaledDifficultyHashHex: "fffffffffffffffffffffffffffffffffffffffffffffffffffff8cd94b80000",
+            expectedNewDifficultyHashHex:    "fffffffffffffffffffffffffffffffffffffffffffffffffffff8cd94b80000",
+        },
+        {
+            desc:                  "Scale by 10e-12 (allow less relays) padding",
+            prevDifficultyHashHex: "0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+            scalingRatio:          *big.NewRat(1, 10e12),
+
+            // Scaled hash != expected hash: Padding
+            expectedScaledDifficultyHashHex: "01c25c268497681c2650cb4be40d60df7311e9872477f201c409ec",
+            expectedNewDifficultyHashHex:    "000000000001c25c268497681c2650cb4be40d60df7311e9872477f201c409ec",
+        },
+        {
+            desc:                  "Scale by 10e12 (allow more relays) truncating",
+            prevDifficultyHashHex: "0000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+            scalingRatio:          *big.NewRat(10e12, 1),
+
+            // Scaled hash != expected hash: Truncating
+            expectedScaledDifficultyHashHex: "9184e729fffffffffffffffffffffffffffffffffffffffffffffffff6e7b18d6000",
+            expectedNewDifficultyHashHex:    "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+        },
-        // TODO_BETA(@red-0ne): See this comment: https://github.com/pokt-network/poktroll/pull/771#issuecomment-2364772430
-        // {
-        // 	desc:                  "Scale by 10e12 (allow more relays)",
-        // 	prevDifficultyHashHex: "000000000afebff0bcb24a7fffffffffffffffffffffffffffffffffffffffff",
-        // 	scalingRatio:          10e12,
-
-        // 	// Scaled hash != expected hash
-        // 	expectedScaledDifficultyHashHex: "63fffffffffffe4c079b8ffffffffffffffffffffffffffffffffff80000000000",
-        // 	expectedNewDifficultyHashHex:    "0001357c299a88ea715eae88eddcd3879fffffffffffffffffffffffffe00000",
-        // },
     }
 
     for _, test := range tests {
@@ -303,8 +322,7 @@ func TestRelayDifficulty_ScaleDifficultyTargetHash(t *testing.T) {
             expectedScaledHashBz, err := hex.DecodeString(test.expectedScaledDifficultyHashHex)
             require.NoError(t, err)
 
-            ratio := new(big.Float).SetFloat64(test.scalingRatio)
-            scaledDifficultyHash := ScaleRelayDifficultyHash(currHashBz, ratio)
+            scaledDifficultyHash := ScaleRelayDifficultyHash(currHashBz, &test.scalingRatio)
 
             isScaledHashAsExpected := bytes.Equal(expectedScaledHashBz, scaledDifficultyHash)
             require.True(t, isScaledHashAsExpected, "expected scaled (unbounded) difficulty hash %x, but got %x", expectedScaledHashBz, scaledDifficultyHash)
@@ -313,9 +331,8 @@ func TestRelayDifficulty_ScaleDifficultyTargetHash(t *testing.T) {
             expectedNewHashBz, err := hex.DecodeString(test.expectedNewDifficultyHashHex)
             require.NoError(t, err)
 
-            // Multiplying by 10e12 to avoid precision loss
-            targetNumRelays := uint64(test.scalingRatio * 10e12)
-            newRelaysEma := uint64(1 * 10e12)
+            targetNumRelays := test.scalingRatio.Num().Uint64()
+            newRelaysEma := test.scalingRatio.Denom().Uint64()
 
             newDifficultyHash := ComputeNewDifficultyTargetHash(currHashBz, targetNumRelays, newRelaysEma)
             isNewHashAsExpected := bytes.Equal(expectedNewHashBz, newDifficultyHash)
@@ -328,10 +345,12 @@ func TestRelayDifficulty_ScaleDifficultyTargetHash(t *testing.T) {
             if test.expectedNewDifficultyHashHex != test.expectedScaledDifficultyHashHex {
                 require.NotEqual(t, test.scalingRatio, 1, "should not reach this code path if scaling ratio is 1")
                 // New difficulty was padded
-                if test.scalingRatio < 1 {
-                    require.Equal(t, len(scaledDifficultyHash), len(newDifficultyHash), "scaled down difficulty should have been padded")
-                } else if test.scalingRatio > 1 {
-                    require.Greater(t, len(scaledDifficultyHash), len(newDifficultyHash), "scaled up difficulty should have been truncated")
+                if targetNumRelays < newRelaysEma {
+                    require.Less(t, len(expectedScaledHashBz), len(newDifficultyHash))
+                    require.Equal(t, len(expectedNewHashBz), len(newDifficultyHash), "scaled down difficulty should have been padded")
+                } else if targetNumRelays > newRelaysEma {
+                    require.Greater(t, len(expectedScaledHashBz), len(newDifficultyHash))
+                    require.Equal(t, len(expectedNewHashBz), len(newDifficultyHash), "scaled up difficulty should have been truncated")
                 }
             }
         })
@@ -362,6 +381,34 @@ func TestRelayDifficulty_EnsureRelayMiningProbabilityIsProportional(t *testing.T) {
     }
 }
 
+// This test ensures that a difficulty hash byte representation that is trimmed
+// of its meaningless zeros does not change its value.
+// See the discussion below for more details:
+// https://github.com/pokt-network/poktroll/pull/831#discussion_r1774183541
+func TestRelayDifficulty_TruncateRelayDifficultyHashToBaseSizeDoesNotChangeItsValue(t *testing.T) {
+    difficultyInt := big.NewInt(256)
+    expectedDifficultyHash := []byte{0x01, 0x00}
+    // big.Int#Bytes returns a big endian representation, which means that any
+    // leftmost consecutive zeros are non-meaningful to the represented value.
+    difficultyHashBz := difficultyInt.Bytes()
+
+    // Ensure that big.Int does not produce non-meaningful bytes
+    require.Equal(t, expectedDifficultyHash, difficultyHashBz)
+
+    // Assuming that more non-meaningful bytes have been added to the difficulty,
+    // ensure that truncating them does not affect the value.
+
+    difficultyHashWithNonMeaningfulBz := append([]byte{0x00, 0x00, 0x00}, difficultyHashBz...)
+    nonTrimmedDifficultyInt := big.NewInt(0).SetBytes(difficultyHashWithNonMeaningfulBz)
+
+    trimmedDifficultyBz := bytes.TrimLeft(difficultyHashWithNonMeaningfulBz, "\x00")
+    trimmedDifficultyInt := big.NewInt(0).SetBytes(trimmedDifficultyBz)
+
+    require.Len(t, trimmedDifficultyBz, 2)
+    require.Equal(t, difficultyInt, trimmedDifficultyInt)
+    require.Equal(t, difficultyInt, nonTrimmedDifficultyInt)
+}
+
 // scaleRelaysFromActualToTarget scales the number of relays (i.e. estimated offchain serviced relays)
 // down to the number of expected on-chain volume applicable relays
 func scaleRelaysFromActualToTarget(t *testing.T, relayDifficultyProbability *big.Rat, numRelays uint64) uint64 {
@@ -380,9 +427,9 @@ func TestRelayDifficulty_EnsureRelayMiningMultiplierIsProportional(t *testing.T) {
     // Target Num Relays is the target number of volume applicable relays a session tree should have.
     const (
-        targetNumRelays   = uint64(10e2) // Target number of volume applicable relays
-        lowVolumeService  = 1e4          // Number of actual off-chain relays serviced by a RelayMiner
-        highVolumeService = 1e6          // Number of actual off-chain relays serviced by a RelayMiner
+        targetNumRelays   = uint64(10e3) // Target number of volume applicable relays
+        lowVolumeService  = 1e5          // Number of actual off-chain relays serviced by a RelayMiner
+        highVolumeService = 1e7          // Number of actual off-chain relays serviced by a RelayMiner
         allowableDelta    = 0.05         // Allow a 5% error margin between estimated probabilities and results
     )
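Two behaviors the reworked tests lean on can be checked in isolation. The sketch below (illustrative, not from the repository) prints the Num()/Denom() pair that the test now feeds to ComputeNewDifficultyTargetHash, and confirms the property asserted by the new truncation test: trimming leading zero bytes does not change the integer that a big-endian byte slice represents.

```go
package main

import (
	"bytes"
	"fmt"
	"math/big"
)

func main() {
	// The updated test derives its inputs straight from the ratio:
	// targetNumRelays/newRelaysEma == scalingRatio.Num()/scalingRatio.Denom().
	scalingRatio := big.NewRat(1, 10e12)
	fmt.Println(scalingRatio.Num().Uint64(), scalingRatio.Denom().Uint64()) // 1 10000000000000

	// Leading zero bytes are non-meaningful in a big-endian big.Int representation,
	// so trimming them preserves the value.
	withZeros := []byte{0x00, 0x00, 0x00, 0x01, 0x00}
	trimmed := bytes.TrimLeft(withZeros, "\x00")
	fmt.Println(new(big.Int).SetBytes(withZeros).Cmp(new(big.Int).SetBytes(trimmed)) == 0) // true
}
```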
diff --git a/x/application/types/message_update_param.go b/x/application/types/message_update_param.go
index 6836425a8..2f71cdac0 100644
--- a/x/application/types/message_update_param.go
+++ b/x/application/types/message_update_param.go
@@ -8,7 +8,7 @@ import (
     sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
 )
 
-var _ cosmostypes.Msg = &MsgUpdateParam{}
+var _ cosmostypes.Msg = (*MsgUpdateParam)(nil)
 
 func NewMsgUpdateParam(authority string, name string, asType any) *MsgUpdateParam {
     var asTypeIface isMsgUpdateParam_AsType
diff --git a/x/gateway/keeper/msg_server_stake_gateway.go b/x/gateway/keeper/msg_server_stake_gateway.go
index a1e776fdb..74c87e058 100644
--- a/x/gateway/keeper/msg_server_stake_gateway.go
+++ b/x/gateway/keeper/msg_server_stake_gateway.go
@@ -37,7 +37,7 @@ func (k msgServer) StakeGateway(
     // NB: This SHOULD NEVER happen because msg.ValidateBasic() validates the address as bech32.
     if err != nil {
         // TODO_TECHDEBT(#384): determine whether to continue using cosmos logger for debug level.
-        logger.Info(fmt.Sprintf("could not parse address %q", msg.Address))
+        logger.Info(fmt.Sprintf("ERROR: could not parse address %q", msg.Address))
         return nil, status.Error(codes.InvalidArgument, err.Error())
     }
 
@@ -45,11 +45,11 @@ func (k msgServer) StakeGateway(
     var coinsToEscrow sdk.Coin
     gateway, isGatewayFound := k.GetGateway(ctx, msg.Address)
     if !isGatewayFound {
-        logger.Info(fmt.Sprintf("gateway not found. Creating new gateway for address %q", msg.Address))
+        logger.Info(fmt.Sprintf("gateway not found; creating new gateway for address %q", msg.Address))
         gateway = k.createGateway(ctx, msg)
         coinsToEscrow = *msg.Stake
     } else {
-        logger.Info(fmt.Sprintf("gateway found. About to try and update gateway for address %q", msg.Address))
+        logger.Info(fmt.Sprintf("gateway found; about to try and update gateway for address %q", msg.Address))
         currGatewayStake := *gateway.Stake
         if err = k.updateGateway(ctx, &gateway, msg); err != nil {
             logger.Error(fmt.Sprintf("could not update gateway for address %q due to error %v", msg.Address, err))
@@ -69,24 +69,19 @@ func (k msgServer) StakeGateway(
     }
 
     // MUST ALWAYS stake or upstake (> 0 delta).
+    // TODO_MAINNET(#853): Consider removing the requirement above.
     if coinsToEscrow.IsZero() {
-        errMsg := fmt.Sprintf("gateway %q must escrow more than 0 additional coins", msg.GetAddress())
-        logger.Info(errMsg)
-        return nil, status.Error(
-            codes.InvalidArgument,
-            types.ErrGatewayInvalidStake.Wrap(errMsg).Error(),
-        )
+        err = types.ErrGatewayInvalidStake.Wrapf("gateway %q must escrow more than 0 additional coins", msg.GetAddress())
+        logger.Info(fmt.Sprintf("ERROR: %s", err))
+        return nil, status.Error(codes.InvalidArgument, err.Error())
     }
 
     // MUST ALWAYS have at least minimum stake.
     minStake := k.GetParams(ctx).MinStake
     if msg.Stake.Amount.LT(minStake.Amount) {
-        errFmt := "gateway %q must stake at least %s"
-        logger.Info(fmt.Sprintf(errFmt, msg.Address, minStake))
-        return nil, status.Error(
-            codes.InvalidArgument,
-            types.ErrGatewayInvalidStake.Wrapf(errFmt, msg.Address, minStake).Error(),
-        )
+        err = types.ErrGatewayInvalidStake.Wrapf("gateway %q must stake at least %s", msg.Address, minStake)
+        logger.Info(fmt.Sprintf("ERROR: %s", err))
+        return nil, status.Error(codes.InvalidArgument, err.Error())
     }
 
     // Send the coins from the gateway to the staked gateway pool
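The gateway staking changes above converge on a single error-handling shape: wrap a registered module error once with Wrapf, log it, and surface it as a gRPC InvalidArgument status. A minimal sketch of that shape follows; errGatewayInvalidStake, its codespace/code, and the bech32 address are made up for illustration (the real error is types.ErrGatewayInvalidStake in x/gateway/types).

```go
package main

import (
	"fmt"

	errorsmod "cosmossdk.io/errors"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Hypothetical stand-in for types.ErrGatewayInvalidStake; codespace and code are arbitrary here.
var errGatewayInvalidStake = errorsmod.Register("gateway", 3, "invalid gateway stake")

func validateEscrow(address string, coinsToEscrowIsZero bool) error {
	if coinsToEscrowIsZero {
		// Same shape as the handler above: wrap the registered error once,
		// log it, and return it as a gRPC InvalidArgument status.
		err := errGatewayInvalidStake.Wrapf("gateway %q must escrow more than 0 additional coins", address)
		fmt.Printf("ERROR: %s\n", err)
		return status.Error(codes.InvalidArgument, err.Error())
	}
	return nil
}

func main() {
	fmt.Println(validateEscrow("pokt1example", true))
}
```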