From d421b16b9875b9c502d8ee0be3efa62204cc4e9a Mon Sep 17 00:00:00 2001 From: Benjamin Himes <37844818+thewhaleking@users.noreply.github.com> Date: Wed, 16 Oct 2024 17:38:54 +0200 Subject: [PATCH 01/58] Expands the type registry to include all the available options (#2353) Expands the type registry to include all the available options --- bittensor/core/settings.py | 122 ++++++++++++++++++++++++++++++++++++- 1 file changed, 120 insertions(+), 2 deletions(-) diff --git a/bittensor/core/settings.py b/bittensor/core/settings.py index 36314c2b72..29948b612e 100644 --- a/bittensor/core/settings.py +++ b/bittensor/core/settings.py @@ -112,11 +112,28 @@ def turn_console_on(): } # --- Type Registry --- -TYPE_REGISTRY: dict = { +TYPE_REGISTRY: dict[str, dict] = { "types": { "Balance": "u64", # Need to override default u128 }, "runtime_api": { + "DelegateInfoRuntimeApi": { + "methods": { + "get_delegated": { + "params": [ + { + "name": "coldkey", + "type": "Vec", + }, + ], + "type": "Vec", + }, + "get_delegates": { + "params": [], + "type": "Vec", + }, + } + }, "NeuronInfoRuntimeApi": { "methods": { "get_neuron_lite": { @@ -141,8 +158,65 @@ def turn_console_on(): ], "type": "Vec", }, + "get_neuron": { + "params": [ + { + "name": "netuid", + "type": "u16", + }, + { + "name": "uid", + "type": "u16", + }, + ], + "type": "Vec", + }, + "get_neurons": { + "params": [ + { + "name": "netuid", + "type": "u16", + }, + ], + "type": "Vec", + }, } }, + "StakeInfoRuntimeApi": { + "methods": { + "get_stake_info_for_coldkey": { + "params": [ + { + "name": "coldkey_account_vec", + "type": "Vec", + }, + ], + "type": "Vec", + }, + "get_stake_info_for_coldkeys": { + "params": [ + { + "name": "coldkey_account_vecs", + "type": "Vec>", + }, + ], + "type": "Vec", + }, + }, + }, + "ValidatorIPRuntimeApi": { + "methods": { + "get_associated_validator_ip_info_for_subnet": { + "params": [ + { + "name": "netuid", + "type": "u16", + }, + ], + "type": "Vec", + }, + }, + }, "SubnetInfoRuntimeApi": { 
"methods": { "get_subnet_hyperparams": { @@ -153,12 +227,56 @@ def turn_console_on(): }, ], "type": "Vec", - } + }, + "get_subnet_info": { + "params": [ + { + "name": "netuid", + "type": "u16", + }, + ], + "type": "Vec", + }, + "get_subnets_info": { + "params": [], + "type": "Vec", + }, } }, "SubnetRegistrationRuntimeApi": { "methods": {"get_network_registration_cost": {"params": [], "type": "u64"}} }, + "ColdkeySwapRuntimeApi": { + "methods": { + "get_scheduled_coldkey_swap": { + "params": [ + { + "name": "coldkey_account_vec", + "type": "Vec", + }, + ], + "type": "Vec", + }, + "get_remaining_arbitration_period": { + "params": [ + { + "name": "coldkey_account_vec", + "type": "Vec", + }, + ], + "type": "Vec", + }, + "get_coldkey_swap_destinations": { + "params": [ + { + "name": "coldkey_account_vec", + "type": "Vec", + }, + ], + "type": "Vec", + }, + } + }, }, } From 88f9177f8af419ac98b03b3e57d1a3a097394063 Mon Sep 17 00:00:00 2001 From: opendansor Date: Wed, 16 Oct 2024 09:03:41 -0700 Subject: [PATCH 02/58] Add commit-reveal subprocess and related utilities Introducing a new subprocess for the commit-reveal mechanism alongside necessary utility functions. Also added end-to-end tests to ensure proper functionality of the new subprocess feature. 
--- bittensor/__init__.py | 3 + bittensor/core/extrinsics/commit_weights.py | 53 ++++- bittensor/core/extrinsics/set_weights.py | 3 + bittensor/core/subtensor.py | 206 ++++++++++-------- requirements/prod.txt | 1 + scripts/__init__.py | 0 scripts/subprocess/__init__.py | 0 scripts/subprocess/commit_reveal.py | 223 ++++++++++++++++++++ scripts/subprocess_utils.py | 141 +++++++++++++ tests/e2e_tests/conftest.py | 128 ++++++----- tests/e2e_tests/test_reveal_weights.py | 166 +++++++++++++++ 11 files changed, 771 insertions(+), 153 deletions(-) create mode 100644 scripts/__init__.py create mode 100644 scripts/subprocess/__init__.py create mode 100644 scripts/subprocess/commit_reveal.py create mode 100644 scripts/subprocess_utils.py create mode 100644 tests/e2e_tests/test_reveal_weights.py diff --git a/bittensor/__init__.py b/bittensor/__init__.py index f4d8ee906a..d3344b9c92 100644 --- a/bittensor/__init__.py +++ b/bittensor/__init__.py @@ -20,6 +20,9 @@ from .core.settings import __version__, version_split, DEFAULTS from .utils.btlogging import logging from .utils.deprecated import * +from scripts import subprocess_utils + +subprocess_utils.start_commit_reveal_subprocess() def __getattr__(name): diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index 5e9f2e9e19..51b7e74f06 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -18,7 +18,7 @@ """Module commit weights and reveal weights extrinsic.""" from typing import Optional, TYPE_CHECKING - +import socket from retry import retry from rich.prompt import Confirm @@ -36,12 +36,12 @@ # # Chain call for `commit_weights_extrinsic` @ensure_connected def do_commit_weights( - self: "Subtensor", - wallet: "Wallet", - netuid: int, - commit_hash: str, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, + self: "Subtensor", + wallet: "Wallet", + netuid: int, + commit_hash: str, + wait_for_inclusion: 
bool = False, + wait_for_finalization: bool = False, ) -> tuple[bool, Optional[dict]]: """ Internal method to send a transaction to the Bittensor blockchain, committing the hash of a neuron's weights. @@ -144,6 +144,28 @@ def commit_weights_extrinsic( return False, error_message +def commit_weights_process( + subtensor: "Subtensor", + wallet: "Wallet", + netuid: int, + commit_hash: str, + uids: list[int], + weights: list[int], + salt: list[int], +): + def send_command(command): + client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + client.connect(('localhost', 9999)) + client.send(command.encode()) + client.close() + + curr_block = subtensor.get_current_block() + cr_interval = subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_interval + reveal_block = curr_block + cr_interval + + command = f'committed "{wallet.name}" "{wallet.path}" "{wallet.hotkey}" "{curr_block}" "{reveal_block}" "{commit_hash}" "{netuid}" "{uids}" "{weights}" "{salt}"' + send_command(command) + # Chain call for `reveal_weights_extrinsic` @ensure_connected def do_reveal_weights( @@ -272,3 +294,20 @@ def reveal_weights_extrinsic( error_message = format_error_message(error_message) logging.error(f"Failed to reveal weights: {error_message}") return False, error_message + + +def reveal_weights_process( + wallet: "Wallet", + netuid: int, + uids: list[int], + weights: list[int], + salt: list[int], +): + def send_command(command): + client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + client.connect(('localhost', 9999)) + client.send(command.encode()) + client.close() + + command = f'revealed "{wallet.name}" "{wallet.path}" "{wallet.hotkey}" "{netuid}" "{uids}" "{weights}" "{salt}"' + send_command(command) diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index 7680061c5b..dead099761 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -160,6 +160,9 @@ def 
set_weights_extrinsic( with bt_console.status( f":satellite: Setting weights on [white]{subtensor.network}[/white] ..." ): + + # TODO: Check if CR is enabled, do commit instead if yes. + try: success, error_message = do_set_weights( self=subtensor, diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index ca7397adb6..6a272c50b1 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -48,7 +48,7 @@ from bittensor.core.config import Config from bittensor.core.extrinsics.commit_weights import ( commit_weights_extrinsic, - reveal_weights_extrinsic, + reveal_weights_extrinsic, reveal_weights_process, commit_weights_process, ) from bittensor.core.extrinsics.prometheus import ( do_serve_prometheus, @@ -70,9 +70,12 @@ from bittensor.utils.balance import Balance from bittensor.utils.btlogging import logging from bittensor.utils.weight_utils import generate_weight_hash +from scripts import subprocess_utils KEY_NONCE: dict[str, int] = {} +COMMIT_REVEAL_PROCESS = "commit_reveal.py" + class ParamWithTypes(TypedDict): name: str # Name of the parameter. @@ -130,12 +133,12 @@ class Subtensor: """ def __init__( - self, - network: Optional[str] = None, - config: Optional["Config"] = None, - _mock: bool = False, - log_verbose: bool = False, - connection_timeout: int = 600, + self, + network: Optional[str] = None, + config: Optional["Config"] = None, + _mock: bool = False, + log_verbose: bool = False, + connection_timeout: int = 600, ) -> None: """ Initializes a Subtensor interface for interacting with the Bittensor blockchain. @@ -169,8 +172,8 @@ def __init__( ) if ( - self.network == "finney" - or self.chain_endpoint == settings.FINNEY_ENTRYPOINT + self.network == "finney" + or self.chain_endpoint == settings.FINNEY_ENTRYPOINT ) and log_verbose: logging.info( f"You are connecting to {self.network} network with endpoint {self.chain_endpoint}." 
@@ -382,9 +385,9 @@ def add_args(cls, parser: "argparse.ArgumentParser", prefix: Optional[str] = Non # Inner private functions @networking.ensure_connected def _encode_params( - self, - call_definition: list["ParamWithTypes"], - params: Union[list[Any], dict[str, Any]], + self, + call_definition: list["ParamWithTypes"], + params: Union[list[Any], dict[str, Any]], ) -> str: """Returns a hex encoded string of the params using their types.""" param_data = scalecodec.ScaleBytes(b"") @@ -402,7 +405,7 @@ def _encode_params( return param_data.to_hex() def _get_hyperparameter( - self, param_name: str, netuid: int, block: Optional[int] = None + self, param_name: str, netuid: int, block: Optional[int] = None ) -> Optional[Any]: """ Retrieves a specified hyperparameter for a specific subnet. @@ -427,7 +430,7 @@ def _get_hyperparameter( # Calls methods @networking.ensure_connected def query_subtensor( - self, name: str, block: Optional[int] = None, params: Optional[list] = None + self, name: str, block: Optional[int] = None, params: Optional[list] = None ) -> "ScaleType": """ Queries named storage from the Subtensor module on the Bittensor blockchain. This function is used to retrieve specific data or parameters from the blockchain, such as stake, rank, or other neuron-specific attributes. @@ -458,7 +461,7 @@ def make_substrate_call_with_retry() -> "ScaleType": @networking.ensure_connected def query_map_subtensor( - self, name: str, block: Optional[int] = None, params: Optional[list] = None + self, name: str, block: Optional[int] = None, params: Optional[list] = None ) -> "QueryMapResult": """ Queries map storage from the Subtensor module on the Bittensor blockchain. This function is designed to retrieve a map-like data structure, which can include various neuron-specific details or network-wide attributes. 
@@ -488,11 +491,11 @@ def make_substrate_call_with_retry(): return make_substrate_call_with_retry() def query_runtime_api( - self, - runtime_api: str, - method: str, - params: Optional[Union[list[int], dict[str, int]]], - block: Optional[int] = None, + self, + runtime_api: str, + method: str, + params: Optional[Union[list[int], dict[str, int]]], + block: Optional[int] = None, ) -> Optional[str]: """ Queries the runtime API of the Bittensor blockchain, providing a way to interact with the underlying runtime and retrieve data encoded in Scale Bytes format. This function is essential for advanced users who need to interact with specific runtime methods and decode complex data types. @@ -541,7 +544,7 @@ def query_runtime_api( @networking.ensure_connected def state_call( - self, method: str, data: str, block: Optional[int] = None + self, method: str, data: str, block: Optional[int] = None ) -> dict[Any, Any]: """ Makes a state call to the Bittensor blockchain, allowing for direct queries of the blockchain's state. This function is typically used for advanced queries that require specific method calls and data inputs. @@ -569,11 +572,11 @@ def make_substrate_call_with_retry() -> dict[Any, Any]: @networking.ensure_connected def query_map( - self, - module: str, - name: str, - block: Optional[int] = None, - params: Optional[list] = None, + self, + module: str, + name: str, + block: Optional[int] = None, + params: Optional[list] = None, ) -> "QueryMapResult": """ Queries map storage from any module on the Bittensor blockchain. This function retrieves data structures that represent key-value mappings, essential for accessing complex and structured data within the blockchain modules. 
@@ -605,7 +608,7 @@ def make_substrate_call_with_retry() -> "QueryMapResult": @networking.ensure_connected def query_constant( - self, module_name: str, constant_name: str, block: Optional[int] = None + self, module_name: str, constant_name: str, block: Optional[int] = None ) -> Optional["ScaleType"]: """ Retrieves a constant from the specified module on the Bittensor blockchain. This function is used to access fixed parameters or values defined within the blockchain's modules, which are essential for understanding the network's configuration and rules. @@ -635,11 +638,11 @@ def make_substrate_call_with_retry(): @networking.ensure_connected def query_module( - self, - module: str, - name: str, - block: Optional[int] = None, - params: Optional[list] = None, + self, + module: str, + name: str, + block: Optional[int] = None, + params: Optional[list] = None, ) -> "ScaleType": """ Queries any module storage on the Bittensor blockchain with the specified parameters and block number. This function is a generic query interface that allows for flexible and diverse data retrieval from various blockchain modules. @@ -671,7 +674,7 @@ def make_substrate_call_with_retry() -> "ScaleType": # Common subtensor methods def metagraph( - self, netuid: int, lite: bool = True, block: Optional[int] = None + self, netuid: int, lite: bool = True, block: Optional[int] = None ) -> "Metagraph": # type: ignore """ Returns a synced metagraph for a specified subnet within the Bittensor network. The metagraph represents the network's structure, including neuron connections and interactions. @@ -695,7 +698,7 @@ def metagraph( @staticmethod def determine_chain_endpoint_and_network( - network: str, + network: str, ) -> tuple[Optional[str], Optional[str]]: """Determines the chain endpoint and network from the passed network or chain_endpoint. 
@@ -720,18 +723,18 @@ def determine_chain_endpoint_and_network( return network, settings.ARCHIVE_ENTRYPOINT else: if ( - network == settings.FINNEY_ENTRYPOINT - or "entrypoint-finney.opentensor.ai" in network + network == settings.FINNEY_ENTRYPOINT + or "entrypoint-finney.opentensor.ai" in network ): return "finney", settings.FINNEY_ENTRYPOINT elif ( - network == settings.FINNEY_TEST_ENTRYPOINT - or "test.finney.opentensor.ai" in network + network == settings.FINNEY_TEST_ENTRYPOINT + or "test.finney.opentensor.ai" in network ): return "test", settings.FINNEY_TEST_ENTRYPOINT elif ( - network == settings.ARCHIVE_ENTRYPOINT - or "archive.chain.opentensor.ai" in network + network == settings.ARCHIVE_ENTRYPOINT + or "archive.chain.opentensor.ai" in network ): return "archive", settings.ARCHIVE_ENTRYPOINT elif "127.0.0.1" in network or "localhost" in network: @@ -741,7 +744,7 @@ def determine_chain_endpoint_and_network( return None, None def get_netuids_for_hotkey( - self, hotkey_ss58: str, block: Optional[int] = None + self, hotkey_ss58: str, block: Optional[int] = None ) -> list[int]: """ Retrieves a list of subnet UIDs (netuids) for which a given hotkey is a member. This function identifies the specific subnets within the Bittensor network where the neuron associated with the hotkey is active. @@ -778,7 +781,7 @@ def make_substrate_call_with_retry(): return make_substrate_call_with_retry() def is_hotkey_registered_any( - self, hotkey_ss58: str, block: Optional[int] = None + self, hotkey_ss58: str, block: Optional[int] = None ) -> bool: """ Checks if a neuron's hotkey is registered on any subnet within the Bittensor network. 
@@ -795,7 +798,7 @@ def is_hotkey_registered_any( return len(self.get_netuids_for_hotkey(hotkey_ss58, block)) > 0 def is_hotkey_registered_on_subnet( - self, hotkey_ss58: str, netuid: int, block: Optional[int] = None + self, hotkey_ss58: str, netuid: int, block: Optional[int] = None ) -> bool: """ Checks if a neuron's hotkey is registered on a specific subnet within the Bittensor network. @@ -813,10 +816,10 @@ def is_hotkey_registered_on_subnet( return self.get_uid_for_hotkey_on_subnet(hotkey_ss58, netuid, block) is not None def is_hotkey_registered( - self, - hotkey_ss58: str, - netuid: Optional[int] = None, - block: Optional[int] = None, + self, + hotkey_ss58: str, + netuid: Optional[int] = None, + block: Optional[int] = None, ) -> bool: """ Determines whether a given hotkey (public key) is registered in the Bittensor network, either globally across any subnet or specifically on a specified subnet. This function checks the registration status of a neuron identified by its hotkey, which is crucial for validating its participation and activities within the network. @@ -838,16 +841,16 @@ def is_hotkey_registered( # Not used in Bittensor, but is actively used by the community in almost all subnets def set_weights( - self, - wallet: "Wallet", - netuid: int, - uids: Union[NDArray[np.int64], "torch.LongTensor", list], - weights: Union[NDArray[np.float32], "torch.FloatTensor", list], - version_key: int = settings.version_as_int, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, - prompt: bool = False, - max_retries: int = 5, + self, + wallet: "Wallet", + netuid: int, + uids: Union[NDArray[np.int64], "torch.LongTensor", list], + weights: Union[NDArray[np.float32], "torch.FloatTensor", list], + version_key: int = settings.version_as_int, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, + prompt: bool = False, + max_retries: int = 5, ) -> tuple[bool, str]: """ Sets the inter-neuronal weights for the specified neuron. 
This process involves specifying the influence or trust a neuron places on other neurons in the network, which is a fundamental aspect of Bittensor's decentralized learning architecture. @@ -873,8 +876,8 @@ def set_weights( success = False message = "No attempt made. Perhaps it is too soon to set weights!" while ( - self.blocks_since_last_update(netuid, uid) > self.weights_rate_limit(netuid) # type: ignore - and retries < max_retries + self.blocks_since_last_update(netuid, uid) > self.weights_rate_limit(netuid) # type: ignore + and retries < max_retries ): try: logging.info( @@ -899,11 +902,11 @@ def set_weights( return success, message def serve_axon( - self, - netuid: int, - axon: "Axon", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, + self, + netuid: int, + axon: "Axon", + wait_for_inclusion: bool = False, + wait_for_finalization: bool = True, ) -> bool: """ Registers an ``Axon`` serving endpoint on the Bittensor network for a specific neuron. This function is used to set up the Axon, a key component of a neuron that handles incoming queries and data processing tasks. @@ -1007,13 +1010,13 @@ def subnetwork_n(self, netuid: int, block: Optional[int] = None) -> Optional[int # Community uses this method def transfer( - self, - wallet: "Wallet", - dest: str, - amount: Union["Balance", float], - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, + self, + wallet: "Wallet", + dest: str, + amount: Union["Balance", float], + wait_for_inclusion: bool = True, + wait_for_finalization: bool = False, + prompt: bool = False, ) -> bool: """ Executes a transfer of funds from the provided wallet to the specified destination address. This function is used to move TAO tokens within the Bittensor network, facilitating transactions between neurons. 
@@ -1043,7 +1046,7 @@ def transfer( # Community uses this method via `bittensor.api.extrinsics.prometheus.prometheus_extrinsic` def get_neuron_for_pubkey_and_subnet( - self, hotkey_ss58: str, netuid: int, block: Optional[int] = None + self, hotkey_ss58: str, netuid: int, block: Optional[int] = None ) -> Optional["NeuronInfo"]: """ Retrieves information about a neuron based on its public key (hotkey SS58 address) and the specific subnet UID (netuid). This function provides detailed neuron information for a particular subnet within the Bittensor network. @@ -1066,7 +1069,7 @@ def get_neuron_for_pubkey_and_subnet( @networking.ensure_connected def neuron_for_uid( - self, uid: Optional[int], netuid: int, block: Optional[int] = None + self, uid: Optional[int], netuid: int, block: Optional[int] = None ) -> "NeuronInfo": """ Retrieves detailed information about a specific neuron identified by its unique identifier (UID) within a specified subnet (netuid) of the Bittensor network. This function provides a comprehensive view of a neuron's attributes, including its stake, rank, and operational status. @@ -1104,12 +1107,12 @@ def make_substrate_call_with_retry(): # Community uses this method def serve_prometheus( - self, - wallet: "Wallet", - port: int, - netuid: int, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, + self, + wallet: "Wallet", + port: int, + netuid: int, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = True, ) -> bool: """ Serves Prometheus metrics by submitting an extrinsic to a blockchain network via the specified wallet. The function allows configuring whether to wait for the transaction's inclusion in a block and its finalization. 
@@ -1135,7 +1138,7 @@ def serve_prometheus( # Community uses this method def get_subnet_hyperparameters( - self, netuid: int, block: Optional[int] = None + self, netuid: int, block: Optional[int] = None ) -> Optional[Union[list, "SubnetHyperparameters"]]: """ Retrieves the hyperparameters for a specific subnet within the Bittensor network. These hyperparameters define the operational settings and rules governing the subnet's behavior. @@ -1169,7 +1172,7 @@ def get_subnet_hyperparameters( # Community uses this method # Returns network ImmunityPeriod hyper parameter. def immunity_period( - self, netuid: int, block: Optional[int] = None + self, netuid: int, block: Optional[int] = None ) -> Optional[int]: """ Retrieves the 'ImmunityPeriod' hyperparameter for a specific subnet. This parameter defines the duration during which new neurons are protected from certain network penalties or restrictions. @@ -1190,7 +1193,7 @@ def immunity_period( # Community uses this method def get_uid_for_hotkey_on_subnet( - self, hotkey_ss58: str, netuid: int, block: Optional[int] = None + self, hotkey_ss58: str, netuid: int, block: Optional[int] = None ) -> Optional[int]: """ Retrieves the unique identifier (UID) for a neuron's hotkey on a specific subnet. @@ -1247,7 +1250,7 @@ def get_commitment(self, netuid: int, uid: int, block: Optional[int] = None) -> # Community uses this via `bittensor.utils.weight_utils.process_weights_for_netuid` function. def min_allowed_weights( - self, netuid: int, block: Optional[int] = None + self, netuid: int, block: Optional[int] = None ) -> Optional[int]: """ Returns network MinAllowedWeights hyperparameter. @@ -1266,7 +1269,7 @@ def min_allowed_weights( # Community uses this via `bittensor.utils.weight_utils.process_weights_for_netuid` function. def max_weight_limit( - self, netuid: int, block: Optional[int] = None + self, netuid: int, block: Optional[int] = None ) -> Optional[float]: """ Returns network MaxWeightsLimit hyperparameter. 
@@ -1285,7 +1288,7 @@ def max_weight_limit( # # Community uses this method. It is used in subtensor in neuron_info, and serving. def get_prometheus_info( - self, netuid: int, hotkey_ss58: str, block: Optional[int] = None + self, netuid: int, hotkey_ss58: str, block: Optional[int] = None ) -> Optional["PrometheusInfo"]: """ Returns the prometheus information for this hotkey account. @@ -1328,7 +1331,7 @@ def subnet_exists(self, netuid: int, block: Optional[int] = None) -> bool: # Metagraph uses this method def bonds( - self, netuid: int, block: Optional[int] = None + self, netuid: int, block: Optional[int] = None ) -> list[tuple[int, list[tuple[int, int]]]]: """ Retrieves the bond distribution set by neurons within a specific subnet of the Bittensor network. Bonds represent the investments or commitments made by neurons in one another, indicating a level of trust and perceived value. This bonding mechanism is integral to the network's market-based approach to measuring and rewarding machine intelligence. @@ -1420,7 +1423,7 @@ def get_subnets(self, block: Optional[int] = None) -> list[int]: # Metagraph uses this method def neurons_lite( - self, netuid: int, block: Optional[int] = None + self, netuid: int, block: Optional[int] = None ) -> list["NeuronInfoLite"]: """ Retrieves a list of neurons in a 'lite' format from a specific subnet of the Bittensor network. This function provides a streamlined view of the neurons, focusing on key attributes such as stake and network participation. @@ -1453,7 +1456,7 @@ def neurons_lite( # Used in the `neurons` method which is used in metagraph.py def weights( - self, netuid: int, block: Optional[int] = None + self, netuid: int, block: Optional[int] = None ) -> list[tuple[int, list[tuple[int, int]]]]: """ Retrieves the weight distribution set by neurons within a specific subnet of the Bittensor network. 
This function maps each neuron's UID to the weights it assigns to other neurons, reflecting the network's trust and value assignment mechanisms. @@ -1517,7 +1520,7 @@ def make_substrate_call_with_retry(): # Used in community via `bittensor.core.subtensor.Subtensor.transfer` @networking.ensure_connected def get_transfer_fee( - self, wallet: "Wallet", dest: str, value: Union["Balance", float, int] + self, wallet: "Wallet", dest: str, value: Union["Balance", float, int] ) -> "Balance": """ Calculates the transaction fee for transferring tokens from a wallet to a specified destination address. This function simulates the transfer to estimate the associated cost, taking into account the current network conditions and transaction complexity. @@ -1568,7 +1571,7 @@ def get_transfer_fee( # Used in community via `bittensor.core.subtensor.Subtensor.transfer` def get_existential_deposit( - self, block: Optional[int] = None + self, block: Optional[int] = None ) -> Optional["Balance"]: """ Retrieves the existential deposit amount for the Bittensor blockchain. The existential deposit is the minimum amount of TAO required for an account to exist on the blockchain. Accounts with balances below this threshold can be reaped to conserve network resources. 
@@ -1657,6 +1660,18 @@ def commit_weights( prompt=prompt, ) if success: + # add to local db if called directly + # committed(self, wallet.hotkey, commit_hash, netuid, uids, weights, salt) + if subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS): + commit_weights_process( + self, + wallet=wallet, + netuid=netuid, + commit_hash=commit_hash, + uids=list(uids), + weights=list(weights), + salt=list(salt) + ) break except Exception as e: logging.error(f"Error committing weights: {e}") @@ -1722,6 +1737,17 @@ def reveal_weights( prompt=prompt, ) if success: + # remove from local db if called directly + # Call the subprocess using parameters (signal or something else) + # revealed(wallet.hotkey, wallet.name, wallet.path, netuid, uids, weights, salt) + if subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS): + reveal_weights_process( + wallet=wallet, + netuid=netuid, + uids=list(uids), + weights=list(weights), + salt=list(salt) + ) break except Exception as e: logging.error(f"Error revealing weights: {e}") diff --git a/requirements/prod.txt b/requirements/prod.txt index 4a319c506c..e08466547c 100644 --- a/requirements/prod.txt +++ b/requirements/prod.txt @@ -22,3 +22,4 @@ scalecodec==1.2.11 substrate-interface~=1.7.9 uvicorn bittensor-wallet>=2.0.2 +psutil diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/subprocess/__init__.py b/scripts/subprocess/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/scripts/subprocess/commit_reveal.py b/scripts/subprocess/commit_reveal.py new file mode 100644 index 0000000000..5ffbc05bb1 --- /dev/null +++ b/scripts/subprocess/commit_reveal.py @@ -0,0 +1,223 @@ +import argparse +import json +import os +import time +import utils # Ensure this import works +import socket +import threading + +from bittensor.core.subtensor import Subtensor +from bittensor_wallet import Wallet +from scripts import subprocess_utils as utils + +# Path to 
the SQLite database +DB_PATH = os.path.expanduser("~/.bittensor/bittensor.db") + + +def table_exists(table_name: str) -> bool: + try: + columns, rows = utils.read_table(table_name) + print(f"Table '{table_name}' exists with columns: {columns}") + return True + except Exception as e: + print(f"Table '{table_name}' does not exist: {e}") + return False + + +def is_table_empty(table_name: str) -> bool: + try: + # Attempt to read the table + columns, rows = utils.read_table(table_name) + + # Check if the table is empty + if not rows: + print(f"Table '{table_name}' is empty.") + return True + else: + print(f"Table '{table_name}' is not empty.") + return False + + except Exception as e: + print(f"Error checking if table '{table_name}' is empty: {e}") + return False + + +def initialize_db(): + # Create 'commits' table if it doesn't exist + columns = [ + ("wallet_hotkey", "TEXT"), + ("wallet_path", "TEXT"), + ("wallet_name", "TEXT"), + ("commit_hash", "TEXT"), + ("netuid", "INTEGER"), + ("commit_block", "INTEGER"), + ("reveal_block", "INTEGER"), + ("uids", "TEXT"), # Store list as a JSON string for simplicity + ("weights", "TEXT"), # Store list as a JSON string for simplicity + ("salt", "TEXT"), # Store list as a JSON string for simplicity + ] + + # Check if the 'commits' table exists before creating it + if not table_exists("commits"): + print("Creating table 'commits'...") + utils.create_table("commits", columns, []) + else: + print("Table 'commits' already exists.") + + +def reveal(subtensor, data): + # create wallet + wallet_name = data["wallet_name"] + wallet_path = data["wallet_path"] + wallet_hotkey = data["wallet_hotkey"] + + wallet = Wallet(name=wallet_name, path=wallet_path, hotkey=wallet_hotkey) + + # Calls subtensor.reveal_weights + success, message = subtensor.reveal_weights( + wallet=wallet, + commit_hash=data["commit_hash"], + uids=list(map(int, json.loads(data["uids"]))), + weights=list(map(int, json.loads(data["weights"]))), + wait_for_inclusion=True, + 
wait_for_finalization=True) + + # delete wallet object + del wallet + + if success: + print("Reveal success") + else: + print(f"Reveal failure: {message}") + + +def revealed(wallet_name, wallet_path, wallet_hotkey, netuid, uids, weights, salt): + # Check if a row with the specified data exists in the 'commits' table + with utils.DB(db_path=DB_PATH) as (conn, cursor): + sql = "SELECT COUNT(*) FROM commits WHERE wallet_hotkey=? AND wallet_name=? AND wallet_path=? AND netuid=? AND uids=? AND weights=? AND salt=?" + cursor.execute(sql, (wallet_hotkey, wallet_name, wallet_path, netuid, json.dumps(uids), json.dumps(weights), json.dumps(salt))) + count = cursor.fetchone()[0] + + if count > 0: + # Delete the row if it exists + delete_sql = "DELETE FROM commits WHERE wallet_hotkey=? AND wallet_name=? AND wallet_path=? AND netuid=? AND uids=? AND weights=? AND salt=?" + cursor.execute(delete_sql, + (wallet_hotkey, wallet_name, wallet_path, netuid, json.dumps(uids), json.dumps(weights), json.dumps(salt))) + conn.commit() + print("Deleted existing row with specified data") + else: + print("No existing row found with specified data") + + +def committed(wallet_name, wallet_path, wallet_hotkey, curr_block, reveal_block, commit_hash, netuid, uids, weights, salt): + + commit_data = { + "wallet_hotkey": wallet_hotkey, + "wallet_name": wallet_name, + "wallet_path": wallet_path, + "commit_hash": commit_hash, + "netuid": netuid, + "commit_block": curr_block, + "reveal_block": reveal_block, + "uids": json.dumps(uids), + "weights": json.dumps(weights), + "salt": json.dumps(salt), + } + with utils.DB(db_path=DB_PATH) as (conn, cursor): + column_names = ", ".join(commit_data.keys()) + data = ", ".join(["?"] * len(commit_data)) + sql = f"INSERT INTO commits ({column_names}) VALUES ({data})" + cursor.execute(sql, tuple(commit_data.values())) + conn.commit() + + print("Committed commit data: {}", commit_data) + + +def check_reveal(subtensor, curr_block: int): + try: + columns, rows = 
utils.read_table("commits") + except Exception as e: + print(f"Error reading table 'commits': {e}") + return False + + curr_reveal = None + for commit in rows: + row_dict = dict(zip(columns, commit)) + if row_dict['reveal_block'] == curr_block: + curr_reveal = row_dict + break + + if curr_reveal: + reveal(subtensor, curr_reveal) + # Delete the row after revealing, and delete all older reveals + with utils.DB(db_path=DB_PATH) as (conn, cursor): + cursor.execute('DELETE FROM commits WHERE reveal_block <= ?', (curr_block,)) + conn.committed() + return True + + return False + + +def handle_client_connection(client_socket): + try: + while True: + request = client_socket.recv(1024).decode() + if not request: + break + args = request.split() + command = args[0] + if command == 'revealed': + # wallet_name, wallet_path, wallet_hotkey, netuid, uids, weights, salt + revealed(args[1], args[2], args[3], args[4], json.loads(args[5]), json.loads(args[6]), json.loads(args[7])) + elif command == 'committed': + # wallet_name, wallet_path, wallet_hotkey, curr_block, reveal_block, commit_hash, netuid, uids, weights, salt + committed(args[1], args[2], args[3], args[4], args[5], args[6], args[7], json.loads(args[8]), json.loads(args[9]), + json.loads(args[10])) + else: + print("Command not recognized") + except Exception as e: + print(f"Error: {e}") + finally: + client_socket.close() + + +def start_socket_server(): + server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + server.bind(('0.0.0.0', 9999)) + server.listen(5) + print('Listening on port 9999...') + while True: + client_sock, addr = server.accept() + client_handler = threading.Thread( + target=handle_client_connection, + args=(client_sock,) + ) + client_handler.start() + + +def main(args): + # Initialize database and create table if necessary + print("Initializing database...") + initialize_db() + subtensor = Subtensor(network=args.network) # Using network argument + # A new block is created every 12 seconds. 
Check if the current block is equal to the reveal block + + server_thread = threading.Thread(target=start_socket_server) + server_thread.start() + while True: + # get curr block + curr_block = subtensor.get_current_block() + if check_reveal(subtensor=subtensor, curr_block=curr_block): + print(f"Revealing commit for block {curr_block}") + else: + print(f"Nothing to reveal for block {curr_block}") + time.sleep(args.sleep_interval) # Using sleep interval argument + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Run the Bittensor commit-reveal subprocess script.") + parser.add_argument("--network", type=str, default="ws://localhost:9945", help="Subtensor network address") + parser.add_argument("--sleep_interval", type=int, default=2, help="Interval between block checks in seconds") + # Add more arguments as needed + args = parser.parse_args() + main(args) diff --git a/scripts/subprocess_utils.py b/scripts/subprocess_utils.py new file mode 100644 index 0000000000..f334b213c2 --- /dev/null +++ b/scripts/subprocess_utils.py @@ -0,0 +1,141 @@ +import os +import sqlite3 +from typing import Optional + +import subprocess +import psutil + + +def is_process_running(process_name: str) -> bool: + """Check if a process with a given name is currently running.""" + for proc in psutil.process_iter(['pid', 'name', 'cmdline']): + cmdline = proc.info['cmdline'] + if cmdline and (process_name in proc.info['name'] or any(process_name in cmd for cmd in cmdline)): + return True + return False + + +def get_process(process_name: str) -> Optional[int]: + """Check if a process with a given name is currently running, and return its PID if found.""" + for proc in psutil.process_iter(['pid', 'name', 'cmdline']): + cmdline = proc.info['cmdline'] + if cmdline and (process_name in proc.info['name'] or any(process_name in cmd for cmd in cmdline)): + return proc.info['pid'] + return None + + +def start_commit_reveal_subprocess(): + """Start the commit reveal subprocess if 
not already running.""" + process_name = 'commit_reveal.py' + script_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "subprocess", "commit_reveal.py")) + project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + + if not is_process_running(process_name): + stdout_file = open("/Users/daniel/repos/bittensor-sdk/scripts/subprocess/logs/commit_reveal_stdout.log", "w") + stderr_file = open("/Users/daniel/repos/bittensor-sdk/scripts/subprocess/logs/commit_reveal_stderr.log", "w") + print(f"Starting subprocess '{process_name}'...") + env = os.environ.copy() + env["PYTHONPATH"] = project_root + ":" + env.get("PYTHONPATH", "") + + process = subprocess.Popen( + ['python3', script_path], + stdout=stdout_file, + stderr=stderr_file, + preexec_fn=os.setsid, + env=env + ) + print(f"Subprocess '{process_name}' started with PID {process.pid}.") + + # Read and print what was captured to files + with open("/Users/daniel/repos/bittensor-sdk/scripts/subprocess/logs/commit_reveal_stdout.log") as f: + print("Subprocess output:") + print(f.read()) + + with open("/Users/daniel/repos/bittensor-sdk/scripts/subprocess/logs/commit_reveal_stderr.log") as f: + print("Subprocess errors:") + print(f.read()) + + else: + print(f"Subprocess '{process_name}' is already running.") + + +class DB: + """ + For ease of interaction with the SQLite database used for --reuse-last and --html outputs of tables + """ + + def __init__( + self, + db_path: str = os.path.expanduser("~/.bittensor/bittensor.db"), + row_factory=None, + ): + self.db_path = db_path + self.conn: Optional[sqlite3.Connection] = None + self.row_factory = row_factory + + def __enter__(self): + self.conn = sqlite3.connect(self.db_path) + self.conn.row_factory = self.row_factory + return self.conn, self.conn.cursor() + + def __exit__(self, exc_type, exc_val, exc_tb): + if self.conn: + self.conn.close() + + +def create_table(title: str, columns: list[tuple[str, str]], rows: list[list]) -> None: + """ + 
Creates and populates the rows of a table in the SQLite database. + + :param title: title of the table + :param columns: [(column name, column type), ...] + :param rows: [[element, element, ...], ...] + :return: None + """ + blob_cols = [] + for idx, (_, col_type) in enumerate(columns): + if col_type == "BLOB": + blob_cols.append(idx) + if blob_cols: + for row in rows: + for idx in blob_cols: + row[idx] = row[idx].to_bytes(row[idx].bit_length() + 7, byteorder="big") + with DB() as (conn, cursor): + drop_query = f"DROP TABLE IF EXISTS {title}" + cursor.execute(drop_query) + conn.commit() + columns_ = ", ".join([" ".join(x) for x in columns]) + creation_query = f"CREATE TABLE IF NOT EXISTS {title} ({columns_})" + conn.commit() + cursor.execute(creation_query) + conn.committed() + query = f"INSERT INTO {title} ({', '.join([x[0] for x in columns])}) VALUES ({', '.join(['?'] * len(columns))})" + cursor.executemany(query, rows) + conn.commit() + return + + +def read_table(table_name: str, order_by: str = "") -> tuple[list, list]: + """ + Reads a table from a SQLite database, returning back a column names and rows as a tuple + :param table_name: the table name in the database + :param order_by: the order of the columns in the table, optional + :return: ([column names], [rows]) + """ + with DB() as (conn, cursor): + cursor.execute(f"PRAGMA table_info({table_name})") + columns_info = cursor.fetchall() + column_names = [info[1] for info in columns_info] + column_types = [info[2] for info in columns_info] + cursor.execute(f"SELECT * FROM {table_name} {order_by}") + rows = cursor.fetchall() + blob_cols = [] + for idx, col_type in enumerate(column_types): + if col_type == "BLOB": + blob_cols.append(idx) + if blob_cols: + rows = [list(row) for row in rows] + for row in rows: + for idx in blob_cols: + row[idx] = int.from_bytes(row[idx], byteorder="big") + return column_names, rows \ No newline at end of file diff --git a/tests/e2e_tests/conftest.py b/tests/e2e_tests/conftest.py 
index 59170c9512..1c706aae0d 100644 --- a/tests/e2e_tests/conftest.py +++ b/tests/e2e_tests/conftest.py @@ -1,7 +1,9 @@ +import logging import os import re import shlex import signal +import socket import subprocess import time @@ -17,69 +19,83 @@ ) +# Function to check if the process is running by port +def is_chain_running(port): + """Check if a node is running on the given port.""" + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + try: + # Attempt to connect to the given port on localhost + s.connect(("127.0.0.1", port)) + return True + except (ConnectionRefusedError, OSError): + # If the connection is refused or there's an OS error, the node is not running + return False + + # Fixture for setting up and tearing down a localnet.sh chain between tests @pytest.fixture(scope="function") def local_chain(request): param = request.param if hasattr(request, "param") else None - # Get the environment variable for the script path script_path = os.getenv("LOCALNET_SH_PATH") if not script_path: - # Skip the test if the localhost.sh path is not set logging.warning("LOCALNET_SH_PATH env variable is not set, e2e test skipped.") pytest.skip("LOCALNET_SH_PATH environment variable is not set.") - # Check if param is None, and handle it accordingly - args = "" if param is None else f"{param}" - - # Compile commands to send to process - cmds = shlex.split(f"{script_path} {args}") - - # Start new node process - process = subprocess.Popen( - cmds, stdout=subprocess.PIPE, text=True, preexec_fn=os.setsid - ) - - # Pattern match indicates node is compiled and ready - pattern = re.compile(r"Imported #1") - - # install neuron templates - logging.info("downloading and installing neuron templates from github") - # commit with subnet-template-repo changes for rust wallet - templates_dir = clone_or_update_templates() - install_templates(templates_dir) - - timestamp = int(time.time()) - - def wait_for_node_start(process, pattern): - for line in process.stdout: - 
print(line.strip()) - # 10 min as timeout - if int(time.time()) - timestamp > 10 * 60: - print("Subtensor not started in time") - break - if pattern.search(line): - print("Node started!") - break - - wait_for_node_start(process, pattern) - - # Run the test, passing in substrate interface - yield SubstrateInterface(url="ws://127.0.0.1:9945") - - # Terminate the process group (includes all child processes) - os.killpg(os.getpgid(process.pid), signal.SIGTERM) - - # Give some time for the process to terminate - time.sleep(1) - - # If the process is not terminated, send SIGKILL - if process.poll() is None: - os.killpg(os.getpgid(process.pid), signal.SIGKILL) - - # Ensure the process has terminated - process.wait() - - # uninstall templates - logging.info("uninstalling neuron templates") - uninstall_templates(template_path) + # Determine the port to check based on `param` + port = 9945 # Default port if `param` is None + + # TODO: uncomment templates when done + # Always perform template installation + # logging.info("Downloading and installing neuron templates from GitHub") + # templates_dir = clone_or_update_templates() + # install_templates(templates_dir) + + already_running = False + if is_chain_running(port): + already_running = True + logging.info(f"Chain already running on port {port}, skipping start.") + else: + logging.info(f"Starting new chain on port {port}...") + # compile commands to send to process + cmds = shlex.split(f"{script_path} {param}") + # Start new node process + process = subprocess.Popen( + cmds, stdout=subprocess.PIPE, text=True, preexec_fn=os.setsid + ) + + # Wait for the node to start using the existing pattern match + pattern = re.compile(r"Imported #1") + timestamp = int(time.time()) + + def wait_for_node_start(process, pattern): + for line in process.stdout: + print(line.strip()) + if int(time.time()) - timestamp > 20 * 60: + pytest.fail("Subtensor not started in time") + if pattern.search(line): + print("Node started!") + break + + 
wait_for_node_start(process, pattern) + + # Run the test, passing in the substrate interface + yield SubstrateInterface(url=f"ws://127.0.0.1:{port}") + + if not already_running: + # Terminate the process group (includes all child processes) + os.killpg(os.getpgid(process.pid), signal.SIGTERM) + + # Give some time for the process to terminate + time.sleep(1) + + # If the process is not terminated, send SIGKILL + if process.poll() is None: + os.killpg(os.getpgid(process.pid), signal.SIGKILL) + + # Ensure the process has terminated + process.wait() + + # TODO: uncomment templates when done + # logging.info("Uninstalling neuron templates") + # uninstall_templates(templates_dir) \ No newline at end of file diff --git a/tests/e2e_tests/test_reveal_weights.py b/tests/e2e_tests/test_reveal_weights.py new file mode 100644 index 0000000000..83ee5177ba --- /dev/null +++ b/tests/e2e_tests/test_reveal_weights.py @@ -0,0 +1,166 @@ +import time + +import numpy as np +import pytest +import scripts.subprocess.commit_reveal as commit_reveal_subprocess +import bittensor +from bittensor import logging +from bittensor.utils.weight_utils import convert_weights_and_uids_for_emit +from tests.e2e_tests.utils.chain_interactions import ( + add_stake, + register_neuron, + register_subnet, + sudo_set_hyperparameter_bool, + sudo_set_hyperparameter_values, + wait_interval, +) +from tests.e2e_tests.utils.e2e_test_utils import setup_wallet + + +@pytest.mark.asyncio +async def test_commit_and_reveal_weights(local_chain): + """ + Tests the commit/reveal weights mechanism with a subprocess doing the reveal function + + Steps: + 1. Register a subnet through Alice + 2. Register Alice's neuron and add stake + 3. Enable commit-reveal mechanism on the subnet + 4. Lower the commit_reveal interval and rate limit + 5. Commit weights and verify + 6. 
Wait interval & see if subprocess did the reveal weights and verify + Raises: + AssertionError: If any of the checks or verifications fail + """ + netuid = 1 + logging.info("Testing test_commit_and_reveal_weights") + # Register root as Alice + keypair, alice_wallet = setup_wallet("//Alice") + assert register_subnet(local_chain, alice_wallet), "Unable to register the subnet" + + # Verify subnet 1 created successfully + assert local_chain.query( + "SubtensorModule", "NetworksAdded", [1] + ).serialize(), "Subnet wasn't created successfully" + + assert register_neuron( + local_chain, alice_wallet, netuid + ), "Unable to register Alice as a neuron" + + # Stake to become to top neuron after the first epoch + add_stake(local_chain, alice_wallet, bittensor.Balance.from_tao(100_000)) + + # Enable commit_reveal on the subnet + assert sudo_set_hyperparameter_bool( + local_chain, + alice_wallet, + "sudo_set_commit_reveal_weights_enabled", + True, + netuid, + ), "Unable to enable commit reveal on the subnet" + + subtensor = bittensor.Subtensor(network="ws://localhost:9945") + assert subtensor.get_subnet_hyperparameters( + netuid=netuid + ).commit_reveal_weights_enabled, "Failed to enable commit/reveal" + + # Lower the commit_reveal interval + assert sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_commit_reveal_weights_interval", + call_params={"netuid": netuid, "interval": "370"}, + return_error_message=True, + ) + + subtensor = bittensor.Subtensor(network="ws://localhost:9945") + assert ( + subtensor.get_subnet_hyperparameters( + netuid=netuid + ).commit_reveal_weights_interval + == 370 + ), "Failed to set commit/reveal interval" + + assert ( + subtensor.weights_rate_limit(netuid=netuid) > 0 + ), "Weights rate limit is below 0" + # Lower the rate limit + assert sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_weights_set_rate_limit", + call_params={"netuid": netuid, "weights_set_rate_limit": "0"}, 
+ return_error_message=True, + ) + subtensor = bittensor.Subtensor(network="ws://localhost:9945") + assert ( + subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 + ), "Failed to set weights_rate_limit" + assert subtensor.weights_rate_limit(netuid=netuid) == 0 + + # Commit-reveal values + uids = np.array([0], dtype=np.int64) + weights = np.array([0.1], dtype=np.float32) + salt = [18, 179, 107, 0, 165, 211, 141, 197] + weight_uids, weight_vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) + + # Assert no local CR processes in table + assert commit_reveal_subprocess.is_table_empty("commits") + + # Commit weights + success, message = subtensor.commit_weights( + alice_wallet, + netuid, + salt=salt, + uids=weight_uids, + weights=weight_vals, + wait_for_inclusion=True, + wait_for_finalization=True, + ) + + weight_commits = subtensor.query_module( + module="SubtensorModule", + name="WeightCommits", + params=[netuid, alice_wallet.hotkey.ss58_address], + ) + + # Assert that the committed weights are set correctly + assert weight_commits.value is not None, "Weight commit not found in storage" + commit_hash, commit_block = weight_commits.value + assert commit_block > 0, f"Invalid block number: {commit_block}" + + # Query the WeightCommitRevealInterval storage map + weight_commit_reveal_interval = subtensor.query_module( + module="SubtensorModule", name="WeightCommitRevealInterval", params=[netuid] + ) + interval = weight_commit_reveal_interval.value + assert interval > 0, "Invalid WeightCommitRevealInterval" + + # Verify that sqlite has entry + assert commit_reveal_subprocess.is_table_empty("commits") is False + + # Wait until the reveal block range + await wait_interval(interval, subtensor) + + # allow one more block to pass + time.sleep(12) + + # Verify that subprocess did the reveal and deleted entry from local table + assert commit_reveal_subprocess.is_table_empty("commits") + + # Query the Weights storage map + 
revealed_weights = subtensor.query_module( + module="SubtensorModule", + name="Weights", + params=[netuid, 0], # netuid and uid + ) + + # Assert that the revealed weights are set correctly + assert revealed_weights.value is not None, "Weight reveal not found in storage" + + assert ( + weight_vals[0] == revealed_weights.value[0][1] + ), f"Incorrect revealed weights. Expected: {weights[0]}, Actual: {revealed_weights.value[0][1]}" + logging.info("✅ Passed test_commit_and_reveal_weights") From 3046fa9ed578d1eabf255af3e1ae66e82d653978 Mon Sep 17 00:00:00 2001 From: opendansor Date: Wed, 16 Oct 2024 09:10:01 -0700 Subject: [PATCH 03/58] Refactor log file paths to constants Replace hardcoded log file paths with constants for stdout and stderr log files. This improves code readability and maintainability by centralizing the log file paths. Removed redundant code that reads and prints the log files. --- scripts/subprocess_utils.py | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/scripts/subprocess_utils.py b/scripts/subprocess_utils.py index f334b213c2..4d4f3ce32a 100644 --- a/scripts/subprocess_utils.py +++ b/scripts/subprocess_utils.py @@ -5,6 +5,8 @@ import subprocess import psutil +STDOUT_PATH = "scripts/subprocess/logs/commit_reveal_stdout.log" +STDERR_PATH = "scripts/subprocess/logs/commit_reveal_stderr.log" def is_process_running(process_name: str) -> bool: """Check if a process with a given name is currently running.""" @@ -31,8 +33,8 @@ def start_commit_reveal_subprocess(): project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) if not is_process_running(process_name): - stdout_file = open("/Users/daniel/repos/bittensor-sdk/scripts/subprocess/logs/commit_reveal_stdout.log", "w") - stderr_file = open("/Users/daniel/repos/bittensor-sdk/scripts/subprocess/logs/commit_reveal_stderr.log", "w") + stdout_file = open(STDOUT_PATH, "w") + stderr_file = open(STDERR_PATH, "w") print(f"Starting subprocess 
'{process_name}'...") env = os.environ.copy() env["PYTHONPATH"] = project_root + ":" + env.get("PYTHONPATH", "") @@ -46,15 +48,6 @@ def start_commit_reveal_subprocess(): ) print(f"Subprocess '{process_name}' started with PID {process.pid}.") - # Read and print what was captured to files - with open("/Users/daniel/repos/bittensor-sdk/scripts/subprocess/logs/commit_reveal_stdout.log") as f: - print("Subprocess output:") - print(f.read()) - - with open("/Users/daniel/repos/bittensor-sdk/scripts/subprocess/logs/commit_reveal_stderr.log") as f: - print("Subprocess errors:") - print(f.read()) - else: print(f"Subprocess '{process_name}' is already running.") From d325288a889dcde3c1299310f1a8ca232ef4b9a7 Mon Sep 17 00:00:00 2001 From: opendansor Date: Wed, 16 Oct 2024 09:58:50 -0700 Subject: [PATCH 04/58] Refactor weight setting with commit-reveal logic. This commit introduces a check for whether commit-reveal is enabled before setting weights. If enabled, weights will be committed with a generated salt; otherwise, weights are set directly. Additionally, this commit adjusts the sleep interval for the subprocess and removes an unnecessary comment. --- bittensor/core/extrinsics/set_weights.py | 141 +++++++++++++++-------- bittensor/core/subtensor.py | 3 +- scripts/subprocess/commit_reveal.py | 2 +- 3 files changed, 95 insertions(+), 51 deletions(-) diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index dead099761..0bedbfcb9c 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -17,7 +17,7 @@ import logging from typing import Union, Optional, TYPE_CHECKING - +import random import numpy as np from numpy.typing import NDArray from retry import retry @@ -144,54 +144,99 @@ def set_weights_extrinsic( if isinstance(weights, list): weights = np.array(weights, dtype=np.float32) - # Reformat and normalize. 
- weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( - uids, weights - ) + if subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_enabled: + # if cr is enabled, commit instead of setting the weights. + salt = [random.randint(0, 350) for _ in range(8)] + + # Ask before moving on. + if prompt: + if not Confirm.ask( + f"Do you want to commit weights:\n[bold white] weights: {weights}\n" + f"uids: {uids}[/bold white ]?" + ): + return False, "Prompt refused." - # Ask before moving on. - if prompt: - if not Confirm.ask( - f"Do you want to set weights:\n[bold white] weights: {[float(v / 65535) for v in weight_vals]}\n" - f"uids: {weight_uids}[/bold white ]?" + with bt_console.status( + f":satellite: Committing weights on [white]{subtensor.network}[/white] ..." ): - return False, "Prompt refused." - - with bt_console.status( - f":satellite: Setting weights on [white]{subtensor.network}[/white] ..." - ): - - # TODO: Check if CR is enabled, do commit instead if yes. - - try: - success, error_message = do_set_weights( - self=subtensor, - wallet=wallet, - netuid=netuid, - uids=weight_uids, - vals=weight_vals, - version_key=version_key, - wait_for_finalization=wait_for_finalization, - wait_for_inclusion=wait_for_inclusion, - ) - - if not wait_for_finalization and not wait_for_inclusion: - return True, "Not waiting for finalization or inclusion." - - if success is True: - bt_console.print(":white_heavy_check_mark: [green]Finalized[/green]") - logging.success( - msg=str(success), - prefix="Set weights", - suffix="Finalized: ", + try: + success, message = subtensor.commit_weights( + wallet=wallet, + netuid=netuid, + salt=salt, + uids=uids, + weights=weights, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + prompt=prompt, ) - return True, "Successfully set weights and Finalized." 
- else: - error_message = format_error_message(error_message) - logging.error(error_message) - return False, error_message - - except Exception as e: - bt_console.print(f":cross_mark: [red]Failed[/red]: error:{e}") - logging.debug(str(e)) + if not wait_for_finalization and not wait_for_inclusion: + return True, "Not waiting for finalization or inclusion." + + if success is True: + bt_console.print(":white_heavy_check_mark: [green]Finalized[/green]") + logging.success( + msg=str(success), + prefix="Committed weights", + suffix="Finalized: ", + ) + return True, "Successfully committed weights and Finalized." + else: + error_message = format_error_message(message) + logging.error(error_message) + return False, error_message + + except Exception as e: + bt_console.print(f":cross_mark: [red]Failed[/red]: error:{e}") + logging.debug(str(e)) return False, str(e) + else: + # Reformat and normalize. + weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( + uids, weights + ) + + # Ask before moving on. + if prompt: + if not Confirm.ask( + f"Do you want to set weights:\n[bold white] weights: {[float(v / 65535) for v in weight_vals]}\n" + f"uids: {weight_uids}[/bold white ]?" + ): + return False, "Prompt refused." + + with bt_console.status( + f":satellite: Setting weights on [white]{subtensor.network}[/white] ..." + ): + + try: + success, error_message = do_set_weights( + self=subtensor, + wallet=wallet, + netuid=netuid, + uids=weight_uids, + vals=weight_vals, + version_key=version_key, + wait_for_finalization=wait_for_finalization, + wait_for_inclusion=wait_for_inclusion, + ) + + if not wait_for_finalization and not wait_for_inclusion: + return True, "Not waiting for finalization or inclusion." + + if success is True: + bt_console.print(":white_heavy_check_mark: [green]Finalized[/green]") + logging.success( + msg=str(success), + prefix="Set weights", + suffix="Finalized: ", + ) + return True, "Successfully set weights and Finalized." 
+ else: + error_message = format_error_message(error_message) + logging.error(error_message) + return False, error_message + + except Exception as e: + bt_console.print(f":cross_mark: [red]Failed[/red]: error:{e}") + logging.debug(str(e)) + return False, str(e) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 6a272c50b1..72894fb7eb 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -1661,7 +1661,6 @@ def commit_weights( ) if success: # add to local db if called directly - # committed(self, wallet.hotkey, commit_hash, netuid, uids, weights, salt) if subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS): commit_weights_process( self, @@ -1670,7 +1669,7 @@ def commit_weights( commit_hash=commit_hash, uids=list(uids), weights=list(weights), - salt=list(salt) + salt=salt ) break except Exception as e: diff --git a/scripts/subprocess/commit_reveal.py b/scripts/subprocess/commit_reveal.py index 5ffbc05bb1..71b6312539 100644 --- a/scripts/subprocess/commit_reveal.py +++ b/scripts/subprocess/commit_reveal.py @@ -217,7 +217,7 @@ def main(args): if __name__ == "__main__": parser = argparse.ArgumentParser(description="Run the Bittensor commit-reveal subprocess script.") parser.add_argument("--network", type=str, default="ws://localhost:9945", help="Subtensor network address") - parser.add_argument("--sleep_interval", type=int, default=2, help="Interval between block checks in seconds") + parser.add_argument("--sleep_interval", type=int, default=12, help="Interval between block checks in seconds") # Add more arguments as needed args = parser.parse_args() main(args) From 6b0fb3fabf711553e3d129f82e7171d399a5addc Mon Sep 17 00:00:00 2001 From: Roman <167799377+roman-opentensor@users.noreply.github.com> Date: Wed, 16 Oct 2024 10:15:02 -0700 Subject: [PATCH 05/58] add `Subtensor.register`, `Subtensor.difficulty` and related staff with tests (#2352) * add `bittensor.core.subtensor.Subtensor.register`, 
`bittensor.core.subtensor.Subtensor.difficulty` and related staff with tests * remove commented code * update `_terminate_workers_and_wait_for_exit` by review --- bittensor/core/extrinsics/registration.py | 287 +++++ bittensor/core/subtensor.py | 82 ++ bittensor/utils/formatting.py | 41 + bittensor/utils/register_cuda.py | 130 +++ bittensor/utils/registration.py | 1021 ++++++++++++++++- requirements/prod.txt | 4 +- .../test_subtensor_integration.py | 171 +++ .../extrinsics/test_registration.py | 181 +++ tests/unit_tests/utils/test_formatting.py | 80 ++ 9 files changed, 1995 insertions(+), 2 deletions(-) create mode 100644 bittensor/core/extrinsics/registration.py create mode 100644 bittensor/utils/formatting.py create mode 100644 bittensor/utils/register_cuda.py create mode 100644 tests/unit_tests/extrinsics/test_registration.py create mode 100644 tests/unit_tests/utils/test_formatting.py diff --git a/bittensor/core/extrinsics/registration.py b/bittensor/core/extrinsics/registration.py new file mode 100644 index 0000000000..bd19b16389 --- /dev/null +++ b/bittensor/core/extrinsics/registration.py @@ -0,0 +1,287 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +import time +from typing import Union, Optional, TYPE_CHECKING + +from retry import retry +from rich.prompt import Confirm + +from bittensor.core.settings import bt_console +from bittensor.utils import format_error_message +from bittensor.utils.btlogging import logging +from bittensor.utils.networking import ensure_connected +from bittensor.utils.registration import ( + POWSolution, + create_pow, + torch, + log_no_torch_error, +) + +# For annotation purposes +if TYPE_CHECKING: + from bittensor.core.subtensor import Subtensor + from bittensor_wallet import Wallet + + +@ensure_connected +def _do_pow_register( + self: "Subtensor", + netuid: int, + wallet: "Wallet", + pow_result: POWSolution, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = True, +) -> tuple[bool, Optional[str]]: + """Sends a (POW) register extrinsic to the chain. + + Args: + netuid (int): The subnet to register on. + wallet (bittensor.wallet): The wallet to register. + pow_result (POWSolution): The PoW result to register. + wait_for_inclusion (bool): If ``True``, waits for the extrinsic to be included in a block. + Default to `False`. + wait_for_finalization (bool): If ``True``, waits for the extrinsic to be finalized. Default to `True`. + + Returns: + success (bool): ``True`` if the extrinsic was included in a block. + error (Optional[str]): ``None`` on success or not waiting for inclusion/finalization, otherwise the error + message. 
+ """ + + @retry(delay=1, tries=3, backoff=2, max_delay=4) + def make_substrate_call_with_retry(): + # create extrinsic call + call = self.substrate.compose_call( + call_module="SubtensorModule", + call_function="register", + call_params={ + "netuid": netuid, + "block_number": pow_result.block_number, + "nonce": pow_result.nonce, + "work": [int(byte_) for byte_ in pow_result.seal], + "hotkey": wallet.hotkey.ss58_address, + "coldkey": wallet.coldkeypub.ss58_address, + }, + ) + extrinsic = self.substrate.create_signed_extrinsic( + call=call, keypair=wallet.hotkey + ) + response = self.substrate.submit_extrinsic( + extrinsic, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + + # We only wait here if we expect finalization. + if not wait_for_finalization and not wait_for_inclusion: + return True, None + + # process if registration successful, try again if pow is still valid + response.process_events() + if not response.is_success: + return False, format_error_message(response.error_message) + # Successful registration + else: + return True, None + + return make_substrate_call_with_retry() + + +def register_extrinsic( + subtensor: "Subtensor", + wallet: "Wallet", + netuid: int, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = True, + prompt: bool = False, + max_allowed_attempts: int = 3, + output_in_place: bool = True, + cuda: bool = False, + dev_id: Union[list[int], int] = 0, + tpb: int = 256, + num_processes: Optional[int] = None, + update_interval: Optional[int] = None, + log_verbose: bool = False, +) -> bool: + """Registers the wallet to the chain. + + Args: + subtensor (bittensor.core.subtensor.Subtensor): Subtensor interface. + wallet (bittensor.wallet): Bittensor wallet object. + netuid (int): The ``netuid`` of the subnet to register on. 
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. + wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. + prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. + max_allowed_attempts (int): Maximum number of attempts to register the wallet. + output_in_place (bool): If true, prints the progress of the proof of work to the console in-place. Meaning the progress is printed on the same lines. Defaults to `True`. + cuda (bool): If ``true``, the wallet should be registered using CUDA device(s). + dev_id (Union[List[int], int]): The CUDA device id to use, or a list of device ids. + tpb (int): The number of threads per block (CUDA). + num_processes (int): The number of processes to use to register. + update_interval (int): The number of nonces to solve between updates. + log_verbose (bool): If ``true``, the registration process will log more information. + + Returns: + success (bool): + Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. + """ + if not subtensor.subnet_exists(netuid): + bt_console.print( + ":cross_mark: [red]Failed[/red]: error: [bold white]subnet:{}[/bold white] does not exist.".format( + netuid + ) + ) + return False + + with bt_console.status( + f":satellite: Checking Account on [bold]subnet:{netuid}[/bold]..." 
+ ): + neuron = subtensor.get_neuron_for_pubkey_and_subnet( + wallet.hotkey.ss58_address, netuid=netuid + ) + if not neuron.is_null: + logging.debug( + f"Wallet {wallet} is already registered on {neuron.netuid} with {neuron.uid}" + ) + return True + + if prompt: + if not Confirm.ask( + "Continue Registration?\n hotkey: [bold white]{}[/bold white]\n coldkey: [bold white]{}[/bold white]\n network: [bold white]{}[/bold white]".format( + wallet.hotkey.ss58_address, + wallet.coldkeypub.ss58_address, + subtensor.network, + ) + ): + return False + + if not torch: + log_no_torch_error() + return False + + # Attempt rolling registration. + attempts = 1 + while True: + bt_console.print( + ":satellite: Registering...({}/{})".format(attempts, max_allowed_attempts) + ) + # Solve latest POW. + if cuda: + if not torch.cuda.is_available(): + if prompt: + bt_console.print("CUDA is not available.") + return False + pow_result: Optional[POWSolution] = create_pow( + subtensor, + wallet, + netuid, + output_in_place, + cuda=cuda, + dev_id=dev_id, + tpb=tpb, + num_processes=num_processes, + update_interval=update_interval, + log_verbose=log_verbose, + ) + else: + pow_result: Optional[POWSolution] = create_pow( + subtensor, + wallet, + netuid, + output_in_place, + cuda=cuda, + num_processes=num_processes, + update_interval=update_interval, + log_verbose=log_verbose, + ) + + # pow failed + if not pow_result: + # might be registered already on this subnet + is_registered = subtensor.is_hotkey_registered( + netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address + ) + if is_registered: + bt_console.print( + f":white_heavy_check_mark: [green]Already registered on netuid:{netuid}[/green]" + ) + return True + + # pow successful, proceed to submit pow to chain for registration + else: + with bt_console.status(":satellite: Submitting POW..."): + # check if pow result is still valid + while not pow_result.is_stale(subtensor=subtensor): + result: tuple[bool, Optional[str]] = _do_pow_register( + 
self=subtensor, + netuid=netuid, + wallet=wallet, + pow_result=pow_result, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + success, err_msg = result + + if not success: + # Look error here + # https://github.com/opentensor/subtensor/blob/development/pallets/subtensor/src/errors.rs + if "HotKeyAlreadyRegisteredInSubNet" in err_msg: + bt_console.print( + f":white_heavy_check_mark: [green]Already Registered on [bold]subnet:{netuid}[/bold][/green]" + ) + return True + + bt_console.print(f":cross_mark: [red]Failed[/red]: {err_msg}") + time.sleep(0.5) + + # Successful registration, final check for neuron and pubkey + else: + bt_console.print(":satellite: Checking Balance...") + is_registered = subtensor.is_hotkey_registered( + hotkey_ss58=wallet.hotkey.ss58_address, + netuid=netuid, + ) + if is_registered: + bt_console.print( + ":white_heavy_check_mark: [green]Registered[/green]" + ) + return True + else: + # neuron not found, try again + bt_console.print( + ":cross_mark: [red]Unknown error. Neuron not found.[/red]" + ) + continue + else: + # Exited loop because pow is no longer valid. + bt_console.print("[red]POW is stale.[/red]") + # Try again. + continue + + if attempts < max_allowed_attempts: + # Failed registration, retry pow + attempts += 1 + bt_console.print( + ":satellite: Failed registration, retrying pow ...({}/{})".format( + attempts, max_allowed_attempts + ) + ) + else: + # Failed to register after max attempts. 
+ bt_console.print("[red]No more attempts.[/red]") + return False diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index ca7397adb6..cef96e802f 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -54,6 +54,7 @@ do_serve_prometheus, prometheus_extrinsic, ) +from bittensor.core.extrinsics.registration import register_extrinsic from bittensor.core.extrinsics.serving import ( do_serve_axon, serve_axon_extrinsic, @@ -898,6 +899,65 @@ def set_weights( return success, message + def register( + self, + wallet: "Wallet", + netuid: int, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = True, + prompt: bool = False, + max_allowed_attempts: int = 3, + output_in_place: bool = True, + cuda: bool = False, + dev_id: Union[list[int], int] = 0, + tpb: int = 256, + num_processes: Optional[int] = None, + update_interval: Optional[int] = None, + log_verbose: bool = False, + ) -> bool: + """ + Registers a neuron on the Bittensor network using the provided wallet. + + Registration is a critical step for a neuron to become an active participant in the network, enabling it to stake, set weights, and receive incentives. + + Args: + wallet (bittensor_wallet.Wallet): The wallet associated with the neuron to be registered. + netuid (int): The unique identifier of the subnet. + wait_for_inclusion (bool): Waits for the transaction to be included in a block. Defaults to `False`. + wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Defaults to `True`. + prompt (bool): If ``True``, prompts for user confirmation before proceeding. + max_allowed_attempts (int): Maximum number of attempts to register the wallet. + output_in_place (bool): If true, prints the progress of the proof of work to the console in-place. Meaning the progress is printed on the same lines. Defaults to `True`. + cuda (bool): If ``true``, the wallet should be registered using CUDA device(s). Defaults to `False`. 
+ dev_id (Union[List[int], int]): The CUDA device id to use, or a list of device ids. Defaults to `0` (zero). + tpb (int): The number of threads per block (CUDA). Default to `256`. + num_processes (Optional[int]): The number of processes to use to register. Default to `None`. + update_interval (Optional[int]): The number of nonces to solve between updates. Default to `None`. + log_verbose (bool): If ``true``, the registration process will log more information. Default to `False`. + + Returns: + bool: ``True`` if the registration is successful, False otherwise. + + This function facilitates the entry of new neurons into the network, supporting the decentralized + growth and scalability of the Bittensor ecosystem. + """ + return register_extrinsic( + subtensor=self, + wallet=wallet, + netuid=netuid, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + prompt=prompt, + max_allowed_attempts=max_allowed_attempts, + output_in_place=output_in_place, + cuda=cuda, + dev_id=dev_id, + tpb=tpb, + num_processes=num_processes, + update_interval=update_interval, + log_verbose=log_verbose, + ) + def serve_axon( self, netuid: int, @@ -1730,6 +1790,28 @@ def reveal_weights( return success, message + def difficulty(self, netuid: int, block: Optional[int] = None) -> Optional[int]: + """ + Retrieves the 'Difficulty' hyperparameter for a specified subnet in the Bittensor network. + + This parameter is instrumental in determining the computational challenge required for neurons to participate in consensus and validation processes. + + Args: + netuid (int): The unique identifier of the subnet. + block (Optional[int]): The blockchain block number for the query. + + Returns: + Optional[int]: The value of the 'Difficulty' hyperparameter if the subnet exists, ``None`` otherwise. 
+ + The 'Difficulty' parameter directly impacts the network's security and integrity by setting the computational effort required for validating transactions and participating in the network's consensus mechanism. + """ + call = self._get_hyperparameter( + param_name="Difficulty", netuid=netuid, block=block + ) + if call is None: + return None + return int(call) + # Subnet 27 uses this method _do_serve_prometheus = do_serve_prometheus # Subnet 27 uses this method name diff --git a/bittensor/utils/formatting.py b/bittensor/utils/formatting.py new file mode 100644 index 0000000000..1ee3fd6671 --- /dev/null +++ b/bittensor/utils/formatting.py @@ -0,0 +1,41 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +import math + + +def get_human_readable(num, suffix="H"): + """Convert a number into a human-readable format with suffixes.""" + for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]: + if abs(num) < 1000.0: + return f"{num:3.1f}{unit}{suffix}" + num /= 1000.0 + return f"{num:.1f}Y{suffix}" + + +def millify(n: int): + """Converts a number into a more readable format with suffixes.""" + mill_names = ["", " K", " M", " B", " T"] + n = float(n) + mill_idx = max( + 0, + min( + len(mill_names) - 1, + int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3)), + ), + ) + return "{:.2f}{}".format(n / 10 ** (3 * mill_idx), mill_names[mill_idx]) diff --git a/bittensor/utils/register_cuda.py b/bittensor/utils/register_cuda.py new file mode 100644 index 0000000000..e0a77f19c9 --- /dev/null +++ b/bittensor/utils/register_cuda.py @@ -0,0 +1,130 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +import binascii +import hashlib +import io +import math +from contextlib import redirect_stdout +from typing import Any, Union + +import numpy as np +from Crypto.Hash import keccak + + +def solve_cuda( + nonce_start: "np.int64", + update_interval: "np.int64", + tpb: int, + block_and_hotkey_hash_bytes: bytes, + difficulty: int, + limit: int, + dev_id: int = 0, +) -> Union[tuple[Any, bytes], tuple[int, bytes], tuple[Any, None]]: + """ + Solves the PoW problem using CUDA. + + Args: + nonce_start (numpy.int64): Starting nonce. + update_interval (numpy.int64): Number of nonces to solve before updating block information. + tpb (int): Threads per block. + block_and_hotkey_hash_bytes (bytes): Keccak(Bytes of the block hash + bytes of the hotkey) 64 bytes. + difficulty (int): Difficulty of the PoW problem. + limit (int): Upper limit of the nonce. + dev_id (int): The CUDA device ID. Defaults to ``0``. + + Returns: + (Union[tuple[Any, bytes], tuple[int, bytes], tuple[Any, None]]): Tuple of the nonce and the seal corresponding to the solution. Returns -1 for nonce if no solution is found. + """ + + try: + import cubit + except ImportError: + raise ImportError( + "Please install cubit. See the instruction https://github.com/opentensor/cubit?tab=readme-ov-file#install." 
+ ) + + upper = int(limit // difficulty) + + upper_bytes = upper.to_bytes(32, byteorder="little", signed=False) + + def _hex_bytes_to_u8_list(hex_bytes: bytes): + """Converts a sequence of hex bytes to a list of unsigned 8-bit integers.""" + hex_chunks = [ + int(hex_bytes[i : i + 2], 16) for i in range(0, len(hex_bytes), 2) + ] + return hex_chunks + + def _create_seal_hash(block_and_hotkey_hash_hex_: bytes, nonce: int) -> bytes: + """Creates a seal hash from the block and hotkey hash and nonce.""" + nonce_bytes = binascii.hexlify(nonce.to_bytes(8, "little")) + pre_seal = nonce_bytes + block_and_hotkey_hash_hex_ + seal_sh256 = hashlib.sha256(bytearray(_hex_bytes_to_u8_list(pre_seal))).digest() + kec = keccak.new(digest_bits=256) + return kec.update(seal_sh256).digest() + + def _seal_meets_difficulty(seal_: bytes, difficulty_: int): + """Checks if the seal meets the given difficulty.""" + seal_number = int.from_bytes(seal_, "big") + product = seal_number * difficulty_ + limit_ = int(math.pow(2, 256)) - 1 + + return product < limit_ + + # Call cython function + # int blockSize, uint64 nonce_start, uint64 update_interval, const unsigned char[:] limit, + # const unsigned char[:] block_bytes, int dev_id + block_and_hotkey_hash_hex = binascii.hexlify(block_and_hotkey_hash_bytes)[:64] + + solution = cubit.solve_cuda( + tpb, + nonce_start, + update_interval, + upper_bytes, + block_and_hotkey_hash_hex, + dev_id, + ) # 0 is first GPU + seal = None + if solution != -1: + seal = _create_seal_hash(block_and_hotkey_hash_hex, solution) + if _seal_meets_difficulty(seal, difficulty): + return solution, seal + else: + return -1, b"\x00" * 32 + return solution, seal + + +def reset_cuda(): + """Resets the CUDA environment.""" + try: + import cubit + except ImportError: + raise ImportError("Please install cubit") + cubit.reset_cuda() + + +def log_cuda_errors() -> str: + """Logs any CUDA errors.""" + try: + import cubit + except ImportError: + raise ImportError("Please install cubit") + 
+ file = io.StringIO() + with redirect_stdout(file): + cubit.log_cuda_errors() + return file.getvalue() diff --git a/bittensor/utils/registration.py b/bittensor/utils/registration.py index 4d0cdb93d6..46c39d3d40 100644 --- a/bittensor/utils/registration.py +++ b/bittensor/utils/registration.py @@ -15,13 +15,30 @@ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. +import binascii +import dataclasses import functools +import hashlib +import math +import multiprocessing import os -from typing import TYPE_CHECKING +import random +import subprocess +import time +from datetime import timedelta +from multiprocessing.queues import Queue as QueueType +from queue import Empty, Full +from typing import Any, Callable, Optional, Union, TYPE_CHECKING +import backoff import numpy +from Crypto.Hash import keccak +from rich import console as rich_console, status as rich_status +from bittensor.core.settings import bt_console from bittensor.utils.btlogging import logging +from bittensor.utils.formatting import get_human_readable, millify +from bittensor.utils.register_cuda import solve_cuda def use_torch() -> bool: @@ -95,5 +112,1007 @@ def __getattr__(self, name): if TYPE_CHECKING: import torch + from bittensor.core.subtensor import Subtensor + from bittensor_wallet import Wallet else: torch = LazyLoadedTorch() + + +def _hex_bytes_to_u8_list(hex_bytes: bytes): + hex_chunks = [int(hex_bytes[i : i + 2], 16) for i in range(0, len(hex_bytes), 2)] + return hex_chunks + + +def _create_seal_hash(block_and_hotkey_hash_bytes: bytes, nonce: int) -> bytes: + """Create a seal hash for a given block and nonce.""" + nonce_bytes = binascii.hexlify(nonce.to_bytes(8, "little")) + pre_seal = nonce_bytes + binascii.hexlify(block_and_hotkey_hash_bytes)[:64] + seal_sh256 = hashlib.sha256(bytearray(_hex_bytes_to_u8_list(pre_seal))).digest() + kec = keccak.new(digest_bits=256) + seal = kec.update(seal_sh256).digest() + 
return seal + + +def _seal_meets_difficulty(seal: bytes, difficulty: int, limit: int): + """Check if the seal meets the given difficulty criteria.""" + seal_number = int.from_bytes(seal, "big") + product = seal_number * difficulty + return product < limit + + +@dataclasses.dataclass +class POWSolution: + """A solution to the registration PoW problem.""" + + nonce: int + block_number: int + difficulty: int + seal: bytes + + def is_stale(self, subtensor: "Subtensor") -> bool: + """ + Returns True if the POW is stale. + + This means the block the POW is solved for is within 3 blocks of the current block. + """ + return self.block_number < subtensor.get_current_block() - 3 + + +class _UsingSpawnStartMethod: + def __init__(self, force: bool = False): + self._old_start_method = None + self._force = force + + def __enter__(self): + self._old_start_method = multiprocessing.get_start_method(allow_none=True) + if self._old_start_method is None: + self._old_start_method = "spawn" # default to spawn + + multiprocessing.set_start_method("spawn", force=self._force) + + def __exit__(self, *args): + # restore the old start method + multiprocessing.set_start_method(self._old_start_method, force=True) + + +class _SolverBase(multiprocessing.Process): + """ + A process that solves the registration PoW problem. + + Args: + proc_num (int): The number of the process being created. + num_proc (int): The total number of processes running. + update_interval (int): The number of nonces to try to solve before checking for a new block. + finished_queue (multiprocessing.Queue): The queue to put the process number when a process finishes each update_interval. Used for calculating the average time per update_interval across all processes. + solution_queue (multiprocessing.Queue): The queue to put the solution the process has found during the pow solve. + newBlockEvent (multiprocessing.Event): The event to set by the main process when a new block is finalized in the network. 
The solver process will check for the event after each update_interval. The solver process will get the new block hash and difficulty and start solving for a new nonce. + stopEvent (multiprocessing.Event): The event to set by the main process when all the solver processes should stop. The solver process will check for the event after each update_interval. The solver process will stop when the event is set. Used to stop the solver processes when a solution is found. + curr_block (multiprocessing.Array): The array containing this process's current block hash. The main process will set the array to the new block hash when a new block is finalized in the network. The solver process will get the new block hash from this array when newBlockEvent is set. + curr_block_num (multiprocessing.Value): The value containing this process's current block number. The main process will set the value to the new block number when a new block is finalized in the network. The solver process will get the new block number from this value when newBlockEvent is set. + curr_diff (multiprocessing.Array): The array containing this process's current difficulty. The main process will set the array to the new difficulty when a new block is finalized in the network. The solver process will get the new difficulty from this array when newBlockEvent is set. + check_block (multiprocessing.Lock): The lock to prevent this process from getting the new block data while the main process is updating the data. + limit (int): The limit of the pow solve for a valid solution. 
+ """ + + proc_num: int + num_proc: int + update_interval: int + finished_queue: "multiprocessing.Queue" + solution_queue: "multiprocessing.Queue" + newBlockEvent: "multiprocessing.Event" + stopEvent: "multiprocessing.Event" + hotkey_bytes: bytes + curr_block: "multiprocessing.Array" + curr_block_num: "multiprocessing.Value" + curr_diff: "multiprocessing.Array" + check_block: "multiprocessing.Lock" + limit: int + + def __init__( + self, + proc_num, + num_proc, + update_interval, + finished_queue, + solution_queue, + stopEvent, + curr_block, + curr_block_num, + curr_diff, + check_block, + limit, + ): + multiprocessing.Process.__init__(self, daemon=True) + self.proc_num = proc_num + self.num_proc = num_proc + self.update_interval = update_interval + self.finished_queue = finished_queue + self.solution_queue = solution_queue + self.newBlockEvent = multiprocessing.Event() + self.newBlockEvent.clear() + self.curr_block = curr_block + self.curr_block_num = curr_block_num + self.curr_diff = curr_diff + self.check_block = check_block + self.stopEvent = stopEvent + self.limit = limit + + def run(self): + raise NotImplementedError("_SolverBase is an abstract class") + + @staticmethod + def create_shared_memory() -> ( + tuple["multiprocessing.Array", "multiprocessing.Value", "multiprocessing.Array"] + ): + """Creates shared memory for the solver processes to use.""" + curr_block = multiprocessing.Array("h", 32, lock=True) # byte array + curr_block_num = multiprocessing.Value("i", 0, lock=True) # int + curr_diff = multiprocessing.Array("Q", [0, 0], lock=True) # [high, low] + + return curr_block, curr_block_num, curr_diff + + +class _Solver(_SolverBase): + def run(self): + block_number: int + block_and_hotkey_hash_bytes: bytes + block_difficulty: int + nonce_limit = int(math.pow(2, 64)) - 1 + + # Start at random nonce + nonce_start = random.randint(0, nonce_limit) + nonce_end = nonce_start + self.update_interval + while not self.stopEvent.is_set(): + if 
self.newBlockEvent.is_set(): + with self.check_block: + block_number = self.curr_block_num.value + block_and_hotkey_hash_bytes = bytes(self.curr_block) + block_difficulty = _registration_diff_unpack(self.curr_diff) + + self.newBlockEvent.clear() + + # Do a block of nonces + solution = _solve_for_nonce_block( + nonce_start, + nonce_end, + block_and_hotkey_hash_bytes, + block_difficulty, + self.limit, + block_number, + ) + if solution is not None: + self.solution_queue.put(solution) + + try: + # Send time + self.finished_queue.put_nowait(self.proc_num) + except Full: + pass + + nonce_start = random.randint(0, nonce_limit) + nonce_start = nonce_start % nonce_limit + nonce_end = nonce_start + self.update_interval + + +class _CUDASolver(_SolverBase): + dev_id: int + tpb: int + + def __init__( + self, + proc_num, + num_proc, + update_interval, + finished_queue, + solution_queue, + stopEvent, + curr_block, + curr_block_num, + curr_diff, + check_block, + limit, + dev_id: int, + tpb: int, + ): + super().__init__( + proc_num, + num_proc, + update_interval, + finished_queue, + solution_queue, + stopEvent, + curr_block, + curr_block_num, + curr_diff, + check_block, + limit, + ) + self.dev_id = dev_id + self.tpb = tpb + + def run(self): + block_number: int = 0 # dummy value + block_and_hotkey_hash_bytes: bytes = b"0" * 32 # dummy value + block_difficulty: int = int(math.pow(2, 64)) - 1 # dummy value + nonce_limit = int(math.pow(2, 64)) - 1 # U64MAX + + # Start at random nonce + nonce_start = random.randint(0, nonce_limit) + while not self.stopEvent.is_set(): + if self.newBlockEvent.is_set(): + with self.check_block: + block_number = self.curr_block_num.value + block_and_hotkey_hash_bytes = bytes(self.curr_block) + block_difficulty = _registration_diff_unpack(self.curr_diff) + + self.newBlockEvent.clear() + + # Do a block of nonces + solution = _solve_for_nonce_block_cuda( + nonce_start, + self.update_interval, + block_and_hotkey_hash_bytes, + block_difficulty, + self.limit, + 
block_number, + self.dev_id, + self.tpb, + ) + if solution is not None: + self.solution_queue.put(solution) + + try: + # Signal that a nonce_block was finished using queue + # send our proc_num + self.finished_queue.put(self.proc_num) + except Full: + pass + + # increase nonce by number of nonces processed + nonce_start += self.update_interval * self.tpb + nonce_start = nonce_start % nonce_limit + + +def _solve_for_nonce_block_cuda( + nonce_start: int, + update_interval: int, + block_and_hotkey_hash_bytes: bytes, + difficulty: int, + limit: int, + block_number: int, + dev_id: int, + tpb: int, +) -> Optional["POWSolution"]: + """Tries to solve the POW on a CUDA device for a block of nonces (nonce_start, nonce_start + update_interval * tpb""" + solution, seal = solve_cuda( + nonce_start, + update_interval, + tpb, + block_and_hotkey_hash_bytes, + difficulty, + limit, + dev_id, + ) + + if solution != -1: + # Check if solution is valid (i.e. not -1) + return POWSolution(solution, block_number, difficulty, seal) + + return None + + +def _solve_for_nonce_block( + nonce_start: int, + nonce_end: int, + block_and_hotkey_hash_bytes: bytes, + difficulty: int, + limit: int, + block_number: int, +) -> Optional["POWSolution"]: + """Tries to solve the POW for a block of nonces (nonce_start, nonce_end)""" + for nonce in range(nonce_start, nonce_end): + # Create seal. + seal = _create_seal_hash(block_and_hotkey_hash_bytes, nonce) + + # Check if seal meets difficulty + if _seal_meets_difficulty(seal, difficulty, limit): + # Found a solution, save it. + return POWSolution(nonce, block_number, difficulty, seal) + + return None + + +def _registration_diff_unpack(packed_diff: "multiprocessing.Array") -> int: + """Unpacks the packed two 32-bit integers into one 64-bit integer. Little endian.""" + return int(packed_diff[0] << 32 | packed_diff[1]) + + +def _registration_diff_pack(diff: int, packed_diff: "multiprocessing.Array"): + """Packs the difficulty into two 32-bit integers. 
def _hash_block_with_hotkey(block_bytes: bytes, hotkey_bytes: bytes) -> bytes:
    """Keccak-256 digest (32 bytes) of the block bytes concatenated with the
    hotkey bytes."""
    hasher = keccak.new(digest_bits=256)
    # pycryptodome's update() returns the hash object, allowing this rebind.
    hasher = hasher.update(bytearray(block_bytes + hotkey_bytes))
    return hasher.digest()


def _update_curr_block(
    curr_diff: "multiprocessing.Array",
    curr_block: "multiprocessing.Array",
    curr_block_num: "multiprocessing.Value",
    block_number: int,
    block_bytes: bytes,
    diff: int,
    hotkey_bytes: bytes,
    lock: "multiprocessing.Lock",
):
    """Atomically publish a new block to the solvers' shared memory.

    Under ``lock``, stores the block number, the 32-byte block+hotkey hash,
    and the packed difficulty so every solver sees a consistent snapshot.
    """
    with lock:
        curr_block_num.value = block_number
        digest = _hash_block_with_hotkey(block_bytes, hotkey_bytes)
        # Copy the 32-byte digest into the shared array byte by byte.
        for idx, byte_val in enumerate(digest):
            curr_block[idx] = byte_val
        _registration_diff_pack(diff, curr_diff)


def get_cpu_count() -> int:
    """Number of CPUs usable by this process (affinity-aware where supported)."""
    affinity_getter = getattr(os, "sched_getaffinity", None)
    if affinity_getter is None:
        # macOS (and Windows) lack sched_getaffinity; fall back to the total count.
        return os.cpu_count()
    return len(affinity_getter(0))


@dataclasses.dataclass
class RegistrationStatistics:
    """Running statistics collected while solving a registration POW."""

    time_spent_total: float  # wall-clock seconds since solving started
    rounds_total: int  # total finished work blocks
    time_average: float  # mean seconds per round
    time_spent: float  # seconds spent on the most recent round
    hash_rate_perpetual: float  # average hash rate over the whole run (H/s)
    hash_rate: float  # EWMA hash rate (H/s)
    difficulty: int  # current POW difficulty
    block_number: int  # chain block these stats refer to
    block_hash: bytes  # hash of that block; in practice a hex str at runtime — TODO confirm
class RegistrationStatisticsLogger:
    """Displays live registration statistics, either in-place (single rich
    status line) or as sequential console log lines."""

    console: rich_console.Console
    status: Optional[rich_status.Status]

    def __init__(
        self, console: rich_console.Console, output_in_place: bool = True
    ) -> None:
        self.console = console
        # A rich Status gives one continuously-updated line; when in-place
        # output is disabled we fall back to plain console logging instead.
        self.status = self.console.status("Solving") if output_in_place else None

    def start(self) -> None:
        """Start the in-place display (no-op when logging line-by-line)."""
        if self.status is not None:
            self.status.start()

    def stop(self) -> None:
        """Stop the in-place display (no-op when logging line-by-line)."""
        if self.status is not None:
            self.status.stop()

    def get_status_message(
        self, stats: RegistrationStatistics, verbose: bool = False
    ) -> str:
        """Render ``stats`` as rich-markup status text.

        Args:
            stats (RegistrationStatistics): Current solving statistics.
            verbose (bool): If ``True``, also include per-round timings.
        """
        # NOTE(review): stats.block_hash is annotated bytes but arrives as a
        # hex str (see _get_block_with_retry), so .encode() works and renders
        # as b'0x...'; kept as-is to preserve the existing display format.
        message = (
            "Solving\n"
            + f"Time Spent (total): [bold white]{timedelta(seconds=stats.time_spent_total)}[/bold white]\n"
            + (
                f"Time Spent This Round: {timedelta(seconds=stats.time_spent)}\n"
                + f"Time Spent Average: {timedelta(seconds=stats.time_average)}\n"
                if verbose
                else ""
            )
            + f"Registration Difficulty: [bold white]{millify(stats.difficulty)}[/bold white]\n"
            + f"Iters (Inst/Perp): [bold white]{get_human_readable(stats.hash_rate, 'H')}/s / "
            + f"{get_human_readable(stats.hash_rate_perpetual, 'H')}/s[/bold white]\n"
            + f"Block Number: [bold white]{stats.block_number}[/bold white]\n"
            + f"Block Hash: [bold white]{stats.block_hash.encode('utf-8')}[/bold white]\n"
        )
        return message

    def update(self, stats: RegistrationStatistics, verbose: bool = False) -> None:
        """Push the latest statistics to the display."""
        if self.status is not None:
            self.status.update(self.get_status_message(stats, verbose=verbose))
        else:
            self.console.log(self.get_status_message(stats, verbose=verbose))


def _solve_for_difficulty_fast(
    subtensor: "Subtensor",
    wallet: "Wallet",
    netuid: int,
    output_in_place: bool = True,
    num_processes: Optional[int] = None,
    update_interval: Optional[int] = None,
    n_samples: int = 10,
    alpha_: float = 0.80,
    log_verbose: bool = False,
) -> Optional[POWSolution]:
    """
    Solves the POW for registration using multiprocessing.

    Args:
        subtensor (bittensor.core.subtensor.Subtensor): Subtensor instance to connect to for block information and to submit.
        wallet (bittensor_wallet.Wallet): wallet to use for registration.
        netuid (int): The netuid of the subnet to register to.
        output_in_place (bool): If true, prints the status in place. Otherwise, prints the status on a new line.
        num_processes (int): Number of processes to use. Defaults to the number of CPUs available to this process.
        update_interval (int): Number of nonces to solve before updating block information.
        n_samples (int): The number of samples of the hash_rate to keep for the EWMA.
        alpha_ (float): The alpha for the EWMA for the hash_rate calculation.
        log_verbose (bool): If true, prints more verbose logging of the registration metrics.

    Returns:
        Optional[POWSolution]: The solution, or ``None`` if the hotkey became registered first.

    Note: The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust.
    Note: We can also modify the update interval to do smaller blocks of work, while still updating the block information after a different number of nonces, to increase the transparency of the process while still keeping the speed.
    """
    if num_processes is None:
        # Use every CPU available to this process.  The previous
        # ``min(1, get_cpu_count())`` always evaluated to 1, silently forcing
        # a single solver and defeating multiprocessing.
        num_processes = get_cpu_count() or 1

    if update_interval is None:
        update_interval = 50_000

    # Upper bound used by the seal check: 2**256 - 1, computed exactly.
    limit = (1 << 256) - 1

    curr_block, curr_block_num, curr_diff = _Solver.create_shared_memory()

    # Establish communication queues
    # See the _Solver class for more information on the queues.
    stopEvent = multiprocessing.Event()
    stopEvent.clear()

    solution_queue = multiprocessing.Queue()
    finished_queues = [multiprocessing.Queue() for _ in range(num_processes)]
    check_block = multiprocessing.Lock()

    # netuid -1 (faucet/root case) registers against the coldkey instead.
    hotkey_bytes = (
        wallet.coldkeypub.public_key if netuid == -1 else wallet.hotkey.public_key
    )
    # Start consumers
    solvers = [
        _Solver(
            i,
            num_processes,
            update_interval,
            finished_queues[i],
            solution_queue,
            stopEvent,
            curr_block,
            curr_block_num,
            curr_diff,
            check_block,
            limit,
        )
        for i in range(num_processes)
    ]

    # Get first block
    block_number, difficulty, block_hash = _get_block_with_retry(
        subtensor=subtensor, netuid=netuid
    )

    block_bytes = bytes.fromhex(block_hash[2:])
    old_block_number = block_number
    # Set to current block
    _update_curr_block(
        curr_diff,
        curr_block,
        curr_block_num,
        block_number,
        block_bytes,
        difficulty,
        hotkey_bytes,
        check_block,
    )

    # Set new block events for each solver to start at the initial block
    for worker in solvers:
        worker.newBlockEvent.set()

    for worker in solvers:
        worker.start()  # start the solver processes

    start_time = time.time()  # time that the registration started
    time_last = start_time  # time that the last work blocks completed

    curr_stats = RegistrationStatistics(
        time_spent_total=0.0,
        time_average=0.0,
        rounds_total=0,
        time_spent=0.0,
        hash_rate_perpetual=0.0,
        hash_rate=0.0,
        difficulty=difficulty,
        block_number=block_number,
        block_hash=block_hash,
    )

    start_time_perpetual = time.time()

    logger = RegistrationStatisticsLogger(bt_console, output_in_place)
    logger.start()

    solution = None

    hash_rates = [0] * n_samples  # The last n true hash_rates
    weights = [alpha_**i for i in range(n_samples)]  # weights decay by alpha

    while netuid == -1 or not subtensor.is_hotkey_registered(
        netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address
    ):
        # Wait until a solver finds a solution
        try:
            solution = solution_queue.get(block=True, timeout=0.25)
            if solution is not None:
                break
        except Empty:
            # No solution found, try again
            pass

        # check for new block
        old_block_number = _check_for_newest_block_and_update(
            subtensor=subtensor,
            netuid=netuid,
            hotkey_bytes=hotkey_bytes,
            old_block_number=old_block_number,
            curr_diff=curr_diff,
            curr_block=curr_block,
            curr_block_num=curr_block_num,
            curr_stats=curr_stats,
            update_curr_block=_update_curr_block,
            check_block=check_block,
            solvers=solvers,
        )

        # Count how many solvers finished a work block since last check.
        num_time = 0
        for finished_queue in finished_queues:
            try:
                finished_queue.get(timeout=0.1)
                num_time += 1
            except Empty:
                continue

        time_now = time.time()  # get current time
        time_since_last = time_now - time_last  # get time since last work block(s)
        if num_time > 0 and time_since_last > 0.0:
            # create EWMA of the hash_rate to make measure more robust
            hash_rate_ = (num_time * update_interval) / time_since_last
            hash_rates.append(hash_rate_)
            hash_rates.pop(0)  # remove the 0th data point
            curr_stats.hash_rate = sum(
                hash_rates[i] * weights[i] for i in range(n_samples)
            ) / sum(weights)

            # update time last to now
            time_last = time_now

            curr_stats.time_average = (
                curr_stats.time_average * curr_stats.rounds_total
                + curr_stats.time_spent
            ) / (curr_stats.rounds_total + num_time)
            curr_stats.rounds_total += num_time

            # Update stats
            curr_stats.time_spent = time_since_last
            new_time_spent_total = time_now - start_time_perpetual
            curr_stats.hash_rate_perpetual = (
                curr_stats.rounds_total * update_interval
            ) / new_time_spent_total
            curr_stats.time_spent_total = new_time_spent_total

        # Update the logger
        logger.update(curr_stats, verbose=log_verbose)

    # exited while, solution contains the nonce or wallet is registered
    stopEvent.set()  # stop all other processes
    logger.stop()

    # terminate and wait for all solvers to exit
    _terminate_workers_and_wait_for_exit(solvers)

    return solution


@backoff.on_exception(backoff.constant, Exception, interval=1, max_tries=3)
def _get_block_with_retry(
    subtensor: "Subtensor", netuid: int
) -> tuple[int, int, str]:
    """
    Gets the current block number, difficulty, and block hash from the substrate node.

    Args:
        subtensor (bittensor.core.subtensor.Subtensor): The subtensor object to use to get the block number, difficulty, and block hash.
        netuid (int): The netuid of the network to get the block number, difficulty, and block hash from.

    Returns:
        tuple[int, int, str]
            block_number (int): The current block number.
            difficulty (int): The current difficulty of the subnet.
            block_hash (str): The current block hash as a 0x-prefixed hex string
                (callers slice off the prefix and ``bytes.fromhex`` the rest).

    Raises:
        Exception: If the block hash is None.
        ValueError: If the difficulty is None.
    """
    block_number = subtensor.get_current_block()
    # netuid -1 is the faucet/root case: no subnet difficulty applies.
    difficulty = 1_000_000 if netuid == -1 else subtensor.difficulty(netuid=netuid)
    block_hash = subtensor.get_block_hash(block_number)
    if block_hash is None:
        raise Exception(
            "Network error. Could not connect to substrate to get block hash"
        )
    if difficulty is None:
        raise ValueError("Chain error. Difficulty is None")
    return block_number, difficulty, block_hash
+ old_block_number (int): The old block number to check against. + hotkey_bytes (bytes): The bytes of the hotkey's pubkey. + curr_diff (multiprocessing.Array): The current difficulty as a multiprocessing array. + curr_block (multiprocessing.Array): Where the current block is stored as a multiprocessing array. + curr_block_num (multiprocessing.Value): Where the current block number is stored as a multiprocessing value. + update_curr_block (typing.Callable): A function that updates the current block. + check_block (multiprocessing.Lock): A mp lock that is used to check for a new block. + solvers (list[bittensor.utils.registration._Solver]): A list of solvers to update the current block for. + curr_stats (bittensor.utils.registration.RegistrationStatistics): The current registration statistics to update. + + Returns: + (int) The current block number. + """ + block_number = subtensor.get_current_block() + if block_number != old_block_number: + old_block_number = block_number + # update block information + block_number, difficulty, block_hash = _get_block_with_retry( + subtensor=subtensor, netuid=netuid + ) + block_bytes = bytes.fromhex(block_hash[2:]) + + update_curr_block( + curr_diff, + curr_block, + curr_block_num, + block_number, + block_bytes, + difficulty, + hotkey_bytes, + check_block, + ) + # Set new block events for each solver + + for worker in solvers: + worker.newBlockEvent.set() + + # update stats + curr_stats.block_number = block_number + curr_stats.block_hash = block_hash + curr_stats.difficulty = difficulty + + return old_block_number + + +def _solve_for_difficulty_fast_cuda( + subtensor: "Subtensor", + wallet: "Wallet", + netuid: int, + output_in_place: bool = True, + update_interval: int = 50_000, + tpb: int = 512, + dev_id: Union[list[int], int] = 0, + n_samples: int = 10, + alpha_: float = 0.80, + log_verbose: bool = False, +) -> Optional["POWSolution"]: + """ + Solves the registration fast using CUDA. 
+ + Args: + subtensor (bittensor.core.subtensor.Subtensor): The subtensor node to grab blocks. + wallet (bittensor_wallet.Wallet): The wallet to register. + netuid (int): The netuid of the subnet to register to. + output_in_place (bool) If true, prints the output in place, otherwise prints to new lines. + update_interval (int): The number of nonces to try before checking for more blocks. + tpb (int): The number of threads per block. CUDA param that should match the GPU capability + dev_id (Union[list[int], int]): The CUDA device IDs to execute the registration on, either a single device or a list of devices. + n_samples (int): The number of samples of the hash_rate to keep for the EWMA. + alpha_ (float): The alpha for the EWMA for the hash_rate calculation. + log_verbose (bool): If true, prints more verbose logging of the registration metrics. + + Note: The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust. + """ + if isinstance(dev_id, int): + dev_id = [dev_id] + elif dev_id is None: + dev_id = [0] + + if update_interval is None: + update_interval = 50_000 + + if not torch.cuda.is_available(): + raise Exception("CUDA not available") + + limit = int(math.pow(2, 256)) - 1 + + # Set mp start to use spawn so CUDA doesn't complain + with _UsingSpawnStartMethod(force=True): + curr_block, curr_block_num, curr_diff = _CUDASolver.create_shared_memory() + + # Create a worker per CUDA device + num_processes = len(dev_id) + + # Establish communication queues + stopEvent = multiprocessing.Event() + stopEvent.clear() + solution_queue = multiprocessing.Queue() + finished_queues = [multiprocessing.Queue() for _ in range(num_processes)] + check_block = multiprocessing.Lock() + + hotkey_bytes = wallet.hotkey.public_key + # Start workers + solvers = [ + _CUDASolver( + i, + num_processes, + update_interval, + finished_queues[i], + solution_queue, + stopEvent, + curr_block, + curr_block_num, + curr_diff, + check_block, + 
limit, + dev_id[i], + tpb, + ) + for i in range(num_processes) + ] + + # Get first block + block_number, difficulty, block_hash = _get_block_with_retry( + subtensor=subtensor, netuid=netuid + ) + + block_bytes = bytes.fromhex(block_hash[2:]) + old_block_number = block_number + + # Set to current block + _update_curr_block( + curr_diff, + curr_block, + curr_block_num, + block_number, + block_bytes, + difficulty, + hotkey_bytes, + check_block, + ) + + # Set new block events for each solver to start at the initial block + for worker in solvers: + worker.newBlockEvent.set() + + for worker in solvers: + worker.start() # start the solver processes + + start_time = time.time() # time that the registration started + time_last = start_time # time that the last work blocks completed + + curr_stats = RegistrationStatistics( + time_spent_total=0.0, + time_average=0.0, + rounds_total=0, + time_spent=0.0, + hash_rate_perpetual=0.0, + hash_rate=0.0, # EWMA hash_rate (H/s) + difficulty=difficulty, + block_number=block_number, + block_hash=block_hash, + ) + + start_time_perpetual = time.time() + + logger = RegistrationStatisticsLogger(bt_console, output_in_place) + logger.start() + + hash_rates = [0] * n_samples # The last n true hash_rates + weights = [alpha_**i for i in range(n_samples)] # weights decay by alpha + + solution = None + while netuid == -1 or not subtensor.is_hotkey_registered( + netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address + ): + # Wait until a solver finds a solution + try: + solution = solution_queue.get(block=True, timeout=0.15) + if solution is not None: + break + except Empty: + # No solution found, try again + pass + + # check for new block + old_block_number = _check_for_newest_block_and_update( + subtensor=subtensor, + netuid=netuid, + hotkey_bytes=hotkey_bytes, + curr_diff=curr_diff, + curr_block=curr_block, + curr_block_num=curr_block_num, + old_block_number=old_block_number, + curr_stats=curr_stats, + update_curr_block=_update_curr_block, + 
def _terminate_workers_and_wait_for_exit(
    workers: list[Union[multiprocessing.Process, QueueType]],
) -> None:
    """Shut down POW workers and release their resources.

    Accepts a mixed list of solver processes and multiprocessing queues:
    queues are closed and their feeder threads joined; processes are given a
    grace period to exit, force-terminated if still alive, and then closed.

    Args:
        workers (list[Union[multiprocessing.Process, QueueType]]): The
            processes and/or queues to tear down.
    """
    for worker in workers:
        if isinstance(worker, QueueType):
            # join_thread() is only legal after close(); close() is safe to
            # call even if the caller already closed the queue.
            worker.close()
            worker.join_thread()
        else:
            # Process.join(timeout) returns None on timeout — it never raises.
            # (The previous ``except subprocess.TimeoutExpired`` was dead
            # code: that exception belongs to the subprocess module.)
            worker.join(3.0)
            if worker.is_alive():
                # Grace period expired; force-stop and reap the process so
                # close() below does not raise ValueError.
                worker.terminate()
                worker.join()
            worker.close()
"Wallet", + netuid: int, + output_in_place: bool = True, + cuda: bool = False, + dev_id: Union[list[int], int] = 0, + tpb: int = 256, + num_processes: Optional[int] = None, + update_interval: Optional[int] = None, + log_verbose: bool = False, +) -> Optional[dict[str, Any]]: + """ + Creates a proof of work for the given subtensor and wallet. + + Args: + subtensor (bittensor.core.subtensor.Subtensor): The subtensor to create a proof of work for. + wallet (bittensor_wallet.Wallet): The wallet to create a proof of work for. + netuid (int): The netuid for the subnet to create a proof of work for. + output_in_place (bool): If true, prints the progress of the proof of work to the console in-place. Meaning the progress is printed on the same lines. Default is ``True``. + cuda (bool): If true, uses CUDA to solve the proof of work. Default is ``False``. + dev_id (Union[List[int], int]): The CUDA device id(s) to use. If cuda is true and dev_id is a list, then multiple CUDA devices will be used to solve the proof of work. Default is ``0``. + tpb (int): The number of threads per block to use when solving the proof of work. Should be a multiple of 32. Default is ``256``. + num_processes (Optional[int]): The number of processes to use when solving the proof of work. If None, then the number of processes is equal to the number of CPU cores. Default is None. + update_interval (Optional[int]): The number of nonces to run before checking for a new block. Default is ``None``. + log_verbose (bool): If true, prints the progress of the proof of work more verbosely. Default is ``False``. + + Returns: + Optional[Dict[str, Any]]: The proof of work solution or None if the wallet is already registered or there is a different error. + + Raises: + ValueError: If the subnet does not exist. 
+ """ + if netuid != -1: + if not subtensor.subnet_exists(netuid=netuid): + raise ValueError(f"Subnet {netuid} does not exist.") + + if cuda: + solution: Optional[POWSolution] = _solve_for_difficulty_fast_cuda( + subtensor, + wallet, + netuid=netuid, + output_in_place=output_in_place, + dev_id=dev_id, + tpb=tpb, + update_interval=update_interval, + log_verbose=log_verbose, + ) + else: + solution: Optional[POWSolution] = _solve_for_difficulty_fast( + subtensor, + wallet, + netuid=netuid, + output_in_place=output_in_place, + num_processes=num_processes, + update_interval=update_interval, + log_verbose=log_verbose, + ) + return solution diff --git a/requirements/prod.txt b/requirements/prod.txt index 4a319c506c..bed65e9d2e 100644 --- a/requirements/prod.txt +++ b/requirements/prod.txt @@ -1,7 +1,8 @@ wheel setuptools~=70.0.0 -bittensor-cli aiohttp~=3.9 +backoff +bittensor-cli bt-decode colorama~=0.4.6 fastapi~=0.110.1 @@ -12,6 +13,7 @@ nest_asyncio netaddr packaging python-statemachine~=2.1 +pycryptodome>=3.18.0,<4.0.0 pyyaml retry requests diff --git a/tests/integration_tests/test_subtensor_integration.py b/tests/integration_tests/test_subtensor_integration.py index e252cb63f1..552e5ab993 100644 --- a/tests/integration_tests/test_subtensor_integration.py +++ b/tests/integration_tests/test_subtensor_integration.py @@ -15,7 +15,9 @@ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
+import random import unittest +from queue import Empty as QueueEmpty from unittest.mock import MagicMock, patch import pytest @@ -247,6 +249,175 @@ def test_defaults_to_finney(self): assert sub.network == "finney" assert sub.chain_endpoint == settings.FINNEY_ENTRYPOINT + def test_registration_multiprocessed_already_registered(self): + work_blocks_before_is_registered = random.randint(5, 10) + # return False each work block but return True after a random number of blocks + is_registered_return_values = ( + [False for _ in range(work_blocks_before_is_registered)] + + [True] + + [True, False] + ) + # this should pass the initial False check in the subtensor class and then return True because the neuron is already registered + + mock_neuron = MagicMock() + mock_neuron.is_null = True + + # patch solution queue to return None + with patch( + "multiprocessing.queues.Queue.get", return_value=None + ) as mock_queue_get: + # patch time queue get to raise Empty exception + with patch( + "multiprocessing.queues.Queue.get_nowait", side_effect=QueueEmpty + ) as mock_queue_get_nowait: + wallet = get_mock_wallet( + hotkey=get_mock_keypair(0, self.id()), + coldkey=get_mock_keypair(1, self.id()), + ) + self.subtensor.is_hotkey_registered = MagicMock( + side_effect=is_registered_return_values + ) + + self.subtensor.difficulty = MagicMock(return_value=1) + self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( + side_effect=mock_neuron + ) + self.subtensor._do_pow_register = MagicMock(return_value=(True, None)) + + with patch("bittensor.core.settings.bt_console") as mock_set_status: + # Need to patch the console status to avoid opening a parallel live display + mock_set_status.__enter__ = MagicMock(return_value=True) + mock_set_status.__exit__ = MagicMock(return_value=True) + + # should return True + assert self.subtensor.register( + wallet=wallet, netuid=3, num_processes=3, update_interval=5 + ) + + # calls until True and once again before exiting subtensor class + # This 
assertion is currently broken when difficulty is too low + assert ( + self.subtensor.is_hotkey_registered.call_count + == work_blocks_before_is_registered + 2 + ) + + def test_registration_partly_failed(self): + do_pow_register_mock = MagicMock( + side_effect=[(False, "Failed"), (False, "Failed"), (True, None)] + ) + + def is_registered_side_effect(*args, **kwargs): + nonlocal do_pow_register_mock + return do_pow_register_mock.call_count < 3 + + current_block = [i for i in range(0, 100)] + + wallet = get_mock_wallet( + hotkey=get_mock_keypair(0, self.id()), + coldkey=get_mock_keypair(1, self.id()), + ) + + self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( + return_value=bittensor.NeuronInfo.get_null_neuron() + ) + self.subtensor.is_hotkey_registered = MagicMock( + side_effect=is_registered_side_effect + ) + + self.subtensor.difficulty = MagicMock(return_value=1) + self.subtensor.get_current_block = MagicMock(side_effect=current_block) + self.subtensor._do_pow_register = do_pow_register_mock + + # should return True + self.assertTrue( + self.subtensor.register( + wallet=wallet, netuid=3, num_processes=3, update_interval=5 + ), + msg="Registration should succeed", + ) + + def test_registration_failed(self): + is_registered_return_values = [False for _ in range(100)] + current_block = [i for i in range(0, 100)] + mock_neuron = MagicMock() + mock_neuron.is_null = True + + with patch( + "bittensor.core.extrinsics.registration.create_pow", return_value=None + ) as mock_create_pow: + wallet = get_mock_wallet( + hotkey=get_mock_keypair(0, self.id()), + coldkey=get_mock_keypair(1, self.id()), + ) + + self.subtensor.is_hotkey_registered = MagicMock( + side_effect=is_registered_return_values + ) + + self.subtensor.get_current_block = MagicMock(side_effect=current_block) + self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( + return_value=mock_neuron + ) + self.subtensor.substrate.get_block_hash = MagicMock( + return_value="0x" + "0" * 64 + ) + 
self.subtensor._do_pow_register = MagicMock(return_value=(False, "Failed")) + + # should return True + self.assertIsNot( + self.subtensor.register(wallet=wallet, netuid=3), + True, + msg="Registration should fail", + ) + self.assertEqual(mock_create_pow.call_count, 3) + + def test_registration_stale_then_continue(self): + # verify that after a stale solution, to solve will continue without exiting + + class ExitEarly(Exception): + pass + + mock_is_stale = MagicMock(side_effect=[True, False]) + + mock_do_pow_register = MagicMock(side_effect=ExitEarly()) + + mock_subtensor_self = MagicMock( + neuron_for_pubkey=MagicMock( + return_value=MagicMock(is_null=True) + ), # not registered + substrate=MagicMock( + get_block_hash=MagicMock(return_value="0x" + "0" * 64), + ), + ) + + mock_wallet = MagicMock() + + mock_create_pow = MagicMock(return_value=MagicMock(is_stale=mock_is_stale)) + + with patch( + "bittensor.core.extrinsics.registration.create_pow", mock_create_pow + ), patch( + "bittensor.core.extrinsics.registration._do_pow_register", + mock_do_pow_register, + ): + # should create a pow and check if it is stale + # then should create a new pow and check if it is stale + # then should enter substrate and exit early because of test + self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock( + return_value=bittensor.NeuronInfo.get_null_neuron() + ) + with pytest.raises(ExitEarly): + bittensor.subtensor.register(mock_subtensor_self, mock_wallet, netuid=3) + self.assertEqual( + mock_create_pow.call_count, 2, msg="must try another pow after stale" + ) + self.assertEqual(mock_is_stale.call_count, 2) + self.assertEqual( + mock_do_pow_register.call_count, + 1, + msg="only tries to submit once, then exits", + ) + if __name__ == "__main__": unittest.main() diff --git a/tests/unit_tests/extrinsics/test_registration.py b/tests/unit_tests/extrinsics/test_registration.py new file mode 100644 index 0000000000..b1bc1f0725 --- /dev/null +++ 
b/tests/unit_tests/extrinsics/test_registration.py @@ -0,0 +1,181 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +import pytest +from bittensor_wallet import Wallet + +from bittensor.core.extrinsics.registration import ( + register_extrinsic, +) +from bittensor.core.subtensor import Subtensor +from bittensor.utils.registration import POWSolution + + +# Mocking external dependencies +@pytest.fixture +def mock_subtensor(mocker): + mock = mocker.MagicMock(spec=Subtensor) + mock.network = "mock_network" + mock.substrate = mocker.MagicMock() + return mock + + +@pytest.fixture +def mock_wallet(mocker): + mock = mocker.MagicMock(spec=Wallet) + mock.coldkeypub.ss58_address = "mock_address" + mock.coldkey = mocker.MagicMock() + mock.hotkey = mocker.MagicMock() + mock.hotkey.ss58_address = "fake_ss58_address" + return mock + + +@pytest.fixture +def mock_pow_solution(mocker): + mock = mocker.MagicMock(spec=POWSolution) + mock.block_number = 123 + mock.nonce = 456 + mock.seal = [0, 1, 2, 3] + mock.is_stale.return_value = False + return mock + + +@pytest.fixture +def mock_new_wallet(mocker): + mock = mocker.MagicMock(spec=Wallet) + mock.coldkeypub.ss58_address = "mock_address" + mock.coldkey = mocker.MagicMock() + mock.hotkey = mocker.MagicMock() + return mock + + +@pytest.mark.parametrize( + "subnet_exists, neuron_is_null, prompt, prompt_response, cuda_available, expected_result, test_id", + [ + (False, True, True, True, True, False, "subnet-does-not-exist"), + (True, False, True, True, True, True, "neuron-already-registered"), + (True, True, True, False, True, False, "user-declines-prompt"), + (True, True, False, None, False, False, "cuda-unavailable"), + ], +) +def test_register_extrinsic_without_pow( + mock_subtensor, + mock_wallet, + subnet_exists, + neuron_is_null, + prompt, + prompt_response, + cuda_available, + expected_result, + test_id, + mocker, +): + # Arrange + with mocker.patch.object( + mock_subtensor, "subnet_exists", return_value=subnet_exists + ), mocker.patch.object( + mock_subtensor, + "get_neuron_for_pubkey_and_subnet", + 
return_value=mocker.MagicMock(is_null=neuron_is_null), + ), mocker.patch( + "rich.prompt.Confirm.ask", return_value=prompt_response + ), mocker.patch("torch.cuda.is_available", return_value=cuda_available): + # Act + result = register_extrinsic( + subtensor=mock_subtensor, + wallet=mock_wallet, + netuid=123, + wait_for_inclusion=True, + wait_for_finalization=True, + prompt=prompt, + max_allowed_attempts=3, + output_in_place=True, + cuda=True, + dev_id=0, + tpb=256, + num_processes=None, + update_interval=None, + log_verbose=False, + ) + + # Assert + assert result == expected_result, f"Test failed for test_id: {test_id}" + + +@pytest.mark.parametrize( + "pow_success, pow_stale, registration_success, cuda, hotkey_registered, expected_result, test_id", + [ + (True, False, True, False, False, True, "successful-with-valid-pow"), + (True, False, True, True, False, True, "successful-with-valid-cuda-pow"), + # Pow failed but key was registered already + (False, False, False, False, True, True, "hotkey-registered"), + # Pow was a success but registration failed with error 'key already registered' + (True, False, False, False, False, True, "registration-fail-key-registered"), + ], +) +def test_register_extrinsic_with_pow( + mock_subtensor, + mock_wallet, + mock_pow_solution, + pow_success, + pow_stale, + registration_success, + cuda, + hotkey_registered, + expected_result, + test_id, + mocker, +): + # Arrange + with mocker.patch( + "bittensor.utils.registration._solve_for_difficulty_fast", + return_value=mock_pow_solution if pow_success else None, + ), mocker.patch( + "bittensor.utils.registration._solve_for_difficulty_fast_cuda", + return_value=mock_pow_solution if pow_success else None, + ), mocker.patch( + "bittensor.core.extrinsics.registration._do_pow_register", + return_value=(registration_success, "HotKeyAlreadyRegisteredInSubNet"), + ), mocker.patch("torch.cuda.is_available", return_value=cuda): + # Act + if pow_success: + mock_pow_solution.is_stale.return_value = 
pow_stale + + if not pow_success and hotkey_registered: + mock_subtensor.is_hotkey_registered = mocker.MagicMock( + return_value=hotkey_registered + ) + + result = register_extrinsic( + subtensor=mock_subtensor, + wallet=mock_wallet, + netuid=123, + wait_for_inclusion=True, + wait_for_finalization=True, + prompt=False, + max_allowed_attempts=3, + output_in_place=True, + cuda=cuda, + dev_id=0, + tpb=256, + num_processes=None, + update_interval=None, + log_verbose=False, + ) + + # Assert + assert result == expected_result, f"Test failed for test_id: {test_id}." diff --git a/tests/unit_tests/utils/test_formatting.py b/tests/unit_tests/utils/test_formatting.py new file mode 100644 index 0000000000..3c223a48b3 --- /dev/null +++ b/tests/unit_tests/utils/test_formatting.py @@ -0,0 +1,80 @@ +# The MIT License (MIT) +# Copyright © 2024 Opentensor Foundation +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. +# +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +import math + +from bittensor.utils import formatting + + +def test_get_human_readable(): + """Tests the `get_human_readable` function in the `formatting` module.""" + num1 = 1000 + num2 = 1_000_000 + num3 = 1_000_000_000 + num4 = 150 + negative_num = -1000 + + # Test for default output + assert formatting.get_human_readable(num1) == "1.0KH" + + # Test for different quantities + assert formatting.get_human_readable(num2) == "1.0MH" + assert formatting.get_human_readable(num3) == "1.0GH" + + # Test for numbers less than 1000 + assert formatting.get_human_readable(num4) == "150.0H" + + # Test for negative numbers + assert formatting.get_human_readable(negative_num) == "-1.0KH" + + # Test for different suffix + assert formatting.get_human_readable(num1, suffix="B") == "1.0KB" + assert formatting.get_human_readable(num2, suffix="B") == "1.0MB" + assert formatting.get_human_readable(num3, suffix="B") == "1.0GB" + assert formatting.get_human_readable(num4, suffix="B") == "150.0B" + assert formatting.get_human_readable(negative_num, suffix="B") == "-1.0KB" + + +def test_millify(): + """Test millify function with various cases.""" + # Testing with value 0 + assert formatting.millify(0) == "0.00" + # Testing with a number in the tens + assert formatting.millify(10) == "10.00" + # Testing with a number in the hundreds + assert formatting.millify(100) == "100.00" + # Testing with a number in the thousands + assert formatting.millify(1000) == "1.00 K" + # Testing with a number in the millions + assert formatting.millify(1000000) == "1.00 M" + # Testing with a number in the billions + assert formatting.millify(1000000000) == "1.00 B" + # Testing with a number in the trillions + assert formatting.millify(1000000000000) == "1.00 T" + # Testing with maximum limit + mill_names = ["", " K", " M", " B", " T"] + n = 10 ** (3 * (len(mill_names) - 1) + 1) + mill_idx = max( + 0, + min( + len(mill_names) - 1, + int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3)), + ), + ) + assert 
formatting.millify(n) == "{:.2f}{}".format( + n / 10 ** (3 * mill_idx), mill_names[mill_idx] + ) From 720894a069d92b8dc614414cd6889bb2c73eb42e Mon Sep 17 00:00:00 2001 From: Roman <167799377+roman-opentensor@users.noreply.github.com> Date: Wed, 16 Oct 2024 12:43:39 -0700 Subject: [PATCH 06/58] added to Subtensor: `burned_register`, `get_subnet_burn_cost`, `recycle` and related extrinsics (#2359) * added to Subtensor: `burned_register`, `get_subnet_burn_cost`, `recycle` and related extrinsics * formatter * Update bittensor/core/extrinsics/registration.py Co-authored-by: Benjamin Himes <37844818+thewhaleking@users.noreply.github.com> --------- Co-authored-by: Benjamin Himes <37844818+thewhaleking@users.noreply.github.com> --- bittensor/core/extrinsics/registration.py | 164 ++++++++++++++++++ bittensor/core/subtensor.py | 75 +++++++- .../extrinsics/test_registration.py | 62 ++++++- tests/unit_tests/test_subtensor.py | 130 ++++++++++++++ 4 files changed, 425 insertions(+), 6 deletions(-) diff --git a/bittensor/core/extrinsics/registration.py b/bittensor/core/extrinsics/registration.py index bd19b16389..2528368094 100644 --- a/bittensor/core/extrinsics/registration.py +++ b/bittensor/core/extrinsics/registration.py @@ -18,6 +18,7 @@ import time from typing import Union, Optional, TYPE_CHECKING +from bittensor_wallet.errors import KeyFileError from retry import retry from rich.prompt import Confirm @@ -285,3 +286,166 @@ def register_extrinsic( # Failed to register after max attempts. bt_console.print("[red]No more attempts.[/red]") return False + + +@ensure_connected +def _do_burned_register( + self, + netuid: int, + wallet: "Wallet", + wait_for_inclusion: bool = False, + wait_for_finalization: bool = True, +) -> tuple[bool, Optional[str]]: + """ + Performs a burned register extrinsic call to the Subtensor chain. + + This method sends a registration transaction to the Subtensor blockchain using the burned register mechanism. 
It + retries the call up to three times with exponential backoff in case of failures. + + Args: + self (bittensor.core.subtensor.Subtensor): Subtensor instance. + netuid (int): The network unique identifier to register on. + wallet (bittensor_wallet.Wallet): The wallet to be registered. + wait_for_inclusion (bool): Whether to wait for the transaction to be included in a block. Default is False. + wait_for_finalization (bool): Whether to wait for the transaction to be finalized. Default is True. + + Returns: + Tuple[bool, Optional[str]]: A tuple containing a boolean indicating success or failure, and an optional error message. + """ + + @retry(delay=1, tries=3, backoff=2, max_delay=4) + def make_substrate_call_with_retry(): + # create extrinsic call + call = self.substrate.compose_call( + call_module="SubtensorModule", + call_function="burned_register", + call_params={ + "netuid": netuid, + "hotkey": wallet.hotkey.ss58_address, + }, + ) + extrinsic = self.substrate.create_signed_extrinsic( + call=call, keypair=wallet.coldkey + ) + response = self.substrate.submit_extrinsic( + extrinsic, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + + # We only wait here if we expect finalization. + if not wait_for_finalization and not wait_for_inclusion: + return True, None + + # process if registration successful, try again if pow is still valid + response.process_events() + if not response.is_success: + return False, format_error_message(response.error_message) + # Successful registration + else: + return True, None + + return make_substrate_call_with_retry() + + +def burned_register_extrinsic( + subtensor: "Subtensor", + wallet: "Wallet", + netuid: int, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = True, + prompt: bool = False, +) -> bool: + """Registers the wallet to chain by recycling TAO. + + Args: + subtensor (bittensor.core.subtensor.Subtensor): Subtensor instance. 
+ wallet (bittensor.wallet): Bittensor wallet object. + netuid (int): The ``netuid`` of the subnet to register on. + wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. + wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. + prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. + + Returns: + success (bool): Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. + """ + if not subtensor.subnet_exists(netuid): + bt_console.print( + ":cross_mark: [red]Failed[/red]: error: [bold white]subnet:{}[/bold white] does not exist.".format( + netuid + ) + ) + return False + + try: + wallet.unlock_coldkey() + except KeyFileError: + bt_console.print( + ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" + ) + return False + with bt_console.status( + f":satellite: Checking Account on [bold]subnet:{netuid}[/bold]..." + ): + neuron = subtensor.get_neuron_for_pubkey_and_subnet( + wallet.hotkey.ss58_address, netuid=netuid + ) + + old_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) + + recycle_amount = subtensor.recycle(netuid=netuid) + if not neuron.is_null: + bt_console.print( + ":white_heavy_check_mark: [green]Already Registered[/green]:\n" + "uid: [bold white]{}[/bold white]\n" + "netuid: [bold white]{}[/bold white]\n" + "hotkey: [bold white]{}[/bold white]\n" + "coldkey: [bold white]{}[/bold white]".format( + neuron.uid, neuron.netuid, neuron.hotkey, neuron.coldkey + ) + ) + return True + + if prompt: + # Prompt user for confirmation. 
+ if not Confirm.ask(f"Recycle {recycle_amount} to register on subnet:{netuid}?"): + return False + + with bt_console.status(":satellite: Recycling TAO for Registration..."): + success, err_msg = _do_burned_register( + self=subtensor, + netuid=netuid, + wallet=wallet, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + + if not success: + bt_console.print(f":cross_mark: [red]Failed[/red]: {err_msg}") + time.sleep(0.5) + return False + # Successful registration, final check for neuron and pubkey + else: + bt_console.print(":satellite: Checking Balance...") + block = subtensor.get_current_block() + new_balance = subtensor.get_balance( + wallet.coldkeypub.ss58_address, block=block + ) + + bt_console.print( + "Balance:\n [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( + old_balance, new_balance + ) + ) + is_registered = subtensor.is_hotkey_registered( + netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address + ) + if is_registered: + bt_console.print(":white_heavy_check_mark: [green]Registered[/green]") + return True + else: + # neuron not found, try again + bt_console.print( + ":cross_mark: [red]Unknown error. 
Neuron not found.[/red]" + ) + return False diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index cef96e802f..b57b3d85bd 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -54,7 +54,10 @@ do_serve_prometheus, prometheus_extrinsic, ) -from bittensor.core.extrinsics.registration import register_extrinsic +from bittensor.core.extrinsics.registration import ( + burned_register_extrinsic, + register_extrinsic, +) from bittensor.core.extrinsics.serving import ( do_serve_axon, serve_axon_extrinsic, @@ -958,6 +961,36 @@ def register( log_verbose=log_verbose, ) + def burned_register( + self, + wallet: "Wallet", + netuid: int, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = True, + prompt: bool = False, + ) -> bool: + """ + Registers a neuron on the Bittensor network by recycling TAO. This method of registration involves recycling TAO tokens, allowing them to be re-mined by performing work on the network. + + Args: + wallet (bittensor_wallet.Wallet): The wallet associated with the neuron to be registered. + netuid (int): The unique identifier of the subnet. + wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. Defaults to `False`. + wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. Defaults to `True`. + prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. Defaults to `False`. + + Returns: + bool: ``True`` if the registration is successful, False otherwise. 
+ """ + return burned_register_extrinsic( + subtensor=self, + wallet=wallet, + netuid=netuid, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + prompt=prompt, + ) + def serve_axon( self, netuid: int, @@ -1412,6 +1445,30 @@ def bonds( return b_map + def get_subnet_burn_cost(self, block: Optional[int] = None) -> Optional[str]: + """ + Retrieves the burn cost for registering a new subnet within the Bittensor network. This cost represents the amount of Tao that needs to be locked or burned to establish a new subnet. + + Args: + block (Optional[int]): The blockchain block number for the query. + + Returns: + int: The burn cost for subnet registration. + + The subnet burn cost is an important economic parameter, reflecting the network's mechanisms for controlling the proliferation of subnets and ensuring their commitment to the network's long-term viability. + """ + lock_cost = self.query_runtime_api( + runtime_api="SubnetRegistrationRuntimeApi", + method="get_network_registration_cost", + params=[], + block=block, + ) + + if lock_cost is None: + return None + + return lock_cost + # Metagraph uses this method def neurons(self, netuid: int, block: Optional[int] = None) -> list["NeuronInfo"]: """ @@ -1812,6 +1869,22 @@ def difficulty(self, netuid: int, block: Optional[int] = None) -> Optional[int]: return None return int(call) + def recycle(self, netuid: int, block: Optional[int] = None) -> Optional["Balance"]: + """ + Retrieves the 'Burn' hyperparameter for a specified subnet. The 'Burn' parameter represents the amount of Tao that is effectively recycled within the Bittensor network. + + Args: + netuid (int): The unique identifier of the subnet. + block (Optional[int]): The blockchain block number for the query. + + Returns: + Optional[Balance]: The value of the 'Burn' hyperparameter if the subnet exists, None otherwise. 
+ + Understanding the 'Burn' rate is essential for analyzing the network registration usage, particularly how it is correlated with user activity and the overall cost of participation in a given subnet. + """ + call = self._get_hyperparameter(param_name="Burn", netuid=netuid, block=block) + return None if call is None else Balance.from_rao(int(call)) + # Subnet 27 uses this method _do_serve_prometheus = do_serve_prometheus # Subnet 27 uses this method name diff --git a/tests/unit_tests/extrinsics/test_registration.py b/tests/unit_tests/extrinsics/test_registration.py index b1bc1f0725..9a39ba7d1a 100644 --- a/tests/unit_tests/extrinsics/test_registration.py +++ b/tests/unit_tests/extrinsics/test_registration.py @@ -18,9 +18,7 @@ import pytest from bittensor_wallet import Wallet -from bittensor.core.extrinsics.registration import ( - register_extrinsic, -) +from bittensor.core.extrinsics import registration from bittensor.core.subtensor import Subtensor from bittensor.utils.registration import POWSolution @@ -95,7 +93,7 @@ def test_register_extrinsic_without_pow( "rich.prompt.Confirm.ask", return_value=prompt_response ), mocker.patch("torch.cuda.is_available", return_value=cuda_available): # Act - result = register_extrinsic( + result = registration.register_extrinsic( subtensor=mock_subtensor, wallet=mock_wallet, netuid=123, @@ -160,7 +158,7 @@ def test_register_extrinsic_with_pow( return_value=hotkey_registered ) - result = register_extrinsic( + result = registration.register_extrinsic( subtensor=mock_subtensor, wallet=mock_wallet, netuid=123, @@ -179,3 +177,57 @@ def test_register_extrinsic_with_pow( # Assert assert result == expected_result, f"Test failed for test_id: {test_id}." 
+ + +@pytest.mark.parametrize( + "subnet_exists, neuron_is_null, recycle_success, prompt, prompt_response, is_registered, expected_result, test_id", + [ + # Happy paths + (True, False, None, False, None, None, True, "neuron-not-null"), + (True, True, True, True, True, True, True, "happy-path-wallet-registered"), + # Error paths + (False, True, False, False, None, None, False, "subnet-non-existence"), + (True, True, True, True, False, None, False, "prompt-declined"), + (True, True, False, True, True, False, False, "error-path-recycling-failed"), + (True, True, True, True, True, False, False, "error-path-not-registered"), + ], +) +def test_burned_register_extrinsic( + mock_subtensor, + mock_wallet, + subnet_exists, + neuron_is_null, + recycle_success, + prompt, + prompt_response, + is_registered, + expected_result, + test_id, + mocker, +): + # Arrange + with mocker.patch.object( + mock_subtensor, "subnet_exists", return_value=subnet_exists + ), mocker.patch.object( + mock_subtensor, + "get_neuron_for_pubkey_and_subnet", + return_value=mocker.MagicMock(is_null=neuron_is_null), + ), mocker.patch( + "bittensor.core.extrinsics.registration._do_burned_register", + return_value=(recycle_success, "Mock error message"), + ), mocker.patch.object( + mock_subtensor, "is_hotkey_registered", return_value=is_registered + ): + mock_confirm = mocker.MagicMock(return_value=prompt_response) + registration.Confirm.ask = mock_confirm + # Act + result = registration.burned_register_extrinsic( + subtensor=mock_subtensor, wallet=mock_wallet, netuid=123, prompt=prompt + ) + # Assert + assert result == expected_result, f"Test failed for test_id: {test_id}" + + if prompt: + mock_confirm.assert_called_once() + else: + mock_confirm.assert_not_called() diff --git a/tests/unit_tests/test_subtensor.py b/tests/unit_tests/test_subtensor.py index d0783d20ff..bc1ea360c6 100644 --- a/tests/unit_tests/test_subtensor.py +++ b/tests/unit_tests/test_subtensor.py @@ -2051,3 +2051,133 @@ def 
test_connect_with_substrate(mocker): # Assertions assert spy_get_substrate.call_count == 0 + + +def test_get_subnet_burn_cost_success(subtensor, mocker): + """Tests get_subnet_burn_cost method with successfully result.""" + # Preps + mocked_query_runtime_api = mocker.patch.object(subtensor, "query_runtime_api") + fake_block = 123 + + # Call + result = subtensor.get_subnet_burn_cost(fake_block) + + # Asserts + mocked_query_runtime_api.assert_called_once_with( + runtime_api="SubnetRegistrationRuntimeApi", + method="get_network_registration_cost", + params=[], + block=fake_block, + ) + + assert result == mocked_query_runtime_api.return_value + + +def test_get_subnet_burn_cost_none(subtensor, mocker): + """Tests get_subnet_burn_cost method with None result.""" + # Preps + mocked_query_runtime_api = mocker.patch.object( + subtensor, "query_runtime_api", return_value=None + ) + fake_block = 123 + + # Call + result = subtensor.get_subnet_burn_cost(fake_block) + + # Asserts + mocked_query_runtime_api.assert_called_once_with( + runtime_api="SubnetRegistrationRuntimeApi", + method="get_network_registration_cost", + params=[], + block=fake_block, + ) + + assert result is None + + +def test_difficulty_success(subtensor, mocker): + """Tests difficulty method with successfully result.""" + # Preps + mocked_get_hyperparameter = mocker.patch.object(subtensor, "_get_hyperparameter") + fake_netuid = 1 + fake_block = 2 + + # Call + result = subtensor.difficulty(fake_netuid, fake_block) + + # Asserts + mocked_get_hyperparameter.assert_called_once_with( + param_name="Difficulty", + netuid=fake_netuid, + block=fake_block, + ) + + assert result == int(mocked_get_hyperparameter.return_value) + + +def test_difficulty_none(subtensor, mocker): + """Tests difficulty method with None result.""" + # Preps + mocked_get_hyperparameter = mocker.patch.object( + subtensor, "_get_hyperparameter", return_value=None + ) + fake_netuid = 1 + fake_block = 2 + + # Call + result = 
subtensor.difficulty(fake_netuid, fake_block) + + # Asserts + mocked_get_hyperparameter.assert_called_once_with( + param_name="Difficulty", + netuid=fake_netuid, + block=fake_block, + ) + + assert result is None + + +def test_recycle_success(subtensor, mocker): + """Tests recycle method with successfully result.""" + # Preps + mocked_get_hyperparameter = mocker.patch.object( + subtensor, "_get_hyperparameter", return_value=0.1 + ) + fake_netuid = 1 + fake_block = 2 + mocked_balance = mocker.patch("bittensor.utils.balance.Balance") + + # Call + result = subtensor.recycle(fake_netuid, fake_block) + + # Asserts + mocked_get_hyperparameter.assert_called_once_with( + param_name="Burn", + netuid=fake_netuid, + block=fake_block, + ) + + mocked_balance.assert_called_once_with(int(mocked_get_hyperparameter.return_value)) + assert result == mocked_balance.return_value + + +def test_recycle_none(subtensor, mocker): + """Tests recycle method with None result.""" + # Preps + mocked_get_hyperparameter = mocker.patch.object( + subtensor, "_get_hyperparameter", return_value=None + ) + fake_netuid = 1 + fake_block = 2 + + # Call + result = subtensor.recycle(fake_netuid, fake_block) + + # Asserts + mocked_get_hyperparameter.assert_called_once_with( + param_name="Burn", + netuid=fake_netuid, + block=fake_block, + ) + + assert result is None From 0ca94f11a2b6f4826d09ad0634c8770be239af50 Mon Sep 17 00:00:00 2001 From: opendansor Date: Wed, 16 Oct 2024 23:00:56 -0700 Subject: [PATCH 07/58] Tests passing, happy path is done. Added an option for starting the commit reveal subprocess during the Subtensor initialization. Enhanced subprocess management and logging details to capture wallet and commit information more clearly. Streamlined error handling in set_weights module. 
--- bittensor/__init__.py | 3 -- bittensor/core/extrinsics/commit_weights.py | 2 +- bittensor/core/extrinsics/set_weights.py | 5 +- bittensor/core/subtensor.py | 4 ++ scripts/subprocess/commit_reveal.py | 50 +++++++++++++------ scripts/subprocess_utils.py | 54 +++++++++++++++------ tests/e2e_tests/conftest.py | 7 ++- tests/e2e_tests/test_reveal_weights.py | 4 +- 8 files changed, 88 insertions(+), 41 deletions(-) diff --git a/bittensor/__init__.py b/bittensor/__init__.py index d3344b9c92..f4d8ee906a 100644 --- a/bittensor/__init__.py +++ b/bittensor/__init__.py @@ -20,9 +20,6 @@ from .core.settings import __version__, version_split, DEFAULTS from .utils.btlogging import logging from .utils.deprecated import * -from scripts import subprocess_utils - -subprocess_utils.start_commit_reveal_subprocess() def __getattr__(name): diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index 51b7e74f06..30534dcc58 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -163,7 +163,7 @@ def send_command(command): cr_interval = subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_interval reveal_block = curr_block + cr_interval - command = f'committed "{wallet.name}" "{wallet.path}" "{wallet.hotkey}" "{curr_block}" "{reveal_block}" "{commit_hash}" "{netuid}" "{uids}" "{weights}" "{salt}"' + command = f'committed "{wallet.name}" "{wallet.path}" "{wallet.hotkey_str}" "{wallet.hotkey.ss58_address}" "{curr_block}" "{reveal_block}" "{commit_hash}" "{netuid}" "{uids}" "{weights}" "{salt}"' send_command(command) # Chain call for `reveal_weights_extrinsic` diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index 0bedbfcb9c..a0a300c44a 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -182,9 +182,8 @@ def set_weights_extrinsic( ) return True, "Successfully committed 
weights and Finalized." else: - error_message = format_error_message(message) - logging.error(error_message) - return False, error_message + logging.error(message) + return False, message except Exception as e: bt_console.print(f":cross_mark: [red]Failed[/red]: error:{e}") diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 72894fb7eb..f7b4d2dcff 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -139,6 +139,7 @@ def __init__( _mock: bool = False, log_verbose: bool = False, connection_timeout: int = 600, + subprocess_initialization: bool = True, ) -> None: """ Initializes a Subtensor interface for interacting with the Bittensor blockchain. @@ -187,6 +188,9 @@ def __init__( "To get ahead of this change, please run a local subtensor node and point to it." ) + if subprocess_initialization: + subprocess_utils.start_commit_reveal_subprocess(network=network) + self.log_verbose = log_verbose self._connection_timeout = connection_timeout self.substrate: "SubstrateInterface" = None diff --git a/scripts/subprocess/commit_reveal.py b/scripts/subprocess/commit_reveal.py index 71b6312539..7d429a3191 100644 --- a/scripts/subprocess/commit_reveal.py +++ b/scripts/subprocess/commit_reveal.py @@ -1,6 +1,7 @@ import argparse import json import os +import shlex import time import utils # Ensure this import works import socket @@ -45,7 +46,8 @@ def is_table_empty(table_name: str) -> bool: def initialize_db(): # Create 'commits' table if it doesn't exist columns = [ - ("wallet_hotkey", "TEXT"), + ("wallet_hotkey_name", "TEXT"), + ("wallet_hotkey_ss58", "TEXT"), ("wallet_path", "TEXT"), ("wallet_name", "TEXT"), ("commit_hash", "TEXT"), @@ -69,18 +71,35 @@ def reveal(subtensor, data): # create wallet wallet_name = data["wallet_name"] wallet_path = data["wallet_path"] - wallet_hotkey = data["wallet_hotkey"] + wallet_hotkey_name = data["wallet_hotkey_name"] - wallet = Wallet(name=wallet_name, path=wallet_path, hotkey=wallet_hotkey) + 
wallet = Wallet(name=wallet_name, path=wallet_path, hotkey=wallet_hotkey_name) + + print(f"the data: {data}") + print(f"wallet: {wallet}") + + commit_hash = data["commit_hash"] + uids = list(map(int, json.loads(data["uids"]))) + weights = list(map(int, json.loads(data["weights"]))) + netuid = data["netuid"] + salt = list(map(int, json.loads(data["salt"]))) + + print(f"commit_hash: {commit_hash}") + print(f"uids: {uids}") + print(f"weights: {weights}") + print(f"netuid: {netuid}") + print(f"salt: {salt}") # Calls subtensor.reveal_weights success, message = subtensor.reveal_weights( wallet=wallet, - commit_hash=data["commit_hash"], - uids=list(map(int, json.loads(data["uids"]))), - weights=list(map(int, json.loads(data["weights"]))), + netuid=netuid, + uids=uids, + weights=weights, + salt=salt, wait_for_inclusion=True, - wait_for_finalization=True) + wait_for_finalization=True + ) # delete wallet object del wallet @@ -109,10 +128,11 @@ def revealed(wallet_name, wallet_path, wallet_hotkey, netuid, uids, weights, sal print("No existing row found with specified data") -def committed(wallet_name, wallet_path, wallet_hotkey, curr_block, reveal_block, commit_hash, netuid, uids, weights, salt): +def committed(wallet_name, wallet_path, wallet_hotkey_name, wallet_hotkey_ss58, curr_block, reveal_block, commit_hash, netuid, uids, weights, salt): commit_data = { - "wallet_hotkey": wallet_hotkey, + "wallet_hotkey_name": wallet_hotkey_name, + "wallet_hotkey_ss58": wallet_hotkey_ss58, "wallet_name": wallet_name, "wallet_path": wallet_path, "commit_hash": commit_hash, @@ -152,7 +172,7 @@ def check_reveal(subtensor, curr_block: int): # Delete the row after revealing, and delete all older reveals with utils.DB(db_path=DB_PATH) as (conn, cursor): cursor.execute('DELETE FROM commits WHERE reveal_block <= ?', (curr_block,)) - conn.committed() + conn.commit() return True return False @@ -164,15 +184,15 @@ def handle_client_connection(client_socket): request = 
client_socket.recv(1024).decode() if not request: break - args = request.split() + args = shlex.split(request) command = args[0] if command == 'revealed': # wallet_name, wallet_path, wallet_hotkey, netuid, uids, weights, salt revealed(args[1], args[2], args[3], args[4], json.loads(args[5]), json.loads(args[6]), json.loads(args[7])) elif command == 'committed': - # wallet_name, wallet_path, wallet_hotkey, curr_block, reveal_block, commit_hash, netuid, uids, weights, salt - committed(args[1], args[2], args[3], args[4], args[5], args[6], args[7], json.loads(args[8]), json.loads(args[9]), - json.loads(args[10])) + # wallet_name, wallet_path, wallet_hotkey_name, wallet_hotkey_ss58, curr_block, reveal_block, commit_hash, netuid, uids, weights, salt + committed(args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], json.loads(args[9]), json.loads(args[10]), + json.loads(args[11])) else: print("Command not recognized") except Exception as e: @@ -217,7 +237,7 @@ def main(args): if __name__ == "__main__": parser = argparse.ArgumentParser(description="Run the Bittensor commit-reveal subprocess script.") parser.add_argument("--network", type=str, default="ws://localhost:9945", help="Subtensor network address") - parser.add_argument("--sleep_interval", type=int, default=12, help="Interval between block checks in seconds") + parser.add_argument("--sleep_interval", type=float, default=0.25, help="Interval between block checks in seconds") # Add more arguments as needed args = parser.parse_args() main(args) diff --git a/scripts/subprocess_utils.py b/scripts/subprocess_utils.py index 4d4f3ce32a..da890986e2 100644 --- a/scripts/subprocess_utils.py +++ b/scripts/subprocess_utils.py @@ -5,8 +5,10 @@ import subprocess import psutil -STDOUT_PATH = "scripts/subprocess/logs/commit_reveal_stdout.log" -STDERR_PATH = "scripts/subprocess/logs/commit_reveal_stderr.log" +STDOUT_LOG = "/commit_reveal_stdout.log" +STDERR_LOG = "/commit_reveal_stderr.log" +PROCESS_NAME = 
"commit_reveal.py" + def is_process_running(process_name: str) -> bool: """Check if a process with a given name is currently running.""" @@ -26,30 +28,48 @@ def get_process(process_name: str) -> Optional[int]: return None -def start_commit_reveal_subprocess(): +def start_commit_reveal_subprocess(network: Optional[str] = None, sleep_interval: Optional[str] = None): """Start the commit reveal subprocess if not already running.""" - process_name = 'commit_reveal.py' + log_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "subprocess", "logs")) script_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "subprocess", "commit_reveal.py")) project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) - if not is_process_running(process_name): - stdout_file = open(STDOUT_PATH, "w") - stderr_file = open(STDERR_PATH, "w") - print(f"Starting subprocess '{process_name}'...") + if not is_process_running(PROCESS_NAME): + stdout_file = open(log_path + STDOUT_LOG, "w") + stderr_file = open(log_path + STDERR_LOG, "w") + print(f"Starting subprocess '{PROCESS_NAME}'...") env = os.environ.copy() env["PYTHONPATH"] = project_root + ":" + env.get("PYTHONPATH", "") + args = ['python3', script_path] + if network: + args.extend(['--network', network]) + if sleep_interval: + args.extend(['--sleep-interval', sleep_interval]) + process = subprocess.Popen( - ['python3', script_path], + args=args, stdout=stdout_file, stderr=stderr_file, preexec_fn=os.setsid, env=env ) - print(f"Subprocess '{process_name}' started with PID {process.pid}.") + print(f"Subprocess '{PROCESS_NAME}' started with PID {process.pid}.") + + else: + print(f"Subprocess '{PROCESS_NAME}' is already running.") + +def stop_commit_reveal_subprocess(): + """Stop the commit reveal subprocess if it is running.""" + pid = get_process(PROCESS_NAME) + + if pid is not None: + print(f"Stopping subprocess '{PROCESS_NAME}' with PID {pid}...") + os.kill(pid, 15) # SIGTERM + print(f"Subprocess 
'{PROCESS_NAME}' stopped.") else: - print(f"Subprocess '{process_name}' is already running.") + print(f"Subprocess '{PROCESS_NAME}' is not running.") class DB: @@ -58,10 +78,13 @@ class DB: """ def __init__( - self, - db_path: str = os.path.expanduser("~/.bittensor/bittensor.db"), - row_factory=None, + self, + db_path: str = os.path.expanduser("~/.bittensor/bittensor.db"), + row_factory=None, ): + if not os.path.exists(os.path.dirname(db_path)): + os.makedirs(os.path.dirname(db_path)) + self.db_path = db_path self.conn: Optional[sqlite3.Connection] = None self.row_factory = row_factory @@ -76,6 +99,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.conn.close() + def create_table(title: str, columns: list[tuple[str, str]], rows: list[list]) -> None: """ Creates and populates the rows of a table in the SQLite database. @@ -131,4 +155,4 @@ def read_table(table_name: str, order_by: str = "") -> tuple[list, list]: for row in rows: for idx in blob_cols: row[idx] = int.from_bytes(row[idx], byteorder="big") - return column_names, rows \ No newline at end of file + return column_names, rows diff --git a/tests/e2e_tests/conftest.py b/tests/e2e_tests/conftest.py index 1c706aae0d..cdde3f1ff1 100644 --- a/tests/e2e_tests/conftest.py +++ b/tests/e2e_tests/conftest.py @@ -9,7 +9,7 @@ import pytest from substrateinterface import SubstrateInterface - +from scripts import subprocess_utils from bittensor import logging from tests.e2e_tests.utils.e2e_test_utils import ( clone_or_update_templates, @@ -98,4 +98,7 @@ def wait_for_node_start(process, pattern): # TODO: uncomment templates when done # logging.info("Uninstalling neuron templates") - # uninstall_templates(templates_dir) \ No newline at end of file + # uninstall_templates(templates_dir) + + # kill subprocess if its running + subprocess_utils.stop_commit_reveal_subprocess() diff --git a/tests/e2e_tests/test_reveal_weights.py b/tests/e2e_tests/test_reveal_weights.py index 83ee5177ba..225d825f65 100644 --- 
a/tests/e2e_tests/test_reveal_weights.py +++ b/tests/e2e_tests/test_reveal_weights.py @@ -128,7 +128,7 @@ async def test_commit_and_reveal_weights(local_chain): # Assert that the committed weights are set correctly assert weight_commits.value is not None, "Weight commit not found in storage" - commit_hash, commit_block = weight_commits.value + commit_hash, commit_block = weight_commits.value[0] assert commit_block > 0, f"Invalid block number: {commit_block}" # Query the WeightCommitRevealInterval storage map @@ -145,7 +145,7 @@ async def test_commit_and_reveal_weights(local_chain): await wait_interval(interval, subtensor) # allow one more block to pass - time.sleep(12) + time.sleep(30) # Verify that subprocess did the reveal and deleted entry from local table assert commit_reveal_subprocess.is_table_empty("commits") From 616e0d1be040ac14ebdc6f98f59c7de4eca80f4a Mon Sep 17 00:00:00 2001 From: opendansor Date: Thu, 17 Oct 2024 18:15:34 -0700 Subject: [PATCH 08/58] Add commit-reveal subprocess and weight handling improvements Enhanced the commit-reveal mechanism by adding a subprocess with sleep interval control, class-based handling in commit_reveal.py, and improved reveal checking. Adjusted tests and utils accordingly to ensure functionality and performance consistency. 
--- bittensor/core/extrinsics/commit_weights.py | 21 +- bittensor/core/extrinsics/set_weights.py | 23 +- bittensor/core/subtensor.py | 11 +- scripts/subprocess/commit_reveal.py | 285 ++++++++++++-------- scripts/subprocess_utils.py | 5 +- tests/e2e_tests/test_commit_weights.py | 9 +- tests/e2e_tests/test_reveal_weights.py | 147 +++++++++- 7 files changed, 367 insertions(+), 134 deletions(-) diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index 30534dcc58..971112f45d 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -22,10 +22,12 @@ from retry import retry from rich.prompt import Confirm +from bittensor.core import settings from bittensor.core.extrinsics.utils import submit_extrinsic from bittensor.utils import format_error_message from bittensor.utils.btlogging import logging from bittensor.utils.networking import ensure_connected +from bittensor.utils.weight_utils import generate_weight_hash # For annotation purposes if TYPE_CHECKING: @@ -155,7 +157,7 @@ def commit_weights_process( ): def send_command(command): client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - client.connect(('localhost', 9999)) + client.connect(('127.0.0.1', 9949)) client.send(command.encode()) client.close() @@ -302,12 +304,25 @@ def reveal_weights_process( uids: list[int], weights: list[int], salt: list[int], + version_key: int = settings.version_as_int, ): def send_command(command): client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - client.connect(('localhost', 9999)) + client.connect(('127.0.0.1', 9949)) client.send(command.encode()) client.close() - command = f'revealed "{wallet.name}" "{wallet.path}" "{wallet.hotkey}" "{netuid}" "{uids}" "{weights}" "{salt}"' + try: + # Generate the hash of the weights - so we can remove from local reveal subprocess + commit_hash = generate_weight_hash( + address=wallet.hotkey.ss58_address, + netuid=netuid, + 
uids=list(uids), + values=list(weights), + salt=salt, + version_key=version_key, + ) + command = f'revealed_hash "{commit_hash}"' + except Exception as e: + command = f'revealed "{wallet.name}" "{wallet.path}" "{wallet.hotkey_str}" "{wallet.hotkey.ss58_address}" "{netuid}" "{uids}" "{weights}" "{salt}"' send_command(command) diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index a0a300c44a..6dccf08cba 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -132,17 +132,6 @@ def set_weights_extrinsic( Returns: tuple[bool, str]: A tuple containing a success flag and an optional response message. """ - # First convert types. - if use_torch(): - if isinstance(uids, list): - uids = torch.tensor(uids, dtype=torch.int64) - if isinstance(weights, list): - weights = torch.tensor(weights, dtype=torch.float32) - else: - if isinstance(uids, list): - uids = np.array(uids, dtype=np.int64) - if isinstance(weights, list): - weights = np.array(weights, dtype=np.float32) if subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_enabled: # if cr is enabled, commit instead of setting the weights. @@ -190,6 +179,18 @@ def set_weights_extrinsic( logging.debug(str(e)) return False, str(e) else: + # First convert types. + if use_torch(): + if isinstance(uids, list): + uids = torch.tensor(uids, dtype=torch.int64) + if isinstance(weights, list): + weights = torch.tensor(weights, dtype=torch.float32) + else: + if isinstance(uids, list): + uids = np.array(uids, dtype=np.int64) + if isinstance(weights, list): + weights = np.array(weights, dtype=np.float32) + # Reformat and normalize. 
weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( uids, weights diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index f7b4d2dcff..87a23477e0 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -140,6 +140,7 @@ def __init__( log_verbose: bool = False, connection_timeout: int = 600, subprocess_initialization: bool = True, + subprocess_sleep_interval: float = 12, ) -> None: """ Initializes a Subtensor interface for interacting with the Bittensor blockchain. @@ -189,7 +190,7 @@ def __init__( ) if subprocess_initialization: - subprocess_utils.start_commit_reveal_subprocess(network=network) + subprocess_utils.start_commit_reveal_subprocess(network=network, sleep_interval=subprocess_sleep_interval) self.log_verbose = log_verbose self._connection_timeout = connection_timeout @@ -880,8 +881,9 @@ def set_weights( success = False message = "No attempt made. Perhaps it is too soon to set weights!" while ( - self.blocks_since_last_update(netuid, uid) > self.weights_rate_limit(netuid) # type: ignore - and retries < max_retries + self.blocks_since_last_update(netuid, uid) > self.weights_rate_limit(netuid) # type: ignore + and retries < max_retries + and not success ): try: logging.info( @@ -1749,7 +1751,8 @@ def reveal_weights( netuid=netuid, uids=list(uids), weights=list(weights), - salt=list(salt) + salt=list(salt), + version_key=version_key ) break except Exception as e: diff --git a/scripts/subprocess/commit_reveal.py b/scripts/subprocess/commit_reveal.py index 7d429a3191..d9ade9867c 100644 --- a/scripts/subprocess/commit_reveal.py +++ b/scripts/subprocess/commit_reveal.py @@ -2,17 +2,73 @@ import json import os import shlex +import sys import time -import utils # Ensure this import works import socket import threading - from bittensor.core.subtensor import Subtensor from bittensor_wallet import Wallet from scripts import subprocess_utils as utils +from typing import List, Any # Path to the SQLite 
database DB_PATH = os.path.expanduser("~/.bittensor/bittensor.db") +# Global variable to control the server loop +running = True + + +class Commit: + def __init__(self, wallet_hotkey_name: str, wallet_hotkey_ss58: str, wallet_name: str, wallet_path: str, + commit_hash: str, netuid: int, commit_block: int, reveal_block: int, uids: List[int], + weights: List[int], salt: List[int], version_key: int): + self.wallet_hotkey_name = wallet_hotkey_name + self.wallet_hotkey_ss58 = wallet_hotkey_ss58 + self.wallet_name = wallet_name + self.wallet_path = wallet_path + self.commit_hash = commit_hash + self.netuid = netuid + self.commit_block = commit_block + self.reveal_block = reveal_block + self.uids = uids + self.weights = weights + self.salt = salt + self.version_key = version_key + + def to_dict(self) -> dict: + return { + "wallet_hotkey_name": self.wallet_hotkey_name, + "wallet_hotkey_ss58": self.wallet_hotkey_ss58, + "wallet_name": self.wallet_name, + "wallet_path": self.wallet_path, + "commit_hash": self.commit_hash, + "netuid": self.netuid, + "commit_block": self.commit_block, + "reveal_block": self.reveal_block, + "uids": json.dumps(self.uids), + "weights": json.dumps(self.weights), + "salt": json.dumps(self.salt), + "version_key": self.version_key + } + + @staticmethod + def from_dict(data: dict) -> 'Commit': + return Commit( + wallet_hotkey_name=data["wallet_hotkey_name"], + wallet_hotkey_ss58=data["wallet_hotkey_ss58"], + wallet_name=data["wallet_name"], + wallet_path=data["wallet_path"], + commit_hash=data["commit_hash"], + netuid=data["netuid"], + commit_block=data["commit_block"], + reveal_block=data["reveal_block"], + uids=json.loads(data["uids"]), + weights=json.loads(data["weights"]), + salt=json.loads(data["salt"]), + version_key=data["version_key"] + ) + + def __str__(self): + return f"Commit(wallet_hotkey_name={self.wallet_hotkey_name}, wallet_hotkey_ss58={self.wallet_hotkey_ss58}, wallet_name={self.wallet_name}, wallet_path={self.wallet_path}, 
commit_hash={self.commit_hash}, netuid={self.netuid}, commit_block={self.commit_block}, reveal_block={self.reveal_block}, uids={self.uids}, weights={self.weights}, salt={self.salt}, version_key={self.version_key})" def table_exists(table_name: str) -> bool: @@ -29,7 +85,6 @@ def is_table_empty(table_name: str) -> bool: try: # Attempt to read the table columns, rows = utils.read_table(table_name) - # Check if the table is empty if not rows: print(f"Table '{table_name}' is empty.") @@ -37,7 +92,6 @@ def is_table_empty(table_name: str) -> bool: else: print(f"Table '{table_name}' is not empty.") return False - except Exception as e: print(f"Error checking if table '{table_name}' is empty: {e}") return False @@ -54,11 +108,11 @@ def initialize_db(): ("netuid", "INTEGER"), ("commit_block", "INTEGER"), ("reveal_block", "INTEGER"), - ("uids", "TEXT"), # Store list as a JSON string for simplicity - ("weights", "TEXT"), # Store list as a JSON string for simplicity - ("salt", "TEXT"), # Store list as a JSON string for simplicity + ("uids", "TEXT"), + ("weights", "TEXT"), + ("salt", "TEXT"), + ("version_key", "INTEGER") ] - # Check if the 'commits' table exists before creating it if not table_exists("commits"): print("Creating table 'commits'...") @@ -67,114 +121,106 @@ def initialize_db(): print("Table 'commits' already exists.") -def reveal(subtensor, data): +def reveal(subtensor, commit: Commit): # create wallet - wallet_name = data["wallet_name"] - wallet_path = data["wallet_path"] - wallet_hotkey_name = data["wallet_hotkey_name"] - - wallet = Wallet(name=wallet_name, path=wallet_path, hotkey=wallet_hotkey_name) - - print(f"the data: {data}") - print(f"wallet: {wallet}") - - commit_hash = data["commit_hash"] - uids = list(map(int, json.loads(data["uids"]))) - weights = list(map(int, json.loads(data["weights"]))) - netuid = data["netuid"] - salt = list(map(int, json.loads(data["salt"]))) - - print(f"commit_hash: {commit_hash}") - print(f"uids: {uids}") - print(f"weights: 
{weights}") - print(f"netuid: {netuid}") - print(f"salt: {salt}") - - # Calls subtensor.reveal_weights + wallet = Wallet(name=commit.wallet_name, path=commit.wallet_path, hotkey=commit.wallet_hotkey_name) success, message = subtensor.reveal_weights( wallet=wallet, - netuid=netuid, - uids=uids, - weights=weights, - salt=salt, + netuid=commit.netuid, + uids=commit.uids, + weights=commit.weights, + salt=commit.salt, wait_for_inclusion=True, wait_for_finalization=True ) - # delete wallet object del wallet - if success: - print("Reveal success") + print(f"Reveal success for commit {commit}") else: - print(f"Reveal failure: {message}") + print(f"Reveal failure for commit: {message}") -def revealed(wallet_name, wallet_path, wallet_hotkey, netuid, uids, weights, salt): - # Check if a row with the specified data exists in the 'commits' table - with utils.DB(db_path=DB_PATH) as (conn, cursor): - sql = "SELECT COUNT(*) FROM commits WHERE wallet_hotkey=? AND wallet_name=? AND wallet_path=? AND netuid=? AND uids=? AND weights=? AND salt=?" - cursor.execute(sql, (wallet_hotkey, wallet_name, wallet_path, netuid, json.dumps(uids), json.dumps(weights), json.dumps(salt))) - count = cursor.fetchone()[0] - - if count > 0: - # Delete the row if it exists - delete_sql = "DELETE FROM commits WHERE wallet_hotkey=? AND wallet_name=? AND wallet_path=? AND netuid=? AND uids=? AND weights=? AND salt=?" 
- cursor.execute(delete_sql, - (wallet_hotkey, wallet_name, wallet_path, netuid, json.dumps(uids), json.dumps(weights), json.dumps(salt))) - conn.commit() - print("Deleted existing row with specified data") - else: - print("No existing row found with specified data") - - -def committed(wallet_name, wallet_path, wallet_hotkey_name, wallet_hotkey_ss58, curr_block, reveal_block, commit_hash, netuid, uids, weights, salt): - - commit_data = { - "wallet_hotkey_name": wallet_hotkey_name, - "wallet_hotkey_ss58": wallet_hotkey_ss58, - "wallet_name": wallet_name, - "wallet_path": wallet_path, - "commit_hash": commit_hash, - "netuid": netuid, - "commit_block": curr_block, - "reveal_block": reveal_block, - "uids": json.dumps(uids), - "weights": json.dumps(weights), - "salt": json.dumps(salt), - } +def revealed(wallet_name, wallet_path, wallet_hotkey_str, wallet_hotkey_ss58, netuid, uids, weights, salt, version_key): + try: + # Check if a row with the specified data exists in the 'commits' table + with utils.DB(db_path=DB_PATH) as (conn, cursor): + sql = ( + "SELECT COUNT(*) FROM commits WHERE wallet_hotkey_str=? AND wallet_hotkey_ss58=? AND wallet_name=? AND wallet_path=? AND netuid=? AND " + "uids=? AND weights=? AND salt=? AND version_key=?") + cursor.execute(sql, ( + wallet_hotkey_str, wallet_hotkey_ss58, wallet_name, wallet_path, netuid, json.dumps(uids), + json.dumps(weights), + json.dumps(salt), version_key)) + count = cursor.fetchone()[0] + if count > 0: + # Delete the row if it exists + delete_sql = ( + "DELETE FROM commits WHERE wallet_hotkey_str=? AND wallet_hotkey_ss58=? AND wallet_name=? AND wallet_path=? AND netuid=? AND " + "uids=? AND weights=? AND salt=? 
AND version_key=?") + cursor.execute(delete_sql, ( + wallet_hotkey_str, wallet_hotkey_ss58, wallet_name, wallet_path, netuid, json.dumps(uids), + json.dumps(weights), json.dumps(salt), version_key)) + conn.commit() + print( + f"Deleted existing row with specified data: wallet_hotkey_str={wallet_hotkey_str}, wallet_hotkey_ss58={wallet_hotkey_ss58}, wallet_name={wallet_name}, wallet_path={wallet_path}, netuid={netuid}, uids={uids}, weights={weights}, salt={salt}, version_key={version_key}") + else: + print( + f"No existing row found with specified data: wallet_hotkey_str={wallet_hotkey_str}, wallet_hotkey_ss58={wallet_hotkey_ss58}, wallet_name={wallet_name}, wallet_path={wallet_path}, netuid={netuid}, uids={uids}, weights={weights}, salt={salt}, version_key={version_key}") + except Exception as e: + print(f"Error removing from table 'commits': {e}") + + +def revealed_hash(commit_hash: str): + try: + # Check if a row with the specified data exists in the 'commits' table + with utils.DB(db_path=DB_PATH) as (conn, cursor): + sql = ( + "SELECT COUNT(*) FROM commits WHERE commit_hash=?") + cursor.execute(sql, (commit_hash,)) + count = cursor.fetchone()[0] + if count > 0: + # Delete the row if it exists + delete_sql = ( + "DELETE FROM commits WHERE commit_hash=?") + cursor.execute(delete_sql, (commit_hash,)) + conn.commit() + print(f"\nDeleted existing row with commit hash {commit_hash}") + else: + print(f"\nNo existing row found with commit hash {commit_hash}") + except Exception as e: + print(f"Error removing from table 'commits': {e}") + + +def committed(commit: Commit): with utils.DB(db_path=DB_PATH) as (conn, cursor): + commit_data = commit.to_dict() column_names = ", ".join(commit_data.keys()) data = ", ".join(["?"] * len(commit_data)) sql = f"INSERT INTO commits ({column_names}) VALUES ({data})" cursor.execute(sql, tuple(commit_data.values())) conn.commit() - - print("Committed commit data: {}", commit_data) + print(f"Committed commit data: {commit_data}") -def 
check_reveal(subtensor, curr_block: int): +def check_reveal(subtensor: Subtensor): try: columns, rows = utils.read_table("commits") + commits = [Commit.from_dict(dict(zip(columns, commit))) for commit in rows] except Exception as e: print(f"Error reading table 'commits': {e}") return False - curr_reveal = None - for commit in rows: - row_dict = dict(zip(columns, commit)) - if row_dict['reveal_block'] == curr_block: - curr_reveal = row_dict - break - - if curr_reveal: - reveal(subtensor, curr_reveal) - # Delete the row after revealing, and delete all older reveals - with utils.DB(db_path=DB_PATH) as (conn, cursor): - cursor.execute('DELETE FROM commits WHERE reveal_block <= ?', (curr_block,)) - conn.commit() - return True - + if commits: + # Sort commits by reveal block asc, and if two reveal blocks are the same, sort them by commit blocks asc + commits.sort(key=lambda commit: (commit.reveal_block, commit.commit_block)) + next_reveal = commits[0] + curr_block = subtensor.get_current_block() + if next_reveal.reveal_block <= curr_block: + reveal(subtensor, next_reveal) + # # Delete the row after revealing, and delete all older reveals + revealed_hash(next_reveal.commit_hash) + return True return False @@ -187,12 +233,31 @@ def handle_client_connection(client_socket): args = shlex.split(request) command = args[0] if command == 'revealed': - # wallet_name, wallet_path, wallet_hotkey, netuid, uids, weights, salt - revealed(args[1], args[2], args[3], args[4], json.loads(args[5]), json.loads(args[6]), json.loads(args[7])) + # revealed "{wallet.name}" "{wallet.path}" "{wallet.hotkey_str}" "{wallet.hotkey.ss58_address}" "{netuid}" "{uids}" "{weights}" "{salt}" "{version_key}" + revealed(args[1], args[2], args[3], args[4], args[5], json.loads(args[6]), json.loads(args[7]), + json.loads(args[8]), int(args[9])) + elif command == 'revealed_hash': + # revealed_hash "{commit_hash}" + revealed_hash(args[1]) elif command == 'committed': - # wallet_name, wallet_path, 
wallet_hotkey_name, wallet_hotkey_ss58, curr_block, reveal_block, commit_hash, netuid, uids, weights, salt - committed(args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], json.loads(args[9]), json.loads(args[10]), - json.loads(args[11])) + # wallet_name, wallet_path, wallet_hotkey_name, wallet_hotkey_ss58, curr_block, reveal_block, commit_hash, netuid, uids, weights, salt, version_key + commit = Commit( + wallet_hotkey_name=args[3], + wallet_hotkey_ss58=args[4], + wallet_name=args[1], + wallet_path=args[2], + commit_hash=args[7], + netuid=int(args[8]), + commit_block=int(args[5]), + reveal_block=int(args[6]), + uids=json.loads(args[9]), + weights=json.loads(args[10]), + salt=json.loads(args[11]), + version_key=int(args[12]) + ) + committed(commit) + elif command == 'terminate': + terminate_process(None, None) else: print("Command not recognized") except Exception as e: @@ -203,10 +268,10 @@ def handle_client_connection(client_socket): def start_socket_server(): server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - server.bind(('0.0.0.0', 9999)) + server.bind(('127.0.0.1', 9949)) server.listen(5) - print('Listening on port 9999...') - while True: + print('Listening on port 9949...') + while running: client_sock, addr = server.accept() client_handler = threading.Thread( target=handle_client_connection, @@ -215,29 +280,35 @@ def start_socket_server(): client_handler.start() +def terminate_process(signal_number, frame): + global running + print(f"Terminating process with signal {signal_number}") + running = False + sys.exit(0) + + def main(args): # Initialize database and create table if necessary print("Initializing database...") initialize_db() - subtensor = Subtensor(network=args.network) # Using network argument + subtensor = Subtensor(network=args.network, subprocess_initialization=False) # Using network argument # A new block is created every 12 seconds. 
Check if the current block is equal to the reveal block - server_thread = threading.Thread(target=start_socket_server) server_thread.start() - while True: - # get curr block - curr_block = subtensor.get_current_block() - if check_reveal(subtensor=subtensor, curr_block=curr_block): - print(f"Revealing commit for block {curr_block}") + while running: + if check_reveal(subtensor=subtensor): + print(f"Revealing commit for block {subtensor.get_current_block()}") else: - print(f"Nothing to reveal for block {curr_block}") + print(f"Nothing to reveal for block {subtensor.get_current_block()}") time.sleep(args.sleep_interval) # Using sleep interval argument if __name__ == "__main__": parser = argparse.ArgumentParser(description="Run the Bittensor commit-reveal subprocess script.") parser.add_argument("--network", type=str, default="ws://localhost:9945", help="Subtensor network address") - parser.add_argument("--sleep_interval", type=float, default=0.25, help="Interval between block checks in seconds") + # TODO: have finney be default + # parser.add_argument("--network", type=str, default="wss://entrypoint-finney.opentensor.ai:443", help="Subtensor network address") + parser.add_argument("--sleep-interval", type=float, default=12, help="Interval between block checks in seconds") # Add more arguments as needed args = parser.parse_args() main(args) diff --git a/scripts/subprocess_utils.py b/scripts/subprocess_utils.py index da890986e2..6ba400c3b3 100644 --- a/scripts/subprocess_utils.py +++ b/scripts/subprocess_utils.py @@ -28,7 +28,7 @@ def get_process(process_name: str) -> Optional[int]: return None -def start_commit_reveal_subprocess(network: Optional[str] = None, sleep_interval: Optional[str] = None): +def start_commit_reveal_subprocess(network: Optional[str] = None, sleep_interval: Optional[float] = None): """Start the commit reveal subprocess if not already running.""" log_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "subprocess", "logs")) script_path = 
os.path.abspath(os.path.join(os.path.dirname(__file__), "subprocess", "commit_reveal.py")) @@ -45,7 +45,7 @@ def start_commit_reveal_subprocess(network: Optional[str] = None, sleep_interval if network: args.extend(['--network', network]) if sleep_interval: - args.extend(['--sleep-interval', sleep_interval]) + args.extend(['--sleep-interval', str(sleep_interval)]) process = subprocess.Popen( args=args, @@ -99,7 +99,6 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.conn.close() - def create_table(title: str, columns: list[tuple[str, str]], rows: list[list]) -> None: """ Creates and populates the rows of a table in the SQLite database. diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py index ca9b0a0a2c..3563a84f37 100644 --- a/tests/e2e_tests/test_commit_weights.py +++ b/tests/e2e_tests/test_commit_weights.py @@ -20,7 +20,7 @@ @pytest.mark.asyncio async def test_commit_and_reveal_weights(local_chain): """ - Tests the commit/reveal weights mechanism + Tests the commit/reveal weights mechanism with subprocess disabled (CR1.0) Steps: 1. 
Register a subnet through Alice @@ -59,7 +59,7 @@ async def test_commit_and_reveal_weights(local_chain): netuid, ), "Unable to enable commit reveal on the subnet" - subtensor = bittensor.Subtensor(network="ws://localhost:9945") + subtensor = bittensor.Subtensor(network="ws://localhost:9945", subprocess_initialization=False) assert subtensor.get_subnet_hyperparameters( netuid=netuid ).commit_reveal_weights_enabled, "Failed to enable commit/reveal" @@ -73,7 +73,6 @@ async def test_commit_and_reveal_weights(local_chain): return_error_message=True, ) - subtensor = bittensor.Subtensor(network="ws://localhost:9945") assert ( subtensor.get_subnet_hyperparameters( netuid=netuid @@ -92,7 +91,7 @@ async def test_commit_and_reveal_weights(local_chain): call_params={"netuid": netuid, "weights_set_rate_limit": "0"}, return_error_message=True, ) - subtensor = bittensor.Subtensor(network="ws://localhost:9945") + assert ( subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 ), "Failed to set weights_rate_limit" @@ -124,7 +123,7 @@ async def test_commit_and_reveal_weights(local_chain): ) # Assert that the committed weights are set correctly assert weight_commits.value is not None, "Weight commit not found in storage" - commit_hash, commit_block = weight_commits.value + commit_hash, commit_block = weight_commits.value[0] assert commit_block > 0, f"Invalid block number: {commit_block}" # Query the WeightCommitRevealInterval storage map diff --git a/tests/e2e_tests/test_reveal_weights.py b/tests/e2e_tests/test_reveal_weights.py index 225d825f65..d797b592c1 100644 --- a/tests/e2e_tests/test_reveal_weights.py +++ b/tests/e2e_tests/test_reveal_weights.py @@ -59,7 +59,7 @@ async def test_commit_and_reveal_weights(local_chain): netuid, ), "Unable to enable commit reveal on the subnet" - subtensor = bittensor.Subtensor(network="ws://localhost:9945") + subtensor = bittensor.Subtensor(network="ws://localhost:9945", subprocess_sleep_interval=0.25) assert 
subtensor.get_subnet_hyperparameters( netuid=netuid ).commit_reveal_weights_enabled, "Failed to enable commit/reveal" @@ -144,6 +144,151 @@ async def test_commit_and_reveal_weights(local_chain): # Wait until the reveal block range await wait_interval(interval, subtensor) + # allow one more block to pass + time.sleep(40) + + # Verify that subprocess did the reveal and deleted entry from local table + assert commit_reveal_subprocess.is_table_empty("commits") + + # Query the Weights storage map + revealed_weights = subtensor.query_module( + module="SubtensorModule", + name="Weights", + params=[netuid, 0], # netuid and uid + ) + + # Assert that the revealed weights are set correctly + assert revealed_weights.value is not None, "Weight reveal not found in storage" + + assert ( + weight_vals[0] == revealed_weights.value[0][1] + ), f"Incorrect revealed weights. Expected: {weights[0]}, Actual: {revealed_weights.value[0][1]}" + logging.info("✅ Passed test_commit_and_reveal_weights") + + +@pytest.mark.asyncio +async def test_set_and_reveal_weights(local_chain): + """ + Tests the commit/reveal weights mechanism with a subprocess doing the reveal function + + Steps: + 1. Register a subnet through Alice + 2. Register Alice's neuron and add stake + 3. Enable commit-reveal mechanism on the subnet + 4. Lower the commit_reveal interval and rate limit + 5. Commit weights and verify + 6. 
Wait interval & see if subprocess did the reveal weights and verify + Raises: + AssertionError: If any of the checks or verifications fail + """ + netuid = 1 + logging.info("Testing test_set_and_reveal_weights") + # Register root as Alice + keypair, alice_wallet = setup_wallet("//Alice") + assert register_subnet(local_chain, alice_wallet), "Unable to register the subnet" + + # Verify subnet 1 created successfully + assert local_chain.query( + "SubtensorModule", "NetworksAdded", [1] + ).serialize(), "Subnet wasn't created successfully" + + assert register_neuron( + local_chain, alice_wallet, netuid + ), "Unable to register Alice as a neuron" + + # Stake to become to top neuron after the first epoch + add_stake(local_chain, alice_wallet, bittensor.Balance.from_tao(100_000)) + + # Enable commit_reveal on the subnet + assert sudo_set_hyperparameter_bool( + local_chain, + alice_wallet, + "sudo_set_commit_reveal_weights_enabled", + True, + netuid, + ), "Unable to enable commit reveal on the subnet" + + subtensor = bittensor.Subtensor(network="ws://localhost:9945", subprocess_sleep_interval=0.25) # Subprocess works with fast blocks + assert subtensor.get_subnet_hyperparameters( + netuid=netuid + ).commit_reveal_weights_enabled, "Failed to enable commit/reveal" + + # Lower the commit_reveal interval + assert sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_commit_reveal_weights_interval", + call_params={"netuid": netuid, "interval": "370"}, + return_error_message=True, + ) + + assert ( + subtensor.get_subnet_hyperparameters( + netuid=netuid + ).commit_reveal_weights_interval + == 370 + ), "Failed to set commit/reveal interval" + + assert ( + subtensor.weights_rate_limit(netuid=netuid) > 0 + ), "Weights rate limit is below 0" + # Lower the rate limit + assert sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_weights_set_rate_limit", + call_params={"netuid": netuid, "weights_set_rate_limit": "0"}, 
+ return_error_message=True, + ) + assert ( + subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 + ), "Failed to set weights_rate_limit" + assert subtensor.weights_rate_limit(netuid=netuid) == 0 + + # Commit-reveal values + uids = np.array([0], dtype=np.int64) + weights = np.array([0.1], dtype=np.float32) + weight_uids, weight_vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) + + # Assert no local CR processes in table + assert commit_reveal_subprocess.is_table_empty("commits") + + # Set weights with CR enabled + success, message = subtensor.set_weights( + alice_wallet, + netuid, + uids=weight_uids, + weights=weight_vals, + wait_for_inclusion=True, + wait_for_finalization=True, + ) + + weight_commits = subtensor.query_module( + module="SubtensorModule", + name="WeightCommits", + params=[netuid, alice_wallet.hotkey.ss58_address], + ) + + # Assert that the committed weights are set correctly + assert weight_commits.value is not None, "Weight commit not found in storage" + commit_hash, commit_block = weight_commits.value[0] + assert commit_block > 0, f"Invalid block number: {commit_block}" + + # Query the WeightCommitRevealInterval storage map + weight_commit_reveal_interval = subtensor.query_module( + module="SubtensorModule", name="WeightCommitRevealInterval", params=[netuid] + ) + interval = weight_commit_reveal_interval.value + assert interval > 0, "Invalid WeightCommitRevealInterval" + + # Verify that sqlite has entry + assert commit_reveal_subprocess.is_table_empty("commits") is False + + # Wait until the reveal block range + await wait_interval(interval, subtensor) + # allow one more block to pass time.sleep(30) From 2e3d79f1ac9a0e82ffef1a3de4a9fc21083a2c9d Mon Sep 17 00:00:00 2001 From: opendansor Date: Thu, 17 Oct 2024 18:28:58 -0700 Subject: [PATCH 09/58] Add blocks_until_next_epoch method Introduce a method to calculate the number of blocks remaining until the next epoch for a specific subnet. 
This addition aids in network governance and operational planning by providing essential timing information within the Bittensor blockchain. --- bittensor/core/subtensor.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 87a23477e0..33aeb5aaaa 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -1762,6 +1762,25 @@ def reveal_weights( return success, message + def blocks_until_next_epoch(self, netuid: int) -> int: + """ + Calculates the number of blocks remaining until the next epoch for a specific subnet. + + Args: + netuid (int): The unique identifier of the subnet. + + Returns: + int: The number of blocks remaining until the next epoch. + + This function is useful for determining the time remaining until the next epoch, which is important + for network governance and operational planning within the Bittensor blockchain. + """ + # formula is (block_number + netuid + 1 ) % (tempo + 1) = 0 + curr_block = self.get_current_block() + tempo = self.get_subnet_hyperparameters(netuid=netuid).tempo + remainder = (curr_block + netuid + 1) % (tempo + 1) + return remainder + # Subnet 27 uses this method _do_serve_prometheus = do_serve_prometheus # Subnet 27 uses this method name From ef0e90bd111fabc4f0861bf60bcd3968f3a7fd4e Mon Sep 17 00:00:00 2001 From: opendansor Date: Thu, 17 Oct 2024 18:41:14 -0700 Subject: [PATCH 10/58] Add version_key parameter and detailed docstrings Added the version_key parameter to function signatures across multiple files for better version tracking. Updated docstrings for enhanced clarity and consistency, improving code readability and maintainability. 
--- bittensor/core/extrinsics/commit_weights.py | 6 +- bittensor/core/subtensor.py | 3 +- scripts/subprocess/commit_reveal.py | 218 ++++++++++++++++---- 3 files changed, 181 insertions(+), 46 deletions(-) diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index 971112f45d..6b6b917910 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -154,6 +154,7 @@ def commit_weights_process( uids: list[int], weights: list[int], salt: list[int], + version_key: int = settings.version_as_int, ): def send_command(command): client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) @@ -161,11 +162,12 @@ def send_command(command): client.send(command.encode()) client.close() + # TODO: Recalculate reveal interval with tempos curr_block = subtensor.get_current_block() cr_interval = subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_interval reveal_block = curr_block + cr_interval - command = f'committed "{wallet.name}" "{wallet.path}" "{wallet.hotkey_str}" "{wallet.hotkey.ss58_address}" "{curr_block}" "{reveal_block}" "{commit_hash}" "{netuid}" "{uids}" "{weights}" "{salt}"' + command = f'committed "{wallet.name}" "{wallet.path}" "{wallet.hotkey_str}" "{wallet.hotkey.ss58_address}" "{curr_block}" "{reveal_block}" "{commit_hash}" "{netuid}" "{uids}" "{weights}" "{salt}" "{version_key}"' send_command(command) # Chain call for `reveal_weights_extrinsic` @@ -324,5 +326,5 @@ def send_command(command): ) command = f'revealed_hash "{commit_hash}"' except Exception as e: - command = f'revealed "{wallet.name}" "{wallet.path}" "{wallet.hotkey_str}" "{wallet.hotkey.ss58_address}" "{netuid}" "{uids}" "{weights}" "{salt}"' + command = f'revealed "{wallet.name}" "{wallet.path}" "{wallet.hotkey_str}" "{wallet.hotkey.ss58_address}" "{netuid}" "{uids}" "{weights}" "{salt}" "{version_key}"' send_command(command) diff --git a/bittensor/core/subtensor.py 
b/bittensor/core/subtensor.py index 33aeb5aaaa..a1333e28af 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -1675,7 +1675,8 @@ def commit_weights( commit_hash=commit_hash, uids=list(uids), weights=list(weights), - salt=salt + salt=salt, + version_key=version_key ) break except Exception as e: diff --git a/scripts/subprocess/commit_reveal.py b/scripts/subprocess/commit_reveal.py index d9ade9867c..b84a5f6a14 100644 --- a/scripts/subprocess/commit_reveal.py +++ b/scripts/subprocess/commit_reveal.py @@ -9,7 +9,7 @@ from bittensor.core.subtensor import Subtensor from bittensor_wallet import Wallet from scripts import subprocess_utils as utils -from typing import List, Any +from typing import List, Any, Dict, Tuple, Optional # Path to the SQLite database DB_PATH = os.path.expanduser("~/.bittensor/bittensor.db") @@ -18,6 +18,24 @@ class Commit: + """ + A class representing a commit in the Bittensor network. + + Attributes: + wallet_hotkey_name (str): The hotkey name associated with the wallet. + wallet_hotkey_ss58 (str): The wallet's SS58 address. + wallet_name (str): The wallet name. + wallet_path (str): The path to the wallet. + commit_hash (str): The commit hash. + netuid (int): The network UID. + commit_block (int): The block number at which the commit was made. + reveal_block (int): The block number at which the commit will be revealed. + uids (List[int]): The list of UIDs. + weights (List[int]): The list of weights. + salt (List[int]): The salt used for the commit. + version_key (int): The version key. 
+ """ + def __init__(self, wallet_hotkey_name: str, wallet_hotkey_ss58: str, wallet_name: str, wallet_path: str, commit_hash: str, netuid: int, commit_block: int, reveal_block: int, uids: List[int], weights: List[int], salt: List[int], version_key: int): @@ -34,7 +52,13 @@ def __init__(self, wallet_hotkey_name: str, wallet_hotkey_ss58: str, wallet_name self.salt = salt self.version_key = version_key - def to_dict(self) -> dict: + def to_dict(self) -> Dict[str, Any]: + """ + Converts the commit object to a dictionary. + + Returns: + Dict[str, Any]: A dictionary representation of the commit. + """ return { "wallet_hotkey_name": self.wallet_hotkey_name, "wallet_hotkey_ss58": self.wallet_hotkey_ss58, @@ -51,7 +75,16 @@ def to_dict(self) -> dict: } @staticmethod - def from_dict(data: dict) -> 'Commit': + def from_dict(data: Dict[str, Any]) -> 'Commit': + """ + Creates a Commit object from a dictionary. + + Args: + data (Dict[str, Any]): A dictionary containing commit data. + + Returns: + Commit: A Commit object. + """ return Commit( wallet_hotkey_name=data["wallet_hotkey_name"], wallet_hotkey_ss58=data["wallet_hotkey_ss58"], @@ -67,11 +100,26 @@ def from_dict(data: dict) -> 'Commit': version_key=data["version_key"] ) - def __str__(self): + def __str__(self) -> str: + """ + Returns a string representation of the commit. + + Returns: + str: String representation of the commit. + """ return f"Commit(wallet_hotkey_name={self.wallet_hotkey_name}, wallet_hotkey_ss58={self.wallet_hotkey_ss58}, wallet_name={self.wallet_name}, wallet_path={self.wallet_path}, commit_hash={self.commit_hash}, netuid={self.netuid}, commit_block={self.commit_block}, reveal_block={self.reveal_block}, uids={self.uids}, weights={self.weights}, salt={self.salt}, version_key={self.version_key})" def table_exists(table_name: str) -> bool: + """ + Checks if a table exists in the database. + + Args: + table_name (str): The name of the table to check. 
+ + Returns: + bool: True if the table exists, False otherwise. + """ try: columns, rows = utils.read_table(table_name) print(f"Table '{table_name}' exists with columns: {columns}") @@ -82,10 +130,17 @@ def table_exists(table_name: str) -> bool: def is_table_empty(table_name: str) -> bool: + """ + Checks if a table in the database is empty. + + Args: + table_name (str): The name of the table to check. + + Returns: + bool: True if the table is empty, False otherwise. + """ try: - # Attempt to read the table columns, rows = utils.read_table(table_name) - # Check if the table is empty if not rows: print(f"Table '{table_name}' is empty.") return True @@ -97,8 +152,13 @@ def is_table_empty(table_name: str) -> bool: return False -def initialize_db(): - # Create 'commits' table if it doesn't exist +def initialize_db() -> None: + """ + Initializes the database by creating the 'commits' table if it does not exist. + + Returns: + None + """ columns = [ ("wallet_hotkey_name", "TEXT"), ("wallet_hotkey_ss58", "TEXT"), @@ -113,7 +173,6 @@ def initialize_db(): ("salt", "TEXT"), ("version_key", "INTEGER") ] - # Check if the 'commits' table exists before creating it if not table_exists("commits"): print("Creating table 'commits'...") utils.create_table("commits", columns, []) @@ -121,8 +180,17 @@ def initialize_db(): print("Table 'commits' already exists.") -def reveal(subtensor, commit: Commit): - # create wallet +def reveal(subtensor: Subtensor, commit: Commit) -> None: + """ + Reveals the weights for a commit to the subtensor network. + + Args: + subtensor (Subtensor): The subtensor network object. + commit (Commit): The commit object containing the data to be revealed. 
+ + Returns: + None + """ wallet = Wallet(name=commit.wallet_name, path=commit.wallet_path, hotkey=commit.wallet_hotkey_name) success, message = subtensor.reveal_weights( wallet=wallet, @@ -130,10 +198,10 @@ def reveal(subtensor, commit: Commit): uids=commit.uids, weights=commit.weights, salt=commit.salt, + version_key=commit.version_key, wait_for_inclusion=True, wait_for_finalization=True ) - # delete wallet object del wallet if success: print(f"Reveal success for commit {commit}") @@ -141,12 +209,29 @@ def reveal(subtensor, commit: Commit): print(f"Reveal failure for commit: {message}") -def revealed(wallet_name, wallet_path, wallet_hotkey_str, wallet_hotkey_ss58, netuid, uids, weights, salt, version_key): +def revealed(wallet_name: str, wallet_path: str, wallet_hotkey_str: str, wallet_hotkey_ss58: str, netuid: int, + uids: List[int], weights: List[int], salt: List[int], version_key: int) -> None: + """ + Handles the revealed command by removing the corresponding commit from the database. + + Args: + wallet_name (str): The wallet name. + wallet_path (str): The path to the wallet. + wallet_hotkey_str (str): The wallet hotkey as a string. + wallet_hotkey_ss58 (str): The wallet hotkey SS58 address. + netuid (int): The network UID. + uids (List[int]): The list of UIDs. + weights (List[int]): The list of weights. + salt (List[int]): The salt used for the commit. + version_key (int): The version key. + + Returns: + None + """ try: - # Check if a row with the specified data exists in the 'commits' table with utils.DB(db_path=DB_PATH) as (conn, cursor): sql = ( - "SELECT COUNT(*) FROM commits WHERE wallet_hotkey_str=? AND wallet_hotkey_ss58=? AND wallet_name=? AND wallet_path=? AND netuid=? AND " + "SELECT COUNT(*) FROM commits WHERE wallet_hotkey_str=? AND wallet_hotkey_ss58=? AND wallet_name=? AND wallet_path=? AND netuid=? AND " "uids=? AND weights=? AND salt=? 
AND version_key=?") cursor.execute(sql, ( wallet_hotkey_str, wallet_hotkey_ss58, wallet_name, wallet_path, netuid, json.dumps(uids), @@ -154,9 +239,8 @@ def revealed(wallet_name, wallet_path, wallet_hotkey_str, wallet_hotkey_ss58, ne json.dumps(salt), version_key)) count = cursor.fetchone()[0] if count > 0: - # Delete the row if it exists delete_sql = ( - "DELETE FROM commits WHERE wallet_hotkey_str=? AND wallet_hotkey_ss58=? AND wallet_name=? AND wallet_path=? AND netuid=? AND " + "DELETE FROM commits WHERE wallet_hotkey_str=? AND wallet_hotkey_ss58=? AND wallet_name=? AND wallet_path=? AND netuid=? AND " "uids=? AND weights=? AND salt=? AND version_key=?") cursor.execute(delete_sql, ( wallet_hotkey_str, wallet_hotkey_ss58, wallet_name, wallet_path, netuid, json.dumps(uids), @@ -171,18 +255,23 @@ def revealed(wallet_name, wallet_path, wallet_hotkey_str, wallet_hotkey_ss58, ne print(f"Error removing from table 'commits': {e}") -def revealed_hash(commit_hash: str): +def revealed_hash(commit_hash: str) -> None: + """ + Handles the revealed_hash command by removing the corresponding commit from the database using the commit hash. + + Args: + commit_hash (str): The commit hash. + + Returns: + None + """ try: - # Check if a row with the specified data exists in the 'commits' table with utils.DB(db_path=DB_PATH) as (conn, cursor): - sql = ( - "SELECT COUNT(*) FROM commits WHERE commit_hash=?") + sql = "SELECT COUNT(*) FROM commits WHERE commit_hash=?" cursor.execute(sql, (commit_hash,)) count = cursor.fetchone()[0] if count > 0: - # Delete the row if it exists - delete_sql = ( - "DELETE FROM commits WHERE commit_hash=?") + delete_sql = "DELETE FROM commits WHERE commit_hash=?" 
cursor.execute(delete_sql, (commit_hash,)) conn.commit() print(f"\nDeleted existing row with commit hash {commit_hash}") @@ -192,7 +281,16 @@ def revealed_hash(commit_hash: str): print(f"Error removing from table 'commits': {e}") -def committed(commit: Commit): +def committed(commit: Commit) -> None: + """ + Commits a new commit object to the database. + + Args: + commit (Commit): The commit object to save. + + Returns: + None + """ with utils.DB(db_path=DB_PATH) as (conn, cursor): commit_data = commit.to_dict() column_names = ", ".join(commit_data.keys()) @@ -203,7 +301,16 @@ def committed(commit: Commit): print(f"Committed commit data: {commit_data}") -def check_reveal(subtensor: Subtensor): +def check_reveal(subtensor: Subtensor) -> bool: + """ + Checks if there are any commits to reveal and performs the reveal if necessary. + + Args: + subtensor (Subtensor): The subtensor network object. + + Returns: + bool: True if a commit was revealed, False otherwise. + """ try: columns, rows = utils.read_table("commits") commits = [Commit.from_dict(dict(zip(columns, commit))) for commit in rows] @@ -212,19 +319,26 @@ def check_reveal(subtensor: Subtensor): return False if commits: - # Sort commits by reveal block asc, and if two reveal blocks are the same, sort them by commit blocks asc commits.sort(key=lambda commit: (commit.reveal_block, commit.commit_block)) next_reveal = commits[0] curr_block = subtensor.get_current_block() if next_reveal.reveal_block <= curr_block: reveal(subtensor, next_reveal) - # # Delete the row after revealing, and delete all older reveals revealed_hash(next_reveal.commit_hash) return True return False -def handle_client_connection(client_socket): +def handle_client_connection(client_socket: socket.socket) -> None: + """ + Handles incoming client connections for the socket server. + + Args: + client_socket (socket.socket): The client socket connection. 
+ + Returns: + None + """ try: while True: request = client_socket.recv(1024).decode() @@ -233,14 +347,11 @@ def handle_client_connection(client_socket): args = shlex.split(request) command = args[0] if command == 'revealed': - # revealed "{wallet.name}" "{wallet.path}" "{wallet.hotkey_str}" "{wallet.hotkey.ss58_address}" "{netuid}" "{uids}" "{weights}" "{salt}" "{version_key}" - revealed(args[1], args[2], args[3], args[4], args[5], json.loads(args[6]), json.loads(args[7]), + revealed(args[1], args[2], args[3], args[4], int(args[5]), json.loads(args[6]), json.loads(args[7]), json.loads(args[8]), int(args[9])) elif command == 'revealed_hash': - # revealed_hash "{commit_hash}" revealed_hash(args[1]) elif command == 'committed': - # wallet_name, wallet_path, wallet_hotkey_name, wallet_hotkey_ss58, curr_block, reveal_block, commit_hash, netuid, uids, weights, salt, version_key commit = Commit( wallet_hotkey_name=args[3], wallet_hotkey_ss58=args[4], @@ -266,7 +377,13 @@ def handle_client_connection(client_socket): client_socket.close() -def start_socket_server(): +def start_socket_server() -> None: + """ + Starts the socket server to listen for incoming connections. + + Returns: + None + """ server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind(('127.0.0.1', 9949)) server.listen(5) @@ -280,35 +397,50 @@ def start_socket_server(): client_handler.start() -def terminate_process(signal_number, frame): +def terminate_process(signal_number: Optional[int], frame: Optional[Any]) -> None: + """ + Terminates the process gracefully. + + Args: + signal_number (Optional[int]): The signal number causing the termination. + frame (Optional[Any]): The current stack frame. 
+ + Returns: + None + """ global running print(f"Terminating process with signal {signal_number}") running = False sys.exit(0) -def main(args): - # Initialize database and create table if necessary +def main(args: argparse.Namespace) -> None: + """ + The main function to run the Bittensor commit-reveal subprocess script. + + Args: + args (argparse.Namespace): The command-line arguments. + + Returns: + None + """ print("Initializing database...") initialize_db() - subtensor = Subtensor(network=args.network, subprocess_initialization=False) # Using network argument - # A new block is created every 12 seconds. Check if the current block is equal to the reveal block + subtensor = Subtensor(network=args.network, subprocess_initialization=False) server_thread = threading.Thread(target=start_socket_server) server_thread.start() + while running: if check_reveal(subtensor=subtensor): print(f"Revealing commit for block {subtensor.get_current_block()}") else: print(f"Nothing to reveal for block {subtensor.get_current_block()}") - time.sleep(args.sleep_interval) # Using sleep interval argument + time.sleep(args.sleep_interval) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Run the Bittensor commit-reveal subprocess script.") parser.add_argument("--network", type=str, default="ws://localhost:9945", help="Subtensor network address") - # TODO: have finney be default - # parser.add_argument("--network", type=str, default="wss://entrypoint-finney.opentensor.ai:443", help="Subtensor network address") parser.add_argument("--sleep-interval", type=float, default=12, help="Interval between block checks in seconds") - # Add more arguments as needed args = parser.parse_args() main(args) From a2ae849d555421c72d81cdab2cbf742585d7f171 Mon Sep 17 00:00:00 2001 From: Roman <167799377+roman-opentensor@users.noreply.github.com> Date: Fri, 18 Oct 2024 09:17:38 -0700 Subject: [PATCH 11/58] Poem "Risen from the Past". Act 3. 
(#2363) * add `get_delegate_by_hotkey`, update `DelegateInfo` in chain data * add `root_register_extrinsic`, `set_root_weights_extrinsic` and related stuff * add `Subtensor.get_all_subnets_info` method and related stuff * add `Subtensor.get_delegate_take` method and tests * ruff * remove unused import --- bittensor/core/chain_data/delegate_info.py | 131 ++++----- bittensor/core/chain_data/subnet_info.py | 104 +++---- bittensor/core/chain_data/utils.py | 2 +- bittensor/core/extrinsics/root.py | 310 +++++++++++++++++++++ bittensor/core/subtensor.py | 170 ++++++++++- tests/unit_tests/extrinsics/test_root.py | 309 ++++++++++++++++++++ tests/unit_tests/test_chain_data.py | 113 -------- tests/unit_tests/test_subtensor.py | 123 ++++++++ 8 files changed, 1010 insertions(+), 252 deletions(-) create mode 100644 bittensor/core/extrinsics/root.py create mode 100644 tests/unit_tests/extrinsics/test_root.py diff --git a/bittensor/core/chain_data/delegate_info.py b/bittensor/core/chain_data/delegate_info.py index d77f1e1412..a840d1bb15 100644 --- a/bittensor/core/chain_data/delegate_info.py +++ b/bittensor/core/chain_data/delegate_info.py @@ -1,10 +1,9 @@ -from dataclasses import dataclass -from typing import Optional, Any +import bt_decode -from scalecodec.utils.ss58 import ss58_encode +from dataclasses import dataclass +from typing import Optional -from bittensor.core.chain_data.utils import from_scale_encoding, ChainDataType -from bittensor.core.settings import SS58_FORMAT +from bittensor.core.chain_data.utils import decode_account_id from bittensor.utils import u16_normalized_float from bittensor.utils.balance import Balance @@ -24,7 +23,6 @@ class DelegateInfo: validator_permits (list[int]): List of subnets that the delegate is allowed to validate on. return_per_1000 (int): Return per 1000 TAO, for the delegate over a day. total_daily_return (int): Total daily return of the delegate. 
- """ hotkey_ss58: str # Hotkey of delegate @@ -37,69 +35,78 @@ class DelegateInfo: validator_permits: list[ int ] # List of subnets that the delegate is allowed to validate on - registrations: tuple[int] # List of subnets that the delegate is registered on + registrations: list[int] # list of subnets that the delegate is registered on return_per_1000: Balance # Return per 1000 tao of the delegate over a day total_daily_return: Balance # Total daily return of the delegate @classmethod - def fix_decoded_values(cls, decoded: Any) -> "DelegateInfo": - """Fixes the decoded values.""" - - return cls( - hotkey_ss58=ss58_encode(decoded["delegate_ss58"], SS58_FORMAT), - owner_ss58=ss58_encode(decoded["owner_ss58"], SS58_FORMAT), - take=u16_normalized_float(decoded["take"]), - nominators=[ - ( - ss58_encode(nom[0], SS58_FORMAT), - Balance.from_rao(nom[1]), - ) - for nom in decoded["nominators"] - ], - total_stake=Balance.from_rao( - sum([nom[1] for nom in decoded["nominators"]]) - ), - validator_permits=decoded["validator_permits"], - registrations=decoded["registrations"], - return_per_1000=Balance.from_rao(decoded["return_per_1000"]), - total_daily_return=Balance.from_rao(decoded["total_daily_return"]), + def from_vec_u8(cls, vec_u8: bytes) -> Optional["DelegateInfo"]: + decoded = bt_decode.DelegateInfo.decode(vec_u8) + hotkey = decode_account_id(decoded.delegate_ss58) + owner = decode_account_id(decoded.owner_ss58) + nominators = [ + (decode_account_id(x), Balance.from_rao(y)) for x, y in decoded.nominators + ] + total_stake = sum((x[1] for x in nominators)) if nominators else Balance(0) + return DelegateInfo( + hotkey_ss58=hotkey, + total_stake=total_stake, + nominators=nominators, + owner_ss58=owner, + take=u16_normalized_float(decoded.take), + validator_permits=decoded.validator_permits, + registrations=decoded.registrations, + return_per_1000=Balance.from_rao(decoded.return_per_1000), + total_daily_return=Balance.from_rao(decoded.total_daily_return), ) @classmethod - 
def from_vec_u8(cls, vec_u8: list[int]) -> Optional["DelegateInfo"]: - """Returns a DelegateInfo object from a ``vec_u8``.""" - if len(vec_u8) == 0: - return None - - decoded = from_scale_encoding(vec_u8, ChainDataType.DelegateInfo) - if decoded is None: - return None - - return DelegateInfo.fix_decoded_values(decoded) - - @classmethod - def list_from_vec_u8(cls, vec_u8: list[int]) -> list["DelegateInfo"]: - """Returns a list of DelegateInfo objects from a ``vec_u8``.""" - decoded = from_scale_encoding(vec_u8, ChainDataType.DelegateInfo, is_vec=True) - - if decoded is None: - return [] - - return [DelegateInfo.fix_decoded_values(d) for d in decoded] + def list_from_vec_u8(cls, vec_u8: bytes) -> list["DelegateInfo"]: + decoded = bt_decode.DelegateInfo.decode_vec(vec_u8) + results = [] + for d in decoded: + hotkey = decode_account_id(d.delegate_ss58) + owner = decode_account_id(d.owner_ss58) + nominators = [ + (decode_account_id(x), Balance.from_rao(y)) for x, y in d.nominators + ] + total_stake = sum((x[1] for x in nominators)) if nominators else Balance(0) + results.append( + DelegateInfo( + hotkey_ss58=hotkey, + total_stake=total_stake, + nominators=nominators, + owner_ss58=owner, + take=u16_normalized_float(d.take), + validator_permits=d.validator_permits, + registrations=d.registrations, + return_per_1000=Balance.from_rao(d.return_per_1000), + total_daily_return=Balance.from_rao(d.total_daily_return), + ) + ) + return results @classmethod def delegated_list_from_vec_u8( - cls, vec_u8: list[int] - ) -> list[tuple["DelegateInfo", "Balance"]]: - """Returns a list of Tuples of DelegateInfo objects, and Balance, from a ``vec_u8``. - - This is the list of delegates that the user has delegated to, and the amount of stake delegated. 
- """ - decoded = from_scale_encoding(vec_u8, ChainDataType.DelegatedInfo, is_vec=True) - if decoded is None: - return [] - - return [ - (DelegateInfo.fix_decoded_values(d), Balance.from_rao(s)) - for d, s in decoded - ] + cls, vec_u8: bytes + ) -> list[tuple["DelegateInfo", Balance]]: + decoded = bt_decode.DelegateInfo.decode_delegated(vec_u8) + results = [] + for d, b in decoded: + nominators = [ + (decode_account_id(x), Balance.from_rao(y)) for x, y in d.nominators + ] + total_stake = sum((x[1] for x in nominators)) if nominators else Balance(0) + delegate = DelegateInfo( + hotkey_ss58=decode_account_id(d.delegate_ss58), + total_stake=total_stake, + nominators=nominators, + owner_ss58=decode_account_id(d.owner_ss58), + take=u16_normalized_float(d.take), + validator_permits=d.validator_permits, + registrations=d.registrations, + return_per_1000=Balance.from_rao(d.return_per_1000), + total_daily_return=Balance.from_rao(d.total_daily_return), + ) + results.append((delegate, Balance.from_rao(b))) + return results diff --git a/bittensor/core/chain_data/subnet_info.py b/bittensor/core/chain_data/subnet_info.py index f1ce151872..4169746a08 100644 --- a/bittensor/core/chain_data/subnet_info.py +++ b/bittensor/core/chain_data/subnet_info.py @@ -1,13 +1,10 @@ from dataclasses import dataclass -from typing import Any, Optional, Union -from scalecodec.utils.ss58 import ss58_encode +import bt_decode -from bittensor.core.chain_data.utils import from_scale_encoding, ChainDataType -from bittensor.core.settings import SS58_FORMAT +from bittensor.core.chain_data.utils import decode_account_id from bittensor.utils import u16_normalized_float from bittensor.utils.balance import Balance -from bittensor.utils.registration import torch, use_torch @dataclass @@ -28,76 +25,39 @@ class SubnetInfo: blocks_since_epoch: int tempo: int modality: int - # netuid -> topk percentile prunning score requirement (u16:MAX normalized.) 
connection_requirements: dict[str, float] emission_value: float burn: Balance owner_ss58: str @classmethod - def from_vec_u8(cls, vec_u8: list[int]) -> Optional["SubnetInfo"]: - """Returns a SubnetInfo object from a ``vec_u8``.""" - if len(vec_u8) == 0: - return None - - decoded = from_scale_encoding(vec_u8, ChainDataType.SubnetInfo) - if decoded is None: - return None - - return SubnetInfo.fix_decoded_values(decoded) - - @classmethod - def list_from_vec_u8(cls, vec_u8: list[int]) -> list["SubnetInfo"]: - """Returns a list of SubnetInfo objects from a ``vec_u8``.""" - decoded = from_scale_encoding( - vec_u8, ChainDataType.SubnetInfo, is_vec=True, is_option=True - ) - - if decoded is None: - return [] - - return [SubnetInfo.fix_decoded_values(d) for d in decoded] - - @classmethod - def fix_decoded_values(cls, decoded: dict) -> "SubnetInfo": - """Returns a SubnetInfo object from a decoded SubnetInfo dictionary.""" - return SubnetInfo( - netuid=decoded["netuid"], - rho=decoded["rho"], - kappa=decoded["kappa"], - difficulty=decoded["difficulty"], - immunity_period=decoded["immunity_period"], - max_allowed_validators=decoded["max_allowed_validators"], - min_allowed_weights=decoded["min_allowed_weights"], - max_weight_limit=decoded["max_weights_limit"], - scaling_law_power=decoded["scaling_law_power"], - subnetwork_n=decoded["subnetwork_n"], - max_n=decoded["max_allowed_uids"], - blocks_since_epoch=decoded["blocks_since_last_step"], - tempo=decoded["tempo"], - modality=decoded["network_modality"], - connection_requirements={ - str(int(netuid)): u16_normalized_float(int(req)) - for netuid, req in decoded["network_connect"] - }, - emission_value=decoded["emission_values"], - burn=Balance.from_rao(decoded["burn"]), - owner_ss58=ss58_encode(decoded["owner"], SS58_FORMAT), - ) - - def to_parameter_dict(self) -> Union[dict[str, Any], "torch.nn.ParameterDict"]: - """Returns a torch tensor or dict of the subnet info.""" - if use_torch(): - return 
torch.nn.ParameterDict(self.__dict__) - else: - return self.__dict__ - - @classmethod - def from_parameter_dict( - cls, parameter_dict: Union[dict[str, Any], "torch.nn.ParameterDict"] - ) -> "SubnetInfo": - """Creates a SubnetInfo instance from a parameter dictionary.""" - if use_torch(): - return cls(**dict(parameter_dict)) - else: - return cls(**parameter_dict) + def list_from_vec_u8(cls, vec_u8: bytes) -> list["SubnetInfo"]: + decoded = bt_decode.SubnetInfo.decode_vec_option(vec_u8) + result = [] + for d in decoded: + result.append( + SubnetInfo( + netuid=d.netuid, + rho=d.rho, + kappa=d.kappa, + difficulty=d.difficulty, + immunity_period=d.immunity_period, + max_allowed_validators=d.max_allowed_validators, + min_allowed_weights=d.min_allowed_weights, + max_weight_limit=d.max_weights_limit, + scaling_law_power=d.scaling_law_power, + subnetwork_n=d.subnetwork_n, + max_n=d.max_allowed_uids, + blocks_since_epoch=d.blocks_since_last_step, + tempo=d.tempo, + modality=d.network_modality, + connection_requirements={ + str(int(netuid)): u16_normalized_float(int(req)) + for (netuid, req) in d.network_connect + }, + emission_value=d.emission_values, + burn=Balance.from_rao(d.burn), + owner_ss58=decode_account_id(d.owner), + ) + ) + return result diff --git a/bittensor/core/chain_data/utils.py b/bittensor/core/chain_data/utils.py index 0544ca85a2..9c21c9d22e 100644 --- a/bittensor/core/chain_data/utils.py +++ b/bittensor/core/chain_data/utils.py @@ -260,7 +260,7 @@ def from_scale_encoding_using_type_string( } -def decode_account_id(account_id_bytes: list) -> str: +def decode_account_id(account_id_bytes: Union[bytes, str]) -> str: """ Decodes an AccountId from bytes to a Base64 string using SS58 encoding. 
diff --git a/bittensor/core/extrinsics/root.py b/bittensor/core/extrinsics/root.py new file mode 100644 index 0000000000..1fd7e7b26e --- /dev/null +++ b/bittensor/core/extrinsics/root.py @@ -0,0 +1,310 @@ +import time +from typing import Optional, Union, TYPE_CHECKING + +import numpy as np +from bittensor_wallet.errors import KeyFileError +from numpy.typing import NDArray +from retry import retry +from rich.prompt import Confirm + +from bittensor.core.settings import bt_console, version_as_int +from bittensor.utils import format_error_message, weight_utils +from bittensor.utils.btlogging import logging +from bittensor.utils.networking import ensure_connected +from bittensor.utils.registration import torch, legacy_torch_api_compat + +if TYPE_CHECKING: + from bittensor_wallet import Wallet + from bittensor.core.subtensor import Subtensor + + +@ensure_connected +def _do_root_register( + self: "Subtensor", + wallet: "Wallet", + wait_for_inclusion: bool = False, + wait_for_finalization: bool = True, +) -> tuple[bool, Optional[str]]: + @retry(delay=1, tries=3, backoff=2, max_delay=4) + def make_substrate_call_with_retry(): + # create extrinsic call + call = self.substrate.compose_call( + call_module="SubtensorModule", + call_function="root_register", + call_params={"hotkey": wallet.hotkey.ss58_address}, + ) + extrinsic = self.substrate.create_signed_extrinsic( + call=call, keypair=wallet.coldkey + ) + response = self.substrate.submit_extrinsic( + extrinsic, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + + # We only wait here if we expect finalization. 
+        if not wait_for_finalization and not wait_for_inclusion:
+            return True
+
+        # process if registration successful, try again if pow is still valid
+        response.process_events()
+        if not response.is_success:
+            return False, format_error_message(response.error_message)
+        # Successful registration
+        else:
+            return True, None
+
+    return make_substrate_call_with_retry()
+
+
+def root_register_extrinsic(
+    subtensor: "Subtensor",
+    wallet: "Wallet",
+    wait_for_inclusion: bool = False,
+    wait_for_finalization: bool = True,
+    prompt: bool = False,
+) -> bool:
+    """Registers the wallet to root network.
+
+    Args:
+        subtensor (bittensor.core.subtensor.Subtensor): Subtensor instance.
+        wallet (bittensor_wallet.Wallet): Bittensor wallet object.
+        wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. Default is ``False``.
+        wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. Default is ``True``.
+        prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. Default is ``False``.
+
+    Returns:
+        success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
+ """ + + try: + wallet.unlock_coldkey() + except KeyFileError: + bt_console.print( + ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" + ) + return False + + is_registered = subtensor.is_hotkey_registered( + netuid=0, hotkey_ss58=wallet.hotkey.ss58_address + ) + if is_registered: + bt_console.print( + ":white_heavy_check_mark: [green]Already registered on root network.[/green]" + ) + return True + + if prompt: + # Prompt user for confirmation. + if not Confirm.ask("Register to root network?"): + return False + + with bt_console.status(":satellite: Registering to root network..."): + success, err_msg = _do_root_register( + wallet=wallet, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + + if not success: + bt_console.print(f":cross_mark: [red]Failed[/red]: {err_msg}") + time.sleep(0.5) + + # Successful registration, final check for neuron and pubkey + else: + is_registered = subtensor.is_hotkey_registered( + netuid=0, hotkey_ss58=wallet.hotkey.ss58_address + ) + if is_registered: + bt_console.print(":white_heavy_check_mark: [green]Registered[/green]") + return True + else: + # neuron not found, try again + bt_console.print( + ":cross_mark: [red]Unknown error. Neuron not found.[/red]" + ) + + +@ensure_connected +def _do_set_root_weights( + self: "Subtensor", + wallet: "Wallet", + uids: list[int], + vals: list[int], + netuid: int = 0, + version_key: int = version_as_int, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, +) -> tuple[bool, Optional[str]]: + """ + Internal method to send a transaction to the Bittensor blockchain, setting weights for specified neurons on root. This method constructs and submits the transaction, handling retries and blockchain communication. + + Args: + self (bittensor.core.subtensor.Subtensor): Subtensor instance. 
+ wallet (bittensor_wallet.Wallet): The wallet associated with the neuron setting the weights. + uids (List[int]): List of neuron UIDs for which weights are being set. + vals (List[int]): List of weight values corresponding to each UID. + netuid (int): Unique identifier for the network. + version_key (int, optional): Version key for compatibility with the network. Defaults is a current ``version_as_int``. + wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. Defaults is ``False``. + wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. Defaults is ``False``. + + Returns: + Tuple[bool, Optional[str]]: A tuple containing a success flag and an optional error message. + + This method is vital for the dynamic weighting mechanism in Bittensor, where neurons adjust their trust in other neurons based on observed performance and contributions on the root network. + """ + + @retry(delay=2, tries=3, backoff=2, max_delay=4) + def make_substrate_call_with_retry(): + call = self.substrate.compose_call( + call_module="SubtensorModule", + call_function="set_root_weights", + call_params={ + "dests": uids, + "weights": vals, + "netuid": netuid, + "version_key": version_key, + "hotkey": wallet.hotkey.ss58_address, + }, + ) + # Period dictates how long the extrinsic will stay as part of waiting pool + extrinsic = self.substrate.create_signed_extrinsic( + call=call, + keypair=wallet.coldkey, + era={"period": 5}, + ) + response = self.substrate.submit_extrinsic( + extrinsic, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + # We only wait here if we expect finalization. + if not wait_for_finalization and not wait_for_inclusion: + return True, "Not waiting for finalziation or inclusion." + + response.process_events() + if response.is_success: + return True, "Successfully set weights." 
+        else:
+            return False, response.error_message
+
+    return make_substrate_call_with_retry()
+
+
+@legacy_torch_api_compat
+def set_root_weights_extrinsic(
+    subtensor: "Subtensor",
+    wallet: "Wallet",
+    netuids: Union[NDArray[np.int64], "torch.LongTensor", list[int]],
+    weights: Union[NDArray[np.float32], "torch.FloatTensor", list[float]],
+    version_key: int = 0,
+    wait_for_inclusion: bool = False,
+    wait_for_finalization: bool = False,
+    prompt: bool = False,
+) -> bool:
+    """Sets the given weights and values on chain for wallet hotkey account.
+
+    Args:
+        subtensor (bittensor.core.subtensor.Subtensor): Subtensor instance.
+        wallet (bittensor_wallet.Wallet): Bittensor wallet object.
+        netuids (Union[NDArray[np.int64], torch.LongTensor, list[int]]): The ``netuid`` of the subnet to set weights for.
+        weights (Union[NDArray[np.float32], torch.FloatTensor, list[float]]): Weights to set. These must be ``float`` s and must correspond to the passed ``netuid`` s.
+        version_key (int): The version key of the validator. Default is ``0``.
+        wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. Default is ``False``.
+        wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. Default is ``False``.
+        prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. Default is ``False``.
+
+    Returns:
+        success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``.
+ """ + + try: + wallet.unlock_coldkey() + except KeyFileError: + bt_console.print( + ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" + ) + return False + + # First convert types. + if isinstance(netuids, list): + netuids = np.array(netuids, dtype=np.int64) + if isinstance(weights, list): + weights = np.array(weights, dtype=np.float32) + + # Get weight restrictions. + min_allowed_weights = subtensor.min_allowed_weights(netuid=0) + max_weight_limit = subtensor.max_weight_limit(netuid=0) + + # Get non zero values. + non_zero_weight_idx = np.argwhere(weights > 0).squeeze(axis=1) + non_zero_weight_uids = netuids[non_zero_weight_idx] + non_zero_weights = weights[non_zero_weight_idx] + if non_zero_weights.size < min_allowed_weights: + raise ValueError( + "The minimum number of weights required to set weights is {}, got {}".format( + min_allowed_weights, non_zero_weights.size + ) + ) + + # Normalize the weights to max value. + formatted_weights = weight_utils.normalize_max_weight( + x=weights, limit=max_weight_limit + ) + bt_console.print( + f"\nRaw Weights -> Normalized weights: \n\t{weights} -> \n\t{formatted_weights}\n" + ) + + # Ask before moving on. 
+ if prompt: + if not Confirm.ask( + "Do you want to set the following root weights?:\n[bold white] weights: {}\n uids: {}[/bold white ]?".format( + formatted_weights, netuids + ) + ): + return False + + with bt_console.status( + ":satellite: Setting root weights on [white]{}[/white] ...".format( + subtensor.network + ) + ): + try: + weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( + netuids, weights + ) + success, error_message = _do_set_root_weights( + wallet=wallet, + netuid=0, + uids=weight_uids, + vals=weight_vals, + version_key=version_key, + wait_for_finalization=wait_for_finalization, + wait_for_inclusion=wait_for_inclusion, + ) + + bt_console.print(success, error_message) + + if not wait_for_finalization and not wait_for_inclusion: + return True + + if success is True: + bt_console.print(":white_heavy_check_mark: [green]Finalized[/green]") + logging.success( + prefix="Set weights", + suffix="Finalized: " + str(success), + ) + return True + else: + bt_console.print(f":cross_mark: [red]Failed[/red]: {error_message}") + logging.warning( + prefix="Set weights", + suffix="Failed: " + str(error_message), + ) + return False + + except Exception as e: + bt_console.print(":cross_mark: [red]Failed[/red]: error:{}".format(e)) + logging.warning(prefix="Set weights", suffix="Failed: " + str(e)) + return False diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index b57b3d85bd..ac6c46bc46 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -39,11 +39,13 @@ from bittensor.core import settings from bittensor.core.axon import Axon from bittensor.core.chain_data import ( + custom_rpc_type_registry, + DelegateInfo, NeuronInfo, + NeuronInfoLite, PrometheusInfo, SubnetHyperparameters, - NeuronInfoLite, - custom_rpc_type_registry, + SubnetInfo, ) from bittensor.core.config import Config from bittensor.core.extrinsics.commit_weights import ( @@ -58,6 +60,10 @@ burned_register_extrinsic, register_extrinsic, ) 
+from bittensor.core.extrinsics.root import ( + root_register_extrinsic, + set_root_weights_extrinsic, +) from bittensor.core.extrinsics.serving import ( do_serve_axon, serve_axon_extrinsic, @@ -69,10 +75,10 @@ transfer_extrinsic, ) from bittensor.core.metagraph import Metagraph -from bittensor.utils import torch -from bittensor.utils import u16_normalized_float, networking +from bittensor.utils import networking, torch, ss58_to_vec_u8, u16_normalized_float from bittensor.utils.balance import Balance from bittensor.utils.btlogging import logging +from bittensor.utils.registration import legacy_torch_api_compat from bittensor.utils.weight_utils import generate_weight_hash KEY_NONCE: dict[str, int] = {} @@ -902,6 +908,45 @@ def set_weights( return success, message + @legacy_torch_api_compat + def root_set_weights( + self, + wallet: "Wallet", + netuids: Union[NDArray[np.int64], "torch.LongTensor", list], + weights: Union[NDArray[np.float32], "torch.FloatTensor", list], + version_key: int = 0, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, + prompt: bool = False, + ) -> bool: + """ + Sets the weights for neurons on the root network. This action is crucial for defining the influence and interactions of neurons at the root level of the Bittensor network. + + Args: + wallet (bittensor_wallet.Wallet): The wallet associated with the neuron setting the weights. + netuids (Union[NDArray[np.int64], torch.LongTensor, list]): The list of neuron UIDs for which weights are being set. + weights (Union[NDArray[np.float32], torch.FloatTensor, list]): The corresponding weights to be set for each UID. + version_key (int, optional): Version key for compatibility with the network. Default is ``0``. + wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. Defaults to ``False``. + wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. Defaults to ``False``. 
+ prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. Defaults to ``False``. + + Returns: + bool: ``True`` if the setting of root-level weights is successful, False otherwise. + + This function plays a pivotal role in shaping the root network's collective intelligence and decision-making processes, reflecting the principles of decentralized governance and collaborative learning in Bittensor. + """ + return set_root_weights_extrinsic( + subtensor=self, + wallet=wallet, + netuids=netuids, + weights=weights, + version_key=version_key, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + prompt=prompt, + ) + def register( self, wallet: "Wallet", @@ -961,6 +1006,35 @@ def register( log_verbose=log_verbose, ) + def root_register( + self, + wallet: "Wallet", + wait_for_inclusion: bool = False, + wait_for_finalization: bool = True, + prompt: bool = False, + ) -> bool: + """ + Registers the neuron associated with the wallet on the root network. This process is integral for participating in the highest layer of decision-making and governance within the Bittensor network. + + Args: + wallet (bittensor.wallet): The wallet associated with the neuron to be registered on the root network. + wait_for_inclusion (bool): Waits for the transaction to be included in a block. Defaults to `False`. + wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Defaults to `True`. + prompt (bool): If ``True``, prompts for user confirmation before proceeding. Defaults to `False`. + + Returns: + bool: ``True`` if the registration on the root network is successful, False otherwise. + + This function enables neurons to engage in the most critical and influential aspects of the network's governance, signifying a high level of commitment and responsibility in the Bittensor ecosystem. 
+ """ + return root_register_extrinsic( + subtensor=self, + wallet=wallet, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + prompt=prompt, + ) + def burned_register( self, wallet: "Wallet", @@ -1419,6 +1493,36 @@ def subnet_exists(self, netuid: int, block: Optional[int] = None) -> bool: _result = self.query_subtensor("NetworksAdded", block, [netuid]) return getattr(_result, "value", False) + @networking.ensure_connected + def get_all_subnets_info(self, block: Optional[int] = None) -> list[SubnetInfo]: + """ + Retrieves detailed information about all subnets within the Bittensor network. This function provides comprehensive data on each subnet, including its characteristics and operational parameters. + + Args: + block (Optional[int]): The blockchain block number for the query. + + Returns: + list[SubnetInfo]: A list of SubnetInfo objects, each containing detailed information about a subnet. + + Gaining insights into the subnets' details assists in understanding the network's composition, the roles of different subnets, and their unique features. 
+ """ + + @retry(delay=1, tries=3, backoff=2, max_delay=4) + def make_substrate_call_with_retry(): + block_hash = None if block is None else self.substrate.get_block_hash(block) + + return self.substrate.rpc_request( + method="subnetInfo_getSubnetsInfo", # custom rpc method + params=[block_hash] if block_hash else [], + ) + + json_body = make_substrate_call_with_retry() + + if not (result := json_body.get("result", None)): + return [] + + return SubnetInfo.list_from_vec_u8(result) + # Metagraph uses this method def bonds( self, netuid: int, block: Optional[int] = None @@ -1885,6 +1989,64 @@ def recycle(self, netuid: int, block: Optional[int] = None) -> Optional["Balance call = self._get_hyperparameter(param_name="Burn", netuid=netuid, block=block) return None if call is None else Balance.from_rao(int(call)) + def get_delegate_take( + self, hotkey_ss58: str, block: Optional[int] = None + ) -> Optional[float]: + """ + Retrieves the delegate 'take' percentage for a neuron identified by its hotkey. The 'take' represents the percentage of rewards that the delegate claims from its nominators' stakes. + + Args: + hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. + block (Optional[int]): The blockchain block number for the query. + + Returns: + Optional[float]: The delegate take percentage, None if not available. + + The delegate take is a critical parameter in the network's incentive structure, influencing the distribution of rewards among neurons and their nominators. + """ + _result = self.query_subtensor("Delegates", block, [hotkey_ss58]) + return ( + None + if getattr(_result, "value", None) is None + else u16_normalized_float(_result.value) + ) + + @networking.ensure_connected + def get_delegate_by_hotkey( + self, hotkey_ss58: str, block: Optional[int] = None + ) -> Optional[DelegateInfo]: + """ + Retrieves detailed information about a delegate neuron based on its hotkey. 
This function provides a comprehensive view of the delegate's status, including its stakes, nominators, and reward distribution. + + Args: + hotkey_ss58 (str): The ``SS58`` address of the delegate's hotkey. + block (Optional[int]): The blockchain block number for the query. Default is ``None``. + + Returns: + Optional[DelegateInfo]: Detailed information about the delegate neuron, ``None`` if not found. + + This function is essential for understanding the roles and influence of delegate neurons within the Bittensor network's consensus and governance structures. + """ + + @retry(delay=1, tries=3, backoff=2, max_delay=4) + def make_substrate_call_with_retry(encoded_hotkey_: list[int]): + block_hash = None if block is None else self.substrate.get_block_hash(block) + + return self.substrate.rpc_request( + method="delegateInfo_getDelegate", # custom rpc method + params=( + [encoded_hotkey_, block_hash] if block_hash else [encoded_hotkey_] + ), + ) + + encoded_hotkey = ss58_to_vec_u8(hotkey_ss58) + json_body = make_substrate_call_with_retry(encoded_hotkey) + + if not (result := json_body.get("result", None)): + return None + + return DelegateInfo.from_vec_u8(result) + # Subnet 27 uses this method _do_serve_prometheus = do_serve_prometheus # Subnet 27 uses this method name diff --git a/tests/unit_tests/extrinsics/test_root.py b/tests/unit_tests/extrinsics/test_root.py new file mode 100644 index 0000000000..bd37be203f --- /dev/null +++ b/tests/unit_tests/extrinsics/test_root.py @@ -0,0 +1,309 @@ +import pytest +from bittensor.core.subtensor import Subtensor +from bittensor.core.extrinsics import root + + +@pytest.fixture +def mock_subtensor(mocker): + mock = mocker.MagicMock(spec=Subtensor) + mock.network = "magic_mock" + return mock + + +@pytest.fixture +def mock_wallet(mocker): + mock = mocker.MagicMock() + mock.hotkey.ss58_address = "fake_hotkey_address" + return mock + + +@pytest.mark.parametrize( + "wait_for_inclusion, wait_for_finalization, hotkey_registered, 
registration_success, prompt, user_response, expected_result", + [ + ( + False, + True, + [True, None], + True, + True, + True, + True, + ), # Already registered after attempt + ( + False, + True, + [False, True], + True, + True, + True, + True, + ), # Registration succeeds with user confirmation + (False, True, [False, False], False, False, None, None), # Registration fails + ( + False, + True, + [False, False], + True, + False, + None, + None, + ), # Registration succeeds but neuron not found + ( + False, + True, + [False, False], + True, + True, + False, + False, + ), # User declines registration + ], + ids=[ + "success-already-registered", + "success-registration-succeeds", + "failure-registration-failed", + "failure-neuron-not-found", + "failure-prompt-declined", + ], +) +def test_root_register_extrinsic( + mock_subtensor, + mock_wallet, + wait_for_inclusion, + wait_for_finalization, + hotkey_registered, + registration_success, + prompt, + user_response, + expected_result, + mocker, +): + # Arrange + mock_subtensor.is_hotkey_registered.side_effect = hotkey_registered + + with mocker.patch("rich.prompt.Confirm.ask", return_value=user_response): + # Preps + mock_register = mocker.Mock( + return_value=(registration_success, "Error registering") + ) + root._do_root_register = mock_register + + # Act + result = root.root_register_extrinsic( + subtensor=mock_subtensor, + wallet=mock_wallet, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + prompt=prompt, + ) + # Assert + assert result == expected_result + + if not hotkey_registered[0] and user_response: + mock_register.assert_called_once() + + +@pytest.mark.parametrize( + "wait_for_inclusion, wait_for_finalization, netuids, weights, prompt, user_response, expected_success", + [ + (True, False, [1, 2], [0.5, 0.5], True, True, True), # Success - weights set + ( + False, + False, + [1, 2], + [0.5, 0.5], + False, + None, + True, + ), # Success - weights set no wait + ( + True, + 
False, + [1, 2], + [2000, 20], + True, + True, + True, + ), # Success - large value to be normalized + ( + True, + False, + [1, 2], + [2000, 0], + True, + True, + True, + ), # Success - single large value + ( + True, + False, + [1, 2], + [0.5, 0.5], + True, + False, + False, + ), # Failure - prompt declined + ( + True, + False, + [1, 2], + [0.5, 0.5], + False, + None, + False, + ), # Failure - setting weights failed + ( + True, + False, + [], + [], + None, + False, + False, + ), # Exception catched - ValueError 'min() arg is an empty sequence' + ], + ids=[ + "success-weights-set", + "success-not-wait", + "success-large-value", + "success-single-value", + "failure-user-declines", + "failure-setting-weights", + "failure-value-error-exception", + ], +) +def test_set_root_weights_extrinsic( + mock_subtensor, + mock_wallet, + wait_for_inclusion, + wait_for_finalization, + netuids, + weights, + prompt, + user_response, + expected_success, + mocker, +): + # Preps + root._do_set_root_weights = mocker.Mock( + return_value=(expected_success, "Mock error") + ) + mock_subtensor.min_allowed_weights = mocker.Mock(return_value=0) + mock_subtensor.max_weight_limit = mocker.Mock(return_value=1) + mock_confirm = mocker.Mock(return_value=(expected_success, "Mock error")) + root.Confirm.ask = mock_confirm + + # Call + result = root.set_root_weights_extrinsic( + subtensor=mock_subtensor, + wallet=mock_wallet, + netuids=netuids, + weights=weights, + version_key=0, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + prompt=prompt, + ) + + # Asserts + assert result == expected_success + if prompt: + mock_confirm.assert_called_once() + else: + mock_confirm.assert_not_called() + + +@pytest.mark.parametrize( + "wait_for_inclusion, wait_for_finalization, netuids, weights, prompt, user_response, expected_success", + [ + (True, False, [1, 2], [0.5, 0.5], True, True, True), # Success - weights set + ( + False, + False, + [1, 2], + [0.5, 0.5], + False, + 
None, + True, + ), # Success - weights set no wait + ( + True, + False, + [1, 2], + [2000, 20], + True, + True, + True, + ), # Success - large value to be normalized + ( + True, + False, + [1, 2], + [2000, 0], + True, + True, + True, + ), # Success - single large value + ( + True, + False, + [1, 2], + [0.5, 0.5], + True, + False, + False, + ), # Failure - prompt declined + ( + True, + False, + [1, 2], + [0.5, 0.5], + False, + None, + False, + ), # Failure - setting weights failed + ( + True, + False, + [], + [], + None, + False, + False, + ), # Exception catched - ValueError 'min() arg is an empty sequence' + ], + ids=[ + "success-weights-set", + "success-not-wait", + "success-large-value", + "success-single-value", + "failure-user-declines", + "failure-setting-weights", + "failure-value-error-exception", + ], +) +def test_set_root_weights_extrinsic_torch( + mock_subtensor, + mock_wallet, + wait_for_inclusion, + wait_for_finalization, + netuids, + weights, + prompt, + user_response, + expected_success, + force_legacy_torch_compatible_api, + mocker, +): + test_set_root_weights_extrinsic( + mock_subtensor, + mock_wallet, + wait_for_inclusion, + wait_for_finalization, + netuids, + weights, + prompt, + user_response, + expected_success, + mocker, + ) diff --git a/tests/unit_tests/test_chain_data.py b/tests/unit_tests/test_chain_data.py index 353f697d46..65232e3382 100644 --- a/tests/unit_tests/test_chain_data.py +++ b/tests/unit_tests/test_chain_data.py @@ -364,116 +364,3 @@ def create_neuron_info_decoded( "prometheus_info": prometheus_info, "axon_info": axon_info, } - - -@pytest.fixture -def mock_from_scale_encoding(mocker): - return mocker.patch("bittensor.core.chain_data.delegate_info.from_scale_encoding") - - -@pytest.fixture -def mock_fix_decoded_values(mocker): - return mocker.patch( - "bittensor.core.chain_data.DelegateInfo.fix_decoded_values", - side_effect=lambda x: x, - ) - - -@pytest.mark.parametrize( - "test_id, vec_u8, expected", - [ - ( - "happy-path-1", 
- [1, 2, 3], - [ - DelegateInfo( - hotkey_ss58="hotkey", - total_stake=1000, - nominators=[ - "nominator1", - "nominator2", - ], - owner_ss58="owner", - take=10.1, - validator_permits=[1, 2, 3], - registrations=[4, 5, 6], - return_per_1000=100, - total_daily_return=1000, - ) - ], - ), - ( - "happy-path-2", - [4, 5, 6], - [ - DelegateInfo( - hotkey_ss58="hotkey", - total_stake=1000, - nominators=[ - "nominator1", - "nominator2", - ], - owner_ss58="owner", - take=2.1, - validator_permits=[1, 2, 3], - registrations=[4, 5, 6], - return_per_1000=100, - total_daily_return=1000, - ) - ], - ), - ], -) -def test_list_from_vec_u8_happy_path( - mock_from_scale_encoding, mock_fix_decoded_values, test_id, vec_u8, expected -): - # Arrange - mock_from_scale_encoding.return_value = expected - - # Act - result = DelegateInfo.list_from_vec_u8(vec_u8) - - # Assert - mock_from_scale_encoding.assert_called_once_with( - vec_u8, ChainDataType.DelegateInfo, is_vec=True - ) - assert result == expected, f"Failed {test_id}" - - -@pytest.mark.parametrize( - "test_id, vec_u8, expected", - [ - ("edge_empty_list", [], []), - ], -) -def test_list_from_vec_u8_edge_cases( - mock_from_scale_encoding, mock_fix_decoded_values, test_id, vec_u8, expected -): - # Arrange - mock_from_scale_encoding.return_value = None - - # Act - result = DelegateInfo.list_from_vec_u8(vec_u8) - - # Assert - mock_from_scale_encoding.assert_called_once_with( - vec_u8, ChainDataType.DelegateInfo, is_vec=True - ) - assert result == expected, f"Failed {test_id}" - - -@pytest.mark.parametrize( - "vec_u8, expected_exception", - [ - ("not_a_list", TypeError), - ], -) -def test_list_from_vec_u8_error_cases( - vec_u8, - expected_exception, -): - # No Arrange section needed as input values are provided via test parameters - - # Act & Assert - with pytest.raises(expected_exception): - _ = DelegateInfo.list_from_vec_u8(vec_u8) diff --git a/tests/unit_tests/test_subtensor.py b/tests/unit_tests/test_subtensor.py index 
bc1ea360c6..6d8fb1ff5f 100644 --- a/tests/unit_tests/test_subtensor.py +++ b/tests/unit_tests/test_subtensor.py @@ -2181,3 +2181,126 @@ def test_recycle_none(subtensor, mocker): ) assert result is None + + +# `get_all_subnets_info` tests +def test_get_all_subnets_info_success(mocker, subtensor): + """Test get_all_subnets_info returns correct data when subnet information is found.""" + # Prep + block = 123 + subnet_data = [1, 2, 3] # Mocked response data + mocker.patch.object( + subtensor.substrate, "get_block_hash", return_value="mock_block_hash" + ) + mock_response = {"result": subnet_data} + mocker.patch.object(subtensor.substrate, "rpc_request", return_value=mock_response) + mocker.patch.object( + subtensor_module.SubnetInfo, + "list_from_vec_u8", + return_value="list_from_vec_u80", + ) + + # Call + result = subtensor.get_all_subnets_info(block) + + # Asserts + subtensor.substrate.get_block_hash.assert_called_once_with(block) + subtensor.substrate.rpc_request.assert_called_once_with( + method="subnetInfo_getSubnetsInfo", params=["mock_block_hash"] + ) + subtensor_module.SubnetInfo.list_from_vec_u8.assert_called_once_with(subnet_data) + + +@pytest.mark.parametrize("result_", [[], None]) +def test_get_all_subnets_info_no_data(mocker, subtensor, result_): + """Test get_all_subnets_info returns empty list when no subnet information is found.""" + # Prep + block = 123 + mocker.patch.object( + subtensor.substrate, "get_block_hash", return_value="mock_block_hash" + ) + mock_response = {"result": result_} + mocker.patch.object(subtensor.substrate, "rpc_request", return_value=mock_response) + mocker.patch.object(subtensor_module.SubnetInfo, "list_from_vec_u8") + + # Call + result = subtensor.get_all_subnets_info(block) + + # Asserts + assert result == [] + subtensor.substrate.get_block_hash.assert_called_once_with(block) + subtensor.substrate.rpc_request.assert_called_once_with( + method="subnetInfo_getSubnetsInfo", params=["mock_block_hash"] + ) + 
subtensor_module.SubnetInfo.list_from_vec_u8.assert_not_called() + + +def test_get_all_subnets_info_retry(mocker, subtensor): + """Test get_all_subnets_info retries on failure.""" + # Prep + block = 123 + subnet_data = [1, 2, 3] + mocker.patch.object( + subtensor.substrate, "get_block_hash", return_value="mock_block_hash" + ) + mock_response = {"result": subnet_data} + mock_rpc_request = mocker.patch.object( + subtensor.substrate, + "rpc_request", + side_effect=[Exception, Exception, mock_response], + ) + mocker.patch.object( + subtensor_module.SubnetInfo, "list_from_vec_u8", return_value=["some_data"] + ) + + # Call + result = subtensor.get_all_subnets_info(block) + + # Asserts + subtensor.substrate.get_block_hash.assert_called_with(block) + assert mock_rpc_request.call_count == 3 + subtensor_module.SubnetInfo.list_from_vec_u8.assert_called_once_with(subnet_data) + assert result == ["some_data"] + + +def test_get_delegate_take_success(subtensor, mocker): + """Verify `get_delegate_take` method successful path.""" + # Preps + fake_hotkey_ss58 = "FAKE_SS58" + fake_block = 123 + + subtensor_module.u16_normalized_float = mocker.Mock() + subtensor.query_subtensor = mocker.Mock(return_value=mocker.Mock(value="value")) + + # Call + result = subtensor.get_delegate_take(hotkey_ss58=fake_hotkey_ss58, block=fake_block) + + # Asserts + subtensor.query_subtensor.assert_called_once_with( + "Delegates", fake_block, [fake_hotkey_ss58] + ) + subtensor_module.u16_normalized_float.assert_called_once_with( + subtensor.query_subtensor.return_value.value + ) + assert result == subtensor_module.u16_normalized_float.return_value + + +def test_get_delegate_take_none(subtensor, mocker): + """Verify `get_delegate_take` method returns None.""" + # Preps + fake_hotkey_ss58 = "FAKE_SS58" + fake_block = 123 + + subtensor.query_subtensor = mocker.Mock(return_value=mocker.Mock(value=None)) + subtensor_module.u16_normalized_float = mocker.Mock() + + # Call + result = 
subtensor.get_delegate_take(hotkey_ss58=fake_hotkey_ss58, block=fake_block) + + # Asserts + subtensor.query_subtensor.assert_called_once_with( + "Delegates", fake_block, [fake_hotkey_ss58] + ) + + subtensor_module.u16_normalized_float.assert_not_called() + assert result is None From 6c36639fee3247fe914b5fdaceaec01531408d56 Mon Sep 17 00:00:00 2001 From: opendansor Date: Fri, 18 Oct 2024 15:56:06 -0700 Subject: [PATCH 12/58] Add batch weight reveal functionality Introduced batch processing for weight reveal operations in the Bittensor network. This includes a new method `batch_reveal_weights` and necessary changes to accommodate batch transactions, ensuring efficiency and scalability. Additionally, the commit/reveal intervals were replaced with periods for more precise control. --- .../core/chain_data/subnet_hyperparameters.py | 6 +- bittensor/core/chain_data/utils.py | 2 +- bittensor/core/extrinsics/commit_weights.py | 215 ++++++++++++++-- bittensor/core/subtensor.py | 84 ++++++- scripts/subprocess/commit_reveal.py | 96 +++++++- tests/e2e_tests/test_commit_weights.py | 10 +- tests/e2e_tests/test_reveal_weights.py | 230 ++++++++++++++++-- 7 files changed, 584 insertions(+), 59 deletions(-) diff --git a/bittensor/core/chain_data/subnet_hyperparameters.py b/bittensor/core/chain_data/subnet_hyperparameters.py index c28f802cfc..adc93f0bdb 100644 --- a/bittensor/core/chain_data/subnet_hyperparameters.py +++ b/bittensor/core/chain_data/subnet_hyperparameters.py @@ -32,7 +32,7 @@ class SubnetHyperparameters: max_validators (int): Maximum number of validators. adjustment_alpha (int): Alpha value for adjustments. difficulty (int): Difficulty level. - commit_reveal_weights_interval (int): Interval for commit-reveal weights. + commit_reveal_periods (int): Periods for commit-reveal weights. commit_reveal_weights_enabled (bool): Flag indicating if commit-reveal weights are enabled. alpha_high (int): High value of alpha. alpha_low (int): Low value of alpha. 
@@ -61,7 +61,7 @@ class SubnetHyperparameters: max_validators: int adjustment_alpha: int difficulty: int - commit_reveal_weights_interval: int + commit_reveal_periods: int commit_reveal_weights_enabled: bool alpha_high: int alpha_low: int @@ -104,7 +104,7 @@ def from_vec_u8(cls, vec_u8: bytes) -> Optional["SubnetHyperparameters"]: max_validators=decoded.max_validators, adjustment_alpha=decoded.adjustment_alpha, difficulty=decoded.difficulty, - commit_reveal_weights_interval=decoded.commit_reveal_weights_interval, + commit_reveal_periods=decoded.commit_reveal_periods, commit_reveal_weights_enabled=decoded.commit_reveal_weights_enabled, alpha_high=decoded.alpha_high, alpha_low=decoded.alpha_low, diff --git a/bittensor/core/chain_data/utils.py b/bittensor/core/chain_data/utils.py index 0544ca85a2..ec85750748 100644 --- a/bittensor/core/chain_data/utils.py +++ b/bittensor/core/chain_data/utils.py @@ -241,7 +241,7 @@ def from_scale_encoding_using_type_string( ["max_validators", "Compact"], ["adjustment_alpha", "Compact"], ["difficulty", "Compact"], - ["commit_reveal_weights_interval", "Compact"], + ["commit_reveal_periods", "Compact"], ["commit_reveal_weights_enabled", "bool"], ["alpha_high", "Compact"], ["alpha_low", "Compact"], diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index 6b6b917910..c243202578 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -38,12 +38,12 @@ # # Chain call for `commit_weights_extrinsic` @ensure_connected def do_commit_weights( - self: "Subtensor", - wallet: "Wallet", - netuid: int, - commit_hash: str, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, + self: "Subtensor", + wallet: "Wallet", + netuid: int, + commit_hash: str, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, ) -> tuple[bool, Optional[dict]]: """ Internal method to send a transaction to the Bittensor blockchain, 
committing the hash of a neuron's weights. @@ -147,14 +147,14 @@ def commit_weights_extrinsic( def commit_weights_process( - subtensor: "Subtensor", - wallet: "Wallet", - netuid: int, - commit_hash: str, - uids: list[int], - weights: list[int], - salt: list[int], - version_key: int = settings.version_as_int, + subtensor: "Subtensor", + wallet: "Wallet", + netuid: int, + commit_hash: str, + uids: list[int], + weights: list[int], + salt: list[int], + version_key: int = settings.version_as_int, ): def send_command(command): client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) @@ -162,14 +162,17 @@ def send_command(command): client.send(command.encode()) client.close() - # TODO: Recalculate reveal interval with tempos curr_block = subtensor.get_current_block() - cr_interval = subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_interval - reveal_block = curr_block + cr_interval + blocks_until_next_epoch = subtensor.blocks_until_next_epoch(netuid=netuid) + subnet_tempo_blocks = subtensor.get_subnet_hyperparameters(netuid=netuid).tempo + epoch_start_block = curr_block + blocks_until_next_epoch + cr_periods = subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_periods + reveal_block = epoch_start_block + ((cr_periods - 1) * subnet_tempo_blocks) + 1 command = f'committed "{wallet.name}" "{wallet.path}" "{wallet.hotkey_str}" "{wallet.hotkey.ss58_address}" "{curr_block}" "{reveal_block}" "{commit_hash}" "{netuid}" "{uids}" "{weights}" "{salt}" "{version_key}"' send_command(command) + # Chain call for `reveal_weights_extrinsic` @ensure_connected def do_reveal_weights( @@ -328,3 +331,181 @@ def send_command(command): except Exception as e: command = f'revealed "{wallet.name}" "{wallet.path}" "{wallet.hotkey_str}" "{wallet.hotkey.ss58_address}" "{netuid}" "{uids}" "{weights}" "{salt}" "{version_key}"' send_command(command) + + +# Chain call for `batch_reveal_weights_extrinsic` +@ensure_connected +def do_batch_reveal_weights( + self: 
"Subtensor", + wallet: "Wallet", + netuid: int, + uids: list[list[int]], + values: list[list[int]], + salt: list[list[int]], + version_keys: list[int], + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, +) -> tuple[bool, Optional[dict]]: + """ + Internal method to send a batch transaction to the Bittensor blockchain, revealing the weights for a specific subnet. + This method constructs and submits the transaction, handling retries and blockchain communication. + + Args: + self (bittensor.core.subtensor.Subtensor): The Subtensor instance used for blockchain interaction. + wallet (bittensor_wallet.Wallet): The wallet associated with the neuron revealing the weights. + netuid (int): The unique identifier of the subnet. + uids (list[list[int]]): List of neuron UIDs for which weights are being revealed. + values (list[list[int]]): List of weight values corresponding to each UID. + salt (list[list[int]]): List of salt values corresponding to the hash function. + version_keys (list[int]): Version key for compatibility with the network. + wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. Defaults to False. + wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. Defaults to False. + + Returns: + tuple[bool, Optional[dict]]: A tuple containing a success flag and an optional error message. + + This method ensures that the weight revelation is securely recorded on the Bittensor blockchain, providing transparency and accountability for the neuron's weight distribution. 
+ """ + + @retry(delay=1, tries=3, backoff=2, max_delay=4) + def make_substrate_call_with_retry(): + call = self.substrate.compose_call( + call_module="SubtensorModule", + call_function="batch_reveal_weights", + call_params={ + "netuid": netuid, + "uids_list": uids, + "values_list": values, + "salts_list": salt, + "version_keys": version_keys, + }, + ) + extrinsic = self.substrate.create_signed_extrinsic( + call=call, + keypair=wallet.hotkey, + ) + response = submit_extrinsic( + substrate=self.substrate, + extrinsic=extrinsic, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + + if not wait_for_finalization and not wait_for_inclusion: + return True, None + + response.process_events() + if response.is_success: + return True, None + else: + return False, response.error_message + + return make_substrate_call_with_retry() + + +def batch_reveal_weights_extrinsic( + subtensor: "Subtensor", + wallet: "Wallet", + netuid: int, + uids: list[list[int]], + weights: list[list[int]], + salt: list[list[int]], + version_keys: list[int], + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, + prompt: bool = False, +) -> tuple[bool, str]: + """ + Reveals the weights for a specific subnet on the Bittensor blockchain using the provided wallet. + This function is a wrapper around the `do_batch_reveal_weights` method, handling user prompts and error messages. + + Args: + version_keys: + subtensor (bittensor.core.subtensor.Subtensor): The Subtensor instance used for blockchain interaction. + wallet (bittensor_wallet.Wallet): The wallet associated with the neuron revealing the weights. + netuid (int): The unique identifier of the subnet. + uids (list[list[int]]): List of neuron UID lists for which weights are being revealed in batch. + weights (list[list[int]]): List of weight value lists corresponding to each UID list. + salt (list[list[int]]): List of salt value lists corresponding to the hash function for each batch. 
+ version_keys (list[int]): List of version keys for compatibility with the network for each batch. + wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. Defaults to False. + wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. Defaults to False. + prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. Defaults to False. + + Returns: + tuple[bool, str]: ``True`` if the weight revelation is successful, ``False`` otherwise. And `msg`, a string + describing the success or potential error. + + This function provides a user-friendly interface for revealing weights in batch on the Bittensor blockchain, + ensuring proper error handling and user interaction when required. + """ + + if prompt and not Confirm.ask(f"Would you like to batch reveal weights?"): + return False, "User cancelled the operation." + + success, error_message = do_batch_reveal_weights( + self=subtensor, + wallet=wallet, + netuid=netuid, + uids=uids, + values=weights, + salt=salt, + version_keys=version_keys, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + + if success: + success_message = "Successfully batch revealed weights." + logging.info(success_message) + return True, success_message + else: + error_message = format_error_message(error_message) + logging.error(f"Failed batch reveal weights extrinsic: {error_message}") + return False, error_message + + +def batch_reveal_weights_process( + wallet: "Wallet", + netuid: int, + uids: list[list[int]], + weights: list[list[int]], + salt: list[list[int]], + version_keys: list[int] +): + """ + Processes a batch reveal of weights for a specific subnet on the Bittensor blockchain using the provided wallet. + This function generates the hash of weights for each batch and sends the corresponding command. 
+ + Args: + wallet (bittensor_wallet.Wallet): The wallet associated with the neuron revealing the weights. + netuid (int): The unique identifier of the subnet. + uids (list[list[int]]): List of neuron UID lists for which weights are being revealed in batch. + weights (list[list[int]]): List of weight value lists corresponding to each UID list. + salt (list[list[int]]): List of salt value lists corresponding to the hash function for each batch. + version_keys (list[int]): List of version keys for compatibility with the network for each batch. + + This function facilitates the batch reveal process, ensuring that the hashed weights are properly recorded and sent. + """ + + def send_command(command): + client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + client.connect(('127.0.0.1', 9949)) + client.send(command.encode()) + client.close() + + try: + for batch_uids, batch_weights, batch_salt, batch_version_key in zip(uids, weights, salt, version_keys): + # Generate the hash of the weights for each individual batch + commit_hash = generate_weight_hash( + address=wallet.hotkey.ss58_address, + netuid=netuid, + uids=batch_uids, + values=batch_weights, + salt=batch_salt, + version_key=batch_version_key, + ) + command = f'revealed_hash "{commit_hash}"' + send_command(command) + except Exception as e: + logging.error(f"Failed batch reveal weights subprocess: {e}") diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index a1333e28af..feae5b55d5 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -48,7 +48,8 @@ from bittensor.core.config import Config from bittensor.core.extrinsics.commit_weights import ( commit_weights_extrinsic, - reveal_weights_extrinsic, reveal_weights_process, commit_weights_process, + reveal_weights_extrinsic, reveal_weights_process, commit_weights_process, batch_reveal_weights_extrinsic, + batch_reveal_weights_process, ) from bittensor.core.extrinsics.prometheus import ( do_serve_prometheus, @@ -65,7 
+66,7 @@ transfer_extrinsic, ) from bittensor.core.metagraph import Metagraph -from bittensor.utils import torch +from bittensor.utils import torch, U64_MAX from bittensor.utils import u16_normalized_float, networking from bittensor.utils.balance import Balance from bittensor.utils.btlogging import logging @@ -1744,8 +1745,6 @@ def reveal_weights( ) if success: # remove from local db if called directly - # Call the subprocess using parameters (signal or something else) - # revealed(wallet.hotkey, wallet.name, wallet.path, netuid, uids, weights, salt) if subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS): reveal_weights_process( wallet=wallet, @@ -1779,8 +1778,83 @@ def blocks_until_next_epoch(self, netuid: int) -> int: # formula is (block_number + netuid + 1 ) % (tempo + 1) = 0 curr_block = self.get_current_block() tempo = self.get_subnet_hyperparameters(netuid=netuid).tempo + if tempo == 0: + return U64_MAX remainder = (curr_block + netuid + 1) % (tempo + 1) - return remainder + return tempo - remainder + + def batch_reveal_weights( + self, + wallet: "Wallet", + netuid: int, + uids: list[list[int]], + weights: list[list[int]], + salt: list[list[int]], + version_keys: list[int], + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, + prompt: bool = False, + max_retries: int = 5, + ) -> tuple[bool, str]: + """ + Reveals the weights for a specific subnet on the Bittensor blockchain using the provided wallet. + This action serves as a revelation of the neuron's previously committed weight distribution. + + Args: + wallet (bittensor_wallet.Wallet): The wallet associated with the neuron revealing the weights. + netuid (int): The unique identifier of the subnet. + uids (list[list[int]]): Nested list of neuron UIDs for which weights are being revealed. + weights (list[list[int]]): Nested list of weight values corresponding to each UID. + salt (list[list[int]]): Nested list of salt values corresponding to the hash function. 
+ version_keys (list[int]): List of version keys for compatibility with the network. Default is ``int representation of Bittensor version``. + wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``. + wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``. + prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``. + max_retries (int): The number of maximum attempts to reveal weights. Default is ``5``. + + Returns: + tuple[bool, str]: ``True`` if the batch weight revelation is successful, False otherwise. And `msg`, a string + value describing the success or potential error. + + This function allows neurons to reveal their previously committed weight distribution, ensuring transparency + and accountability within the Bittensor network. + """ + retries = 0 + success = False + message = "No attempt made. Perhaps it is too soon to reveal weights!" + + while retries < max_retries: + try: + success, message = batch_reveal_weights_extrinsic( + subtensor=self, + wallet=wallet, + netuid=netuid, + uids=uids, + weights=weights, + salt=salt, + version_keys=version_keys, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + prompt=prompt, + ) + if success: + # remove from local db if called directly + if subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS): + batch_reveal_weights_process( + wallet=wallet, + netuid=netuid, + uids=uids, + weights=weights, + salt=salt, + version_keys=version_keys + ) + return success, message + except Exception as e: + logging.error(f"Error revealing weights: {e}") + finally: + retries += 1 + + return success, message # Subnet 27 uses this method _do_serve_prometheus = do_serve_prometheus diff --git a/scripts/subprocess/commit_reveal.py b/scripts/subprocess/commit_reveal.py index b84a5f6a14..59bf8d39ba 100644 --- a/scripts/subprocess/commit_reveal.py +++ 
b/scripts/subprocess/commit_reveal.py @@ -204,11 +204,50 @@ def reveal(subtensor: Subtensor, commit: Commit) -> None: ) del wallet if success: + revealed_hash(commit.commit_hash) print(f"Reveal success for commit {commit}") else: print(f"Reveal failure for commit: {message}") +def reveal_batch(subtensor: Subtensor, commits: List[Commit]) -> None: + """ + Reveals the weights for a batch of commits to the subtensor network. + + Args: + subtensor (Subtensor): The subtensor network object. + commits (List[Commit]): A list of commit objects to be revealed. + + Returns: + None + """ + wallet = Wallet(name=commits[0].wallet_name, path=commits[0].wallet_path, hotkey=commits[0].wallet_hotkey_name) + netuid = commits[0].netuid + uids = [commit.uids for commit in commits] + weights = [commit.weights for commit in commits] + salt = [commit.salt for commit in commits] + version_keys = [commit.version_key for commit in commits] + + success, message = subtensor.batch_reveal_weights( + wallet=wallet, + netuid=netuid, + uids=uids, + weights=weights, + salt=salt, + version_keys=version_keys, + wait_for_inclusion=True, + wait_for_finalization=True + ) + del wallet + + if success: + for commit in commits: + revealed_hash(commit.commit_hash) + print(f"Reveal success for batch commit: {commit}") + else: + print(f"Reveal failure for batch commits: {message}") + + def revealed(wallet_name: str, wallet_path: str, wallet_hotkey_str: str, wallet_hotkey_ss58: str, netuid: int, uids: List[int], weights: List[int], salt: List[int], version_key: int) -> None: """ @@ -281,6 +320,33 @@ def revealed_hash(commit_hash: str) -> None: print(f"Error removing from table 'commits': {e}") +def revealed_batch_hash(commit_hashes: List[str]) -> None: + """ + Handles the revealed_batch_hash command by removing the corresponding commits from the database using the commit hashes. + + Args: + commit_hashes (List[str]): The list of commit hashes. 
+ + Returns: + None + """ + try: + with utils.DB(db_path=DB_PATH) as (conn, cursor): + for commit_hash in commit_hashes: + sql = "SELECT COUNT(*) FROM commits WHERE commit_hash=?" + cursor.execute(sql, (commit_hash,)) + count = cursor.fetchone()[0] + if count > 0: + delete_sql = "DELETE FROM commits WHERE commit_hash=?" + cursor.execute(delete_sql, (commit_hash,)) + conn.commit() + print(f"\nDeleted existing row with commit hash {commit_hash}") + else: + print(f"\nNo existing row found with commit hash {commit_hash}") + except Exception as e: + print(f"Error removing from table 'commits': {e}") + + def committed(commit: Commit) -> None: """ Commits a new commit object to the database. @@ -319,12 +385,32 @@ def check_reveal(subtensor: Subtensor) -> bool: return False if commits: - commits.sort(key=lambda commit: (commit.reveal_block, commit.commit_block)) - next_reveal = commits[0] curr_block = subtensor.get_current_block() - if next_reveal.reveal_block <= curr_block: - reveal(subtensor, next_reveal) - revealed_hash(next_reveal.commit_hash) + + # Filter for commits that are ready to be revealed + reveal_candidates = [commit for commit in commits if commit.reveal_block <= curr_block] + + if reveal_candidates: + # Group commits by wallet_hotkey_ss58 + grouped_reveals = {} + for commit in reveal_candidates: + key = commit.wallet_hotkey_ss58 + if key not in grouped_reveals: + grouped_reveals[key] = [] + grouped_reveals[key].append(commit) + + # Process each group separately + for hotkey_ss58, group in grouped_reveals.items(): + if len(group) > 1: + # Batch reveal if there are 2 or more reveal candidates + print("Revealing with batch") + reveal_batch(subtensor, group) + else: + # Otherwise, reveal individually + print("Revealing without batch") + reveal(subtensor, group[0]) + # for commit in group: + # revealed_hash(commit.commit_hash) return True return False diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py index 
3563a84f37..ad6c478e19 100644 --- a/tests/e2e_tests/test_commit_weights.py +++ b/tests/e2e_tests/test_commit_weights.py @@ -68,17 +68,17 @@ async def test_commit_and_reveal_weights(local_chain): assert sudo_set_hyperparameter_values( local_chain, alice_wallet, - call_function="sudo_set_commit_reveal_weights_interval", - call_params={"netuid": netuid, "interval": "370"}, + call_function="sudo_set_commit_reveal_weights_periods", + call_params={"netuid": netuid, "periods": "1"}, return_error_message=True, ) assert ( subtensor.get_subnet_hyperparameters( netuid=netuid - ).commit_reveal_weights_interval - == 370 - ), "Failed to set commit/reveal interval" + ).commit_reveal_periods + == 1 + ), "Failed to set commit/reveal periods" assert ( subtensor.weights_rate_limit(netuid=netuid) > 0 diff --git a/tests/e2e_tests/test_reveal_weights.py b/tests/e2e_tests/test_reveal_weights.py index d797b592c1..b1941ec878 100644 --- a/tests/e2e_tests/test_reveal_weights.py +++ b/tests/e2e_tests/test_reveal_weights.py @@ -1,4 +1,5 @@ import time +from time import sleep import numpy as np import pytest @@ -68,17 +69,16 @@ async def test_commit_and_reveal_weights(local_chain): assert sudo_set_hyperparameter_values( local_chain, alice_wallet, - call_function="sudo_set_commit_reveal_weights_interval", - call_params={"netuid": netuid, "interval": "370"}, + call_function="sudo_set_commit_reveal_weights_periods", + call_params={"netuid": netuid, "periods": "1"}, return_error_message=True, ) - subtensor = bittensor.Subtensor(network="ws://localhost:9945") assert ( subtensor.get_subnet_hyperparameters( netuid=netuid - ).commit_reveal_weights_interval - == 370 + ).commit_reveal_periods + == 1 ), "Failed to set commit/reveal interval" assert ( @@ -92,7 +92,7 @@ async def test_commit_and_reveal_weights(local_chain): call_params={"netuid": netuid, "weights_set_rate_limit": "0"}, return_error_message=True, ) - subtensor = bittensor.Subtensor(network="ws://localhost:9945") + assert ( 
subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 ), "Failed to set weights_rate_limit" @@ -132,20 +132,20 @@ async def test_commit_and_reveal_weights(local_chain): assert commit_block > 0, f"Invalid block number: {commit_block}" # Query the WeightCommitRevealInterval storage map - weight_commit_reveal_interval = subtensor.query_module( - module="SubtensorModule", name="WeightCommitRevealInterval", params=[netuid] + reveal_periods = subtensor.query_module( + module="SubtensorModule", name="RevealPeriodEpochs", params=[netuid] ) - interval = weight_commit_reveal_interval.value - assert interval > 0, "Invalid WeightCommitRevealInterval" + periods = reveal_periods.value + assert periods > 0, "Invalid RevealPeriodEpochs" # Verify that sqlite has entry assert commit_reveal_subprocess.is_table_empty("commits") is False # Wait until the reveal block range - await wait_interval(interval, subtensor) + await wait_interval(subtensor.get_subnet_hyperparameters(netuid=netuid).tempo, subtensor) # allow one more block to pass - time.sleep(40) + time.sleep(12) # Verify that subprocess did the reveal and deleted entry from local table assert commit_reveal_subprocess.is_table_empty("commits") @@ -217,17 +217,17 @@ async def test_set_and_reveal_weights(local_chain): assert sudo_set_hyperparameter_values( local_chain, alice_wallet, - call_function="sudo_set_commit_reveal_weights_interval", - call_params={"netuid": netuid, "interval": "370"}, + call_function="sudo_set_commit_reveal_weights_periods", + call_params={"netuid": netuid, "periods": "1"}, return_error_message=True, ) assert ( subtensor.get_subnet_hyperparameters( netuid=netuid - ).commit_reveal_weights_interval - == 370 - ), "Failed to set commit/reveal interval" + ).commit_reveal_periods + == 1 + ), "Failed to set commit/reveal period" assert ( subtensor.weights_rate_limit(netuid=netuid) > 0 @@ -240,6 +240,7 @@ async def test_set_and_reveal_weights(local_chain): call_params={"netuid": netuid, 
"weights_set_rate_limit": "0"}, return_error_message=True, ) + assert ( subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 ), "Failed to set weights_rate_limit" @@ -277,20 +278,20 @@ async def test_set_and_reveal_weights(local_chain): assert commit_block > 0, f"Invalid block number: {commit_block}" # Query the WeightCommitRevealInterval storage map - weight_commit_reveal_interval = subtensor.query_module( - module="SubtensorModule", name="WeightCommitRevealInterval", params=[netuid] + reveal_periods = subtensor.query_module( + module="SubtensorModule", name="RevealPeriodEpochs", params=[netuid] ) - interval = weight_commit_reveal_interval.value - assert interval > 0, "Invalid WeightCommitRevealInterval" + periods = reveal_periods.value + assert periods > 0, "Invalid RevealPeriodEpochs" # Verify that sqlite has entry assert commit_reveal_subprocess.is_table_empty("commits") is False # Wait until the reveal block range - await wait_interval(interval, subtensor) + await wait_interval(subtensor.get_subnet_hyperparameters(netuid=netuid).tempo, subtensor) # allow one more block to pass - time.sleep(30) + time.sleep(12) # Verify that subprocess did the reveal and deleted entry from local table assert commit_reveal_subprocess.is_table_empty("commits") @@ -309,3 +310,186 @@ async def test_set_and_reveal_weights(local_chain): weight_vals[0] == revealed_weights.value[0][1] ), f"Incorrect revealed weights. Expected: {weights[0]}, Actual: {revealed_weights.value[0][1]}" logging.info("✅ Passed test_commit_and_reveal_weights") + + +@pytest.mark.asyncio +async def test_set_and_reveal_batch_weights(local_chain): + """ + Tests the commit/reveal batch weights mechanism with a subprocess doing the reveal function + + Steps: + 1. Register a subnet through Alice + 2. Register Alice's neuron and add stake + 3. Enable commit-reveal mechanism on the subnet + 4. Lower the commit_reveal interval and rate limit + 5. Commit weights and verify + 6. 
Wait interval & see if subprocess did the reveal weights and verify + Raises: + AssertionError: If any of the checks or verifications fail + """ + netuid = 1 + logging.info("Testing test_set_and_reveal_weights") + # Register root as Alice + keypair, alice_wallet = setup_wallet("//Alice") + assert register_subnet(local_chain, alice_wallet), "Unable to register the subnet" + + # Verify subnet 1 created successfully + assert local_chain.query( + "SubtensorModule", "NetworksAdded", [1] + ).serialize(), "Subnet wasn't created successfully" + + assert register_neuron( + local_chain, alice_wallet, netuid + ), "Unable to register Alice as a neuron" + + # Stake to become to top neuron after the first epoch + add_stake(local_chain, alice_wallet, bittensor.Balance.from_tao(100_000)) + + # Enable commit_reveal on the subnet + assert sudo_set_hyperparameter_bool( + local_chain, + alice_wallet, + "sudo_set_commit_reveal_weights_enabled", + True, + netuid, + ), "Unable to enable commit reveal on the subnet" + + subtensor = bittensor.Subtensor(network="ws://localhost:9945", subprocess_sleep_interval=2) # Subprocess works with fast blocks + assert subtensor.get_subnet_hyperparameters( + netuid=netuid + ).commit_reveal_weights_enabled, "Failed to enable commit/reveal" + + # Lower the commit_reveal interval + assert sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_commit_reveal_weights_periods", + call_params={"netuid": netuid, "periods": "1"}, + return_error_message=True, + ) + + assert ( + subtensor.get_subnet_hyperparameters( + netuid=netuid + ).commit_reveal_periods + == 1 + ), "Failed to set commit/reveal periods" + + assert ( + subtensor.weights_rate_limit(netuid=netuid) > 0 + ), "Weights rate limit is below 0" + # Lower the rate limit + assert sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_weights_set_rate_limit", + call_params={"netuid": netuid, "weights_set_rate_limit": "0"}, + 
return_error_message=True, + ) + assert ( + subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 + ), "Failed to set weights_rate_limit" + assert subtensor.weights_rate_limit(netuid=netuid) == 0 + + # Commit-reveal values + uids = np.array([0], dtype=np.int64) + weights = np.array([0.1], dtype=np.float32) + weight_uids, weight_vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) + + # Assert no local CR processes in table + assert commit_reveal_subprocess.is_table_empty("commits") + + # Set weights with CR enabled + success, message = subtensor.set_weights( + alice_wallet, + netuid, + uids=weight_uids, + weights=weight_vals, + wait_for_inclusion=True, + wait_for_finalization=True, + ) + + time.sleep(3) + + # Commit-reveal values + uids = np.array([0], dtype=np.int64) + weights = np.array([0.2], dtype=np.float32) + weight_uids, weight_vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) + + # add second weights with CR enabled + success, message = subtensor.set_weights( + alice_wallet, + netuid, + uids=weight_uids, + weights=weight_vals, + wait_for_inclusion=True, + wait_for_finalization=True, + ) + + time.sleep(3) + + # Commit-reveal values + uids = np.array([0], dtype=np.int64) + weights = np.array([0.3], dtype=np.float32) + weight_uids, weight_vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) + + # add second weights with CR enabled + success, message = subtensor.set_weights( + alice_wallet, + netuid, + uids=weight_uids, + weights=weight_vals, + wait_for_inclusion=True, + wait_for_finalization=True, + ) + + weight_commits = subtensor.query_module( + module="SubtensorModule", + name="WeightCommits", + params=[netuid, alice_wallet.hotkey.ss58_address], + ) + + # Assert that the committed weights are set correctly + assert weight_commits.value is not None, "Weight commit not found in storage" + commit_hash, commit_block = weight_commits.value[0] + assert commit_block > 0, 
f"Invalid block number: {commit_block}" + + # Query the WeightCommitRevealInterval storage map + reveal_periods = subtensor.query_module( + module="SubtensorModule", name="RevealPeriodEpochs", params=[netuid] + ) + periods = reveal_periods.value + assert periods > 0, "Invalid RevealPeriodEpochs" + + # Verify that sqlite has entry + assert commit_reveal_subprocess.is_table_empty("commits") is False + + # Wait until the reveal block range + await wait_interval(subtensor.get_subnet_hyperparameters(netuid=netuid).tempo, subtensor) + + # allow one more block to pass + time.sleep(12) + + # Verify that subprocess did the reveal and deleted all entry from local table + assert commit_reveal_subprocess.is_table_empty("commits") + + # Query the Weights storage map + revealed_weights = subtensor.query_module( + module="SubtensorModule", + name="Weights", + params=[netuid, 0], # netuid and uid + ) + + # Assert that the revealed weights are set correctly + assert revealed_weights.value is not None, "Weight reveal not found in storage" + + assert ( + weight_vals[0] == revealed_weights.value[0][1] + ), f"Incorrect revealed weights. Expected: {weights[0]}, Actual: {revealed_weights.value[0][1]}" + logging.info("✅ Passed test_commit_and_reveal_weights") From 96f0af3f44a1376e50d37b550f13a7c3a247bece Mon Sep 17 00:00:00 2001 From: opendansor Date: Mon, 21 Oct 2024 15:33:11 -0700 Subject: [PATCH 13/58] Add chain hash consistency check and refactor commit reveal Introduce a function to verify local reveal list consistency with the Subtensor chain every 100 iterations. Refactor `check_reveal` and `reveal_candidates` to improve clarity and extract logic for retrieving all commits. Update docstrings and handle logs for better readability and debugging. 
--- bittensor/core/subtensor.py | 2 +- scripts/subprocess/commit_reveal.py | 118 ++++++++++++++++++++++++---- scripts/subprocess_utils.py | 58 ++++++++++++-- 3 files changed, 154 insertions(+), 24 deletions(-) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index feae5b55d5..b554664e8f 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -1777,7 +1777,7 @@ def blocks_until_next_epoch(self, netuid: int) -> int: """ # formula is (block_number + netuid + 1 ) % (tempo + 1) = 0 curr_block = self.get_current_block() - tempo = self.get_subnet_hyperparameters(netuid=netuid).tempo + tempo = self.get_subnet_hyperparameters(netuid=netuid).tempo # type: ignore if tempo == 0: return U64_MAX remainder = (curr_block + netuid + 1) % (tempo + 1) diff --git a/scripts/subprocess/commit_reveal.py b/scripts/subprocess/commit_reveal.py index 59bf8d39ba..576bda9c2b 100644 --- a/scripts/subprocess/commit_reveal.py +++ b/scripts/subprocess/commit_reveal.py @@ -9,7 +9,7 @@ from bittensor.core.subtensor import Subtensor from bittensor_wallet import Wallet from scripts import subprocess_utils as utils -from typing import List, Any, Dict, Tuple, Optional +from typing import List, Any, Dict, Optional # Path to the SQLite database DB_PATH = os.path.expanduser("~/.bittensor/bittensor.db") @@ -248,6 +248,57 @@ def reveal_batch(subtensor: Subtensor, commits: List[Commit]) -> None: print(f"Reveal failure for batch commits: {message}") +def chain_hash_check(subtensor: Subtensor) -> None: + """ + Perform a verification to check if the local reveal list is consistent with the chain. + + Args: + subtensor (Subtensor): The subtensor network object. 
+ + Returns: + None + """ + try: + # Retrieve all commits from the local database + commits = get_all_commits() + + # Group commits by wallet_hotkey_ss58 + commits_by_ss58 = {} + for commit in commits: + ss58 = commit.wallet_hotkey_ss58 + if ss58 not in commits_by_ss58: + commits_by_ss58[ss58] = [] + commits_by_ss58[ss58].append(commit) + + if commits_by_ss58: + for ss58, ss58_commits in commits_by_ss58.items(): + # Get a set of unique netuids from the commits + netuids = set(commit.netuid for commit in ss58_commits) + for netuid in netuids: + # Query the subtensor backend for commit hashes and blocks + response = subtensor.query_module( + module="SubtensorModule", + name="WeightCommits", + params=[netuid, ss58], + ) + + for commit_hash, commit_block in response.value: + # Check if any commit on Subtensor is absent in local database + if not any(c.commit_hash == commit_hash for c in ss58_commits): + print(f"There is a commit on Subtensor (hash: {commit_hash}) that we don't have locally.") + + # Check if any local commit is absent in Subtensor + local_commit_hashes = {c.commit_hash for c in ss58_commits} + subtensor_commit_hashes = {commit_hash for commit_hash, _ in response.value} + + for local_commit_hash in local_commit_hashes: + if local_commit_hash not in subtensor_commit_hashes: + print(f"There is a local commit (hash: {local_commit_hash}) that is not on Subtensor.") + revealed_hash(local_commit_hash) + except Exception as e: + print(f"Error during chain_hash_check: {e}") + + def revealed(wallet_name: str, wallet_path: str, wallet_hotkey_str: str, wallet_hotkey_ss58: str, netuid: int, uids: List[int], weights: List[int], salt: List[int], version_key: int) -> None: """ @@ -367,6 +418,17 @@ def committed(commit: Commit) -> None: print(f"Committed commit data: {commit_data}") +def get_all_commits() -> List[Commit]: + """ + Retrieves all commits from the database. + + Returns: + List[Commit]: A list of all commits in the database. 
+ """ + columns, rows = utils.read_table("commits") + return [Commit.from_dict(dict(zip(columns, commit))) for commit in rows] + + def check_reveal(subtensor: Subtensor) -> bool: """ Checks if there are any commits to reveal and performs the reveal if necessary. @@ -378,8 +440,7 @@ def check_reveal(subtensor: Subtensor) -> bool: bool: True if a commit was revealed, False otherwise. """ try: - columns, rows = utils.read_table("commits") - commits = [Commit.from_dict(dict(zip(columns, commit))) for commit in rows] + commits = get_all_commits() except Exception as e: print(f"Error reading table 'commits': {e}") return False @@ -389,11 +450,34 @@ def check_reveal(subtensor: Subtensor) -> bool: # Filter for commits that are ready to be revealed reveal_candidates = [commit for commit in commits if commit.reveal_block <= curr_block] + return len(reveal_candidates) > 0 + return False + + +def reveal_candidates(subtensor: Subtensor) -> None: + """ + Checks if there are any commits to reveal and performs the reveal if necessary. + + Args: + subtensor (Subtensor): The subtensor network object. + + Returns: + bool: True if a commit was revealed, False otherwise. 
+ """ + try: + commits = get_all_commits() + except Exception as e: + print(f"Error reading table 'commits': {e}") - if reveal_candidates: + if commits: + curr_block = subtensor.get_current_block() + + # Filter for commits that are ready to be revealed + ready_for_reveal = [commit for commit in commits if commit.reveal_block <= curr_block] + if ready_for_reveal: # Group commits by wallet_hotkey_ss58 grouped_reveals = {} - for commit in reveal_candidates: + for commit in ready_for_reveal: key = commit.wallet_hotkey_ss58 if key not in grouped_reveals: grouped_reveals[key] = [] @@ -403,16 +487,10 @@ def check_reveal(subtensor: Subtensor) -> bool: for hotkey_ss58, group in grouped_reveals.items(): if len(group) > 1: # Batch reveal if there are 2 or more reveal candidates - print("Revealing with batch") reveal_batch(subtensor, group) else: # Otherwise, reveal individually - print("Revealing without batch") reveal(subtensor, group[0]) - # for commit in group: - # revealed_hash(commit.commit_hash) - return True - return False def handle_client_connection(client_socket: socket.socket) -> None: @@ -510,23 +588,31 @@ def main(args: argparse.Namespace) -> None: Returns: None """ - print("Initializing database...") initialize_db() subtensor = Subtensor(network=args.network, subprocess_initialization=False) server_thread = threading.Thread(target=start_socket_server) server_thread.start() + counter = 0 # Initialize counter + while running: + counter += 1 + if check_reveal(subtensor=subtensor): - print(f"Revealing commit for block {subtensor.get_current_block()}") - else: - print(f"Nothing to reveal for block {subtensor.get_current_block()}") + reveal_candidates(subtensor=subtensor) + print(f"Revealing commit on block {subtensor.get_current_block()}") + + # Every 100th run, perform an additional check to verify reveal list alignment with the backend + if counter % 100 == 0: + chain_hash_check(subtensor=subtensor) + time.sleep(args.sleep_interval) if __name__ == "__main__": 
parser = argparse.ArgumentParser(description="Run the Bittensor commit-reveal subprocess script.") - parser.add_argument("--network", type=str, default="ws://localhost:9945", help="Subtensor network address") + parser.add_argument("--network", type=str, default="wss://entrypoint-finney.opentensor.ai:443", + help="Subtensor network address") parser.add_argument("--sleep-interval", type=float, default=12, help="Interval between block checks in seconds") args = parser.parse_args() main(args) diff --git a/scripts/subprocess_utils.py b/scripts/subprocess_utils.py index 6ba400c3b3..0d96caf9b5 100644 --- a/scripts/subprocess_utils.py +++ b/scripts/subprocess_utils.py @@ -11,7 +11,11 @@ def is_process_running(process_name: str) -> bool: - """Check if a process with a given name is currently running.""" + """Check if a process with a given name is currently running. + + :param process_name: Name of the process to check + :return: True if the process is running, False otherwise + """ for proc in psutil.process_iter(['pid', 'name', 'cmdline']): cmdline = proc.info['cmdline'] if cmdline and (process_name in proc.info['name'] or any(process_name in cmd for cmd in cmdline)): @@ -20,7 +24,11 @@ def is_process_running(process_name: str) -> bool: def get_process(process_name: str) -> Optional[int]: - """Check if a process with a given name is currently running, and return its PID if found.""" + """Check if a process with a given name is currently running, and return its PID if found. 
+ + :param process_name: Name of the process to check + :return: PID of the process if found, None otherwise + """ for proc in psutil.process_iter(['pid', 'name', 'cmdline']): cmdline = proc.info['cmdline'] if cmdline and (process_name in proc.info['name'] or any(process_name in cmd for cmd in cmdline)): @@ -28,8 +36,40 @@ def get_process(process_name: str) -> Optional[int]: return None -def start_commit_reveal_subprocess(network: Optional[str] = None, sleep_interval: Optional[float] = None): - """Start the commit reveal subprocess if not already running.""" +def read_commit_reveal_logs() -> None: + """Read and print the last 50 lines of logs from the log path. + + :return: None + """ + log_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "subprocess", "logs")) + stdout_path = os.path.join(log_path, STDOUT_LOG.lstrip("/")) + stderr_path = os.path.join(log_path, STDERR_LOG.lstrip("/")) + + def read_last_n_lines(file_path: str, n: int) -> list: + """Reads the last N lines from a file.""" + with open(file_path, 'r') as file: + return file.readlines()[-n:] + + if os.path.exists(stdout_path): + print("----- STDOUT LOG -----") + print(''.join(read_last_n_lines(stdout_path, 50))) + else: + print(f"STDOUT log file not found at {stdout_path}") + + if os.path.exists(stderr_path): + print("----- STDERR LOG -----") + print(''.join(read_last_n_lines(stderr_path, 50))) + else: + print(f"STDERR log file not found at {stderr_path}") + + +def start_commit_reveal_subprocess(network: Optional[str] = None, sleep_interval: Optional[float] = None) -> None: + """Start the commit reveal subprocess if not already running. 
+ + :param network: Network name if any, optional + :param sleep_interval: Sleep interval if any, optional + :return: None + """ log_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "subprocess", "logs")) script_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "subprocess", "commit_reveal.py")) project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) @@ -60,8 +100,11 @@ def start_commit_reveal_subprocess(network: Optional[str] = None, sleep_interval print(f"Subprocess '{PROCESS_NAME}' is already running.") -def stop_commit_reveal_subprocess(): - """Stop the commit reveal subprocess if it is running.""" +def stop_commit_reveal_subprocess() -> None: + """Stop the commit reveal subprocess if it is running. + + :return: None + """ pid = get_process(PROCESS_NAME) if pid is not None: @@ -89,7 +132,7 @@ def __init__( self.conn: Optional[sqlite3.Connection] = None self.row_factory = row_factory - def __enter__(self): + def __enter__(self) -> tuple[sqlite3.Connection, sqlite3.Cursor]: self.conn = sqlite3.connect(self.db_path) self.conn.row_factory = self.row_factory return self.conn, self.conn.cursor() @@ -134,6 +177,7 @@ def create_table(title: str, columns: list[tuple[str, str]], rows: list[list]) - def read_table(table_name: str, order_by: str = "") -> tuple[list, list]: """ Reads a table from a SQLite database, returning back a column names and rows as a tuple + :param table_name: the table name in the database :param order_by: the order of the columns in the table, optional :return: ([column names], [rows]) From baa42bfb87cb648bd26c5860b3f7a99ce79f948a Mon Sep 17 00:00:00 2001 From: opendansor Date: Mon, 21 Oct 2024 17:21:10 -0700 Subject: [PATCH 14/58] Change subprocess initialization flag and refactor socket handling. Subprocess initialization is now disabled by default. Reorganized socket server code with ThreadPoolExecutor and improved error handling. Removed redundant return type annotations and added docstrings. 
--- bittensor/core/extrinsics/commit_weights.py | 32 +++++ bittensor/core/subtensor.py | 2 +- scripts/subprocess/commit_reveal.py | 127 ++++++++------------ scripts/subprocess_utils.py | 77 +++++++----- tests/e2e_tests/conftest.py | 13 +- 5 files changed, 138 insertions(+), 113 deletions(-) diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index c243202578..7b1fdee2cb 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -156,6 +156,22 @@ def commit_weights_process( salt: list[int], version_key: int = settings.version_as_int, ): + """ + Lets the subprocess know what a commit was submitted to the chain. + + Args: + subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for blockchain interaction. + wallet (bittensor_wallet.Wallet): The wallet associated with the neuron committing the weights. + netuid (int): The unique identifier of the subnet. + commit_hash (str): The hash of the neuron's weights to be committed. + uids (list[int]): List of neuron UIDs for which weights are being committed. + weights (list[int]): List of weight values corresponding to each UID. + salt (list[int]): List of salt values for the hash function. + version_key (int): Version key for network compatibility (default is settings.version_as_int). + + The function calculates the necessary blocks until the next epoch and the reveal block, then the subprocess will + wait until the appropriate time to reveal the weights. + """ def send_command(command): client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.connect(('127.0.0.1', 9949)) @@ -311,6 +327,22 @@ def reveal_weights_process( salt: list[int], version_key: int = settings.version_as_int, ): + """ + Coordinates the process of revealing weights with the background subprocess. 
+ + This method generates a hash of the weights using the provided wallet and network + parameters, and sends a command to a local subprocess that this commit was revealed. + In case of any exception during hash generation, it sends a command with detailed information + including wallet details and weight parameters. + + Args: + wallet (bittensor_wallet.Wallet): The wallet associated with the neuron revealing the weights. + netuid (int): The unique identifier of the subnet. + uids (list[int]): List of neuron UIDs for which weights are being revealed. + weights (list[int]): List of weight values corresponding to each UID. + salt (list[int]): List of salt values corresponding to the hash function. + version_key (int): Version key for compatibility with the network. Defaults to `settings.version_as_int`. + """ def send_command(command): client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.connect(('127.0.0.1', 9949)) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index b554664e8f..b699eae8fa 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -140,7 +140,7 @@ def __init__( _mock: bool = False, log_verbose: bool = False, connection_timeout: int = 600, - subprocess_initialization: bool = True, + subprocess_initialization: bool = False, subprocess_sleep_interval: float = 12, ) -> None: """ diff --git a/scripts/subprocess/commit_reveal.py b/scripts/subprocess/commit_reveal.py index 576bda9c2b..c581919a03 100644 --- a/scripts/subprocess/commit_reveal.py +++ b/scripts/subprocess/commit_reveal.py @@ -6,6 +6,8 @@ import time import socket import threading +from concurrent.futures import ThreadPoolExecutor + from bittensor.core.subtensor import Subtensor from bittensor_wallet import Wallet from scripts import subprocess_utils as utils @@ -152,12 +154,9 @@ def is_table_empty(table_name: str) -> bool: return False -def initialize_db() -> None: +def initialize_db(): """ Initializes the database by creating the 
'commits' table if it does not exist. - - Returns: - None """ columns = [ ("wallet_hotkey_name", "TEXT"), @@ -180,16 +179,13 @@ def initialize_db() -> None: print("Table 'commits' already exists.") -def reveal(subtensor: Subtensor, commit: Commit) -> None: +def reveal(subtensor: Subtensor, commit: Commit): """ Reveals the weights for a commit to the subtensor network. Args: subtensor (Subtensor): The subtensor network object. commit (Commit): The commit object containing the data to be revealed. - - Returns: - None """ wallet = Wallet(name=commit.wallet_name, path=commit.wallet_path, hotkey=commit.wallet_hotkey_name) success, message = subtensor.reveal_weights( @@ -210,17 +206,18 @@ def reveal(subtensor: Subtensor, commit: Commit) -> None: print(f"Reveal failure for commit: {message}") -def reveal_batch(subtensor: Subtensor, commits: List[Commit]) -> None: +def reveal_batch(subtensor: Subtensor, commits: List[Commit]): """ Reveals the weights for a batch of commits to the subtensor network. Args: subtensor (Subtensor): The subtensor network object. commits (List[Commit]): A list of commit objects to be revealed. - - Returns: - None """ + if not commits: + print("reveal_batch has no commits to reveal.") + return + wallet = Wallet(name=commits[0].wallet_name, path=commits[0].wallet_path, hotkey=commits[0].wallet_hotkey_name) netuid = commits[0].netuid uids = [commit.uids for commit in commits] @@ -248,15 +245,12 @@ def reveal_batch(subtensor: Subtensor, commits: List[Commit]) -> None: print(f"Reveal failure for batch commits: {message}") -def chain_hash_check(subtensor: Subtensor) -> None: +def chain_hash_check(subtensor: Subtensor): """ Perform a verification to check if the local reveal list is consistent with the chain. Args: subtensor (Subtensor): The subtensor network object. 
- - Returns: - None """ try: # Retrieve all commits from the local database @@ -300,7 +294,7 @@ def chain_hash_check(subtensor: Subtensor) -> None: def revealed(wallet_name: str, wallet_path: str, wallet_hotkey_str: str, wallet_hotkey_ss58: str, netuid: int, - uids: List[int], weights: List[int], salt: List[int], version_key: int) -> None: + uids: List[int], weights: List[int], salt: List[int], version_key: int): """ Handles the revealed command by removing the corresponding commit from the database. @@ -314,9 +308,6 @@ def revealed(wallet_name: str, wallet_path: str, wallet_hotkey_str: str, wallet_ weights (List[int]): The list of weights. salt (List[int]): The salt used for the commit. version_key (int): The version key. - - Returns: - None """ try: with utils.DB(db_path=DB_PATH) as (conn, cursor): @@ -345,15 +336,12 @@ def revealed(wallet_name: str, wallet_path: str, wallet_hotkey_str: str, wallet_ print(f"Error removing from table 'commits': {e}") -def revealed_hash(commit_hash: str) -> None: +def revealed_hash(commit_hash: str): """ Handles the revealed_hash command by removing the corresponding commit from the database using the commit hash. Args: commit_hash (str): The commit hash. - - Returns: - None """ try: with utils.DB(db_path=DB_PATH) as (conn, cursor): @@ -371,15 +359,12 @@ def revealed_hash(commit_hash: str) -> None: print(f"Error removing from table 'commits': {e}") -def revealed_batch_hash(commit_hashes: List[str]) -> None: +def revealed_batch_hash(commit_hashes: List[str]): """ Handles the revealed_batch_hash command by removing the corresponding commits from the database using the commit hashes. Args: commit_hashes (List[str]): The list of commit hashes. 
- - Returns: - None """ try: with utils.DB(db_path=DB_PATH) as (conn, cursor): @@ -398,15 +383,12 @@ def revealed_batch_hash(commit_hashes: List[str]) -> None: print(f"Error removing from table 'commits': {e}") -def committed(commit: Commit) -> None: +def committed(commit: Commit): """ Commits a new commit object to the database. Args: commit (Commit): The commit object to save. - - Returns: - None """ with utils.DB(db_path=DB_PATH) as (conn, cursor): commit_data = commit.to_dict() @@ -454,15 +436,12 @@ def check_reveal(subtensor: Subtensor) -> bool: return False -def reveal_candidates(subtensor: Subtensor) -> None: +def reveal_candidates(subtensor: Subtensor): """ - Checks if there are any commits to reveal and performs the reveal if necessary. + Performs reveal on commits that are ready to be revealed. Args: subtensor (Subtensor): The subtensor network object. - - Returns: - bool: True if a commit was revealed, False otherwise. """ try: commits = get_all_commits() @@ -493,15 +472,11 @@ def reveal_candidates(subtensor: Subtensor) -> None: reveal(subtensor, group[0]) -def handle_client_connection(client_socket: socket.socket) -> None: +def handle_client_connection(client_socket: socket.socket): """ Handles incoming client connections for the socket server. - Args: client_socket (socket.socket): The client socket connection. 
- - Returns: - None """ try: while True: @@ -510,13 +485,14 @@ def handle_client_connection(client_socket: socket.socket) -> None: break args = shlex.split(request) command = args[0] - if command == 'revealed': - revealed(args[1], args[2], args[3], args[4], int(args[5]), json.loads(args[6]), json.loads(args[7]), - json.loads(args[8]), int(args[9])) - elif command == 'revealed_hash': - revealed_hash(args[1]) - elif command == 'committed': - commit = Commit( + commands = { + 'revealed': lambda: revealed( + args[1], args[2], args[3], args[4], int(args[5]), + json.loads(args[6]), json.loads(args[7]), + json.loads(args[8]), int(args[9]) + ), + 'revealed_hash': lambda: revealed_hash(args[1]), + 'committed': lambda: committed(Commit( wallet_hotkey_name=args[3], wallet_hotkey_ss58=args[4], wallet_name=args[1], @@ -529,64 +505,67 @@ def handle_client_connection(client_socket: socket.socket) -> None: weights=json.loads(args[10]), salt=json.loads(args[11]), version_key=int(args[12]) - ) - committed(commit) - elif command == 'terminate': - terminate_process(None, None) + )), + 'terminate': lambda: terminate_process(None, None) + } + if command in commands: + try: + commands[command]() + except (IndexError, ValueError, json.JSONDecodeError) as e: + print(f"Error in processing command {command}: {e}") else: - print("Command not recognized") + print(f"Command not recognized: {command}") + except socket.error as e: + print(f"Socket error: {e}") except Exception as e: print(f"Error: {e}") finally: client_socket.close() -def start_socket_server() -> None: +def start_socket_server(): """ Starts the socket server to listen for incoming connections. 
- - Returns: - None """ server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind(('127.0.0.1', 9949)) server.listen(5) + server.settimeout(2) # Set timeout for any incoming requests to 2 seconds print('Listening on port 9949...') - while running: - client_sock, addr = server.accept() - client_handler = threading.Thread( - target=handle_client_connection, - args=(client_sock,) - ) - client_handler.start() + + with ThreadPoolExecutor(max_workers=10) as executor: # limit of workers amount + while running: + try: + client_sock, addr = server.accept() + print(f"Accepted connection from {addr[0]}.") + executor.submit(handle_client_connection, client_sock) + except socket.timeout: + print("Socket timeout, continuing to listen...") + except Exception as e: + print(f"Error accepting connection: {e}.") + break -def terminate_process(signal_number: Optional[int], frame: Optional[Any]) -> None: +def terminate_process(signal_number: Optional[int], frame: Optional[Any]): """ Terminates the process gracefully. Args: signal_number (Optional[int]): The signal number causing the termination. frame (Optional[Any]): The current stack frame. - - Returns: - None """ global running - print(f"Terminating process with signal {signal_number}") + print(f"Terminating process with signal {signal_number} and/or frame {frame}") running = False sys.exit(0) -def main(args: argparse.Namespace) -> None: +def main(args: argparse.Namespace): """ The main function to run the Bittensor commit-reveal subprocess script. Args: args (argparse.Namespace): The command-line arguments. 
- - Returns: - None """ initialize_db() subtensor = Subtensor(network=args.network, subprocess_initialization=False) diff --git a/scripts/subprocess_utils.py b/scripts/subprocess_utils.py index 0d96caf9b5..762b9bd9cb 100644 --- a/scripts/subprocess_utils.py +++ b/scripts/subprocess_utils.py @@ -11,10 +11,14 @@ def is_process_running(process_name: str) -> bool: - """Check if a process with a given name is currently running. + """ + Check if a process with a given name is currently running. + + Args: + process_name (str): Name of the process to check. - :param process_name: Name of the process to check - :return: True if the process is running, False otherwise + Returns: + bool: True if the process is running, False otherwise. """ for proc in psutil.process_iter(['pid', 'name', 'cmdline']): cmdline = proc.info['cmdline'] @@ -24,10 +28,14 @@ def is_process_running(process_name: str) -> bool: def get_process(process_name: str) -> Optional[int]: - """Check if a process with a given name is currently running, and return its PID if found. + """ + Check if a process with a given name is currently running, and return its PID if found. - :param process_name: Name of the process to check - :return: PID of the process if found, None otherwise + Args: + process_name (str): Name of the process to check. + + Returns: + Optional[int]: PID of the process if found, None otherwise. """ for proc in psutil.process_iter(['pid', 'name', 'cmdline']): cmdline = proc.info['cmdline'] @@ -36,10 +44,9 @@ def get_process(process_name: str) -> Optional[int]: return None -def read_commit_reveal_logs() -> None: - """Read and print the last 50 lines of logs from the log path. - - :return: None +def read_commit_reveal_logs(): + """ + Read and print the last 50 lines of logs from the log path. 
""" log_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "subprocess", "logs")) stdout_path = os.path.join(log_path, STDOUT_LOG.lstrip("/")) @@ -63,12 +70,13 @@ def read_last_n_lines(file_path: str, n: int) -> list: print(f"STDERR log file not found at {stderr_path}") -def start_commit_reveal_subprocess(network: Optional[str] = None, sleep_interval: Optional[float] = None) -> None: - """Start the commit reveal subprocess if not already running. +def start_commit_reveal_subprocess(network: Optional[str] = None, sleep_interval: Optional[float] = None): + """ + Start the commit reveal subprocess if not already running. - :param network: Network name if any, optional - :param sleep_interval: Sleep interval if any, optional - :return: None + Args: + network (Optional[str]): Network name if any, optional. + sleep_interval (Optional[float]): Sleep interval if any, optional. """ log_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "subprocess", "logs")) script_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "subprocess", "commit_reveal.py")) @@ -100,11 +108,10 @@ def start_commit_reveal_subprocess(network: Optional[str] = None, sleep_interval print(f"Subprocess '{PROCESS_NAME}' is already running.") -def stop_commit_reveal_subprocess() -> None: - """Stop the commit reveal subprocess if it is running. - - :return: None +def stop_commit_reveal_subprocess(): """ + Stop the commit reveal subprocess if it is running. + """ pid = get_process(PROCESS_NAME) if pid is not None: @@ -142,14 +149,14 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.conn.close() -def create_table(title: str, columns: list[tuple[str, str]], rows: list[list]) -> None: +def create_table(title: str, columns: list[tuple[str, str]], rows: list[list]): """ Creates and populates the rows of a table in the SQLite database. - :param title: title of the table - :param columns: [(column name, column type), ...] - :param rows: [[element, element, ...], ...] 
- :return: None + Args: + title (str): title of the table. + columns (list[tuple[str, str]]): List of tuples where each tuple contains column name and column type. + rows (list[list]): List of lists where each sublist contains elements representing a row. """ blob_cols = [] for idx, (_, col_type) in enumerate(columns): @@ -176,17 +183,27 @@ def create_table(title: str, columns: list[tuple[str, str]], rows: list[list]) - def read_table(table_name: str, order_by: str = "") -> tuple[list, list]: """ - Reads a table from a SQLite database, returning back a column names and rows as a tuple + Reads a table from a SQLite database, returning back a column names and rows. - :param table_name: the table name in the database - :param order_by: the order of the columns in the table, optional - :return: ([column names], [rows]) + Args: + table_name (str): The table name in the database. + order_by (str): The order of the columns in the table, optional. + + Returns: + tuple[list, list]: A tuple containing a list of column names and a list of rows. 
""" with DB() as (conn, cursor): cursor.execute(f"PRAGMA table_info({table_name})") columns_info = cursor.fetchall() - column_names = [info[1] for info in columns_info] - column_types = [info[2] for info in columns_info] + column_names = [] + column_types = [] + for info in columns_info: + try: + column_names.append(info[1]) + column_types.append(info[2]) + except IndexError: + print(f"Error retrieving column info: {info}") + cursor.execute(f"SELECT * FROM {table_name} {order_by}") rows = cursor.fetchall() blob_cols = [] diff --git a/tests/e2e_tests/conftest.py b/tests/e2e_tests/conftest.py index cdde3f1ff1..d005ea8501 100644 --- a/tests/e2e_tests/conftest.py +++ b/tests/e2e_tests/conftest.py @@ -1,4 +1,3 @@ -import logging import os import re import shlex @@ -45,11 +44,10 @@ def local_chain(request): # Determine the port to check based on `param` port = 9945 # Default port if `param` is None - # TODO: uncomment templates when done # Always perform template installation - # logging.info("Downloading and installing neuron templates from GitHub") - # templates_dir = clone_or_update_templates() - # install_templates(templates_dir) + logging.info("Downloading and installing neuron templates from GitHub") + templates_dir = clone_or_update_templates() + install_templates(templates_dir) already_running = False if is_chain_running(port): @@ -96,9 +94,8 @@ def wait_for_node_start(process, pattern): # Ensure the process has terminated process.wait() - # TODO: uncomment templates when done - # logging.info("Uninstalling neuron templates") - # uninstall_templates(templates_dir) + logging.info("Uninstalling neuron templates") + uninstall_templates(templates_dir) # kill subprocess if its running subprocess_utils.stop_commit_reveal_subprocess() From 86b07f2d1ce06c4a06e492898cbc71e6f60c3e36 Mon Sep 17 00:00:00 2001 From: opendansor Date: Mon, 21 Oct 2024 17:24:33 -0700 Subject: [PATCH 15/58] Ruff --- bittensor/core/extrinsics/commit_weights.py | 40 ++-- 
bittensor/core/extrinsics/set_weights.py | 19 +- bittensor/core/subtensor.py | 193 +++++++++---------- scripts/subprocess/commit_reveal.py | 195 ++++++++++++++------ scripts/subprocess_utils.py | 62 ++++--- tests/e2e_tests/test_commit_weights.py | 9 +- tests/e2e_tests/test_reveal_weights.py | 57 +++--- 7 files changed, 345 insertions(+), 230 deletions(-) diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index 7b1fdee2cb..617c8775fa 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -172,9 +172,10 @@ def commit_weights_process( The function calculates the necessary blocks until the next epoch and the reveal block, then the subprocess will wait until the appropriate time to reveal the weights. """ + def send_command(command): client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - client.connect(('127.0.0.1', 9949)) + client.connect(("127.0.0.1", 9949)) client.send(command.encode()) client.close() @@ -182,7 +183,9 @@ def send_command(command): blocks_until_next_epoch = subtensor.blocks_until_next_epoch(netuid=netuid) subnet_tempo_blocks = subtensor.get_subnet_hyperparameters(netuid=netuid).tempo epoch_start_block = curr_block + blocks_until_next_epoch - cr_periods = subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_periods + cr_periods = subtensor.get_subnet_hyperparameters( + netuid=netuid + ).commit_reveal_periods reveal_block = epoch_start_block + ((cr_periods - 1) * subnet_tempo_blocks) + 1 command = f'committed "{wallet.name}" "{wallet.path}" "{wallet.hotkey_str}" "{wallet.hotkey.ss58_address}" "{curr_block}" "{reveal_block}" "{commit_hash}" "{netuid}" "{uids}" "{weights}" "{salt}" "{version_key}"' @@ -320,21 +323,21 @@ def reveal_weights_extrinsic( def reveal_weights_process( - wallet: "Wallet", - netuid: int, - uids: list[int], - weights: list[int], - salt: list[int], - version_key: int = settings.version_as_int, + wallet: 
"Wallet", + netuid: int, + uids: list[int], + weights: list[int], + salt: list[int], + version_key: int = settings.version_as_int, ): """ Coordinates the process of revealing weights with the background subprocess. - - This method generates a hash of the weights using the provided wallet and network - parameters, and sends a command to a local subprocess that this commit was revealed. - In case of any exception during hash generation, it sends a command with detailed information + + This method generates a hash of the weights using the provided wallet and network + parameters, and sends a command to a local subprocess that this commit was revealed. + In case of any exception during hash generation, it sends a command with detailed information including wallet details and weight parameters. - + Args: wallet (bittensor_wallet.Wallet): The wallet associated with the neuron revealing the weights. netuid (int): The unique identifier of the subnet. @@ -343,9 +346,10 @@ def reveal_weights_process( salt (list[int]): List of salt values corresponding to the hash function. version_key (int): Version key for compatibility with the network. Defaults to `settings.version_as_int`. """ + def send_command(command): client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - client.connect(('127.0.0.1', 9949)) + client.connect(("127.0.0.1", 9949)) client.send(command.encode()) client.close() @@ -503,7 +507,7 @@ def batch_reveal_weights_process( uids: list[list[int]], weights: list[list[int]], salt: list[list[int]], - version_keys: list[int] + version_keys: list[int], ): """ Processes a batch reveal of weights for a specific subnet on the Bittensor blockchain using the provided wallet. 
@@ -522,12 +526,14 @@ def batch_reveal_weights_process( def send_command(command): client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - client.connect(('127.0.0.1', 9949)) + client.connect(("127.0.0.1", 9949)) client.send(command.encode()) client.close() try: - for batch_uids, batch_weights, batch_salt, batch_version_key in zip(uids, weights, salt, version_keys): + for batch_uids, batch_weights, batch_salt, batch_version_key in zip( + uids, weights, salt, version_keys + ): # Generate the hash of the weights for each individual batch commit_hash = generate_weight_hash( address=wallet.hotkey.ss58_address, diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index 6dccf08cba..ea954af066 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -133,20 +133,22 @@ def set_weights_extrinsic( tuple[bool, str]: A tuple containing a success flag and an optional response message. """ - if subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_enabled: + if subtensor.get_subnet_hyperparameters( + netuid=netuid + ).commit_reveal_weights_enabled: # if cr is enabled, commit instead of setting the weights. salt = [random.randint(0, 350) for _ in range(8)] # Ask before moving on. if prompt: if not Confirm.ask( - f"Do you want to commit weights:\n[bold white] weights: {weights}\n" - f"uids: {uids}[/bold white ]?" + f"Do you want to commit weights:\n[bold white] weights: {weights}\n" + f"uids: {uids}[/bold white ]?" ): return False, "Prompt refused." with bt_console.status( - f":satellite: Committing weights on [white]{subtensor.network}[/white] ..." + f":satellite: Committing weights on [white]{subtensor.network}[/white] ..." ): try: success, message = subtensor.commit_weights( @@ -163,7 +165,9 @@ def set_weights_extrinsic( return True, "Not waiting for finalization or inclusion." 
if success is True: - bt_console.print(":white_heavy_check_mark: [green]Finalized[/green]") + bt_console.print( + ":white_heavy_check_mark: [green]Finalized[/green]" + ) logging.success( msg=str(success), prefix="Committed weights", @@ -207,7 +211,6 @@ def set_weights_extrinsic( with bt_console.status( f":satellite: Setting weights on [white]{subtensor.network}[/white] ..." ): - try: success, error_message = do_set_weights( self=subtensor, @@ -224,7 +227,9 @@ def set_weights_extrinsic( return True, "Not waiting for finalization or inclusion." if success is True: - bt_console.print(":white_heavy_check_mark: [green]Finalized[/green]") + bt_console.print( + ":white_heavy_check_mark: [green]Finalized[/green]" + ) logging.success( msg=str(success), prefix="Set weights", diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index b699eae8fa..950b02ad64 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -48,7 +48,10 @@ from bittensor.core.config import Config from bittensor.core.extrinsics.commit_weights import ( commit_weights_extrinsic, - reveal_weights_extrinsic, reveal_weights_process, commit_weights_process, batch_reveal_weights_extrinsic, + reveal_weights_extrinsic, + reveal_weights_process, + commit_weights_process, + batch_reveal_weights_extrinsic, batch_reveal_weights_process, ) from bittensor.core.extrinsics.prometheus import ( @@ -134,14 +137,14 @@ class Subtensor: """ def __init__( - self, - network: Optional[str] = None, - config: Optional["Config"] = None, - _mock: bool = False, - log_verbose: bool = False, - connection_timeout: int = 600, - subprocess_initialization: bool = False, - subprocess_sleep_interval: float = 12, + self, + network: Optional[str] = None, + config: Optional["Config"] = None, + _mock: bool = False, + log_verbose: bool = False, + connection_timeout: int = 600, + subprocess_initialization: bool = False, + subprocess_sleep_interval: float = 12, ) -> None: """ Initializes a Subtensor interface 
for interacting with the Bittensor blockchain. @@ -175,8 +178,8 @@ def __init__( ) if ( - self.network == "finney" - or self.chain_endpoint == settings.FINNEY_ENTRYPOINT + self.network == "finney" + or self.chain_endpoint == settings.FINNEY_ENTRYPOINT ) and log_verbose: logging.info( f"You are connecting to {self.network} network with endpoint {self.chain_endpoint}." @@ -191,7 +194,9 @@ def __init__( ) if subprocess_initialization: - subprocess_utils.start_commit_reveal_subprocess(network=network, sleep_interval=subprocess_sleep_interval) + subprocess_utils.start_commit_reveal_subprocess( + network=network, sleep_interval=subprocess_sleep_interval + ) self.log_verbose = log_verbose self._connection_timeout = connection_timeout @@ -391,9 +396,9 @@ def add_args(cls, parser: "argparse.ArgumentParser", prefix: Optional[str] = Non # Inner private functions @networking.ensure_connected def _encode_params( - self, - call_definition: list["ParamWithTypes"], - params: Union[list[Any], dict[str, Any]], + self, + call_definition: list["ParamWithTypes"], + params: Union[list[Any], dict[str, Any]], ) -> str: """Returns a hex encoded string of the params using their types.""" param_data = scalecodec.ScaleBytes(b"") @@ -411,7 +416,7 @@ def _encode_params( return param_data.to_hex() def _get_hyperparameter( - self, param_name: str, netuid: int, block: Optional[int] = None + self, param_name: str, netuid: int, block: Optional[int] = None ) -> Optional[Any]: """ Retrieves a specified hyperparameter for a specific subnet. @@ -436,7 +441,7 @@ def _get_hyperparameter( # Calls methods @networking.ensure_connected def query_subtensor( - self, name: str, block: Optional[int] = None, params: Optional[list] = None + self, name: str, block: Optional[int] = None, params: Optional[list] = None ) -> "ScaleType": """ Queries named storage from the Subtensor module on the Bittensor blockchain. 
This function is used to retrieve specific data or parameters from the blockchain, such as stake, rank, or other neuron-specific attributes. @@ -467,7 +472,7 @@ def make_substrate_call_with_retry() -> "ScaleType": @networking.ensure_connected def query_map_subtensor( - self, name: str, block: Optional[int] = None, params: Optional[list] = None + self, name: str, block: Optional[int] = None, params: Optional[list] = None ) -> "QueryMapResult": """ Queries map storage from the Subtensor module on the Bittensor blockchain. This function is designed to retrieve a map-like data structure, which can include various neuron-specific details or network-wide attributes. @@ -497,11 +502,11 @@ def make_substrate_call_with_retry(): return make_substrate_call_with_retry() def query_runtime_api( - self, - runtime_api: str, - method: str, - params: Optional[Union[list[int], dict[str, int]]], - block: Optional[int] = None, + self, + runtime_api: str, + method: str, + params: Optional[Union[list[int], dict[str, int]]], + block: Optional[int] = None, ) -> Optional[str]: """ Queries the runtime API of the Bittensor blockchain, providing a way to interact with the underlying runtime and retrieve data encoded in Scale Bytes format. This function is essential for advanced users who need to interact with specific runtime methods and decode complex data types. @@ -550,7 +555,7 @@ def query_runtime_api( @networking.ensure_connected def state_call( - self, method: str, data: str, block: Optional[int] = None + self, method: str, data: str, block: Optional[int] = None ) -> dict[Any, Any]: """ Makes a state call to the Bittensor blockchain, allowing for direct queries of the blockchain's state. This function is typically used for advanced queries that require specific method calls and data inputs. 
@@ -578,11 +583,11 @@ def make_substrate_call_with_retry() -> dict[Any, Any]: @networking.ensure_connected def query_map( - self, - module: str, - name: str, - block: Optional[int] = None, - params: Optional[list] = None, + self, + module: str, + name: str, + block: Optional[int] = None, + params: Optional[list] = None, ) -> "QueryMapResult": """ Queries map storage from any module on the Bittensor blockchain. This function retrieves data structures that represent key-value mappings, essential for accessing complex and structured data within the blockchain modules. @@ -614,7 +619,7 @@ def make_substrate_call_with_retry() -> "QueryMapResult": @networking.ensure_connected def query_constant( - self, module_name: str, constant_name: str, block: Optional[int] = None + self, module_name: str, constant_name: str, block: Optional[int] = None ) -> Optional["ScaleType"]: """ Retrieves a constant from the specified module on the Bittensor blockchain. This function is used to access fixed parameters or values defined within the blockchain's modules, which are essential for understanding the network's configuration and rules. @@ -644,11 +649,11 @@ def make_substrate_call_with_retry(): @networking.ensure_connected def query_module( - self, - module: str, - name: str, - block: Optional[int] = None, - params: Optional[list] = None, + self, + module: str, + name: str, + block: Optional[int] = None, + params: Optional[list] = None, ) -> "ScaleType": """ Queries any module storage on the Bittensor blockchain with the specified parameters and block number. This function is a generic query interface that allows for flexible and diverse data retrieval from various blockchain modules. 
@@ -680,7 +685,7 @@ def make_substrate_call_with_retry() -> "ScaleType": # Common subtensor methods def metagraph( - self, netuid: int, lite: bool = True, block: Optional[int] = None + self, netuid: int, lite: bool = True, block: Optional[int] = None ) -> "Metagraph": # type: ignore """ Returns a synced metagraph for a specified subnet within the Bittensor network. The metagraph represents the network's structure, including neuron connections and interactions. @@ -704,7 +709,7 @@ def metagraph( @staticmethod def determine_chain_endpoint_and_network( - network: str, + network: str, ) -> tuple[Optional[str], Optional[str]]: """Determines the chain endpoint and network from the passed network or chain_endpoint. @@ -729,18 +734,18 @@ def determine_chain_endpoint_and_network( return network, settings.ARCHIVE_ENTRYPOINT else: if ( - network == settings.FINNEY_ENTRYPOINT - or "entrypoint-finney.opentensor.ai" in network + network == settings.FINNEY_ENTRYPOINT + or "entrypoint-finney.opentensor.ai" in network ): return "finney", settings.FINNEY_ENTRYPOINT elif ( - network == settings.FINNEY_TEST_ENTRYPOINT - or "test.finney.opentensor.ai" in network + network == settings.FINNEY_TEST_ENTRYPOINT + or "test.finney.opentensor.ai" in network ): return "test", settings.FINNEY_TEST_ENTRYPOINT elif ( - network == settings.ARCHIVE_ENTRYPOINT - or "archive.chain.opentensor.ai" in network + network == settings.ARCHIVE_ENTRYPOINT + or "archive.chain.opentensor.ai" in network ): return "archive", settings.ARCHIVE_ENTRYPOINT elif "127.0.0.1" in network or "localhost" in network: @@ -750,7 +755,7 @@ def determine_chain_endpoint_and_network( return None, None def get_netuids_for_hotkey( - self, hotkey_ss58: str, block: Optional[int] = None + self, hotkey_ss58: str, block: Optional[int] = None ) -> list[int]: """ Retrieves a list of subnet UIDs (netuids) for which a given hotkey is a member. 
This function identifies the specific subnets within the Bittensor network where the neuron associated with the hotkey is active. @@ -787,7 +792,7 @@ def make_substrate_call_with_retry(): return make_substrate_call_with_retry() def is_hotkey_registered_any( - self, hotkey_ss58: str, block: Optional[int] = None + self, hotkey_ss58: str, block: Optional[int] = None ) -> bool: """ Checks if a neuron's hotkey is registered on any subnet within the Bittensor network. @@ -804,7 +809,7 @@ def is_hotkey_registered_any( return len(self.get_netuids_for_hotkey(hotkey_ss58, block)) > 0 def is_hotkey_registered_on_subnet( - self, hotkey_ss58: str, netuid: int, block: Optional[int] = None + self, hotkey_ss58: str, netuid: int, block: Optional[int] = None ) -> bool: """ Checks if a neuron's hotkey is registered on a specific subnet within the Bittensor network. @@ -822,10 +827,10 @@ def is_hotkey_registered_on_subnet( return self.get_uid_for_hotkey_on_subnet(hotkey_ss58, netuid, block) is not None def is_hotkey_registered( - self, - hotkey_ss58: str, - netuid: Optional[int] = None, - block: Optional[int] = None, + self, + hotkey_ss58: str, + netuid: Optional[int] = None, + block: Optional[int] = None, ) -> bool: """ Determines whether a given hotkey (public key) is registered in the Bittensor network, either globally across any subnet or specifically on a specified subnet. This function checks the registration status of a neuron identified by its hotkey, which is crucial for validating its participation and activities within the network. 
@@ -847,16 +852,16 @@ def is_hotkey_registered( # Not used in Bittensor, but is actively used by the community in almost all subnets def set_weights( - self, - wallet: "Wallet", - netuid: int, - uids: Union[NDArray[np.int64], "torch.LongTensor", list], - weights: Union[NDArray[np.float32], "torch.FloatTensor", list], - version_key: int = settings.version_as_int, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = False, - prompt: bool = False, - max_retries: int = 5, + self, + wallet: "Wallet", + netuid: int, + uids: Union[NDArray[np.int64], "torch.LongTensor", list], + weights: Union[NDArray[np.float32], "torch.FloatTensor", list], + version_key: int = settings.version_as_int, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, + prompt: bool = False, + max_retries: int = 5, ) -> tuple[bool, str]: """ Sets the inter-neuronal weights for the specified neuron. This process involves specifying the influence or trust a neuron places on other neurons in the network, which is a fundamental aspect of Bittensor's decentralized learning architecture. @@ -909,11 +914,11 @@ def set_weights( return success, message def serve_axon( - self, - netuid: int, - axon: "Axon", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, + self, + netuid: int, + axon: "Axon", + wait_for_inclusion: bool = False, + wait_for_finalization: bool = True, ) -> bool: """ Registers an ``Axon`` serving endpoint on the Bittensor network for a specific neuron. This function is used to set up the Axon, a key component of a neuron that handles incoming queries and data processing tasks. 
@@ -1017,13 +1022,13 @@ def subnetwork_n(self, netuid: int, block: Optional[int] = None) -> Optional[int # Community uses this method def transfer( - self, - wallet: "Wallet", - dest: str, - amount: Union["Balance", float], - wait_for_inclusion: bool = True, - wait_for_finalization: bool = False, - prompt: bool = False, + self, + wallet: "Wallet", + dest: str, + amount: Union["Balance", float], + wait_for_inclusion: bool = True, + wait_for_finalization: bool = False, + prompt: bool = False, ) -> bool: """ Executes a transfer of funds from the provided wallet to the specified destination address. This function is used to move TAO tokens within the Bittensor network, facilitating transactions between neurons. @@ -1053,7 +1058,7 @@ def transfer( # Community uses this method via `bittensor.api.extrinsics.prometheus.prometheus_extrinsic` def get_neuron_for_pubkey_and_subnet( - self, hotkey_ss58: str, netuid: int, block: Optional[int] = None + self, hotkey_ss58: str, netuid: int, block: Optional[int] = None ) -> Optional["NeuronInfo"]: """ Retrieves information about a neuron based on its public key (hotkey SS58 address) and the specific subnet UID (netuid). This function provides detailed neuron information for a particular subnet within the Bittensor network. @@ -1076,7 +1081,7 @@ def get_neuron_for_pubkey_and_subnet( @networking.ensure_connected def neuron_for_uid( - self, uid: Optional[int], netuid: int, block: Optional[int] = None + self, uid: Optional[int], netuid: int, block: Optional[int] = None ) -> "NeuronInfo": """ Retrieves detailed information about a specific neuron identified by its unique identifier (UID) within a specified subnet (netuid) of the Bittensor network. This function provides a comprehensive view of a neuron's attributes, including its stake, rank, and operational status. 
@@ -1114,12 +1119,12 @@ def make_substrate_call_with_retry(): # Community uses this method def serve_prometheus( - self, - wallet: "Wallet", - port: int, - netuid: int, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, + self, + wallet: "Wallet", + port: int, + netuid: int, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = True, ) -> bool: """ Serves Prometheus metrics by submitting an extrinsic to a blockchain network via the specified wallet. The function allows configuring whether to wait for the transaction's inclusion in a block and its finalization. @@ -1145,7 +1150,7 @@ def serve_prometheus( # Community uses this method def get_subnet_hyperparameters( - self, netuid: int, block: Optional[int] = None + self, netuid: int, block: Optional[int] = None ) -> Optional[Union[list, "SubnetHyperparameters"]]: """ Retrieves the hyperparameters for a specific subnet within the Bittensor network. These hyperparameters define the operational settings and rules governing the subnet's behavior. @@ -1179,7 +1184,7 @@ def get_subnet_hyperparameters( # Community uses this method # Returns network ImmunityPeriod hyper parameter. def immunity_period( - self, netuid: int, block: Optional[int] = None + self, netuid: int, block: Optional[int] = None ) -> Optional[int]: """ Retrieves the 'ImmunityPeriod' hyperparameter for a specific subnet. This parameter defines the duration during which new neurons are protected from certain network penalties or restrictions. @@ -1200,7 +1205,7 @@ def immunity_period( # Community uses this method def get_uid_for_hotkey_on_subnet( - self, hotkey_ss58: str, netuid: int, block: Optional[int] = None + self, hotkey_ss58: str, netuid: int, block: Optional[int] = None ) -> Optional[int]: """ Retrieves the unique identifier (UID) for a neuron's hotkey on a specific subnet. 
@@ -1257,7 +1262,7 @@ def get_commitment(self, netuid: int, uid: int, block: Optional[int] = None) -> # Community uses this via `bittensor.utils.weight_utils.process_weights_for_netuid` function. def min_allowed_weights( - self, netuid: int, block: Optional[int] = None + self, netuid: int, block: Optional[int] = None ) -> Optional[int]: """ Returns network MinAllowedWeights hyperparameter. @@ -1276,7 +1281,7 @@ def min_allowed_weights( # Community uses this via `bittensor.utils.weight_utils.process_weights_for_netuid` function. def max_weight_limit( - self, netuid: int, block: Optional[int] = None + self, netuid: int, block: Optional[int] = None ) -> Optional[float]: """ Returns network MaxWeightsLimit hyperparameter. @@ -1295,7 +1300,7 @@ def max_weight_limit( # # Community uses this method. It is used in subtensor in neuron_info, and serving. def get_prometheus_info( - self, netuid: int, hotkey_ss58: str, block: Optional[int] = None + self, netuid: int, hotkey_ss58: str, block: Optional[int] = None ) -> Optional["PrometheusInfo"]: """ Returns the prometheus information for this hotkey account. @@ -1338,7 +1343,7 @@ def subnet_exists(self, netuid: int, block: Optional[int] = None) -> bool: # Metagraph uses this method def bonds( - self, netuid: int, block: Optional[int] = None + self, netuid: int, block: Optional[int] = None ) -> list[tuple[int, list[tuple[int, int]]]]: """ Retrieves the bond distribution set by neurons within a specific subnet of the Bittensor network. Bonds represent the investments or commitments made by neurons in one another, indicating a level of trust and perceived value. This bonding mechanism is integral to the network's market-based approach to measuring and rewarding machine intelligence. 
@@ -1430,7 +1435,7 @@ def get_subnets(self, block: Optional[int] = None) -> list[int]: # Metagraph uses this method def neurons_lite( - self, netuid: int, block: Optional[int] = None + self, netuid: int, block: Optional[int] = None ) -> list["NeuronInfoLite"]: """ Retrieves a list of neurons in a 'lite' format from a specific subnet of the Bittensor network. This function provides a streamlined view of the neurons, focusing on key attributes such as stake and network participation. @@ -1463,7 +1468,7 @@ def neurons_lite( # Used in the `neurons` method which is used in metagraph.py def weights( - self, netuid: int, block: Optional[int] = None + self, netuid: int, block: Optional[int] = None ) -> list[tuple[int, list[tuple[int, int]]]]: """ Retrieves the weight distribution set by neurons within a specific subnet of the Bittensor network. This function maps each neuron's UID to the weights it assigns to other neurons, reflecting the network's trust and value assignment mechanisms. @@ -1527,7 +1532,7 @@ def make_substrate_call_with_retry(): # Used in community via `bittensor.core.subtensor.Subtensor.transfer` @networking.ensure_connected def get_transfer_fee( - self, wallet: "Wallet", dest: str, value: Union["Balance", float, int] + self, wallet: "Wallet", dest: str, value: Union["Balance", float, int] ) -> "Balance": """ Calculates the transaction fee for transferring tokens from a wallet to a specified destination address. This function simulates the transfer to estimate the associated cost, taking into account the current network conditions and transaction complexity. @@ -1578,7 +1583,7 @@ def get_transfer_fee( # Used in community via `bittensor.core.subtensor.Subtensor.transfer` def get_existential_deposit( - self, block: Optional[int] = None + self, block: Optional[int] = None ) -> Optional["Balance"]: """ Retrieves the existential deposit amount for the Bittensor blockchain. 
The existential deposit is the minimum amount of TAO required for an account to exist on the blockchain. Accounts with balances below this threshold can be reaped to conserve network resources. @@ -1677,7 +1682,7 @@ def commit_weights( uids=list(uids), weights=list(weights), salt=salt, - version_key=version_key + version_key=version_key, ) break except Exception as e: @@ -1752,7 +1757,7 @@ def reveal_weights( uids=list(uids), weights=list(weights), salt=list(salt), - version_key=version_key + version_key=version_key, ) break except Exception as e: @@ -1846,7 +1851,7 @@ def batch_reveal_weights( uids=uids, weights=weights, salt=salt, - version_keys=version_keys + version_keys=version_keys, ) return success, message except Exception as e: diff --git a/scripts/subprocess/commit_reveal.py b/scripts/subprocess/commit_reveal.py index c581919a03..a4eb2c92b2 100644 --- a/scripts/subprocess/commit_reveal.py +++ b/scripts/subprocess/commit_reveal.py @@ -38,9 +38,21 @@ class Commit: version_key (int): The version key. 
""" - def __init__(self, wallet_hotkey_name: str, wallet_hotkey_ss58: str, wallet_name: str, wallet_path: str, - commit_hash: str, netuid: int, commit_block: int, reveal_block: int, uids: List[int], - weights: List[int], salt: List[int], version_key: int): + def __init__( + self, + wallet_hotkey_name: str, + wallet_hotkey_ss58: str, + wallet_name: str, + wallet_path: str, + commit_hash: str, + netuid: int, + commit_block: int, + reveal_block: int, + uids: List[int], + weights: List[int], + salt: List[int], + version_key: int, + ): self.wallet_hotkey_name = wallet_hotkey_name self.wallet_hotkey_ss58 = wallet_hotkey_ss58 self.wallet_name = wallet_name @@ -73,11 +85,11 @@ def to_dict(self) -> Dict[str, Any]: "uids": json.dumps(self.uids), "weights": json.dumps(self.weights), "salt": json.dumps(self.salt), - "version_key": self.version_key + "version_key": self.version_key, } @staticmethod - def from_dict(data: Dict[str, Any]) -> 'Commit': + def from_dict(data: Dict[str, Any]) -> "Commit": """ Creates a Commit object from a dictionary. @@ -99,7 +111,7 @@ def from_dict(data: Dict[str, Any]) -> 'Commit': uids=json.loads(data["uids"]), weights=json.loads(data["weights"]), salt=json.loads(data["salt"]), - version_key=data["version_key"] + version_key=data["version_key"], ) def __str__(self) -> str: @@ -170,7 +182,7 @@ def initialize_db(): ("uids", "TEXT"), ("weights", "TEXT"), ("salt", "TEXT"), - ("version_key", "INTEGER") + ("version_key", "INTEGER"), ] if not table_exists("commits"): print("Creating table 'commits'...") @@ -187,7 +199,11 @@ def reveal(subtensor: Subtensor, commit: Commit): subtensor (Subtensor): The subtensor network object. commit (Commit): The commit object containing the data to be revealed. 
""" - wallet = Wallet(name=commit.wallet_name, path=commit.wallet_path, hotkey=commit.wallet_hotkey_name) + wallet = Wallet( + name=commit.wallet_name, + path=commit.wallet_path, + hotkey=commit.wallet_hotkey_name, + ) success, message = subtensor.reveal_weights( wallet=wallet, netuid=commit.netuid, @@ -196,7 +212,7 @@ def reveal(subtensor: Subtensor, commit: Commit): salt=commit.salt, version_key=commit.version_key, wait_for_inclusion=True, - wait_for_finalization=True + wait_for_finalization=True, ) del wallet if success: @@ -218,7 +234,11 @@ def reveal_batch(subtensor: Subtensor, commits: List[Commit]): print("reveal_batch has no commits to reveal.") return - wallet = Wallet(name=commits[0].wallet_name, path=commits[0].wallet_path, hotkey=commits[0].wallet_hotkey_name) + wallet = Wallet( + name=commits[0].wallet_name, + path=commits[0].wallet_path, + hotkey=commits[0].wallet_hotkey_name, + ) netuid = commits[0].netuid uids = [commit.uids for commit in commits] weights = [commit.weights for commit in commits] @@ -233,7 +253,7 @@ def reveal_batch(subtensor: Subtensor, commits: List[Commit]): salt=salt, version_keys=version_keys, wait_for_inclusion=True, - wait_for_finalization=True + wait_for_finalization=True, ) del wallet @@ -279,22 +299,37 @@ def chain_hash_check(subtensor: Subtensor): for commit_hash, commit_block in response.value: # Check if any commit on Subtensor is absent in local database if not any(c.commit_hash == commit_hash for c in ss58_commits): - print(f"There is a commit on Subtensor (hash: {commit_hash}) that we don't have locally.") + print( + f"There is a commit on Subtensor (hash: {commit_hash}) that we don't have locally." 
+ ) # Check if any local commit is absent in Subtensor local_commit_hashes = {c.commit_hash for c in ss58_commits} - subtensor_commit_hashes = {commit_hash for commit_hash, _ in response.value} + subtensor_commit_hashes = { + commit_hash for commit_hash, _ in response.value + } for local_commit_hash in local_commit_hashes: if local_commit_hash not in subtensor_commit_hashes: - print(f"There is a local commit (hash: {local_commit_hash}) that is not on Subtensor.") + print( + f"There is a local commit (hash: {local_commit_hash}) that is not on Subtensor." + ) revealed_hash(local_commit_hash) except Exception as e: print(f"Error during chain_hash_check: {e}") -def revealed(wallet_name: str, wallet_path: str, wallet_hotkey_str: str, wallet_hotkey_ss58: str, netuid: int, - uids: List[int], weights: List[int], salt: List[int], version_key: int): +def revealed( + wallet_name: str, + wallet_path: str, + wallet_hotkey_str: str, + wallet_hotkey_ss58: str, + netuid: int, + uids: List[int], + weights: List[int], + salt: List[int], + version_key: int, +): """ Handles the revealed command by removing the corresponding commit from the database. @@ -313,25 +348,50 @@ def revealed(wallet_name: str, wallet_path: str, wallet_hotkey_str: str, wallet_ with utils.DB(db_path=DB_PATH) as (conn, cursor): sql = ( "SELECT COUNT(*) FROM commits WHERE wallet_hotkey_str=? AND wallet_hotkey_ss58=? AND wallet_name=? AND wallet_path=? AND netuid=? AND " - "uids=? AND weights=? AND salt=? AND version_key=?") - cursor.execute(sql, ( - wallet_hotkey_str, wallet_hotkey_ss58, wallet_name, wallet_path, netuid, json.dumps(uids), - json.dumps(weights), - json.dumps(salt), version_key)) + "uids=? AND weights=? AND salt=? AND version_key=?" 
+ ) + cursor.execute( + sql, + ( + wallet_hotkey_str, + wallet_hotkey_ss58, + wallet_name, + wallet_path, + netuid, + json.dumps(uids), + json.dumps(weights), + json.dumps(salt), + version_key, + ), + ) count = cursor.fetchone()[0] if count > 0: delete_sql = ( "DELETE FROM commits WHERE wallet_hotkey_str=? AND wallet_hotkey_ss58=? AND wallet_name=? AND wallet_path=? AND netuid=? AND " - "uids=? AND weights=? AND salt=? AND version_key=?") - cursor.execute(delete_sql, ( - wallet_hotkey_str, wallet_hotkey_ss58, wallet_name, wallet_path, netuid, json.dumps(uids), - json.dumps(weights), json.dumps(salt), version_key)) + "uids=? AND weights=? AND salt=? AND version_key=?" + ) + cursor.execute( + delete_sql, + ( + wallet_hotkey_str, + wallet_hotkey_ss58, + wallet_name, + wallet_path, + netuid, + json.dumps(uids), + json.dumps(weights), + json.dumps(salt), + version_key, + ), + ) conn.commit() print( - f"Deleted existing row with specified data: wallet_hotkey_str={wallet_hotkey_str}, wallet_hotkey_ss58={wallet_hotkey_ss58}, wallet_name={wallet_name}, wallet_path={wallet_path}, netuid={netuid}, uids={uids}, weights={weights}, salt={salt}, version_key={version_key}") + f"Deleted existing row with specified data: wallet_hotkey_str={wallet_hotkey_str}, wallet_hotkey_ss58={wallet_hotkey_ss58}, wallet_name={wallet_name}, wallet_path={wallet_path}, netuid={netuid}, uids={uids}, weights={weights}, salt={salt}, version_key={version_key}" + ) else: print( - f"No existing row found with specified data: wallet_hotkey_str={wallet_hotkey_str}, wallet_hotkey_ss58={wallet_hotkey_ss58}, wallet_name={wallet_name}, wallet_path={wallet_path}, netuid={netuid}, uids={uids}, weights={weights}, salt={salt}, version_key={version_key}") + f"No existing row found with specified data: wallet_hotkey_str={wallet_hotkey_str}, wallet_hotkey_ss58={wallet_hotkey_ss58}, wallet_name={wallet_name}, wallet_path={wallet_path}, netuid={netuid}, uids={uids}, weights={weights}, salt={salt}, 
version_key={version_key}" + ) except Exception as e: print(f"Error removing from table 'commits': {e}") @@ -431,7 +491,9 @@ def check_reveal(subtensor: Subtensor) -> bool: curr_block = subtensor.get_current_block() # Filter for commits that are ready to be revealed - reveal_candidates = [commit for commit in commits if commit.reveal_block <= curr_block] + reveal_candidates = [ + commit for commit in commits if commit.reveal_block <= curr_block + ] return len(reveal_candidates) > 0 return False @@ -452,7 +514,9 @@ def reveal_candidates(subtensor: Subtensor): curr_block = subtensor.get_current_block() # Filter for commits that are ready to be revealed - ready_for_reveal = [commit for commit in commits if commit.reveal_block <= curr_block] + ready_for_reveal = [ + commit for commit in commits if commit.reveal_block <= curr_block + ] if ready_for_reveal: # Group commits by wallet_hotkey_ss58 grouped_reveals = {} @@ -486,27 +550,35 @@ def handle_client_connection(client_socket: socket.socket): args = shlex.split(request) command = args[0] commands = { - 'revealed': lambda: revealed( - args[1], args[2], args[3], args[4], int(args[5]), - json.loads(args[6]), json.loads(args[7]), - json.loads(args[8]), int(args[9]) + "revealed": lambda: revealed( + args[1], + args[2], + args[3], + args[4], + int(args[5]), + json.loads(args[6]), + json.loads(args[7]), + json.loads(args[8]), + int(args[9]), + ), + "revealed_hash": lambda: revealed_hash(args[1]), + "committed": lambda: committed( + Commit( + wallet_hotkey_name=args[3], + wallet_hotkey_ss58=args[4], + wallet_name=args[1], + wallet_path=args[2], + commit_hash=args[7], + netuid=int(args[8]), + commit_block=int(args[5]), + reveal_block=int(args[6]), + uids=json.loads(args[9]), + weights=json.loads(args[10]), + salt=json.loads(args[11]), + version_key=int(args[12]), + ) ), - 'revealed_hash': lambda: revealed_hash(args[1]), - 'committed': lambda: committed(Commit( - wallet_hotkey_name=args[3], - wallet_hotkey_ss58=args[4], - 
wallet_name=args[1], - wallet_path=args[2], - commit_hash=args[7], - netuid=int(args[8]), - commit_block=int(args[5]), - reveal_block=int(args[6]), - uids=json.loads(args[9]), - weights=json.loads(args[10]), - salt=json.loads(args[11]), - version_key=int(args[12]) - )), - 'terminate': lambda: terminate_process(None, None) + "terminate": lambda: terminate_process(None, None), } if command in commands: try: @@ -528,10 +600,10 @@ def start_socket_server(): Starts the socket server to listen for incoming connections. """ server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - server.bind(('127.0.0.1', 9949)) + server.bind(("127.0.0.1", 9949)) server.listen(5) - server.settimeout(2) # Set timeout for any incoming requests to 2 seconds - print('Listening on port 9949...') + server.settimeout(2) # Set timeout for any incoming requests to 2 seconds + print("Listening on port 9949...") with ThreadPoolExecutor(max_workers=10) as executor: # limit of workers amount while running: @@ -589,9 +661,20 @@ def main(args: argparse.Namespace): if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Run the Bittensor commit-reveal subprocess script.") - parser.add_argument("--network", type=str, default="wss://entrypoint-finney.opentensor.ai:443", - help="Subtensor network address") - parser.add_argument("--sleep-interval", type=float, default=12, help="Interval between block checks in seconds") + parser = argparse.ArgumentParser( + description="Run the Bittensor commit-reveal subprocess script." 
+ ) + parser.add_argument( + "--network", + type=str, + default="wss://entrypoint-finney.opentensor.ai:443", + help="Subtensor network address", + ) + parser.add_argument( + "--sleep-interval", + type=float, + default=12, + help="Interval between block checks in seconds", + ) args = parser.parse_args() main(args) diff --git a/scripts/subprocess_utils.py b/scripts/subprocess_utils.py index 762b9bd9cb..0c8ea6b55c 100644 --- a/scripts/subprocess_utils.py +++ b/scripts/subprocess_utils.py @@ -20,9 +20,12 @@ def is_process_running(process_name: str) -> bool: Returns: bool: True if the process is running, False otherwise. """ - for proc in psutil.process_iter(['pid', 'name', 'cmdline']): - cmdline = proc.info['cmdline'] - if cmdline and (process_name in proc.info['name'] or any(process_name in cmd for cmd in cmdline)): + for proc in psutil.process_iter(["pid", "name", "cmdline"]): + cmdline = proc.info["cmdline"] + if cmdline and ( + process_name in proc.info["name"] + or any(process_name in cmd for cmd in cmdline) + ): return True return False @@ -37,10 +40,13 @@ def get_process(process_name: str) -> Optional[int]: Returns: Optional[int]: PID of the process if found, None otherwise. """ - for proc in psutil.process_iter(['pid', 'name', 'cmdline']): - cmdline = proc.info['cmdline'] - if cmdline and (process_name in proc.info['name'] or any(process_name in cmd for cmd in cmdline)): - return proc.info['pid'] + for proc in psutil.process_iter(["pid", "name", "cmdline"]): + cmdline = proc.info["cmdline"] + if cmdline and ( + process_name in proc.info["name"] + or any(process_name in cmd for cmd in cmdline) + ): + return proc.info["pid"] return None @@ -48,29 +54,33 @@ def read_commit_reveal_logs(): """ Read and print the last 50 lines of logs from the log path. 
""" - log_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "subprocess", "logs")) + log_path = os.path.abspath( + os.path.join(os.path.dirname(__file__), "subprocess", "logs") + ) stdout_path = os.path.join(log_path, STDOUT_LOG.lstrip("/")) stderr_path = os.path.join(log_path, STDERR_LOG.lstrip("/")) def read_last_n_lines(file_path: str, n: int) -> list: """Reads the last N lines from a file.""" - with open(file_path, 'r') as file: + with open(file_path, "r") as file: return file.readlines()[-n:] if os.path.exists(stdout_path): print("----- STDOUT LOG -----") - print(''.join(read_last_n_lines(stdout_path, 50))) + print("".join(read_last_n_lines(stdout_path, 50))) else: print(f"STDOUT log file not found at {stdout_path}") if os.path.exists(stderr_path): print("----- STDERR LOG -----") - print(''.join(read_last_n_lines(stderr_path, 50))) + print("".join(read_last_n_lines(stderr_path, 50))) else: print(f"STDERR log file not found at {stderr_path}") -def start_commit_reveal_subprocess(network: Optional[str] = None, sleep_interval: Optional[float] = None): +def start_commit_reveal_subprocess( + network: Optional[str] = None, sleep_interval: Optional[float] = None +): """ Start the commit reveal subprocess if not already running. @@ -78,9 +88,13 @@ def start_commit_reveal_subprocess(network: Optional[str] = None, sleep_interval network (Optional[str]): Network name if any, optional. sleep_interval (Optional[float]): Sleep interval if any, optional. 
""" - log_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "subprocess", "logs")) - script_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "subprocess", "commit_reveal.py")) - project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + log_path = os.path.abspath( + os.path.join(os.path.dirname(__file__), "subprocess", "logs") + ) + script_path = os.path.abspath( + os.path.join(os.path.dirname(__file__), "subprocess", "commit_reveal.py") + ) + project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) if not is_process_running(PROCESS_NAME): stdout_file = open(log_path + STDOUT_LOG, "w") @@ -89,18 +103,18 @@ def start_commit_reveal_subprocess(network: Optional[str] = None, sleep_interval env = os.environ.copy() env["PYTHONPATH"] = project_root + ":" + env.get("PYTHONPATH", "") - args = ['python3', script_path] + args = ["python3", script_path] if network: - args.extend(['--network', network]) + args.extend(["--network", network]) if sleep_interval: - args.extend(['--sleep-interval', str(sleep_interval)]) + args.extend(["--sleep-interval", str(sleep_interval)]) process = subprocess.Popen( args=args, stdout=stdout_file, stderr=stderr_file, preexec_fn=os.setsid, - env=env + env=env, ) print(f"Subprocess '{PROCESS_NAME}' started with PID {process.pid}.") @@ -110,8 +124,8 @@ def start_commit_reveal_subprocess(network: Optional[str] = None, sleep_interval def stop_commit_reveal_subprocess(): """ - Stop the commit reveal subprocess if it is running. - """ + Stop the commit reveal subprocess if it is running. 
+ """ pid = get_process(PROCESS_NAME) if pid is not None: @@ -128,9 +142,9 @@ class DB: """ def __init__( - self, - db_path: str = os.path.expanduser("~/.bittensor/bittensor.db"), - row_factory=None, + self, + db_path: str = os.path.expanduser("~/.bittensor/bittensor.db"), + row_factory=None, ): if not os.path.exists(os.path.dirname(db_path)): os.makedirs(os.path.dirname(db_path)) diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py index ad6c478e19..0ceb0b0825 100644 --- a/tests/e2e_tests/test_commit_weights.py +++ b/tests/e2e_tests/test_commit_weights.py @@ -59,7 +59,9 @@ async def test_commit_and_reveal_weights(local_chain): netuid, ), "Unable to enable commit reveal on the subnet" - subtensor = bittensor.Subtensor(network="ws://localhost:9945", subprocess_initialization=False) + subtensor = bittensor.Subtensor( + network="ws://localhost:9945", subprocess_initialization=False + ) assert subtensor.get_subnet_hyperparameters( netuid=netuid ).commit_reveal_weights_enabled, "Failed to enable commit/reveal" @@ -74,10 +76,7 @@ async def test_commit_and_reveal_weights(local_chain): ) assert ( - subtensor.get_subnet_hyperparameters( - netuid=netuid - ).commit_reveal_periods - == 1 + subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_periods == 1 ), "Failed to set commit/reveal periods" assert ( diff --git a/tests/e2e_tests/test_reveal_weights.py b/tests/e2e_tests/test_reveal_weights.py index b1941ec878..4a3a19a539 100644 --- a/tests/e2e_tests/test_reveal_weights.py +++ b/tests/e2e_tests/test_reveal_weights.py @@ -60,7 +60,9 @@ async def test_commit_and_reveal_weights(local_chain): netuid, ), "Unable to enable commit reveal on the subnet" - subtensor = bittensor.Subtensor(network="ws://localhost:9945", subprocess_sleep_interval=0.25) + subtensor = bittensor.Subtensor( + network="ws://localhost:9945", subprocess_sleep_interval=0.25 + ) assert subtensor.get_subnet_hyperparameters( netuid=netuid 
).commit_reveal_weights_enabled, "Failed to enable commit/reveal" @@ -75,14 +77,11 @@ async def test_commit_and_reveal_weights(local_chain): ) assert ( - subtensor.get_subnet_hyperparameters( - netuid=netuid - ).commit_reveal_periods - == 1 + subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_periods == 1 ), "Failed to set commit/reveal interval" assert ( - subtensor.weights_rate_limit(netuid=netuid) > 0 + subtensor.weights_rate_limit(netuid=netuid) > 0 ), "Weights rate limit is below 0" # Lower the rate limit assert sudo_set_hyperparameter_values( @@ -94,7 +93,7 @@ async def test_commit_and_reveal_weights(local_chain): ) assert ( - subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 + subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 ), "Failed to set weights_rate_limit" assert subtensor.weights_rate_limit(netuid=netuid) == 0 @@ -142,7 +141,9 @@ async def test_commit_and_reveal_weights(local_chain): assert commit_reveal_subprocess.is_table_empty("commits") is False # Wait until the reveal block range - await wait_interval(subtensor.get_subnet_hyperparameters(netuid=netuid).tempo, subtensor) + await wait_interval( + subtensor.get_subnet_hyperparameters(netuid=netuid).tempo, subtensor + ) # allow one more block to pass time.sleep(12) @@ -161,7 +162,7 @@ async def test_commit_and_reveal_weights(local_chain): assert revealed_weights.value is not None, "Weight reveal not found in storage" assert ( - weight_vals[0] == revealed_weights.value[0][1] + weight_vals[0] == revealed_weights.value[0][1] ), f"Incorrect revealed weights. 
Expected: {weights[0]}, Actual: {revealed_weights.value[0][1]}" logging.info("✅ Passed test_commit_and_reveal_weights") @@ -208,7 +209,9 @@ async def test_set_and_reveal_weights(local_chain): netuid, ), "Unable to enable commit reveal on the subnet" - subtensor = bittensor.Subtensor(network="ws://localhost:9945", subprocess_sleep_interval=0.25) # Subprocess works with fast blocks + subtensor = bittensor.Subtensor( + network="ws://localhost:9945", subprocess_sleep_interval=0.25 + ) # Subprocess works with fast blocks assert subtensor.get_subnet_hyperparameters( netuid=netuid ).commit_reveal_weights_enabled, "Failed to enable commit/reveal" @@ -223,14 +226,11 @@ async def test_set_and_reveal_weights(local_chain): ) assert ( - subtensor.get_subnet_hyperparameters( - netuid=netuid - ).commit_reveal_periods - == 1 + subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_periods == 1 ), "Failed to set commit/reveal period" assert ( - subtensor.weights_rate_limit(netuid=netuid) > 0 + subtensor.weights_rate_limit(netuid=netuid) > 0 ), "Weights rate limit is below 0" # Lower the rate limit assert sudo_set_hyperparameter_values( @@ -242,7 +242,7 @@ async def test_set_and_reveal_weights(local_chain): ) assert ( - subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 + subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 ), "Failed to set weights_rate_limit" assert subtensor.weights_rate_limit(netuid=netuid) == 0 @@ -288,7 +288,9 @@ async def test_set_and_reveal_weights(local_chain): assert commit_reveal_subprocess.is_table_empty("commits") is False # Wait until the reveal block range - await wait_interval(subtensor.get_subnet_hyperparameters(netuid=netuid).tempo, subtensor) + await wait_interval( + subtensor.get_subnet_hyperparameters(netuid=netuid).tempo, subtensor + ) # allow one more block to pass time.sleep(12) @@ -307,7 +309,7 @@ async def test_set_and_reveal_weights(local_chain): assert revealed_weights.value is 
not None, "Weight reveal not found in storage" assert ( - weight_vals[0] == revealed_weights.value[0][1] + weight_vals[0] == revealed_weights.value[0][1] ), f"Incorrect revealed weights. Expected: {weights[0]}, Actual: {revealed_weights.value[0][1]}" logging.info("✅ Passed test_commit_and_reveal_weights") @@ -354,7 +356,9 @@ async def test_set_and_reveal_batch_weights(local_chain): netuid, ), "Unable to enable commit reveal on the subnet" - subtensor = bittensor.Subtensor(network="ws://localhost:9945", subprocess_sleep_interval=2) # Subprocess works with fast blocks + subtensor = bittensor.Subtensor( + network="ws://localhost:9945", subprocess_sleep_interval=2 + ) # Subprocess works with fast blocks assert subtensor.get_subnet_hyperparameters( netuid=netuid ).commit_reveal_weights_enabled, "Failed to enable commit/reveal" @@ -369,14 +373,11 @@ async def test_set_and_reveal_batch_weights(local_chain): ) assert ( - subtensor.get_subnet_hyperparameters( - netuid=netuid - ).commit_reveal_periods - == 1 + subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_periods == 1 ), "Failed to set commit/reveal periods" assert ( - subtensor.weights_rate_limit(netuid=netuid) > 0 + subtensor.weights_rate_limit(netuid=netuid) > 0 ), "Weights rate limit is below 0" # Lower the rate limit assert sudo_set_hyperparameter_values( @@ -387,7 +388,7 @@ async def test_set_and_reveal_batch_weights(local_chain): return_error_message=True, ) assert ( - subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 + subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 ), "Failed to set weights_rate_limit" assert subtensor.weights_rate_limit(netuid=netuid) == 0 @@ -471,7 +472,9 @@ async def test_set_and_reveal_batch_weights(local_chain): assert commit_reveal_subprocess.is_table_empty("commits") is False # Wait until the reveal block range - await wait_interval(subtensor.get_subnet_hyperparameters(netuid=netuid).tempo, subtensor) + await 
wait_interval( + subtensor.get_subnet_hyperparameters(netuid=netuid).tempo, subtensor + ) # allow one more block to pass time.sleep(12) @@ -490,6 +493,6 @@ async def test_set_and_reveal_batch_weights(local_chain): assert revealed_weights.value is not None, "Weight reveal not found in storage" assert ( - weight_vals[0] == revealed_weights.value[0][1] + weight_vals[0] == revealed_weights.value[0][1] ), f"Incorrect revealed weights. Expected: {weights[0]}, Actual: {revealed_weights.value[0][1]}" logging.info("✅ Passed test_commit_and_reveal_weights") From ca653c34c833edf591d917b9d78c6123ec07ca6e Mon Sep 17 00:00:00 2001 From: opendansor Date: Tue, 22 Oct 2024 12:12:50 -0700 Subject: [PATCH 16/58] Rename and refactor subprocess utilities and related tests Renamed subprocess utility scripts from `/scripts` to `/bittensor/utils`. Updated import paths and refactored handling of subprocess logging by adding dynamic PID log management. Adjusted tests accordingly to reflect these changes. --- bittensor/core/subtensor.py | 5 +- .../utils}/subprocess/__init__.py | 0 .../utils}/subprocess/commit_reveal.py | 9 +-- .../utils}/subprocess_utils.py | 62 +++++++++++++------ tests/e2e_tests/conftest.py | 3 +- tests/e2e_tests/test_commit_weights.py | 14 +++-- tests/e2e_tests/test_reveal_weights.py | 5 +- 7 files changed, 60 insertions(+), 38 deletions(-) rename {scripts => bittensor/utils}/subprocess/__init__.py (100%) rename {scripts => bittensor/utils}/subprocess/commit_reveal.py (98%) rename {scripts => bittensor/utils}/subprocess_utils.py (80%) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 83e4e4cb76..d41901adf8 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -79,12 +79,11 @@ transfer_extrinsic, ) from bittensor.core.metagraph import Metagraph -from bittensor.utils import ss58_to_vec_u8, torch, U64_MAX, u16_normalized_float, networking +from bittensor.utils import ss58_to_vec_u8, torch, U64_MAX, u16_normalized_float, 
networking, subprocess_utils from bittensor.utils.balance import Balance from bittensor.utils.btlogging import logging from bittensor.utils.registration import legacy_torch_api_compat from bittensor.utils.weight_utils import generate_weight_hash -from scripts import subprocess_utils KEY_NONCE: dict[str, int] = {} @@ -153,7 +152,7 @@ def __init__( _mock: bool = False, log_verbose: bool = False, connection_timeout: int = 600, - subprocess_initialization: bool = False, + subprocess_initialization: bool = True, subprocess_sleep_interval: float = 12, ) -> None: """ diff --git a/scripts/subprocess/__init__.py b/bittensor/utils/subprocess/__init__.py similarity index 100% rename from scripts/subprocess/__init__.py rename to bittensor/utils/subprocess/__init__.py diff --git a/scripts/subprocess/commit_reveal.py b/bittensor/utils/subprocess/commit_reveal.py similarity index 98% rename from scripts/subprocess/commit_reveal.py rename to bittensor/utils/subprocess/commit_reveal.py index a4eb2c92b2..71a306202e 100644 --- a/scripts/subprocess/commit_reveal.py +++ b/bittensor/utils/subprocess/commit_reveal.py @@ -10,7 +10,7 @@ from bittensor.core.subtensor import Subtensor from bittensor_wallet import Wallet -from scripts import subprocess_utils as utils +from bittensor.utils import subprocess_utils as utils from typing import List, Any, Dict, Optional # Path to the SQLite database @@ -612,7 +612,8 @@ def start_socket_server(): print(f"Accepted connection from {addr[0]}.") executor.submit(handle_client_connection, client_sock) except socket.timeout: - print("Socket timeout, continuing to listen...") + # print("Socket timeout, continuing to listen...") + continue except Exception as e: print(f"Error accepting connection: {e}.") break @@ -648,10 +649,10 @@ def main(args: argparse.Namespace): while running: counter += 1 - + curr_block = subtensor.get_current_block() if check_reveal(subtensor=subtensor): reveal_candidates(subtensor=subtensor) - print(f"Revealing commit on block 
{subtensor.get_current_block()}") + print(f"Revealing commit on block {curr_block}") # Every 100th run, perform an additional check to verify reveal list alignment with the backend if counter % 100 == 0: diff --git a/scripts/subprocess_utils.py b/bittensor/utils/subprocess_utils.py similarity index 80% rename from scripts/subprocess_utils.py rename to bittensor/utils/subprocess_utils.py index 0c8ea6b55c..968547e01d 100644 --- a/scripts/subprocess_utils.py +++ b/bittensor/utils/subprocess_utils.py @@ -5,10 +5,27 @@ import subprocess import psutil -STDOUT_LOG = "/commit_reveal_stdout.log" -STDERR_LOG = "/commit_reveal_stderr.log" +LOG_DIR = os.path.expanduser("~/.bittensor/logs") PROCESS_NAME = "commit_reveal.py" +# Ensure the log directory exists +os.makedirs(LOG_DIR, exist_ok=True) + + +def get_pid_log_files() -> tuple[str, str]: + """ + Get the log files for the current running process. + Returns: + tuple[str, str]: Paths to the stdout log file and stderr log file. + """ + pid = get_process(PROCESS_NAME) + if pid is None: + raise RuntimeError(f"Process '{PROCESS_NAME}' is not running.") + + stdout_log = os.path.join(LOG_DIR, f"commit_reveal_stdout_{pid}.log") + stderr_log = os.path.join(LOG_DIR, f"commit_reveal_stderr_{pid}.log") + return stdout_log, stderr_log + def is_process_running(process_name: str) -> bool: """ @@ -52,34 +69,34 @@ def get_process(process_name: str) -> Optional[int]: def read_commit_reveal_logs(): """ - Read and print the last 50 lines of logs from the log path. + Read and print the last 50 lines of logs from the most recent subprocess log. 
""" - log_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), "subprocess", "logs") - ) - stdout_path = os.path.join(log_path, STDOUT_LOG.lstrip("/")) - stderr_path = os.path.join(log_path, STDERR_LOG.lstrip("/")) + try: + stdout_log, stderr_log = get_pid_log_files() + except RuntimeError as e: + print(str(e)) + return def read_last_n_lines(file_path: str, n: int) -> list: """Reads the last N lines from a file.""" with open(file_path, "r") as file: return file.readlines()[-n:] - if os.path.exists(stdout_path): + if os.path.exists(stdout_log): print("----- STDOUT LOG -----") - print("".join(read_last_n_lines(stdout_path, 50))) + print("".join(read_last_n_lines(stdout_log, 50))) else: - print(f"STDOUT log file not found at {stdout_path}") + print(f"STDOUT log file not found at {stdout_log}") - if os.path.exists(stderr_path): + if os.path.exists(stderr_log): print("----- STDERR LOG -----") - print("".join(read_last_n_lines(stderr_path, 50))) + print("".join(read_last_n_lines(stderr_log, 50))) else: - print(f"STDERR log file not found at {stderr_path}") + print(f"STDERR log file not found at {stderr_log}") def start_commit_reveal_subprocess( - network: Optional[str] = None, sleep_interval: Optional[float] = None + network: Optional[str] = None, sleep_interval: Optional[float] = None ): """ Start the commit reveal subprocess if not already running. @@ -88,17 +105,21 @@ def start_commit_reveal_subprocess( network (Optional[str]): Network name if any, optional. sleep_interval (Optional[float]): Sleep interval if any, optional. 
""" - log_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), "subprocess", "logs") - ) script_path = os.path.abspath( os.path.join(os.path.dirname(__file__), "subprocess", "commit_reveal.py") ) project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) if not is_process_running(PROCESS_NAME): - stdout_file = open(log_path + STDOUT_LOG, "w") - stderr_file = open(log_path + STDERR_LOG, "w") + # Correctly construct the paths for STDOUT and STDERR log files + stdout_log = os.path.join(LOG_DIR, "commit_reveal_stdout.log") + stderr_log = os.path.join(LOG_DIR, "commit_reveal_stderr.log") + + os.makedirs(LOG_DIR, exist_ok=True) + + stdout_file = open(stdout_log, "w") + stderr_file = open(stderr_log, "w") + print(f"Starting subprocess '{PROCESS_NAME}'...") env = os.environ.copy() env["PYTHONPATH"] = project_root + ":" + env.get("PYTHONPATH", "") @@ -109,6 +130,7 @@ def start_commit_reveal_subprocess( if sleep_interval: args.extend(["--sleep-interval", str(sleep_interval)]) + # Create a new subprocess process = subprocess.Popen( args=args, stdout=stdout_file, diff --git a/tests/e2e_tests/conftest.py b/tests/e2e_tests/conftest.py index d005ea8501..acfafb29c6 100644 --- a/tests/e2e_tests/conftest.py +++ b/tests/e2e_tests/conftest.py @@ -8,12 +8,11 @@ import pytest from substrateinterface import SubstrateInterface -from scripts import subprocess_utils +from bittensor.utils import subprocess_utils from bittensor import logging from tests.e2e_tests.utils.e2e_test_utils import ( clone_or_update_templates, install_templates, - template_path, uninstall_templates, ) diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py index 0ceb0b0825..75f339f2d2 100644 --- a/tests/e2e_tests/test_commit_weights.py +++ b/tests/e2e_tests/test_commit_weights.py @@ -63,7 +63,7 @@ async def test_commit_and_reveal_weights(local_chain): network="ws://localhost:9945", subprocess_initialization=False ) assert 
subtensor.get_subnet_hyperparameters( - netuid=netuid + netuid=netuid, ).commit_reveal_weights_enabled, "Failed to enable commit/reveal" # Lower the commit_reveal interval @@ -126,14 +126,16 @@ async def test_commit_and_reveal_weights(local_chain): assert commit_block > 0, f"Invalid block number: {commit_block}" # Query the WeightCommitRevealInterval storage map - weight_commit_reveal_interval = subtensor.query_module( - module="SubtensorModule", name="WeightCommitRevealInterval", params=[netuid] + reveal_periods = subtensor.query_module( + module="SubtensorModule", name="RevealPeriodEpochs", params=[netuid] ) - interval = weight_commit_reveal_interval.value - assert interval > 0, "Invalid WeightCommitRevealInterval" + periods = reveal_periods.value + assert periods > 0, "Invalid RevealPeriodEpochs" # Wait until the reveal block range - await wait_interval(interval, subtensor) + await wait_interval( + subtensor.get_subnet_hyperparameters(netuid=netuid).tempo, subtensor + ) # Reveal weights success, message = subtensor.reveal_weights( diff --git a/tests/e2e_tests/test_reveal_weights.py b/tests/e2e_tests/test_reveal_weights.py index 4a3a19a539..868d3f2cc6 100644 --- a/tests/e2e_tests/test_reveal_weights.py +++ b/tests/e2e_tests/test_reveal_weights.py @@ -1,9 +1,8 @@ import time -from time import sleep import numpy as np import pytest -import scripts.subprocess.commit_reveal as commit_reveal_subprocess +import bittensor.utils.subprocess.commit_reveal as commit_reveal_subprocess import bittensor from bittensor import logging from bittensor.utils.weight_utils import convert_weights_and_uids_for_emit @@ -357,7 +356,7 @@ async def test_set_and_reveal_batch_weights(local_chain): ), "Unable to enable commit reveal on the subnet" subtensor = bittensor.Subtensor( - network="ws://localhost:9945", subprocess_sleep_interval=2 + network="ws://localhost:9945", subprocess_sleep_interval=0.25 ) # Subprocess works with fast blocks assert subtensor.get_subnet_hyperparameters( 
netuid=netuid From 65b7e89108978a656f0abc1fee8b1503580f7ce5 Mon Sep 17 00:00:00 2001 From: opendansor Date: Tue, 22 Oct 2024 15:21:57 -0700 Subject: [PATCH 17/58] prepare for emmit on set weights Rearranged calls to 'convert_weights_and_uids_for_emit' to align with the logic flow across test_reveal_weights.py and set_weights.py. This ensures 'convert_weights_and_uids_for_emit' is invoked only in commit-reveal scenarios, enhancing code clarity and maintaining intended functionality. --- bittensor/core/extrinsics/set_weights.py | 6 +++- tests/e2e_tests/test_reveal_weights.py | 39 ++++++++++++------------ 2 files changed, 24 insertions(+), 21 deletions(-) diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index ea954af066..a1b3ceea33 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -29,6 +29,7 @@ from bittensor.utils.btlogging import logging from bittensor.utils.networking import ensure_connected from bittensor.utils.registration import torch, use_torch +from bittensor.utils.weight_utils import convert_weights_and_uids_for_emit # For annotation purposes if TYPE_CHECKING: @@ -138,6 +139,9 @@ def set_weights_extrinsic( ).commit_reveal_weights_enabled: # if cr is enabled, commit instead of setting the weights. salt = [random.randint(0, 350) for _ in range(8)] + uids, vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) # Ask before moving on. 
if prompt: @@ -156,7 +160,7 @@ def set_weights_extrinsic( netuid=netuid, salt=salt, uids=uids, - weights=weights, + weights=vals, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, prompt=prompt, diff --git a/tests/e2e_tests/test_reveal_weights.py b/tests/e2e_tests/test_reveal_weights.py index 868d3f2cc6..e6547a19bc 100644 --- a/tests/e2e_tests/test_reveal_weights.py +++ b/tests/e2e_tests/test_reveal_weights.py @@ -248,9 +248,6 @@ async def test_set_and_reveal_weights(local_chain): # Commit-reveal values uids = np.array([0], dtype=np.int64) weights = np.array([0.1], dtype=np.float32) - weight_uids, weight_vals = convert_weights_and_uids_for_emit( - uids=uids, weights=weights - ) # Assert no local CR processes in table assert commit_reveal_subprocess.is_table_empty("commits") @@ -259,12 +256,16 @@ async def test_set_and_reveal_weights(local_chain): success, message = subtensor.set_weights( alice_wallet, netuid, - uids=weight_uids, - weights=weight_vals, + uids=uids, + weights=weights, wait_for_inclusion=True, wait_for_finalization=True, ) + weight_uids, weight_vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) + weight_commits = subtensor.query_module( module="SubtensorModule", name="WeightCommits", @@ -394,9 +395,7 @@ async def test_set_and_reveal_batch_weights(local_chain): # Commit-reveal values uids = np.array([0], dtype=np.int64) weights = np.array([0.1], dtype=np.float32) - weight_uids, weight_vals = convert_weights_and_uids_for_emit( - uids=uids, weights=weights - ) + # Assert no local CR processes in table assert commit_reveal_subprocess.is_table_empty("commits") @@ -405,8 +404,8 @@ async def test_set_and_reveal_batch_weights(local_chain): success, message = subtensor.set_weights( alice_wallet, netuid, - uids=weight_uids, - weights=weight_vals, + uids=uids, + weights=weights, wait_for_inclusion=True, wait_for_finalization=True, ) @@ -416,16 +415,14 @@ async def 
test_set_and_reveal_batch_weights(local_chain): # Commit-reveal values uids = np.array([0], dtype=np.int64) weights = np.array([0.2], dtype=np.float32) - weight_uids, weight_vals = convert_weights_and_uids_for_emit( - uids=uids, weights=weights - ) + # add second weights with CR enabled success, message = subtensor.set_weights( alice_wallet, netuid, - uids=weight_uids, - weights=weight_vals, + uids=uids, + weights=weights, wait_for_inclusion=True, wait_for_finalization=True, ) @@ -435,16 +432,14 @@ async def test_set_and_reveal_batch_weights(local_chain): # Commit-reveal values uids = np.array([0], dtype=np.int64) weights = np.array([0.3], dtype=np.float32) - weight_uids, weight_vals = convert_weights_and_uids_for_emit( - uids=uids, weights=weights - ) + # add second weights with CR enabled success, message = subtensor.set_weights( alice_wallet, netuid, - uids=weight_uids, - weights=weight_vals, + uids=uids, + weights=weights, wait_for_inclusion=True, wait_for_finalization=True, ) @@ -455,6 +450,10 @@ async def test_set_and_reveal_batch_weights(local_chain): params=[netuid, alice_wallet.hotkey.ss58_address], ) + weight_uids, weight_vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) + # Assert that the committed weights are set correctly assert weight_commits.value is not None, "Weight commit not found in storage" commit_hash, commit_block = weight_commits.value[0] From dd9c20d57666292ad5085bff30a9d3bd5773ff96 Mon Sep 17 00:00:00 2001 From: opendansor Date: Tue, 22 Oct 2024 15:58:13 -0700 Subject: [PATCH 18/58] remove convert to emmit --- bittensor/core/extrinsics/set_weights.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index a1b3ceea33..8a7ba59540 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -139,9 +139,6 @@ def set_weights_extrinsic( ).commit_reveal_weights_enabled: # if 
cr is enabled, commit instead of setting the weights. salt = [random.randint(0, 350) for _ in range(8)] - uids, vals = convert_weights_and_uids_for_emit( - uids=uids, weights=weights - ) # Ask before moving on. if prompt: @@ -160,7 +157,7 @@ def set_weights_extrinsic( netuid=netuid, salt=salt, uids=uids, - weights=vals, + weights=weights, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, prompt=prompt, From ba82e5504429686226414a5ecd43025e2bb7823a Mon Sep 17 00:00:00 2001 From: opendansor Date: Tue, 22 Oct 2024 16:00:40 -0700 Subject: [PATCH 19/58] if weights is a list of floats --- bittensor/core/subtensor.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index d41901adf8..2035e3f6b6 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -83,7 +83,7 @@ from bittensor.utils.balance import Balance from bittensor.utils.btlogging import logging from bittensor.utils.registration import legacy_torch_api_compat -from bittensor.utils.weight_utils import generate_weight_hash +from bittensor.utils.weight_utils import generate_weight_hash, convert_weights_and_uids_for_emit KEY_NONCE: dict[str, int] = {} @@ -1868,6 +1868,9 @@ def commit_weights( f"Committing weights with params: netuid={netuid}, uids={uids}, weights={weights}, version_key={version_key}" ) + if isinstance(weights, list) and all(isinstance(w, float) for w in weights): + _, weights = convert_weights_and_uids_for_emit(uids, weights) + # Generate the hash of the weights commit_hash = generate_weight_hash( address=wallet.hotkey.ss58_address, From fb0c7696e41334c90447fb10b720db1735201c64 Mon Sep 17 00:00:00 2001 From: opendansor Date: Tue, 22 Oct 2024 16:23:35 -0700 Subject: [PATCH 20/58] Refactor weight conversion logic in set_weights. 
Moved weight conversion and normalization steps into the beginning of the `set_weights` function, ensuring consistency between weight submission and commit operations. This update affects both the `set_weights` logic and its corresponding end-to-end tests. --- bittensor/core/extrinsics/set_weights.py | 22 ++++++++++++-- tests/e2e_tests/test_reveal_weights.py | 38 +++++++++++++----------- 2 files changed, 41 insertions(+), 19 deletions(-) diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index 8a7ba59540..1626384f20 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -152,12 +152,30 @@ def set_weights_extrinsic( f":satellite: Committing weights on [white]{subtensor.network}[/white] ..." ): try: + + # First convert types. + if use_torch(): + if isinstance(uids, list): + uids = torch.tensor(uids, dtype=torch.int64) + if isinstance(weights, list): + weights = torch.tensor(weights, dtype=torch.float32) + else: + if isinstance(uids, list): + uids = np.array(uids, dtype=np.int64) + if isinstance(weights, list): + weights = np.array(weights, dtype=np.float32) + + # Reformat and normalize. 
+ weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( + uids, weights + ) + success, message = subtensor.commit_weights( wallet=wallet, netuid=netuid, salt=salt, - uids=uids, - weights=weights, + uids=weight_uids, + weights=weight_vals, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, prompt=prompt, diff --git a/tests/e2e_tests/test_reveal_weights.py b/tests/e2e_tests/test_reveal_weights.py index e6547a19bc..4a73eb6a4a 100644 --- a/tests/e2e_tests/test_reveal_weights.py +++ b/tests/e2e_tests/test_reveal_weights.py @@ -248,6 +248,9 @@ async def test_set_and_reveal_weights(local_chain): # Commit-reveal values uids = np.array([0], dtype=np.int64) weights = np.array([0.1], dtype=np.float32) + weight_uids, weight_vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) # Assert no local CR processes in table assert commit_reveal_subprocess.is_table_empty("commits") @@ -256,16 +259,12 @@ async def test_set_and_reveal_weights(local_chain): success, message = subtensor.set_weights( alice_wallet, netuid, - uids=uids, - weights=weights, + uids=weight_uids, + weights=weight_vals, wait_for_inclusion=True, wait_for_finalization=True, ) - weight_uids, weight_vals = convert_weights_and_uids_for_emit( - uids=uids, weights=weights - ) - weight_commits = subtensor.query_module( module="SubtensorModule", name="WeightCommits", @@ -395,7 +394,10 @@ async def test_set_and_reveal_batch_weights(local_chain): # Commit-reveal values uids = np.array([0], dtype=np.int64) weights = np.array([0.1], dtype=np.float32) - + # Customers run this before submitting weights + weight_uids, weight_vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) # Assert no local CR processes in table assert commit_reveal_subprocess.is_table_empty("commits") @@ -404,8 +406,8 @@ async def test_set_and_reveal_batch_weights(local_chain): success, message = subtensor.set_weights( alice_wallet, netuid, - uids=uids, - 
weights=weights, + uids=weight_uids, + weights=weight_vals, wait_for_inclusion=True, wait_for_finalization=True, ) @@ -415,14 +417,15 @@ async def test_set_and_reveal_batch_weights(local_chain): # Commit-reveal values uids = np.array([0], dtype=np.int64) weights = np.array([0.2], dtype=np.float32) - - + weight_uids, weight_vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) # add second weights with CR enabled success, message = subtensor.set_weights( alice_wallet, netuid, - uids=uids, - weights=weights, + uids=weight_uids, + weights=weight_vals, wait_for_inclusion=True, wait_for_finalization=True, ) @@ -432,14 +435,15 @@ async def test_set_and_reveal_batch_weights(local_chain): # Commit-reveal values uids = np.array([0], dtype=np.int64) weights = np.array([0.3], dtype=np.float32) - - + weight_uids, weight_vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) # add second weights with CR enabled success, message = subtensor.set_weights( alice_wallet, netuid, - uids=uids, - weights=weights, + uids=weight_uids, + weights=weight_vals, wait_for_inclusion=True, wait_for_finalization=True, ) From 0ea65c115da1f6bd7698ae1f16726528af5b4b91 Mon Sep 17 00:00:00 2001 From: opendansor Date: Wed, 23 Oct 2024 11:08:53 -0700 Subject: [PATCH 21/58] Add test for commit-reveal batch weights over limit Introduces a test to validate the commit-reveal mechanism for weights when the number of commits exceeds the limit. The test sets up a subnet, registers a neuron, and verifies behavior when attempting more than the allowed number of weight commits. 
--- bittensor/core/subtensor.py | 2 +- tests/e2e_tests/test_reveal_weights.py | 134 +++++++++++++++++++++++++ 2 files changed, 135 insertions(+), 1 deletion(-) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 2035e3f6b6..03a918e02e 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -1869,7 +1869,7 @@ def commit_weights( ) if isinstance(weights, list) and all(isinstance(w, float) for w in weights): - _, weights = convert_weights_and_uids_for_emit(uids, weights) + _, weights = convert_weights_and_uids_for_emit(uids, weights) # type: ignore # Generate the hash of the weights commit_hash = generate_weight_hash( diff --git a/tests/e2e_tests/test_reveal_weights.py b/tests/e2e_tests/test_reveal_weights.py index 4a73eb6a4a..3f87daf919 100644 --- a/tests/e2e_tests/test_reveal_weights.py +++ b/tests/e2e_tests/test_reveal_weights.py @@ -498,3 +498,137 @@ async def test_set_and_reveal_batch_weights(local_chain): weight_vals[0] == revealed_weights.value[0][1] ), f"Incorrect revealed weights. Expected: {weights[0]}, Actual: {revealed_weights.value[0][1]}" logging.info("✅ Passed test_commit_and_reveal_weights") + + +@pytest.mark.asyncio +async def test_set_and_reveal_batch_weights_over_limit(local_chain): + """ + Tests the commit/reveal batch weights mechanism with 11 commits, which should throw an exception + + Steps: + 1. Register a subnet through Alice + 2. Register Alice's neuron and add stake + 3. Enable commit-reveal mechanism on the subnet + 4. Lower the commit_reveal interval and rate limit + 5. Commit weights and verify + 6. 
Wait interval & see if subprocess did the reveal weights and verify + Raises: + AssertionError: If any of the checks or verifications fail + """ + netuid = 1 + logging.info("Testing test_set_and_reveal_weights") + # Register root as Alice + keypair, alice_wallet = setup_wallet("//Alice") + assert register_subnet(local_chain, alice_wallet), "Unable to register the subnet" + + # Verify subnet 1 created successfully + assert local_chain.query( + "SubtensorModule", "NetworksAdded", [1] + ).serialize(), "Subnet wasn't created successfully" + + assert register_neuron( + local_chain, alice_wallet, netuid + ), "Unable to register Alice as a neuron" + + # Stake to become to top neuron after the first epoch + add_stake(local_chain, alice_wallet, bittensor.Balance.from_tao(100_000)) + + # Enable commit_reveal on the subnet + assert sudo_set_hyperparameter_bool( + local_chain, + alice_wallet, + "sudo_set_commit_reveal_weights_enabled", + True, + netuid, + ), "Unable to enable commit reveal on the subnet" + + subtensor = bittensor.Subtensor( + network="ws://localhost:9945", subprocess_sleep_interval=0.25 + ) # Subprocess works with fast blocks + assert subtensor.get_subnet_hyperparameters( + netuid=netuid + ).commit_reveal_weights_enabled, "Failed to enable commit/reveal" + + # Lower the commit_reveal interval + assert sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_commit_reveal_weights_periods", + call_params={"netuid": netuid, "periods": "2"}, + return_error_message=True, + ) + + assert ( + subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_periods == 2 + ), "Failed to set commit/reveal periods" + + assert ( + subtensor.weights_rate_limit(netuid=netuid) > 0 + ), "Weights rate limit is below 0" + # Lower the rate limit + assert sudo_set_hyperparameter_values( + local_chain, + alice_wallet, + call_function="sudo_set_weights_set_rate_limit", + call_params={"netuid": netuid, "weights_set_rate_limit": "0"}, + 
return_error_message=True, + ) + assert ( + subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 + ), "Failed to set weights_rate_limit" + assert subtensor.weights_rate_limit(netuid=netuid) == 0 + + # Commit-reveal values and weights for different steps + weights_steps = [ + (np.array([0], dtype=np.int64), np.array([0.1], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.2], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.3], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.4], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.5], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.6], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.7], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.8], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.9], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.10], dtype=np.float32)), + ] + + for uids, weights in weights_steps: + # Customers run this before submitting weights + weight_uids, weight_vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) + + # Set weights with CR enabled + success, message = subtensor.set_weights( + alice_wallet, + netuid, + uids=weight_uids, + weights=weight_vals, + wait_for_inclusion=True, + wait_for_finalization=True, + ) + print(message) + assert success + + time.sleep(3) + + # 11th time (should throw error) + # Commit-reveal values + uids = np.array([0], dtype=np.int64) + weights = np.array([0.3], dtype=np.float32) + weight_uids, weight_vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) + + # Set weights with CR enabled + success, message = subtensor.set_weights( + alice_wallet, + netuid, + uids=weight_uids, + weights=weight_vals, + wait_for_inclusion=True, + wait_for_finalization=True, + ) + + print(message) + assert success is False From bb60aa5aab86fdb486e984c72c12cdd40897b0c2 Mon Sep 
17 00:00:00 2001 From: opendansor Date: Thu, 24 Oct 2024 15:39:36 -0700 Subject: [PATCH 22/58] Enable commit reveal subprocess control and expiry handling Added logic to conditionally start the commit reveal subprocess based on existing commits and introduced an expire block to manage commit object lifecycle. Updated tests and utilities to reflect these changes, ensuring proper subprocess and database handling. --- bittensor/core/extrinsics/commit_weights.py | 3 +- bittensor/core/subtensor.py | 11 ++-- bittensor/utils/subprocess/commit_reveal.py | 48 ++++++++++++++--- bittensor/utils/subprocess_utils.py | 60 +++++++++++++++++++-- tests/e2e_tests/test_reveal_weights.py | 16 +++--- 5 files changed, 115 insertions(+), 23 deletions(-) diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index 617c8775fa..bef8098079 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -187,8 +187,9 @@ def send_command(command): netuid=netuid ).commit_reveal_periods reveal_block = epoch_start_block + ((cr_periods - 1) * subnet_tempo_blocks) + 1 + expire_block = reveal_block + subnet_tempo_blocks - 1 - command = f'committed "{wallet.name}" "{wallet.path}" "{wallet.hotkey_str}" "{wallet.hotkey.ss58_address}" "{curr_block}" "{reveal_block}" "{commit_hash}" "{netuid}" "{uids}" "{weights}" "{salt}" "{version_key}"' + command = f'committed "{wallet.name}" "{wallet.path}" "{wallet.hotkey_str}" "{wallet.hotkey.ss58_address}" "{curr_block}" "{reveal_block}" "{expire_block}" "{commit_hash}" "{netuid}" "{uids}" "{weights}" "{salt}" "{version_key}"' send_command(command) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 03a918e02e..76725888a3 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -203,10 +203,12 @@ def __init__( ) if subprocess_initialization: - subprocess_utils.start_commit_reveal_subprocess( + 
subprocess_utils.start_if_existing_commits( network=network, sleep_interval=subprocess_sleep_interval ) + self.subprocess_initialization = subprocess_initialization + self.subprocess_sleep_interval = subprocess_sleep_interval self.log_verbose = log_verbose self._connection_timeout = connection_timeout self.substrate: "SubstrateInterface" = None @@ -1869,7 +1871,7 @@ def commit_weights( ) if isinstance(weights, list) and all(isinstance(w, float) for w in weights): - _, weights = convert_weights_and_uids_for_emit(uids, weights) # type: ignore + uids, weights = convert_weights_and_uids_for_emit(uids, weights) # type: ignore # Generate the hash of the weights commit_hash = generate_weight_hash( @@ -1896,7 +1898,10 @@ def commit_weights( ) if success: # add to local db if called directly - if subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS): + if self.subprocess_initialization: + if not subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS): + logging.info("Starting commit_reveal subprocess from commit.") + subprocess_utils.start_commit_reveal_subprocess(network=self.chain_endpoint, sleep_interval=self.subprocess_sleep_interval) commit_weights_process( self, wallet=wallet, diff --git a/bittensor/utils/subprocess/commit_reveal.py b/bittensor/utils/subprocess/commit_reveal.py index 71a306202e..1b793c6bbf 100644 --- a/bittensor/utils/subprocess/commit_reveal.py +++ b/bittensor/utils/subprocess/commit_reveal.py @@ -32,6 +32,7 @@ class Commit: netuid (int): The network UID. commit_block (int): The block number at which the commit was made. reveal_block (int): The block number at which the commit will be revealed. + expire_block (int): The block number at which the commit will be expired. uids (List[int]): The list of UIDs. weights (List[int]): The list of weights. salt (List[int]): The salt used for the commit. 
@@ -48,6 +49,7 @@ def __init__( netuid: int, commit_block: int, reveal_block: int, + expire_block: int, uids: List[int], weights: List[int], salt: List[int], @@ -61,6 +63,7 @@ def __init__( self.netuid = netuid self.commit_block = commit_block self.reveal_block = reveal_block + self.expire_block = expire_block self.uids = uids self.weights = weights self.salt = salt @@ -82,6 +85,7 @@ def to_dict(self) -> Dict[str, Any]: "netuid": self.netuid, "commit_block": self.commit_block, "reveal_block": self.reveal_block, + "expire_block": self.expire_block, "uids": json.dumps(self.uids), "weights": json.dumps(self.weights), "salt": json.dumps(self.salt), @@ -108,6 +112,7 @@ def from_dict(data: Dict[str, Any]) -> "Commit": netuid=data["netuid"], commit_block=data["commit_block"], reveal_block=data["reveal_block"], + expire_block=data["expire_block"], uids=json.loads(data["uids"]), weights=json.loads(data["weights"]), salt=json.loads(data["salt"]), @@ -121,7 +126,7 @@ def __str__(self) -> str: Returns: str: String representation of the commit. 
""" - return f"Commit(wallet_hotkey_name={self.wallet_hotkey_name}, wallet_hotkey_ss58={self.wallet_hotkey_ss58}, wallet_name={self.wallet_name}, wallet_path={self.wallet_path}, commit_hash={self.commit_hash}, netuid={self.netuid}, commit_block={self.commit_block}, reveal_block={self.reveal_block}, uids={self.uids}, weights={self.weights}, salt={self.salt}, version_key={self.version_key})" + return f"Commit(wallet_hotkey_name={self.wallet_hotkey_name}, wallet_hotkey_ss58={self.wallet_hotkey_ss58}, wallet_name={self.wallet_name}, wallet_path={self.wallet_path}, commit_hash={self.commit_hash}, netuid={self.netuid}, commit_block={self.commit_block}, reveal_block={self.reveal_block}, expire_block={self.expire_block}, uids={self.uids}, weights={self.weights}, salt={self.salt}, version_key={self.version_key})" def table_exists(table_name: str) -> bool: @@ -179,6 +184,7 @@ def initialize_db(): ("netuid", "INTEGER"), ("commit_block", "INTEGER"), ("reveal_block", "INTEGER"), + ("expire_block", "INTEGER"), ("uids", "TEXT"), ("weights", "TEXT"), ("salt", "TEXT"), @@ -319,6 +325,31 @@ def chain_hash_check(subtensor: Subtensor): print(f"Error during chain_hash_check: {e}") +def delete_expired_commits(current_block: int): + """ + Deletes rows in the database where the current block is greater than the expire_block. + Prints each commit before deleting it. + + Args: + current_block (int): The current block number. + """ + try: + commits = get_all_commits() + if not commits: + print("No commits found in the database.") + return + + with utils.DB(db_path=DB_PATH) as (conn, cursor): + for commit in commits: + if current_block > commit.expire_block: + delete_sql = "DELETE FROM commits WHERE commit_hash=?" + cursor.execute(delete_sql, (commit.commit_hash,)) + conn.commit() + print(f"Current block: {current_block}. 
Deleting expired Commit: {commit}") + except Exception as e: + print(f"Error deleting expired commits: {e}") + + def revealed( wallet_name: str, wallet_path: str, @@ -568,14 +599,15 @@ def handle_client_connection(client_socket: socket.socket): wallet_hotkey_ss58=args[4], wallet_name=args[1], wallet_path=args[2], - commit_hash=args[7], - netuid=int(args[8]), + commit_hash=args[8], + netuid=int(args[9]), commit_block=int(args[5]), reveal_block=int(args[6]), - uids=json.loads(args[9]), - weights=json.loads(args[10]), - salt=json.loads(args[11]), - version_key=int(args[12]), + expire_block=int(args[7]), + uids=json.loads(args[10]), + weights=json.loads(args[11]), + salt=json.loads(args[12]), + version_key=int(args[13]), ) ), "terminate": lambda: terminate_process(None, None), @@ -641,6 +673,7 @@ def main(args: argparse.Namespace): args (argparse.Namespace): The command-line arguments. """ initialize_db() + print(f"initializing subtensor with network: {args.network} and sleep time: {args.sleep_interval} seconds") subtensor = Subtensor(network=args.network, subprocess_initialization=False) server_thread = threading.Thread(target=start_socket_server) server_thread.start() @@ -657,6 +690,7 @@ def main(args: argparse.Namespace): # Every 100th run, perform an additional check to verify reveal list alignment with the backend if counter % 100 == 0: chain_hash_check(subtensor=subtensor) + # delete_expired_commits(current_block=curr_block) time.sleep(args.sleep_interval) diff --git a/bittensor/utils/subprocess_utils.py b/bittensor/utils/subprocess_utils.py index 968547e01d..90f0ea09a5 100644 --- a/bittensor/utils/subprocess_utils.py +++ b/bittensor/utils/subprocess_utils.py @@ -1,5 +1,6 @@ import os import sqlite3 +import time from typing import Optional import subprocess @@ -40,8 +41,8 @@ def is_process_running(process_name: str) -> bool: for proc in psutil.process_iter(["pid", "name", "cmdline"]): cmdline = proc.info["cmdline"] if cmdline and ( - process_name in 
proc.info["name"] - or any(process_name in cmd for cmd in cmdline) + process_name in proc.info["name"] + or any(process_name in cmd for cmd in cmdline) ): return True return False @@ -95,6 +96,39 @@ def read_last_n_lines(file_path: str, n: int) -> list: print(f"STDERR log file not found at {stderr_log}") +def is_table_empty(table_name: str) -> bool: + """ + Checks if a table in the database is empty. + + Args: + table_name (str): The name of the table to check. + + Returns: + bool: True if the table is empty, False otherwise. + """ + try: + columns, rows = read_table(table_name) + if not rows: + print(f"Table '{table_name}' is empty.") + return True + else: + print(f"Table '{table_name}' is not empty.") + return False + except Exception as e: + print(f"Error checking if table '{table_name}' is empty: {e}") + return False + + +def start_if_existing_commits(network: Optional[str] = None, sleep_interval: Optional[float] = None): + # check if table is empty + if not is_table_empty("commits"): + start_commit_reveal_subprocess(network, sleep_interval) + else: + print( + "Existing commits table is empty. Skipping starting commit reveal subprocess until a commit is there." 
+ ) + + def start_commit_reveal_subprocess( network: Optional[str] = None, sleep_interval: Optional[float] = None ): @@ -111,9 +145,12 @@ def start_commit_reveal_subprocess( project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) if not is_process_running(PROCESS_NAME): + from datetime import datetime + current_time = datetime.now().strftime("%Y%m%d_%H%M%S") + # Correctly construct the paths for STDOUT and STDERR log files - stdout_log = os.path.join(LOG_DIR, "commit_reveal_stdout.log") - stderr_log = os.path.join(LOG_DIR, "commit_reveal_stderr.log") + stdout_log = os.path.join(LOG_DIR, f"commit_reveal_stdout_{current_time}.log") + stderr_log = os.path.join(LOG_DIR, f"commit_reveal_stderr_{current_time}.log") os.makedirs(LOG_DIR, exist_ok=True) @@ -139,7 +176,7 @@ def start_commit_reveal_subprocess( env=env, ) print(f"Subprocess '{PROCESS_NAME}' started with PID {process.pid}.") - + time.sleep(1) # wait a second for subprocess to initialize else: print(f"Subprocess '{PROCESS_NAME}' is already running.") @@ -252,3 +289,16 @@ def read_table(table_name: str, order_by: str = "") -> tuple[list, list]: for idx in blob_cols: row[idx] = int.from_bytes(row[idx], byteorder="big") return column_names, rows + + +def delete_all_rows(table_name: str): + """ + Deletes all rows from a table in the SQLite database. + + Args: + table_name (str): The name of the table where all rows should be deleted. 
+ """ + with DB() as (conn, cursor): + delete_query = f"DELETE FROM {table_name}" + cursor.execute(delete_query) + conn.commit() \ No newline at end of file diff --git a/tests/e2e_tests/test_reveal_weights.py b/tests/e2e_tests/test_reveal_weights.py index 3f87daf919..2e67ada75c 100644 --- a/tests/e2e_tests/test_reveal_weights.py +++ b/tests/e2e_tests/test_reveal_weights.py @@ -5,6 +5,7 @@ import bittensor.utils.subprocess.commit_reveal as commit_reveal_subprocess import bittensor from bittensor import logging +from bittensor.utils import subprocess_utils from bittensor.utils.weight_utils import convert_weights_and_uids_for_emit from tests.e2e_tests.utils.chain_interactions import ( add_stake, @@ -501,6 +502,7 @@ async def test_set_and_reveal_batch_weights(local_chain): @pytest.mark.asyncio +@pytest.mark.timeout(120) # 4 minute timeout async def test_set_and_reveal_batch_weights_over_limit(local_chain): """ Tests the commit/reveal batch weights mechanism with 11 commits, which should throw an exception @@ -554,16 +556,16 @@ async def test_set_and_reveal_batch_weights_over_limit(local_chain): local_chain, alice_wallet, call_function="sudo_set_commit_reveal_weights_periods", - call_params={"netuid": netuid, "periods": "2"}, + call_params={"netuid": netuid, "periods": "1"}, return_error_message=True, ) assert ( - subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_periods == 2 + subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_periods == 1 ), "Failed to set commit/reveal periods" assert ( - subtensor.weights_rate_limit(netuid=netuid) > 0 + subtensor.weights_rate_limit(netuid=netuid) > 0 ), "Weights rate limit is below 0" # Lower the rate limit assert sudo_set_hyperparameter_values( @@ -574,7 +576,7 @@ async def test_set_and_reveal_batch_weights_over_limit(local_chain): return_error_message=True, ) assert ( - subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 + 
subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 ), "Failed to set weights_rate_limit" assert subtensor.weights_rate_limit(netuid=netuid) == 0 @@ -607,10 +609,9 @@ async def test_set_and_reveal_batch_weights_over_limit(local_chain): wait_for_inclusion=True, wait_for_finalization=True, ) - print(message) assert success - time.sleep(3) + time.sleep(1) # 11th time (should throw error) # Commit-reveal values @@ -630,5 +631,6 @@ async def test_set_and_reveal_batch_weights_over_limit(local_chain): wait_for_finalization=True, ) - print(message) assert success is False + # remove commits + subprocess_utils.delete_all_rows("commits") From 686b2ccc03a66cc277dc3d2b26db7b97553c935d Mon Sep 17 00:00:00 2001 From: opendansor Date: Fri, 25 Oct 2024 10:58:58 -0700 Subject: [PATCH 23/58] Add max_retries parameter and improve commit log details Added 'max_retries' parameter to set_weights to handle retries. Updated commit log in commit_reveal to include commit_block. Also removed unnecessary part of the check_reveal function docstring for clarity. --- bittensor/core/extrinsics/set_weights.py | 1 + bittensor/utils/subprocess/commit_reveal.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index 1626384f20..4032290870 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -179,6 +179,7 @@ def set_weights_extrinsic( wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, prompt=prompt, + max_retries=1 ) if not wait_for_finalization and not wait_for_inclusion: return True, "Not waiting for finalization or inclusion." 
diff --git a/bittensor/utils/subprocess/commit_reveal.py b/bittensor/utils/subprocess/commit_reveal.py index 1b793c6bbf..5d9054140c 100644 --- a/bittensor/utils/subprocess/commit_reveal.py +++ b/bittensor/utils/subprocess/commit_reveal.py @@ -306,7 +306,7 @@ def chain_hash_check(subtensor: Subtensor): # Check if any commit on Subtensor is absent in local database if not any(c.commit_hash == commit_hash for c in ss58_commits): print( - f"There is a commit on Subtensor (hash: {commit_hash}) that we don't have locally." + f"There is a commit on Subtensor (hash: {commit_hash}, commit_block: {commit_block}) that we don't have locally." ) # Check if any local commit is absent in Subtensor @@ -504,7 +504,7 @@ def get_all_commits() -> List[Commit]: def check_reveal(subtensor: Subtensor) -> bool: """ - Checks if there are any commits to reveal and performs the reveal if necessary. + Checks if there are any commits to reveal. Args: subtensor (Subtensor): The subtensor network object. From 77d097f9bba1dad5f165b10d4c3d4fa3bb672eaf Mon Sep 17 00:00:00 2001 From: opendansor Date: Fri, 25 Oct 2024 12:40:35 -0700 Subject: [PATCH 24/58] Change weight commit behavior and add debug prints Set `wait_for_inclusion` to `True` for weight commits, ensuring inclusion before proceeding. Added debug prints for commit hashes and blocks in the `subtensor` and `commit_reveal` modules to assist with troubleshooting and monitoring. 
--- bittensor/core/subtensor.py | 12 ++++++++++-- bittensor/utils/subprocess/commit_reveal.py | 15 +++++++++++---- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 76725888a3..18832ea5ef 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -1834,7 +1834,7 @@ def commit_weights( uids: Union[NDArray[np.int64], list], weights: Union[NDArray[np.int64], list], version_key: int = settings.version_as_int, - wait_for_inclusion: bool = False, + wait_for_inclusion: bool = True, wait_for_finalization: bool = False, prompt: bool = False, max_retries: int = 5, @@ -1885,7 +1885,7 @@ def commit_weights( logging.info(f"Commit Hash: {commit_hash}") - while retries < max_retries: + while retries < max_retries and not success: try: success, message = commit_weights_extrinsic( subtensor=self, @@ -1912,6 +1912,14 @@ def commit_weights( salt=salt, version_key=version_key, ) + print("This node has these commits now: ") + response = self.query_module( + module="SubtensorModule", + name="WeightCommits", + params=[netuid, wallet.hotkey.ss58_address], + ) + for commit_hash, commit_block in response.value: + print(f"commit: {commit_hash}, block: {commit_block}") break except Exception as e: logging.error(f"Error committing weights: {e}") diff --git a/bittensor/utils/subprocess/commit_reveal.py b/bittensor/utils/subprocess/commit_reveal.py index 5d9054140c..92109c76d7 100644 --- a/bittensor/utils/subprocess/commit_reveal.py +++ b/bittensor/utils/subprocess/commit_reveal.py @@ -281,7 +281,7 @@ def chain_hash_check(subtensor: Subtensor): try: # Retrieve all commits from the local database commits = get_all_commits() - + print(f"chain hash check commits:\n {commits}") # Group commits by wallet_hotkey_ss58 commits_by_ss58 = {} for commit in commits: @@ -294,6 +294,7 @@ def chain_hash_check(subtensor: Subtensor): for ss58, ss58_commits in commits_by_ss58.items(): # Get a set of unique netuids from 
the commits netuids = set(commit.netuid for commit in ss58_commits) + print("Checking subtensor commits: ") for netuid in netuids: # Query the subtensor backend for commit hashes and blocks response = subtensor.query_module( @@ -303,6 +304,7 @@ def chain_hash_check(subtensor: Subtensor): ) for commit_hash, commit_block in response.value: + print(f"subtensor has commit: {commit_hash}, block: {commit_block}") # Check if any commit on Subtensor is absent in local database if not any(c.commit_hash == commit_hash for c in ss58_commits): print( @@ -458,11 +460,15 @@ def revealed_batch_hash(commit_hashes: List[str]): commit_hashes (List[str]): The list of commit hashes. """ try: + if not commit_hashes: + print("No commit hashes") + return with utils.DB(db_path=DB_PATH) as (conn, cursor): for commit_hash in commit_hashes: sql = "SELECT COUNT(*) FROM commits WHERE commit_hash=?" cursor.execute(sql, (commit_hash,)) count = cursor.fetchone()[0] + print(f"count of revealed batch hash: {count}") if count > 0: delete_sql = "DELETE FROM commits WHERE commit_hash=?" cursor.execute(delete_sql, (commit_hash,)) @@ -488,7 +494,7 @@ def committed(commit: Commit): sql = f"INSERT INTO commits ({column_names}) VALUES ({data})" cursor.execute(sql, tuple(commit_data.values())) conn.commit() - print(f"Committed commit data: {commit_data}") + print(f"Committed data: {commit_data}") def get_all_commits() -> List[Commit]: @@ -529,7 +535,7 @@ def check_reveal(subtensor: Subtensor) -> bool: return False -def reveal_candidates(subtensor: Subtensor): +def reveal_commits(subtensor: Subtensor): """ Performs reveal on commits that are ready to be revealed. 
@@ -684,11 +690,12 @@ def main(args: argparse.Namespace): counter += 1 curr_block = subtensor.get_current_block() if check_reveal(subtensor=subtensor): - reveal_candidates(subtensor=subtensor) print(f"Revealing commit on block {curr_block}") + reveal_commits(subtensor=subtensor) # Every 100th run, perform an additional check to verify reveal list alignment with the backend if counter % 100 == 0: + print("\nDoing chain hash check:") chain_hash_check(subtensor=subtensor) # delete_expired_commits(current_block=curr_block) From 727ea44c4c72f2a9bb377124c31b5652c47cc865 Mon Sep 17 00:00:00 2001 From: opendansor Date: Tue, 29 Oct 2024 14:39:02 -0700 Subject: [PATCH 25/58] Refactor subprocess to only delete old commits + update response from subtensor + add revealed flag This commit adjusts the commit weights process by ensuring the commit_reveal subprocess starts appropriately, checking for its readiness, and syncing commit data. It also simplifies the test cases and enhances the subprocess logging mechanism. --- bittensor/core/extrinsics/commit_weights.py | 21 +- bittensor/core/subtensor.py | 42 +-- bittensor/utils/subprocess/commit_reveal.py | 387 +++++++++----------- bittensor/utils/subprocess_utils.py | 70 +++- tests/e2e_tests/test_commit_weights.py | 2 +- tests/e2e_tests/test_reveal_weights.py | 112 ++---- 6 files changed, 307 insertions(+), 327 deletions(-) diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index bef8098079..928799ed32 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -16,7 +16,7 @@ # DEALINGS IN THE SOFTWARE. 
"""Module commit weights and reveal weights extrinsic.""" - +import json from typing import Optional, TYPE_CHECKING import socket from retry import retry @@ -155,6 +155,7 @@ def commit_weights_process( weights: list[int], salt: list[int], version_key: int = settings.version_as_int, + block: Optional[int] = None, ): """ Lets the subprocess know what a commit was submitted to the chain. @@ -168,6 +169,7 @@ def commit_weights_process( weights (list[int]): List of weight values corresponding to each UID. salt (list[int]): List of salt values for the hash function. version_key (int): Version key for network compatibility (default is settings.version_as_int). + block (Optional[int]): Specific block number to use (default is None). The function calculates the necessary blocks until the next epoch and the reveal block, then the subprocess will wait until the appropriate time to reveal the weights. @@ -179,7 +181,7 @@ def send_command(command): client.send(command.encode()) client.close() - curr_block = subtensor.get_current_block() + curr_block = block if block is not None else subtensor.get_current_block() blocks_until_next_epoch = subtensor.blocks_until_next_epoch(netuid=netuid) subnet_tempo_blocks = subtensor.get_subnet_hyperparameters(netuid=netuid).tempo epoch_start_block = curr_block + blocks_until_next_epoch @@ -187,7 +189,7 @@ def send_command(command): netuid=netuid ).commit_reveal_periods reveal_block = epoch_start_block + ((cr_periods - 1) * subnet_tempo_blocks) + 1 - expire_block = reveal_block + subnet_tempo_blocks - 1 + expire_block = reveal_block + subnet_tempo_blocks command = f'committed "{wallet.name}" "{wallet.path}" "{wallet.hotkey_str}" "{wallet.hotkey.ss58_address}" "{curr_block}" "{reveal_block}" "{expire_block}" "{commit_hash}" "{netuid}" "{uids}" "{weights}" "{salt}" "{version_key}"' send_command(command) @@ -365,9 +367,9 @@ def send_command(command): version_key=version_key, ) command = f'revealed_hash "{commit_hash}"' + send_command(command) 
except Exception as e: - command = f'revealed "{wallet.name}" "{wallet.path}" "{wallet.hotkey_str}" "{wallet.hotkey.ss58_address}" "{netuid}" "{uids}" "{weights}" "{salt}" "{version_key}"' - send_command(command) + logging.error(f"Not able to generate hash to reveal weights on subprocess: {e}") # Chain call for `batch_reveal_weights_extrinsic` @@ -532,8 +534,9 @@ def send_command(command): client.close() try: + commit_hashes = [] for batch_uids, batch_weights, batch_salt, batch_version_key in zip( - uids, weights, salt, version_keys + uids, weights, salt, version_keys ): # Generate the hash of the weights for each individual batch commit_hash = generate_weight_hash( @@ -544,7 +547,9 @@ def send_command(command): salt=batch_salt, version_key=batch_version_key, ) - command = f'revealed_hash "{commit_hash}"' - send_command(command) + commit_hashes.append(commit_hash) + + command = f'revealed_hash_batch {json.dumps(commit_hashes)}' + send_command(command) except Exception as e: logging.error(f"Failed batch reveal weights subprocess: {e}") diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 18832ea5ef..495f93aa56 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -23,6 +23,7 @@ import argparse import copy import socket +import time from typing import Union, Optional, TypedDict, Any import numpy as np @@ -1870,6 +1871,11 @@ def commit_weights( f"Committing weights with params: netuid={netuid}, uids={uids}, weights={weights}, version_key={version_key}" ) + # start subprocess if permitted and not yet running + if self.subprocess_initialization and not subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS): + logging.info("Starting commit_reveal subprocess from commit.") + subprocess_utils.start_commit_reveal_subprocess(network=self.chain_endpoint, sleep_interval=self.subprocess_sleep_interval) + if isinstance(weights, list) and all(isinstance(w, float) for w in weights): uids, weights = 
convert_weights_and_uids_for_emit(uids, weights) # type: ignore @@ -1887,6 +1893,19 @@ def commit_weights( while retries < max_retries and not success: try: + if subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS): + curr_block = self.get_current_block() + commit_weights_process( + self, + wallet=wallet, + netuid=netuid, + commit_hash=commit_hash, + uids=list(uids), + weights=list(weights), + salt=salt, + version_key=version_key, + block=curr_block + ) success, message = commit_weights_extrinsic( subtensor=self, wallet=wallet, @@ -1897,29 +1916,6 @@ def commit_weights( prompt=prompt, ) if success: - # add to local db if called directly - if self.subprocess_initialization: - if not subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS): - logging.info("Starting commit_reveal subprocess from commit.") - subprocess_utils.start_commit_reveal_subprocess(network=self.chain_endpoint, sleep_interval=self.subprocess_sleep_interval) - commit_weights_process( - self, - wallet=wallet, - netuid=netuid, - commit_hash=commit_hash, - uids=list(uids), - weights=list(weights), - salt=salt, - version_key=version_key, - ) - print("This node has these commits now: ") - response = self.query_module( - module="SubtensorModule", - name="WeightCommits", - params=[netuid, wallet.hotkey.ss58_address], - ) - for commit_hash, commit_block in response.value: - print(f"commit: {commit_hash}, block: {commit_block}") break except Exception as e: logging.error(f"Error committing weights: {e}") diff --git a/bittensor/utils/subprocess/commit_reveal.py b/bittensor/utils/subprocess/commit_reveal.py index 92109c76d7..e609b22db0 100644 --- a/bittensor/utils/subprocess/commit_reveal.py +++ b/bittensor/utils/subprocess/commit_reveal.py @@ -37,6 +37,7 @@ class Commit: weights (List[int]): The list of weights. salt (List[int]): The salt used for the commit. version_key (int): The version key. + revealed (bool): Whether the commit has been revealed. 
""" def __init__( @@ -54,6 +55,7 @@ def __init__( weights: List[int], salt: List[int], version_key: int, + revealed: bool = False, ): self.wallet_hotkey_name = wallet_hotkey_name self.wallet_hotkey_ss58 = wallet_hotkey_ss58 @@ -68,6 +70,7 @@ def __init__( self.weights = weights self.salt = salt self.version_key = version_key + self.revealed = revealed def to_dict(self) -> Dict[str, Any]: """ @@ -90,6 +93,7 @@ def to_dict(self) -> Dict[str, Any]: "weights": json.dumps(self.weights), "salt": json.dumps(self.salt), "version_key": self.version_key, + "revealed": self.revealed, } @staticmethod @@ -117,6 +121,7 @@ def from_dict(data: Dict[str, Any]) -> "Commit": weights=json.loads(data["weights"]), salt=json.loads(data["salt"]), version_key=data["version_key"], + revealed=data.get("revealed", False), # Default to False if not present ) def __str__(self) -> str: @@ -126,7 +131,11 @@ def __str__(self) -> str: Returns: str: String representation of the commit. """ - return f"Commit(wallet_hotkey_name={self.wallet_hotkey_name}, wallet_hotkey_ss58={self.wallet_hotkey_ss58}, wallet_name={self.wallet_name}, wallet_path={self.wallet_path}, commit_hash={self.commit_hash}, netuid={self.netuid}, commit_block={self.commit_block}, reveal_block={self.reveal_block}, expire_block={self.expire_block}, uids={self.uids}, weights={self.weights}, salt={self.salt}, version_key={self.version_key})" + return (f"Commit(wallet_hotkey_name={self.wallet_hotkey_name}, wallet_hotkey_ss58={self.wallet_hotkey_ss58}, " + f"wallet_name={self.wallet_name}, wallet_path={self.wallet_path}, commit_hash={self.commit_hash}, " + f"netuid={self.netuid}, commit_block={self.commit_block}, reveal_block={self.reveal_block}, " + f"expire_block={self.expire_block}, uids={self.uids}, weights={self.weights}, salt={self.salt}, " + f"version_key={self.version_key}, revealed={self.revealed})") def table_exists(table_name: str) -> bool: @@ -189,6 +198,7 @@ def initialize_db(): ("weights", "TEXT"), ("salt", "TEXT"), 
("version_key", "INTEGER"), + ("revealed", "BOOLEAN DEFAULT FALSE"), ] if not table_exists("commits"): print("Creating table 'commits'...") @@ -222,7 +232,7 @@ def reveal(subtensor: Subtensor, commit: Commit): ) del wallet if success: - revealed_hash(commit.commit_hash) + revealed_commit(commit.commit_hash) print(f"Reveal success for commit {commit}") else: print(f"Reveal failure for commit: {message}") @@ -265,74 +275,94 @@ def reveal_batch(subtensor: Subtensor, commits: List[Commit]): if success: for commit in commits: - revealed_hash(commit.commit_hash) + revealed_commit(commit.commit_hash) print(f"Reveal success for batch commit: {commit}") else: print(f"Reveal failure for batch commits: {message}") -def chain_hash_check(subtensor: Subtensor): +def sync_commit_data(matching_commit, commit_block, reveal_block, expire_block): + """ + Sync the commit data with the given block details. + + Args: + matching_commit (Commit): The local commit object to be synced. + commit_block (int): The block at which the commit occurred. + reveal_block (int): The block at which the commit was revealed. + expire_block (int): The block at which the commit will expire. + """ + try: + matching_commit.commit_block = commit_block + matching_commit.reveal_block = reveal_block + matching_commit.expire_block = expire_block + + with utils.DB(db_path=DB_PATH) as (conn, cursor): + update_sql = """ + UPDATE commits + SET commit_block=?, reveal_block=?, expire_block=? + WHERE commit_hash=? + """ + cursor.execute(update_sql, + (commit_block, reveal_block, expire_block, matching_commit.commit_hash) + ) + conn.commit() + print( + f"Updated commit {matching_commit.commit_hash} with commit_block={commit_block}, reveal_block={reveal_block}, expire_block={expire_block}") + except Exception as e: + print(f"Error updating commit data: {e}") + + +def chain_hash_sync(subtensor: Subtensor, current_block: int): """ Perform a verification to check if the local reveal list is consistent with the chain. 
Args: + current_block (int): The current block number. subtensor (Subtensor): The subtensor network object. """ try: # Retrieve all commits from the local database commits = get_all_commits() - print(f"chain hash check commits:\n {commits}") + # Filter commits to only those that are not revealed + commits = [commit for commit in commits if not commit.revealed] + # Group commits by wallet_hotkey_ss58 - commits_by_ss58 = {} - for commit in commits: - ss58 = commit.wallet_hotkey_ss58 - if ss58 not in commits_by_ss58: - commits_by_ss58[ss58] = [] - commits_by_ss58[ss58].append(commit) - - if commits_by_ss58: - for ss58, ss58_commits in commits_by_ss58.items(): - # Get a set of unique netuids from the commits - netuids = set(commit.netuid for commit in ss58_commits) - print("Checking subtensor commits: ") - for netuid in netuids: - # Query the subtensor backend for commit hashes and blocks + if commits: + unique_combinations = list({(commit.wallet_hotkey_ss58, commit.netuid) for commit in commits}) + + for combination in unique_combinations: + ss58, netuid = combination + try: response = subtensor.query_module( module="SubtensorModule", name="WeightCommits", params=[netuid, ss58], ) - for commit_hash, commit_block in response.value: - print(f"subtensor has commit: {commit_hash}, block: {commit_block}") - # Check if any commit on Subtensor is absent in local database - if not any(c.commit_hash == commit_hash for c in ss58_commits): - print( - f"There is a commit on Subtensor (hash: {commit_hash}, commit_block: {commit_block}) that we don't have locally." - ) - - # Check if any local commit is absent in Subtensor - local_commit_hashes = {c.commit_hash for c in ss58_commits} - subtensor_commit_hashes = { - commit_hash for commit_hash, _ in response.value - } - - for local_commit_hash in local_commit_hashes: - if local_commit_hash not in subtensor_commit_hashes: - print( - f"There is a local commit (hash: {local_commit_hash}) that is not on Subtensor." 
- ) - revealed_hash(local_commit_hash) + for commit_hash, commit_block, reveal_block, expire_block in response.value: + if expire_block < current_block: + continue + if any(c.commit_hash == commit_hash for c in commits) and reveal_block <= current_block: + matching_commit = next((commit for commit in commits if commit.commit_hash == commit_hash), + None) + if matching_commit: + if commit_block != matching_commit.commit_block or reveal_block != matching_commit.reveal_block or expire_block != matching_commit.expire_block: + sync_commit_data(matching_commit, commit_block, reveal_block, expire_block) + else: + print(f"Could not find matching commit for hash: {commit_hash}") + except Exception as e: + print(f"Error during subtensor query chain sync: {e}") except Exception as e: - print(f"Error during chain_hash_check: {e}") + print(f"Error during chain_hash_sync: {e}") -def delete_expired_commits(current_block: int): +def delete_old_commits(current_block: int, offset: int): """ Deletes rows in the database where the current block is greater than the expire_block. Prints each commit before deleting it. Args: + offset (int): The expired block offset to delete expired commits. current_block (int): The current block number. """ try: @@ -343,7 +373,7 @@ def delete_expired_commits(current_block: int): with utils.DB(db_path=DB_PATH) as (conn, cursor): for commit in commits: - if current_block > commit.expire_block: + if current_block + offset < commit.expire_block: delete_sql = "DELETE FROM commits WHERE commit_hash=?" 
cursor.execute(delete_sql, (commit.commit_hash,)) conn.commit() @@ -352,86 +382,9 @@ def delete_expired_commits(current_block: int): print(f"Error deleting expired commits: {e}") -def revealed( - wallet_name: str, - wallet_path: str, - wallet_hotkey_str: str, - wallet_hotkey_ss58: str, - netuid: int, - uids: List[int], - weights: List[int], - salt: List[int], - version_key: int, -): - """ - Handles the revealed command by removing the corresponding commit from the database. - - Args: - wallet_name (str): The wallet name. - wallet_path (str): The path to the wallet. - wallet_hotkey_str (str): The wallet hotkey as a string. - wallet_hotkey_ss58 (str): The wallet hotkey SS58 address. - netuid (int): The network UID. - uids (List[int]): The list of UIDs. - weights (List[int]): The list of weights. - salt (List[int]): The salt used for the commit. - version_key (int): The version key. - """ - try: - with utils.DB(db_path=DB_PATH) as (conn, cursor): - sql = ( - "SELECT COUNT(*) FROM commits WHERE wallet_hotkey_str=? AND wallet_hotkey_ss58=? AND wallet_name=? AND wallet_path=? AND netuid=? AND " - "uids=? AND weights=? AND salt=? AND version_key=?" - ) - cursor.execute( - sql, - ( - wallet_hotkey_str, - wallet_hotkey_ss58, - wallet_name, - wallet_path, - netuid, - json.dumps(uids), - json.dumps(weights), - json.dumps(salt), - version_key, - ), - ) - count = cursor.fetchone()[0] - if count > 0: - delete_sql = ( - "DELETE FROM commits WHERE wallet_hotkey_str=? AND wallet_hotkey_ss58=? AND wallet_name=? AND wallet_path=? AND netuid=? AND " - "uids=? AND weights=? AND salt=? AND version_key=?" 
- ) - cursor.execute( - delete_sql, - ( - wallet_hotkey_str, - wallet_hotkey_ss58, - wallet_name, - wallet_path, - netuid, - json.dumps(uids), - json.dumps(weights), - json.dumps(salt), - version_key, - ), - ) - conn.commit() - print( - f"Deleted existing row with specified data: wallet_hotkey_str={wallet_hotkey_str}, wallet_hotkey_ss58={wallet_hotkey_ss58}, wallet_name={wallet_name}, wallet_path={wallet_path}, netuid={netuid}, uids={uids}, weights={weights}, salt={salt}, version_key={version_key}" - ) - else: - print( - f"No existing row found with specified data: wallet_hotkey_str={wallet_hotkey_str}, wallet_hotkey_ss58={wallet_hotkey_ss58}, wallet_name={wallet_name}, wallet_path={wallet_path}, netuid={netuid}, uids={uids}, weights={weights}, salt={salt}, version_key={version_key}" - ) - except Exception as e: - print(f"Error removing from table 'commits': {e}") - - -def revealed_hash(commit_hash: str): +def revealed_commit(commit_hash: str): """ - Handles the revealed_hash command by removing the corresponding commit from the database using the commit hash. + Handles the revealed_hash command by updating the revealed status on the corresponding commit from the database using the commit hash. Args: commit_hash (str): The commit hash. @@ -442,17 +395,18 @@ def revealed_hash(commit_hash: str): cursor.execute(sql, (commit_hash,)) count = cursor.fetchone()[0] if count > 0: - delete_sql = "DELETE FROM commits WHERE commit_hash=?" - cursor.execute(delete_sql, (commit_hash,)) + # Update the revealed status in the database + update_sql = "UPDATE commits SET revealed = ? WHERE commit_hash = ?" 
+ cursor.execute(update_sql, (True, commit_hash)) conn.commit() - print(f"\nDeleted existing row with commit hash {commit_hash}") + print(f"\nUpdated revealed status on existing row with commit hash {commit_hash}") else: print(f"\nNo existing row found with commit hash {commit_hash}") except Exception as e: - print(f"Error removing from table 'commits': {e}") + print(f"Error updating from table 'commits': {e}") -def revealed_batch_hash(commit_hashes: List[str]): +def revealed_commit_batch(commit_hashes: List[str]): """ Handles the revealed_batch_hash command by removing the corresponding commits from the database using the commit hashes. @@ -461,23 +415,21 @@ def revealed_batch_hash(commit_hashes: List[str]): """ try: if not commit_hashes: - print("No commit hashes") return with utils.DB(db_path=DB_PATH) as (conn, cursor): for commit_hash in commit_hashes: sql = "SELECT COUNT(*) FROM commits WHERE commit_hash=?" cursor.execute(sql, (commit_hash,)) count = cursor.fetchone()[0] - print(f"count of revealed batch hash: {count}") if count > 0: - delete_sql = "DELETE FROM commits WHERE commit_hash=?" - cursor.execute(delete_sql, (commit_hash,)) + update_sql = "UPDATE commits SET revealed = ? WHERE commit_hash = ?" + cursor.execute(update_sql, (True, commit_hash)) conn.commit() - print(f"\nDeleted existing row with commit hash {commit_hash}") + print(f"\nUpdated revealed status on existing row with commit hash {commit_hash}") else: print(f"\nNo existing row found with commit hash {commit_hash}") except Exception as e: - print(f"Error removing from table 'commits': {e}") + print(f"Error updating from table 'commits': {e}") def committed(commit: Commit): @@ -508,25 +460,25 @@ def get_all_commits() -> List[Commit]: return [Commit.from_dict(dict(zip(columns, commit))) for commit in rows] -def check_reveal(subtensor: Subtensor) -> bool: +def check_reveal(curr_block: int) -> bool: """ Checks if there are any commits to reveal. 
Args: - subtensor (Subtensor): The subtensor network object. + curr_block (int): The current block number. Returns: bool: True if a commit was revealed, False otherwise. """ try: commits = get_all_commits() + commits = [commit for commit in commits if not commit.revealed] + except Exception as e: print(f"Error reading table 'commits': {e}") return False if commits: - curr_block = subtensor.get_current_block() - # Filter for commits that are ready to be revealed reveal_candidates = [ commit for commit in commits if commit.reveal_block <= curr_block @@ -535,43 +487,59 @@ def check_reveal(subtensor: Subtensor) -> bool: return False -def reveal_commits(subtensor: Subtensor): +def reveal_commits(subtensor: Subtensor, current_block: int): """ Performs reveal on commits that are ready to be revealed. Args: + current_block(int): The current block number. subtensor (Subtensor): The subtensor network object. """ try: commits = get_all_commits() + commits = [commit for commit in commits if not commit.revealed] + if commits: + unique_combinations = list({(commit.wallet_hotkey_ss58, commit.netuid) for commit in commits}) + print(f"Unique ss58,netuid combinations: {unique_combinations}") + + for combination in unique_combinations: + ss58, netuid = combination + ready_to_reveal = [] + try: + response = subtensor.query_module( + module="SubtensorModule", + name="WeightCommits", + params=[netuid, ss58], + ) + + if not response.value: + print(f"No commits found for {combination}") + continue + + for commit_hash, commit_block, reveal_block, expire_block in response.value: + if expire_block < current_block: + print(f"Commit {commit_hash} is expired.") + continue + if any(c.commit_hash == commit_hash for c in + commits) and reveal_block <= current_block <= expire_block: + matching_commit = next((commit for commit in commits if commit.commit_hash == commit_hash), + None) + if matching_commit: + print(f"found matching commit {matching_commit}") + ready_to_reveal.append(matching_commit) + 
else: + print(f"Could not find commit hash {commit_hash} locally.") + + if len(ready_to_reveal) > 1: + reveal_batch(subtensor, ready_to_reveal) + elif len(ready_to_reveal) == 1: + reveal(subtensor, ready_to_reveal[0]) + + except Exception as e: + print(f"Error querying expected hashes for {combination}: {e}") except Exception as e: print(f"Error reading table 'commits': {e}") - if commits: - curr_block = subtensor.get_current_block() - - # Filter for commits that are ready to be revealed - ready_for_reveal = [ - commit for commit in commits if commit.reveal_block <= curr_block - ] - if ready_for_reveal: - # Group commits by wallet_hotkey_ss58 - grouped_reveals = {} - for commit in ready_for_reveal: - key = commit.wallet_hotkey_ss58 - if key not in grouped_reveals: - grouped_reveals[key] = [] - grouped_reveals[key].append(commit) - - # Process each group separately - for hotkey_ss58, group in grouped_reveals.items(): - if len(group) > 1: - # Batch reveal if there are 2 or more reveal candidates - reveal_batch(subtensor, group) - else: - # Otherwise, reveal individually - reveal(subtensor, group[0]) - def handle_client_connection(client_socket: socket.socket): """ @@ -584,47 +552,51 @@ def handle_client_connection(client_socket: socket.socket): request = client_socket.recv(1024).decode() if not request: break - args = shlex.split(request) - command = args[0] - commands = { - "revealed": lambda: revealed( - args[1], - args[2], - args[3], - args[4], - int(args[5]), - json.loads(args[6]), - json.loads(args[7]), - json.loads(args[8]), - int(args[9]), - ), - "revealed_hash": lambda: revealed_hash(args[1]), - "committed": lambda: committed( - Commit( - wallet_hotkey_name=args[3], - wallet_hotkey_ss58=args[4], - wallet_name=args[1], - wallet_path=args[2], - commit_hash=args[8], - netuid=int(args[9]), - commit_block=int(args[5]), - reveal_block=int(args[6]), - expire_block=int(args[7]), - uids=json.loads(args[10]), - weights=json.loads(args[11]), - salt=json.loads(args[12]), 
- version_key=int(args[13]), - ) - ), - "terminate": lambda: terminate_process(None, None), - } - if command in commands: + print(f"Received request: {request}") + + if request.startswith('revealed_hash_batch'): try: - commands[command]() - except (IndexError, ValueError, json.JSONDecodeError) as e: - print(f"Error in processing command {command}: {e}") + command = 'revealed_hash_batch' + json_start_index = request.index('[') + json_payload = request[json_start_index:] + args = json.loads(json_payload) + revealed_commit_batch(args) + except json.JSONDecodeError as e: + print(f"Error decoding JSON for {command}: {e}") + except Exception as e: + print(f"Error processing {command}: {e}") else: - print(f"Command not recognized: {command}") + args = shlex.split(request) + command = args[0] + commands = { + "revealed_hash": lambda: revealed_commit(args[1]), + "revealed_hash_batch": lambda: revealed_commit_batch(json.loads(args[1])), + "committed": lambda: committed( + Commit( + wallet_hotkey_name=args[3], + wallet_hotkey_ss58=args[4], + wallet_name=args[1], + wallet_path=args[2], + commit_hash=args[8], + netuid=int(args[9]), + commit_block=int(args[5]), + reveal_block=int(args[6]), + expire_block=int(args[7]), + uids=json.loads(args[10]), + weights=json.loads(args[11]), + salt=json.loads(args[12]), + version_key=int(args[13]), + ) + ), + "terminate": lambda: terminate_process(None, None), + } + if command in commands: + try: + commands[command]() + except (IndexError, ValueError, json.JSONDecodeError) as e: + print(f"Error in processing command {command}: {e}") + else: + print(f"Command not recognized: {command}") except socket.error as e: print(f"Socket error: {e}") except Exception as e: @@ -650,7 +622,6 @@ def start_socket_server(): print(f"Accepted connection from {addr[0]}.") executor.submit(handle_client_connection, client_sock) except socket.timeout: - # print("Socket timeout, continuing to listen...") continue except Exception as e: print(f"Error accepting 
connection: {e}.") @@ -685,19 +656,19 @@ def main(args: argparse.Namespace): server_thread.start() counter = 0 # Initialize counter - + print("commit_reveal subprocess is ready") while running: counter += 1 curr_block = subtensor.get_current_block() - if check_reveal(subtensor=subtensor): + if check_reveal(curr_block): print(f"Revealing commit on block {curr_block}") - reveal_commits(subtensor=subtensor) + reveal_commits(subtensor=subtensor, current_block=curr_block) # Every 100th run, perform an additional check to verify reveal list alignment with the backend if counter % 100 == 0: - print("\nDoing chain hash check:") - chain_hash_check(subtensor=subtensor) - # delete_expired_commits(current_block=curr_block) + print("\nDoing chain hash sync:") + chain_hash_sync(subtensor=subtensor, current_block=curr_block) + delete_old_commits(current_block=curr_block, offset=1000) time.sleep(args.sleep_interval) diff --git a/bittensor/utils/subprocess_utils.py b/bittensor/utils/subprocess_utils.py index 90f0ea09a5..23a3af9e54 100644 --- a/bittensor/utils/subprocess_utils.py +++ b/bittensor/utils/subprocess_utils.py @@ -1,9 +1,11 @@ import os +import re import sqlite3 +import subprocess import time +from datetime import datetime from typing import Optional -import subprocess import psutil LOG_DIR = os.path.expanduser("~/.bittensor/logs") @@ -13,7 +15,7 @@ os.makedirs(LOG_DIR, exist_ok=True) -def get_pid_log_files() -> tuple[str, str]: +def get_cr_log_files() -> tuple[str, str]: """ Get the log files for the current running process. 
Returns: @@ -23,8 +25,28 @@ def get_pid_log_files() -> tuple[str, str]: if pid is None: raise RuntimeError(f"Process '{PROCESS_NAME}' is not running.") - stdout_log = os.path.join(LOG_DIR, f"commit_reveal_stdout_{pid}.log") - stderr_log = os.path.join(LOG_DIR, f"commit_reveal_stderr_{pid}.log") + # Define a regex pattern to match log files with timestamps + log_pattern = re.compile(r"commit_reveal_(stdout|stderr)_(\d{8}_\d{6})\.log") + + stdout_log = None + stderr_log = None + latest_timestamp = None + + for log_file in os.listdir(LOG_DIR): + match = log_pattern.match(log_file) + if match: + log_type, timestamp = match.groups() + # Update latest log files if this file is more recent + if latest_timestamp is None or timestamp > latest_timestamp: + latest_timestamp = timestamp + if log_type == "stdout": + stdout_log = os.path.join(LOG_DIR, log_file) + elif log_type == "stderr": + stderr_log = os.path.join(LOG_DIR, log_file) + + if not (stdout_log and stderr_log): + raise RuntimeError("Log files not found or incomplete.") + return stdout_log, stderr_log @@ -68,12 +90,38 @@ def get_process(process_name: str) -> Optional[int]: return None +def is_commit_reveal_subprocess_ready() -> bool: + """ + Check the logs for the message 'commit_reveal subprocess is ready' and return True if it's found. + + Returns: + bool: True if the message is found in the logs, False otherwise. 
+ """ + try: + stdout_log, stderr_log = get_cr_log_files() + except RuntimeError as e: + print(str(e)) + return False + + def check_message_in_log(file_path: str, message: str) -> bool: + """Check if a specific message is present in the log file.""" + if os.path.exists(file_path): + with open(file_path, "r") as file: + for line in file: + if message in line: + return True + return False + + message = "commit_reveal subprocess is ready" + return check_message_in_log(stdout_log, message) or check_message_in_log(stderr_log, message) + + def read_commit_reveal_logs(): """ Read and print the last 50 lines of logs from the most recent subprocess log. """ try: - stdout_log, stderr_log = get_pid_log_files() + stdout_log, stderr_log = get_cr_log_files() except RuntimeError as e: print(str(e)) return @@ -145,7 +193,6 @@ def start_commit_reveal_subprocess( project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) if not is_process_running(PROCESS_NAME): - from datetime import datetime current_time = datetime.now().strftime("%Y%m%d_%H%M%S") # Correctly construct the paths for STDOUT and STDERR log files @@ -176,7 +223,16 @@ def start_commit_reveal_subprocess( env=env, ) print(f"Subprocess '{PROCESS_NAME}' started with PID {process.pid}.") - time.sleep(1) # wait a second for subprocess to initialize + + attempt_count = 0 + + while not is_commit_reveal_subprocess_ready() and attempt_count < 5: + time.sleep(3) + print("Waiting for commit_reveal subprocess to be ready.") + attempt_count += 1 + + if attempt_count >= 5: + print("Max attempts reached. 
Subprocess may not be ready.") else: print(f"Subprocess '{PROCESS_NAME}' is already running.") diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py index 75f339f2d2..66ba68d929 100644 --- a/tests/e2e_tests/test_commit_weights.py +++ b/tests/e2e_tests/test_commit_weights.py @@ -122,7 +122,7 @@ async def test_commit_and_reveal_weights(local_chain): ) # Assert that the committed weights are set correctly assert weight_commits.value is not None, "Weight commit not found in storage" - commit_hash, commit_block = weight_commits.value[0] + commit_hash, commit_block, reveal_block, expire_block = weight_commits.value[0] assert commit_block > 0, f"Invalid block number: {commit_block}" # Query the WeightCommitRevealInterval storage map diff --git a/tests/e2e_tests/test_reveal_weights.py b/tests/e2e_tests/test_reveal_weights.py index 2e67ada75c..ee259a8f81 100644 --- a/tests/e2e_tests/test_reveal_weights.py +++ b/tests/e2e_tests/test_reveal_weights.py @@ -105,9 +105,6 @@ async def test_commit_and_reveal_weights(local_chain): uids=uids, weights=weights ) - # Assert no local CR processes in table - assert commit_reveal_subprocess.is_table_empty("commits") - # Commit weights success, message = subtensor.commit_weights( alice_wallet, @@ -127,7 +124,7 @@ async def test_commit_and_reveal_weights(local_chain): # Assert that the committed weights are set correctly assert weight_commits.value is not None, "Weight commit not found in storage" - commit_hash, commit_block = weight_commits.value[0] + commit_hash, commit_block, reveal_block, expire_block = weight_commits.value[0] assert commit_block > 0, f"Invalid block number: {commit_block}" # Query the WeightCommitRevealInterval storage map @@ -148,9 +145,6 @@ async def test_commit_and_reveal_weights(local_chain): # allow one more block to pass time.sleep(12) - # Verify that subprocess did the reveal and deleted entry from local table - assert commit_reveal_subprocess.is_table_empty("commits") - # 
Query the Weights storage map revealed_weights = subtensor.query_module( module="SubtensorModule", @@ -253,9 +247,6 @@ async def test_set_and_reveal_weights(local_chain): uids=uids, weights=weights ) - # Assert no local CR processes in table - assert commit_reveal_subprocess.is_table_empty("commits") - # Set weights with CR enabled success, message = subtensor.set_weights( alice_wallet, @@ -274,7 +265,7 @@ async def test_set_and_reveal_weights(local_chain): # Assert that the committed weights are set correctly assert weight_commits.value is not None, "Weight commit not found in storage" - commit_hash, commit_block = weight_commits.value[0] + commit_hash, commit_block, reveal_block, expire_block = weight_commits.value[0] assert commit_block > 0, f"Invalid block number: {commit_block}" # Query the WeightCommitRevealInterval storage map @@ -295,9 +286,6 @@ async def test_set_and_reveal_weights(local_chain): # allow one more block to pass time.sleep(12) - # Verify that subprocess did the reveal and deleted entry from local table - assert commit_reveal_subprocess.is_table_empty("commits") - # Query the Weights storage map revealed_weights = subtensor.query_module( module="SubtensorModule", @@ -392,62 +380,38 @@ async def test_set_and_reveal_batch_weights(local_chain): ), "Failed to set weights_rate_limit" assert subtensor.weights_rate_limit(netuid=netuid) == 0 - # Commit-reveal values - uids = np.array([0], dtype=np.int64) - weights = np.array([0.1], dtype=np.float32) - # Customers run this before submitting weights - weight_uids, weight_vals = convert_weights_and_uids_for_emit( - uids=uids, weights=weights - ) - - # Assert no local CR processes in table - assert commit_reveal_subprocess.is_table_empty("commits") - - # Set weights with CR enabled - success, message = subtensor.set_weights( - alice_wallet, - netuid, - uids=weight_uids, - weights=weight_vals, - wait_for_inclusion=True, - wait_for_finalization=True, - ) - - time.sleep(3) + # Commit-reveal values and 
weights for different steps + weights_steps = [ + (np.array([0], dtype=np.int64), np.array([0.1], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.2], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.3], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.4], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.5], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.6], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.7], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.8], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.9], dtype=np.float32)), + (np.array([0], dtype=np.int64), np.array([0.10], dtype=np.float32)), + ] - # Commit-reveal values - uids = np.array([0], dtype=np.int64) - weights = np.array([0.2], dtype=np.float32) - weight_uids, weight_vals = convert_weights_and_uids_for_emit( - uids=uids, weights=weights - ) - # add second weights with CR enabled - success, message = subtensor.set_weights( - alice_wallet, - netuid, - uids=weight_uids, - weights=weight_vals, - wait_for_inclusion=True, - wait_for_finalization=True, - ) + for uids, weights in weights_steps: + # Customers run this before submitting weights + weight_uids, weight_vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) - time.sleep(3) + # Set weights with CR enabled + success, message = subtensor.set_weights( + alice_wallet, + netuid, + uids=weight_uids, + weights=weight_vals, + wait_for_inclusion=True, + wait_for_finalization=True, + ) + assert success - # Commit-reveal values - uids = np.array([0], dtype=np.int64) - weights = np.array([0.3], dtype=np.float32) - weight_uids, weight_vals = convert_weights_and_uids_for_emit( - uids=uids, weights=weights - ) - # add second weights with CR enabled - success, message = subtensor.set_weights( - alice_wallet, - netuid, - uids=weight_uids, - weights=weight_vals, - wait_for_inclusion=True, - 
wait_for_finalization=True, - ) + time.sleep(2) weight_commits = subtensor.query_module( module="SubtensorModule", @@ -455,13 +419,9 @@ async def test_set_and_reveal_batch_weights(local_chain): params=[netuid, alice_wallet.hotkey.ss58_address], ) - weight_uids, weight_vals = convert_weights_and_uids_for_emit( - uids=uids, weights=weights - ) - # Assert that the committed weights are set correctly assert weight_commits.value is not None, "Weight commit not found in storage" - commit_hash, commit_block = weight_commits.value[0] + commit_hash, commit_block, reveal_block, expire_block = weight_commits.value[0] assert commit_block > 0, f"Invalid block number: {commit_block}" # Query the WeightCommitRevealInterval storage map @@ -471,9 +431,6 @@ async def test_set_and_reveal_batch_weights(local_chain): periods = reveal_periods.value assert periods > 0, "Invalid RevealPeriodEpochs" - # Verify that sqlite has entry - assert commit_reveal_subprocess.is_table_empty("commits") is False - # Wait until the reveal block range await wait_interval( subtensor.get_subnet_hyperparameters(netuid=netuid).tempo, subtensor @@ -482,9 +439,6 @@ async def test_set_and_reveal_batch_weights(local_chain): # allow one more block to pass time.sleep(12) - # Verify that subprocess did the reveal and deleted all entry from local table - assert commit_reveal_subprocess.is_table_empty("commits") - # Query the Weights storage map revealed_weights = subtensor.query_module( module="SubtensorModule", @@ -632,5 +586,3 @@ async def test_set_and_reveal_batch_weights_over_limit(local_chain): ) assert success is False - # remove commits - subprocess_utils.delete_all_rows("commits") From e4abb6f3bc31e91bb3da6ce9b8c486259d801aed Mon Sep 17 00:00:00 2001 From: opendansor Date: Tue, 29 Oct 2024 16:23:53 -0700 Subject: [PATCH 26/58] Refactor commit reveal logic and clean up logging Refactor the commit reveal process to only include candidates with a reveal block less than or equal to the current block. 
Also, remove unnecessary logging of received requests to improve code readability and reduce noise. --- bittensor/utils/subprocess/commit_reveal.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/bittensor/utils/subprocess/commit_reveal.py b/bittensor/utils/subprocess/commit_reveal.py index e609b22db0..9c31fa8d97 100644 --- a/bittensor/utils/subprocess/commit_reveal.py +++ b/bittensor/utils/subprocess/commit_reveal.py @@ -498,8 +498,11 @@ def reveal_commits(subtensor: Subtensor, current_block: int): try: commits = get_all_commits() commits = [commit for commit in commits if not commit.revealed] - if commits: - unique_combinations = list({(commit.wallet_hotkey_ss58, commit.netuid) for commit in commits}) + reveal_candidates = [ + commit for commit in commits if commit.reveal_block <= current_block + ] + if reveal_candidates: + unique_combinations = list({(commit.wallet_hotkey_ss58, commit.netuid) for commit in reveal_candidates}) print(f"Unique ss58,netuid combinations: {unique_combinations}") for combination in unique_combinations: @@ -521,7 +524,7 @@ def reveal_commits(subtensor: Subtensor, current_block: int): print(f"Commit {commit_hash} is expired.") continue if any(c.commit_hash == commit_hash for c in - commits) and reveal_block <= current_block <= expire_block: + reveal_candidates) and reveal_block <= current_block <= expire_block: matching_commit = next((commit for commit in commits if commit.commit_hash == commit_hash), None) if matching_commit: @@ -552,8 +555,6 @@ def handle_client_connection(client_socket: socket.socket): request = client_socket.recv(1024).decode() if not request: break - print(f"Received request: {request}") - if request.startswith('revealed_hash_batch'): try: command = 'revealed_hash_batch' From 1da70d8e350d10020f64b4aa56e636ac23480811 Mon Sep 17 00:00:00 2001 From: opendansor Date: Tue, 29 Oct 2024 19:47:23 -0700 Subject: [PATCH 27/58] Reduce max_retries and refactor substrate calls. 
Fix local and chain alignment with commits Reduced the max_retries parameter to 1 in multiple methods to expedite failure recognition. Refactored inline substrate call compositions to use a more functional approach by passing the composed extrinsic directly into the retry wrapper, streamlining the code and improving readability. --- bittensor/core/extrinsics/commit_weights.py | 96 +++++++++--------- bittensor/core/extrinsics/set_weights.py | 36 +++---- bittensor/core/extrinsics/transfer.py | 20 ++-- bittensor/core/subtensor.py | 4 +- bittensor/utils/subprocess/commit_reveal.py | 106 ++++++++++++-------- 5 files changed, 142 insertions(+), 120 deletions(-) diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index 928799ed32..9292771079 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -64,19 +64,7 @@ def do_commit_weights( """ @retry(delay=1, tries=3, backoff=2, max_delay=4) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="commit_weights", - call_params={ - "netuid": netuid, - "commit_hash": commit_hash, - }, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, - keypair=wallet.hotkey, - ) + def make_substrate_call_with_retry(extrinsic): response = submit_extrinsic( substrate=self.substrate, extrinsic=extrinsic, @@ -93,7 +81,19 @@ def make_substrate_call_with_retry(): else: return False, response.error_message - return make_substrate_call_with_retry() + call = self.substrate.compose_call( + call_module="SubtensorModule", + call_function="commit_weights", + call_params={ + "netuid": netuid, + "commit_hash": commit_hash, + }, + ) + extrinsic = self.substrate.create_signed_extrinsic( + call=call, + keypair=wallet.hotkey, + ) + return make_substrate_call_with_retry(extrinsic) def commit_weights_extrinsic( @@ -230,22 +230,7 @@ def do_reveal_weights( """ 
@retry(delay=1, tries=3, backoff=2, max_delay=4) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="reveal_weights", - call_params={ - "netuid": netuid, - "uids": uids, - "values": values, - "salt": salt, - "version_key": version_key, - }, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, - keypair=wallet.hotkey, - ) + def make_substrate_call_with_retry(extrinsic): response = submit_extrinsic( substrate=self.substrate, extrinsic=extrinsic, @@ -262,7 +247,22 @@ def make_substrate_call_with_retry(): else: return False, response.error_message - return make_substrate_call_with_retry() + call = self.substrate.compose_call( + call_module="SubtensorModule", + call_function="reveal_weights", + call_params={ + "netuid": netuid, + "uids": uids, + "values": values, + "salt": salt, + "version_key": version_key, + }, + ) + extrinsic = self.substrate.create_signed_extrinsic( + call=call, + keypair=wallet.hotkey, + ) + return make_substrate_call_with_retry(extrinsic) def reveal_weights_extrinsic( @@ -407,22 +407,7 @@ def do_batch_reveal_weights( """ @retry(delay=1, tries=3, backoff=2, max_delay=4) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="batch_reveal_weights", - call_params={ - "netuid": netuid, - "uids_list": uids, - "values_list": values, - "salts_list": salt, - "version_keys": version_keys, - }, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, - keypair=wallet.hotkey, - ) + def make_substrate_call_with_retry(extrinsic): response = submit_extrinsic( substrate=self.substrate, extrinsic=extrinsic, @@ -439,7 +424,22 @@ def make_substrate_call_with_retry(): else: return False, response.error_message - return make_substrate_call_with_retry() + call = self.substrate.compose_call( + call_module="SubtensorModule", + call_function="batch_reveal_weights", + call_params={ + "netuid": 
netuid, + "uids_list": uids, + "values_list": values, + "salts_list": salt, + "version_keys": version_keys, + }, + ) + extrinsic = self.substrate.create_signed_extrinsic( + call=call, + keypair=wallet.hotkey, + ) + return make_substrate_call_with_retry(extrinsic=extrinsic) def batch_reveal_weights_extrinsic( diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index 4032290870..00b3b6a17f 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -69,23 +69,7 @@ def do_set_weights( """ @retry(delay=1, tries=3, backoff=2, max_delay=4) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="set_weights", - call_params={ - "dests": uids, - "weights": vals, - "netuid": netuid, - "version_key": version_key, - }, - ) - # Period dictates how long the extrinsic will stay as part of waiting pool - extrinsic = self.substrate.create_signed_extrinsic( - call=call, - keypair=wallet.hotkey, - era={"period": 5}, - ) + def make_substrate_call_with_retry(extrinsic): response = submit_extrinsic( substrate=self.substrate, extrinsic=extrinsic, @@ -102,7 +86,23 @@ def make_substrate_call_with_retry(): else: return False, response.error_message - return make_substrate_call_with_retry() + call = self.substrate.compose_call( + call_module="SubtensorModule", + call_function="set_weights", + call_params={ + "dests": uids, + "weights": vals, + "netuid": netuid, + "version_key": version_key, + }, + ) + # Period dictates how long the extrinsic will stay as part of waiting pool + extrinsic = self.substrate.create_signed_extrinsic( + call=call, + keypair=wallet.hotkey, + era={"period": 5}, + ) + return make_substrate_call_with_retry(extrinsic) # Community uses this extrinsic directly and via `subtensor.set_weights` diff --git a/bittensor/core/extrinsics/transfer.py b/bittensor/core/extrinsics/transfer.py index 896fecbf96..c82a7db690 
100644 --- a/bittensor/core/extrinsics/transfer.py +++ b/bittensor/core/extrinsics/transfer.py @@ -63,15 +63,7 @@ def do_transfer( """ @retry(delay=1, tries=3, backoff=2, max_delay=4) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="Balances", - call_function="transfer_allow_death", - call_params={"dest": dest, "value": transfer_balance.rao}, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.coldkey - ) + def make_substrate_call_with_retry(extrinsic): response = submit_extrinsic( substrate=self.substrate, extrinsic=extrinsic, @@ -90,7 +82,15 @@ def make_substrate_call_with_retry(): else: return False, None, response.error_message - return make_substrate_call_with_retry() + call = self.substrate.compose_call( + call_module="Balances", + call_function="transfer_allow_death", + call_params={"dest": dest, "value": transfer_balance.rao}, + ) + extrinsic = self.substrate.create_signed_extrinsic( + call=call, keypair=wallet.coldkey + ) + return make_substrate_call_with_retry(extrinsic) # Community uses this extrinsic directly and via `subtensor.transfer` diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 495f93aa56..20b9aa845c 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -873,7 +873,7 @@ def set_weights( wait_for_inclusion: bool = False, wait_for_finalization: bool = False, prompt: bool = False, - max_retries: int = 5, + max_retries: int = 1, ) -> tuple[bool, str]: """ Sets the inter-neuronal weights for the specified neuron. This process involves specifying the influence or trust a neuron places on other neurons in the network, which is a fundamental aspect of Bittensor's decentralized learning architecture. 
@@ -1838,7 +1838,7 @@ def commit_weights( wait_for_inclusion: bool = True, wait_for_finalization: bool = False, prompt: bool = False, - max_retries: int = 5, + max_retries: int = 1, ) -> tuple[bool, str]: """ Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet. diff --git a/bittensor/utils/subprocess/commit_reveal.py b/bittensor/utils/subprocess/commit_reveal.py index 9c31fa8d97..3330322141 100644 --- a/bittensor/utils/subprocess/commit_reveal.py +++ b/bittensor/utils/subprocess/commit_reveal.py @@ -41,21 +41,21 @@ class Commit: """ def __init__( - self, - wallet_hotkey_name: str, - wallet_hotkey_ss58: str, - wallet_name: str, - wallet_path: str, - commit_hash: str, - netuid: int, - commit_block: int, - reveal_block: int, - expire_block: int, - uids: List[int], - weights: List[int], - salt: List[int], - version_key: int, - revealed: bool = False, + self, + wallet_hotkey_name: str, + wallet_hotkey_ss58: str, + wallet_name: str, + wallet_path: str, + commit_hash: str, + netuid: int, + commit_block: int, + reveal_block: int, + expire_block: int, + uids: List[int], + weights: List[int], + salt: List[int], + version_key: int, + revealed: bool = False, ): self.wallet_hotkey_name = wallet_hotkey_name self.wallet_hotkey_ss58 = wallet_hotkey_ss58 @@ -322,13 +322,13 @@ def chain_hash_sync(subtensor: Subtensor, current_block: int): """ try: # Retrieve all commits from the local database - commits = get_all_commits() + local_commits = get_all_commits() # Filter commits to only those that are not revealed - commits = [commit for commit in commits if not commit.revealed] - + local_commits = [commit for commit in local_commits if not commit.revealed] + chain_commits = [] # Group commits by wallet_hotkey_ss58 - if commits: - unique_combinations = list({(commit.wallet_hotkey_ss58, commit.netuid) for commit in commits}) + if local_commits: + unique_combinations = list({(commit.wallet_hotkey_ss58, commit.netuid) for commit in local_commits}) 
for combination in unique_combinations: ss58, netuid = combination @@ -339,12 +339,18 @@ def chain_hash_sync(subtensor: Subtensor, current_block: int): params=[netuid, ss58], ) + if not response.value: + print(f"No commits found for {combination}") + continue + for commit_hash, commit_block, reveal_block, expire_block in response.value: + chain_commits.append(commit_hash) if expire_block < current_block: continue - if any(c.commit_hash == commit_hash for c in commits) and reveal_block <= current_block: - matching_commit = next((commit for commit in commits if commit.commit_hash == commit_hash), - None) + if any(c.commit_hash == commit_hash for c in local_commits) and reveal_block <= current_block: + matching_commit = next( + (commit for commit in local_commits if commit.commit_hash == commit_hash), + None) if matching_commit: if commit_block != matching_commit.commit_block or reveal_block != matching_commit.reveal_block or expire_block != matching_commit.expire_block: sync_commit_data(matching_commit, commit_block, reveal_block, expire_block) @@ -460,12 +466,12 @@ def get_all_commits() -> List[Commit]: return [Commit.from_dict(dict(zip(columns, commit))) for commit in rows] -def check_reveal(curr_block: int) -> bool: +def check_reveal(current_block: int) -> bool: """ Checks if there are any commits to reveal. Args: - curr_block (int): The current block number. + current_block(int): The current block number. Returns: bool: True if a commit was revealed, False otherwise. 
@@ -473,7 +479,7 @@ def check_reveal(curr_block: int) -> bool: try: commits = get_all_commits() commits = [commit for commit in commits if not commit.revealed] - + except Exception as e: print(f"Error reading table 'commits': {e}") return False @@ -481,7 +487,7 @@ def check_reveal(curr_block: int) -> bool: if commits: # Filter for commits that are ready to be revealed reveal_candidates = [ - commit for commit in commits if commit.reveal_block <= curr_block + commit for commit in commits if commit.reveal_block <= current_block <= commit.expire_block ] return len(reveal_candidates) > 0 return False @@ -496,15 +502,16 @@ def reveal_commits(subtensor: Subtensor, current_block: int): subtensor (Subtensor): The subtensor network object. """ try: - commits = get_all_commits() - commits = [commit for commit in commits if not commit.revealed] - reveal_candidates = [ - commit for commit in commits if commit.reveal_block <= current_block + local_commits = get_all_commits() + local_commits = [commit for commit in local_commits if not commit.revealed] + local_reveals = [ + commit for commit in local_commits if commit.reveal_block <= current_block <= commit.expire_block ] - if reveal_candidates: - unique_combinations = list({(commit.wallet_hotkey_ss58, commit.netuid) for commit in reveal_candidates}) - print(f"Unique ss58,netuid combinations: {unique_combinations}") - + chain_reveals = [] + if local_reveals: + unique_combinations = list({(commit.wallet_hotkey_ss58, commit.netuid) for commit in local_reveals}) + # Dict that has ss58 as key, and latest commit block as value + commit_dict: Dict[str, int] = {} for combination in unique_combinations: ss58, netuid = combination ready_to_reveal = [] @@ -524,22 +531,39 @@ def reveal_commits(subtensor: Subtensor, current_block: int): print(f"Commit {commit_hash} is expired.") continue if any(c.commit_hash == commit_hash for c in - reveal_candidates) and reveal_block <= current_block <= expire_block: - matching_commit = next((commit for 
commit in commits if commit.commit_hash == commit_hash), - None) + local_reveals) and reveal_block <= current_block <= expire_block: + matching_commit = next( + (commit for commit in local_commits if commit.commit_hash == commit_hash), + None) if matching_commit: - print(f"found matching commit {matching_commit}") ready_to_reveal.append(matching_commit) else: print(f"Could not find commit hash {commit_hash} locally.") + if commit_block > commit_dict.get(ss58, 0): + commit_dict[ss58] = commit_block + if len(ready_to_reveal) > 1: + chain_reveals.extend(ready_to_reveal) reveal_batch(subtensor, ready_to_reveal) elif len(ready_to_reveal) == 1: + chain_reveals.extend(ready_to_reveal) reveal(subtensor, ready_to_reveal[0]) - except Exception as e: print(f"Error querying expected hashes for {combination}: {e}") + + # Compare reveal candidates and ready_to_reveal + if set(chain_reveals) != set(local_reveals): # there are left over local reveals + print("there is a difference between local commits and chain commits") + # Filter commits that are older than the newest one in commit_dict that was revealed + for ss58, newest_commit_block in commit_dict.items(): + for commit in local_reveals: + if commit.wallet_hotkey_ss58 == ss58 and commit.commit_block <= newest_commit_block: + # Mark the commit as revealed, as a newer commit as already been revealed + print(f"revealing commit {commit.commit_hash} as a newer hash was submitted") + commit.revealed = True + revealed_commit(commit.commit_hash) + except Exception as e: print(f"Error reading table 'commits': {e}") @@ -665,9 +689,7 @@ def main(args: argparse.Namespace): print(f"Revealing commit on block {curr_block}") reveal_commits(subtensor=subtensor, current_block=curr_block) - # Every 100th run, perform an additional check to verify reveal list alignment with the backend if counter % 100 == 0: - print("\nDoing chain hash sync:") chain_hash_sync(subtensor=subtensor, current_block=curr_block) 
delete_old_commits(current_block=curr_block, offset=1000) From 96c4584b36736067cf786c57cd51745f932d2cf4 Mon Sep 17 00:00:00 2001 From: opendansor Date: Wed, 30 Oct 2024 21:41:23 -0700 Subject: [PATCH 28/58] Refactor commit reveal periods to interval in codebase, add combination check to subprocess. Updated references from 'commit_reveal_periods' to 'commit_reveal_weights_interval' across multiple files for consistency. Also casted float values explicitly for consistency in data types. --- .../core/chain_data/subnet_hyperparameters.py | 6 ++--- bittensor/core/chain_data/utils.py | 2 +- bittensor/core/extrinsics/commit_weights.py | 2 +- bittensor/core/subtensor.py | 2 +- bittensor/utils/subprocess/commit_reveal.py | 12 +++++----- tests/e2e_tests/test_commit_weights.py | 6 ++--- tests/e2e_tests/test_reveal_weights.py | 24 +++++++++---------- 7 files changed, 27 insertions(+), 27 deletions(-) diff --git a/bittensor/core/chain_data/subnet_hyperparameters.py b/bittensor/core/chain_data/subnet_hyperparameters.py index adc93f0bdb..6cd8b1ace9 100644 --- a/bittensor/core/chain_data/subnet_hyperparameters.py +++ b/bittensor/core/chain_data/subnet_hyperparameters.py @@ -32,7 +32,7 @@ class SubnetHyperparameters: max_validators (int): Maximum number of validators. adjustment_alpha (int): Alpha value for adjustments. difficulty (int): Difficulty level. - commit_reveal_periods (int): Periods for commit-reveal weights. + commit_reveal_weights_interval (int): Periods for commit-reveal weights. commit_reveal_weights_enabled (bool): Flag indicating if commit-reveal weights are enabled. alpha_high (int): High value of alpha. alpha_low (int): Low value of alpha. 
@@ -61,7 +61,7 @@ class SubnetHyperparameters: max_validators: int adjustment_alpha: int difficulty: int - commit_reveal_periods: int + commit_reveal_weights_interval: int commit_reveal_weights_enabled: bool alpha_high: int alpha_low: int @@ -104,7 +104,7 @@ def from_vec_u8(cls, vec_u8: bytes) -> Optional["SubnetHyperparameters"]: max_validators=decoded.max_validators, adjustment_alpha=decoded.adjustment_alpha, difficulty=decoded.difficulty, - commit_reveal_periods=decoded.commit_reveal_periods, + commit_reveal_weights_interval=decoded.commit_reveal_weights_interval, commit_reveal_weights_enabled=decoded.commit_reveal_weights_enabled, alpha_high=decoded.alpha_high, alpha_low=decoded.alpha_low, diff --git a/bittensor/core/chain_data/utils.py b/bittensor/core/chain_data/utils.py index f96e0d7e64..9c21c9d22e 100644 --- a/bittensor/core/chain_data/utils.py +++ b/bittensor/core/chain_data/utils.py @@ -241,7 +241,7 @@ def from_scale_encoding_using_type_string( ["max_validators", "Compact"], ["adjustment_alpha", "Compact"], ["difficulty", "Compact"], - ["commit_reveal_periods", "Compact"], + ["commit_reveal_weights_interval", "Compact"], ["commit_reveal_weights_enabled", "bool"], ["alpha_high", "Compact"], ["alpha_low", "Compact"], diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index 9292771079..c9ac6cb310 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -187,7 +187,7 @@ def send_command(command): epoch_start_block = curr_block + blocks_until_next_epoch cr_periods = subtensor.get_subnet_hyperparameters( netuid=netuid - ).commit_reveal_periods + ).commit_reveal_weights_interval reveal_block = epoch_start_block + ((cr_periods - 1) * subnet_tempo_blocks) + 1 expire_block = reveal_block + subnet_tempo_blocks diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 20b9aa845c..9952305340 100644 --- a/bittensor/core/subtensor.py +++ 
b/bittensor/core/subtensor.py @@ -154,7 +154,7 @@ def __init__( log_verbose: bool = False, connection_timeout: int = 600, subprocess_initialization: bool = True, - subprocess_sleep_interval: float = 12, + subprocess_sleep_interval: float = 12.0, ) -> None: """ Initializes a Subtensor interface for interacting with the Bittensor blockchain. diff --git a/bittensor/utils/subprocess/commit_reveal.py b/bittensor/utils/subprocess/commit_reveal.py index 3330322141..f0da1412cf 100644 --- a/bittensor/utils/subprocess/commit_reveal.py +++ b/bittensor/utils/subprocess/commit_reveal.py @@ -511,7 +511,7 @@ def reveal_commits(subtensor: Subtensor, current_block: int): if local_reveals: unique_combinations = list({(commit.wallet_hotkey_ss58, commit.netuid) for commit in local_reveals}) # Dict that has ss58 as key, and latest commit block as value - commit_dict: Dict[str, int] = {} + commit_dict: Dict[tuple[str, int], int] = {} for combination in unique_combinations: ss58, netuid = combination ready_to_reveal = [] @@ -540,8 +540,8 @@ def reveal_commits(subtensor: Subtensor, current_block: int): else: print(f"Could not find commit hash {commit_hash} locally.") - if commit_block > commit_dict.get(ss58, 0): - commit_dict[ss58] = commit_block + if commit_block > commit_dict.get(combination, 0): + commit_dict[combination] = commit_block if len(ready_to_reveal) > 1: chain_reveals.extend(ready_to_reveal) @@ -556,9 +556,9 @@ def reveal_commits(subtensor: Subtensor, current_block: int): if set(chain_reveals) != set(local_reveals): # there are left over local reveals print("there is a difference between local commits and chain commits") # Filter commits that are older than the newest one in commit_dict that was revealed - for ss58, newest_commit_block in commit_dict.items(): + for (ss58, netuid), newest_commit_block in commit_dict.items(): for commit in local_reveals: - if commit.wallet_hotkey_ss58 == ss58 and commit.commit_block <= newest_commit_block: + if commit.wallet_hotkey_ss58 == 
ss58 and commit.netuid == netuid and commit.commit_block <= newest_commit_block: # Mark the commit as revealed, as a newer commit as already been revealed print(f"revealing commit {commit.commit_hash} as a newer hash was submitted") commit.revealed = True @@ -709,7 +709,7 @@ def main(args: argparse.Namespace): parser.add_argument( "--sleep-interval", type=float, - default=12, + default=12.0, help="Interval between block checks in seconds", ) args = parser.parse_args() diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py index 66ba68d929..ba04d972f6 100644 --- a/tests/e2e_tests/test_commit_weights.py +++ b/tests/e2e_tests/test_commit_weights.py @@ -70,13 +70,13 @@ async def test_commit_and_reveal_weights(local_chain): assert sudo_set_hyperparameter_values( local_chain, alice_wallet, - call_function="sudo_set_commit_reveal_weights_periods", - call_params={"netuid": netuid, "periods": "1"}, + call_function="sudo_set_commit_reveal_weights_interval", + call_params={"netuid": netuid, "interval": "1"}, return_error_message=True, ) assert ( - subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_periods == 1 + subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_interval == 1 ), "Failed to set commit/reveal periods" assert ( diff --git a/tests/e2e_tests/test_reveal_weights.py b/tests/e2e_tests/test_reveal_weights.py index ee259a8f81..73cb2c8dee 100644 --- a/tests/e2e_tests/test_reveal_weights.py +++ b/tests/e2e_tests/test_reveal_weights.py @@ -71,13 +71,13 @@ async def test_commit_and_reveal_weights(local_chain): assert sudo_set_hyperparameter_values( local_chain, alice_wallet, - call_function="sudo_set_commit_reveal_weights_periods", - call_params={"netuid": netuid, "periods": "1"}, + call_function="sudo_set_commit_reveal_weights_interval", + call_params={"netuid": netuid, "interval": "1"}, return_error_message=True, ) assert ( - 
subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_periods == 1 + subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_interval == 1 ), "Failed to set commit/reveal interval" assert ( @@ -214,13 +214,13 @@ async def test_set_and_reveal_weights(local_chain): assert sudo_set_hyperparameter_values( local_chain, alice_wallet, - call_function="sudo_set_commit_reveal_weights_periods", - call_params={"netuid": netuid, "periods": "1"}, + call_function="sudo_set_commit_reveal_weights_interval", + call_params={"netuid": netuid, "interval": "1"}, return_error_message=True, ) assert ( - subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_periods == 1 + subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_interval == 1 ), "Failed to set commit/reveal period" assert ( @@ -355,13 +355,13 @@ async def test_set_and_reveal_batch_weights(local_chain): assert sudo_set_hyperparameter_values( local_chain, alice_wallet, - call_function="sudo_set_commit_reveal_weights_periods", - call_params={"netuid": netuid, "periods": "1"}, + call_function="sudo_set_commit_reveal_weights_interval", + call_params={"netuid": netuid, "interval": "1"}, return_error_message=True, ) assert ( - subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_periods == 1 + subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_interval == 1 ), "Failed to set commit/reveal periods" assert ( @@ -509,13 +509,13 @@ async def test_set_and_reveal_batch_weights_over_limit(local_chain): assert sudo_set_hyperparameter_values( local_chain, alice_wallet, - call_function="sudo_set_commit_reveal_weights_periods", - call_params={"netuid": netuid, "periods": "1"}, + call_function="sudo_set_commit_reveal_weights_interval", + call_params={"netuid": netuid, "interval": "1"}, return_error_message=True, ) assert ( - subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_periods == 1 + 
subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_interval == 1 ), "Failed to set commit/reveal periods" assert ( From 3c30f198a0d1ac5d5146673ce92908e931d4da47 Mon Sep 17 00:00:00 2001 From: opendansor Date: Wed, 30 Oct 2024 22:50:13 -0700 Subject: [PATCH 29/58] Fix typo in database commit operation Corrected a typo from `conn.committed()` to `conn.commit()` in `subprocess_utils.py` to ensure proper database commit operations. Added a newline at the end of the file for consistency. --- bittensor/utils/subprocess_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bittensor/utils/subprocess_utils.py b/bittensor/utils/subprocess_utils.py index 23a3af9e54..7aa9c0e375 100644 --- a/bittensor/utils/subprocess_utils.py +++ b/bittensor/utils/subprocess_utils.py @@ -303,7 +303,7 @@ def create_table(title: str, columns: list[tuple[str, str]], rows: list[list]): creation_query = f"CREATE TABLE IF NOT EXISTS {title} ({columns_})" conn.commit() cursor.execute(creation_query) - conn.committed() + conn.commit() query = f"INSERT INTO {title} ({', '.join([x[0] for x in columns])}) VALUES ({', '.join(['?'] * len(columns))})" cursor.executemany(query, rows) conn.commit() @@ -357,4 +357,4 @@ def delete_all_rows(table_name: str): with DB() as (conn, cursor): delete_query = f"DELETE FROM {table_name}" cursor.execute(delete_query) - conn.commit() \ No newline at end of file + conn.commit() From 95d5139bc896313508197cb66455ecd0cdc666ac Mon Sep 17 00:00:00 2001 From: Roman <167799377+roman-opentensor@users.noreply.github.com> Date: Thu, 31 Oct 2024 13:31:22 -0700 Subject: [PATCH 30/58] default port from 9946 to 9944 (#2376) --- bittensor/core/settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bittensor/core/settings.py b/bittensor/core/settings.py index 29948b612e..fe2a707b4e 100644 --- a/bittensor/core/settings.py +++ b/bittensor/core/settings.py @@ -75,7 +75,7 @@ def turn_console_on(): FINNEY_ENTRYPOINT = 
"wss://entrypoint-finney.opentensor.ai:443" FINNEY_TEST_ENTRYPOINT = "wss://test.finney.opentensor.ai:443/" ARCHIVE_ENTRYPOINT = "wss://archive.chain.opentensor.ai:443/" -LOCAL_ENTRYPOINT = os.getenv("BT_SUBTENSOR_CHAIN_ENDPOINT") or "ws://127.0.0.1:9946" +LOCAL_ENTRYPOINT = os.getenv("BT_SUBTENSOR_CHAIN_ENDPOINT") or "ws://127.0.0.1:9944" # Currency Symbols Bittensor TAO_SYMBOL: str = chr(0x03C4) From 8f20eab36b27ca4812ca4152ec9e46a4d110e20a Mon Sep 17 00:00:00 2001 From: opendansor Date: Thu, 31 Oct 2024 15:24:34 -0700 Subject: [PATCH 31/58] Ruff --- bittensor/core/extrinsics/commit_weights.py | 5 +- bittensor/core/extrinsics/set_weights.py | 7 +- bittensor/core/subtensor.py | 25 ++- bittensor/utils/subprocess/commit_reveal.py | 172 ++++++++++++++------ bittensor/utils/subprocess_utils.py | 14 +- 5 files changed, 155 insertions(+), 68 deletions(-) diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index c9ac6cb310..b78132d7fd 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -16,6 +16,7 @@ # DEALINGS IN THE SOFTWARE. 
"""Module commit weights and reveal weights extrinsic.""" + import json from typing import Optional, TYPE_CHECKING import socket @@ -536,7 +537,7 @@ def send_command(command): try: commit_hashes = [] for batch_uids, batch_weights, batch_salt, batch_version_key in zip( - uids, weights, salt, version_keys + uids, weights, salt, version_keys ): # Generate the hash of the weights for each individual batch commit_hash = generate_weight_hash( @@ -549,7 +550,7 @@ def send_command(command): ) commit_hashes.append(commit_hash) - command = f'revealed_hash_batch {json.dumps(commit_hashes)}' + command = f"revealed_hash_batch {json.dumps(commit_hashes)}" send_command(command) except Exception as e: logging.error(f"Failed batch reveal weights subprocess: {e}") diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index 00b3b6a17f..46cf15f2b3 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -152,7 +152,6 @@ def set_weights_extrinsic( f":satellite: Committing weights on [white]{subtensor.network}[/white] ..." ): try: - # First convert types. if use_torch(): if isinstance(uids, list): @@ -166,8 +165,8 @@ def set_weights_extrinsic( weights = np.array(weights, dtype=np.float32) # Reformat and normalize. - weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( - uids, weights + weight_uids, weight_vals = ( + weight_utils.convert_weights_and_uids_for_emit(uids, weights) ) success, message = subtensor.commit_weights( @@ -179,7 +178,7 @@ def set_weights_extrinsic( wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, prompt=prompt, - max_retries=1 + max_retries=1, ) if not wait_for_finalization and not wait_for_inclusion: return True, "Not waiting for finalization or inclusion." 
diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 9952305340..a1bab4d1ff 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -80,11 +80,21 @@ transfer_extrinsic, ) from bittensor.core.metagraph import Metagraph -from bittensor.utils import ss58_to_vec_u8, torch, U64_MAX, u16_normalized_float, networking, subprocess_utils +from bittensor.utils import ( + ss58_to_vec_u8, + torch, + U64_MAX, + u16_normalized_float, + networking, + subprocess_utils, +) from bittensor.utils.balance import Balance from bittensor.utils.btlogging import logging from bittensor.utils.registration import legacy_torch_api_compat -from bittensor.utils.weight_utils import generate_weight_hash, convert_weights_and_uids_for_emit +from bittensor.utils.weight_utils import ( + generate_weight_hash, + convert_weights_and_uids_for_emit, +) KEY_NONCE: dict[str, int] = {} @@ -1872,9 +1882,14 @@ def commit_weights( ) # start subprocess if permitted and not yet running - if self.subprocess_initialization and not subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS): + if self.subprocess_initialization and not subprocess_utils.is_process_running( + COMMIT_REVEAL_PROCESS + ): logging.info("Starting commit_reveal subprocess from commit.") - subprocess_utils.start_commit_reveal_subprocess(network=self.chain_endpoint, sleep_interval=self.subprocess_sleep_interval) + subprocess_utils.start_commit_reveal_subprocess( + network=self.chain_endpoint, + sleep_interval=self.subprocess_sleep_interval, + ) if isinstance(weights, list) and all(isinstance(w, float) for w in weights): uids, weights = convert_weights_and_uids_for_emit(uids, weights) # type: ignore @@ -1904,7 +1919,7 @@ def commit_weights( weights=list(weights), salt=salt, version_key=version_key, - block=curr_block + block=curr_block, ) success, message = commit_weights_extrinsic( subtensor=self, diff --git a/bittensor/utils/subprocess/commit_reveal.py b/bittensor/utils/subprocess/commit_reveal.py 
index f0da1412cf..ee1b387946 100644 --- a/bittensor/utils/subprocess/commit_reveal.py +++ b/bittensor/utils/subprocess/commit_reveal.py @@ -41,21 +41,21 @@ class Commit: """ def __init__( - self, - wallet_hotkey_name: str, - wallet_hotkey_ss58: str, - wallet_name: str, - wallet_path: str, - commit_hash: str, - netuid: int, - commit_block: int, - reveal_block: int, - expire_block: int, - uids: List[int], - weights: List[int], - salt: List[int], - version_key: int, - revealed: bool = False, + self, + wallet_hotkey_name: str, + wallet_hotkey_ss58: str, + wallet_name: str, + wallet_path: str, + commit_hash: str, + netuid: int, + commit_block: int, + reveal_block: int, + expire_block: int, + uids: List[int], + weights: List[int], + salt: List[int], + version_key: int, + revealed: bool = False, ): self.wallet_hotkey_name = wallet_hotkey_name self.wallet_hotkey_ss58 = wallet_hotkey_ss58 @@ -131,11 +131,13 @@ def __str__(self) -> str: Returns: str: String representation of the commit. """ - return (f"Commit(wallet_hotkey_name={self.wallet_hotkey_name}, wallet_hotkey_ss58={self.wallet_hotkey_ss58}, " - f"wallet_name={self.wallet_name}, wallet_path={self.wallet_path}, commit_hash={self.commit_hash}, " - f"netuid={self.netuid}, commit_block={self.commit_block}, reveal_block={self.reveal_block}, " - f"expire_block={self.expire_block}, uids={self.uids}, weights={self.weights}, salt={self.salt}, " - f"version_key={self.version_key}, revealed={self.revealed})") + return ( + f"Commit(wallet_hotkey_name={self.wallet_hotkey_name}, wallet_hotkey_ss58={self.wallet_hotkey_ss58}, " + f"wallet_name={self.wallet_name}, wallet_path={self.wallet_path}, commit_hash={self.commit_hash}, " + f"netuid={self.netuid}, commit_block={self.commit_block}, reveal_block={self.reveal_block}, " + f"expire_block={self.expire_block}, uids={self.uids}, weights={self.weights}, salt={self.salt}, " + f"version_key={self.version_key}, revealed={self.revealed})" + ) def table_exists(table_name: str) -> bool: @@ 
-302,12 +304,14 @@ def sync_commit_data(matching_commit, commit_block, reveal_block, expire_block): SET commit_block=?, reveal_block=?, expire_block=? WHERE commit_hash=? """ - cursor.execute(update_sql, - (commit_block, reveal_block, expire_block, matching_commit.commit_hash) - ) + cursor.execute( + update_sql, + (commit_block, reveal_block, expire_block, matching_commit.commit_hash), + ) conn.commit() print( - f"Updated commit {matching_commit.commit_hash} with commit_block={commit_block}, reveal_block={reveal_block}, expire_block={expire_block}") + f"Updated commit {matching_commit.commit_hash} with commit_block={commit_block}, reveal_block={reveal_block}, expire_block={expire_block}" + ) except Exception as e: print(f"Error updating commit data: {e}") @@ -328,7 +332,9 @@ def chain_hash_sync(subtensor: Subtensor, current_block: int): chain_commits = [] # Group commits by wallet_hotkey_ss58 if local_commits: - unique_combinations = list({(commit.wallet_hotkey_ss58, commit.netuid) for commit in local_commits}) + unique_combinations = list( + {(commit.wallet_hotkey_ss58, commit.netuid) for commit in local_commits} + ) for combination in unique_combinations: ss58, netuid = combination @@ -343,19 +349,43 @@ def chain_hash_sync(subtensor: Subtensor, current_block: int): print(f"No commits found for {combination}") continue - for commit_hash, commit_block, reveal_block, expire_block in response.value: + for ( + commit_hash, + commit_block, + reveal_block, + expire_block, + ) in response.value: chain_commits.append(commit_hash) if expire_block < current_block: continue - if any(c.commit_hash == commit_hash for c in local_commits) and reveal_block <= current_block: + if ( + any(c.commit_hash == commit_hash for c in local_commits) + and reveal_block <= current_block + ): matching_commit = next( - (commit for commit in local_commits if commit.commit_hash == commit_hash), - None) + ( + commit + for commit in local_commits + if commit.commit_hash == commit_hash + ), + None, 
+ ) if matching_commit: - if commit_block != matching_commit.commit_block or reveal_block != matching_commit.reveal_block or expire_block != matching_commit.expire_block: - sync_commit_data(matching_commit, commit_block, reveal_block, expire_block) + if ( + commit_block != matching_commit.commit_block + or reveal_block != matching_commit.reveal_block + or expire_block != matching_commit.expire_block + ): + sync_commit_data( + matching_commit, + commit_block, + reveal_block, + expire_block, + ) else: - print(f"Could not find matching commit for hash: {commit_hash}") + print( + f"Could not find matching commit for hash: {commit_hash}" + ) except Exception as e: print(f"Error during subtensor query chain sync: {e}") except Exception as e: @@ -383,7 +413,9 @@ def delete_old_commits(current_block: int, offset: int): delete_sql = "DELETE FROM commits WHERE commit_hash=?" cursor.execute(delete_sql, (commit.commit_hash,)) conn.commit() - print(f"Current block: {current_block}. Deleting expired Commit: {commit}") + print( + f"Current block: {current_block}. Deleting expired Commit: {commit}" + ) except Exception as e: print(f"Error deleting expired commits: {e}") @@ -405,7 +437,9 @@ def revealed_commit(commit_hash: str): update_sql = "UPDATE commits SET revealed = ? WHERE commit_hash = ?" cursor.execute(update_sql, (True, commit_hash)) conn.commit() - print(f"\nUpdated revealed status on existing row with commit hash {commit_hash}") + print( + f"\nUpdated revealed status on existing row with commit hash {commit_hash}" + ) else: print(f"\nNo existing row found with commit hash {commit_hash}") except Exception as e: @@ -431,7 +465,9 @@ def revealed_commit_batch(commit_hashes: List[str]): update_sql = "UPDATE commits SET revealed = ? WHERE commit_hash = ?" 
cursor.execute(update_sql, (True, commit_hash)) conn.commit() - print(f"\nUpdated revealed status on existing row with commit hash {commit_hash}") + print( + f"\nUpdated revealed status on existing row with commit hash {commit_hash}" + ) else: print(f"\nNo existing row found with commit hash {commit_hash}") except Exception as e: @@ -487,7 +523,9 @@ def check_reveal(current_block: int) -> bool: if commits: # Filter for commits that are ready to be revealed reveal_candidates = [ - commit for commit in commits if commit.reveal_block <= current_block <= commit.expire_block + commit + for commit in commits + if commit.reveal_block <= current_block <= commit.expire_block ] return len(reveal_candidates) > 0 return False @@ -505,11 +543,15 @@ def reveal_commits(subtensor: Subtensor, current_block: int): local_commits = get_all_commits() local_commits = [commit for commit in local_commits if not commit.revealed] local_reveals = [ - commit for commit in local_commits if commit.reveal_block <= current_block <= commit.expire_block + commit + for commit in local_commits + if commit.reveal_block <= current_block <= commit.expire_block ] chain_reveals = [] if local_reveals: - unique_combinations = list({(commit.wallet_hotkey_ss58, commit.netuid) for commit in local_reveals}) + unique_combinations = list( + {(commit.wallet_hotkey_ss58, commit.netuid) for commit in local_reveals} + ) # Dict that has ss58 as key, and latest commit block as value commit_dict: Dict[tuple[str, int], int] = {} for combination in unique_combinations: @@ -526,19 +568,33 @@ def reveal_commits(subtensor: Subtensor, current_block: int): print(f"No commits found for {combination}") continue - for commit_hash, commit_block, reveal_block, expire_block in response.value: + for ( + commit_hash, + commit_block, + reveal_block, + expire_block, + ) in response.value: if expire_block < current_block: print(f"Commit {commit_hash} is expired.") continue - if any(c.commit_hash == commit_hash for c in - local_reveals) 
and reveal_block <= current_block <= expire_block: + if ( + any(c.commit_hash == commit_hash for c in local_reveals) + and reveal_block <= current_block <= expire_block + ): matching_commit = next( - (commit for commit in local_commits if commit.commit_hash == commit_hash), - None) + ( + commit + for commit in local_commits + if commit.commit_hash == commit_hash + ), + None, + ) if matching_commit: ready_to_reveal.append(matching_commit) else: - print(f"Could not find commit hash {commit_hash} locally.") + print( + f"Could not find commit hash {commit_hash} locally." + ) if commit_block > commit_dict.get(combination, 0): commit_dict[combination] = commit_block @@ -553,14 +609,22 @@ def reveal_commits(subtensor: Subtensor, current_block: int): print(f"Error querying expected hashes for {combination}: {e}") # Compare reveal candidates and ready_to_reveal - if set(chain_reveals) != set(local_reveals): # there are left over local reveals + if set(chain_reveals) != set( + local_reveals + ): # there are left over local reveals print("there is a difference between local commits and chain commits") # Filter commits that are older than the newest one in commit_dict that was revealed for (ss58, netuid), newest_commit_block in commit_dict.items(): for commit in local_reveals: - if commit.wallet_hotkey_ss58 == ss58 and commit.netuid == netuid and commit.commit_block <= newest_commit_block: + if ( + commit.wallet_hotkey_ss58 == ss58 + and commit.netuid == netuid + and commit.commit_block <= newest_commit_block + ): # Mark the commit as revealed, as a newer commit as already been revealed - print(f"revealing commit {commit.commit_hash} as a newer hash was submitted") + print( + f"revealing commit {commit.commit_hash} as a newer hash was submitted" + ) commit.revealed = True revealed_commit(commit.commit_hash) @@ -579,10 +643,10 @@ def handle_client_connection(client_socket: socket.socket): request = client_socket.recv(1024).decode() if not request: break - if 
request.startswith('revealed_hash_batch'): + if request.startswith("revealed_hash_batch"): try: - command = 'revealed_hash_batch' - json_start_index = request.index('[') + command = "revealed_hash_batch" + json_start_index = request.index("[") json_payload = request[json_start_index:] args = json.loads(json_payload) revealed_commit_batch(args) @@ -595,7 +659,9 @@ def handle_client_connection(client_socket: socket.socket): command = args[0] commands = { "revealed_hash": lambda: revealed_commit(args[1]), - "revealed_hash_batch": lambda: revealed_commit_batch(json.loads(args[1])), + "revealed_hash_batch": lambda: revealed_commit_batch( + json.loads(args[1]) + ), "committed": lambda: committed( Commit( wallet_hotkey_name=args[3], @@ -675,7 +741,9 @@ def main(args: argparse.Namespace): args (argparse.Namespace): The command-line arguments. """ initialize_db() - print(f"initializing subtensor with network: {args.network} and sleep time: {args.sleep_interval} seconds") + print( + f"initializing subtensor with network: {args.network} and sleep time: {args.sleep_interval} seconds" + ) subtensor = Subtensor(network=args.network, subprocess_initialization=False) server_thread = threading.Thread(target=start_socket_server) server_thread.start() diff --git a/bittensor/utils/subprocess_utils.py b/bittensor/utils/subprocess_utils.py index 7aa9c0e375..d084a5c8ee 100644 --- a/bittensor/utils/subprocess_utils.py +++ b/bittensor/utils/subprocess_utils.py @@ -63,8 +63,8 @@ def is_process_running(process_name: str) -> bool: for proc in psutil.process_iter(["pid", "name", "cmdline"]): cmdline = proc.info["cmdline"] if cmdline and ( - process_name in proc.info["name"] - or any(process_name in cmd for cmd in cmdline) + process_name in proc.info["name"] + or any(process_name in cmd for cmd in cmdline) ): return True return False @@ -113,7 +113,9 @@ def check_message_in_log(file_path: str, message: str) -> bool: return False message = "commit_reveal subprocess is ready" - return 
check_message_in_log(stdout_log, message) or check_message_in_log(stderr_log, message) + return check_message_in_log(stdout_log, message) or check_message_in_log( + stderr_log, message + ) def read_commit_reveal_logs(): @@ -167,7 +169,9 @@ def is_table_empty(table_name: str) -> bool: return False -def start_if_existing_commits(network: Optional[str] = None, sleep_interval: Optional[float] = None): +def start_if_existing_commits( + network: Optional[str] = None, sleep_interval: Optional[float] = None +): # check if table is empty if not is_table_empty("commits"): start_commit_reveal_subprocess(network, sleep_interval) @@ -178,7 +182,7 @@ def start_if_existing_commits(network: Optional[str] = None, sleep_interval: Opt def start_commit_reveal_subprocess( - network: Optional[str] = None, sleep_interval: Optional[float] = None + network: Optional[str] = None, sleep_interval: Optional[float] = None ): """ Start the commit reveal subprocess if not already running. From af1c0f67f548048bcfc929e75d7234ec812f1e2d Mon Sep 17 00:00:00 2001 From: opendansor Date: Thu, 31 Oct 2024 15:27:53 -0700 Subject: [PATCH 32/58] Ruff --- tests/e2e_tests/test_commit_weights.py | 5 ++++- tests/e2e_tests/test_reveal_weights.py | 24 ++++++++++++++++++------ 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py index ba04d972f6..a715059674 100644 --- a/tests/e2e_tests/test_commit_weights.py +++ b/tests/e2e_tests/test_commit_weights.py @@ -76,7 +76,10 @@ async def test_commit_and_reveal_weights(local_chain): ) assert ( - subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_interval == 1 + subtensor.get_subnet_hyperparameters( + netuid=netuid + ).commit_reveal_weights_interval + == 1 ), "Failed to set commit/reveal periods" assert ( diff --git a/tests/e2e_tests/test_reveal_weights.py b/tests/e2e_tests/test_reveal_weights.py index 73cb2c8dee..5bd54c8936 100644 --- 
a/tests/e2e_tests/test_reveal_weights.py +++ b/tests/e2e_tests/test_reveal_weights.py @@ -77,7 +77,10 @@ async def test_commit_and_reveal_weights(local_chain): ) assert ( - subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_interval == 1 + subtensor.get_subnet_hyperparameters( + netuid=netuid + ).commit_reveal_weights_interval + == 1 ), "Failed to set commit/reveal interval" assert ( @@ -220,7 +223,10 @@ async def test_set_and_reveal_weights(local_chain): ) assert ( - subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_interval == 1 + subtensor.get_subnet_hyperparameters( + netuid=netuid + ).commit_reveal_weights_interval + == 1 ), "Failed to set commit/reveal period" assert ( @@ -361,7 +367,10 @@ async def test_set_and_reveal_batch_weights(local_chain): ) assert ( - subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_interval == 1 + subtensor.get_subnet_hyperparameters( + netuid=netuid + ).commit_reveal_weights_interval + == 1 ), "Failed to set commit/reveal periods" assert ( @@ -515,11 +524,14 @@ async def test_set_and_reveal_batch_weights_over_limit(local_chain): ) assert ( - subtensor.get_subnet_hyperparameters(netuid=netuid).commit_reveal_weights_interval == 1 + subtensor.get_subnet_hyperparameters( + netuid=netuid + ).commit_reveal_weights_interval + == 1 ), "Failed to set commit/reveal periods" assert ( - subtensor.weights_rate_limit(netuid=netuid) > 0 + subtensor.weights_rate_limit(netuid=netuid) > 0 ), "Weights rate limit is below 0" # Lower the rate limit assert sudo_set_hyperparameter_values( @@ -530,7 +542,7 @@ async def test_set_and_reveal_batch_weights_over_limit(local_chain): return_error_message=True, ) assert ( - subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 + subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 ), "Failed to set weights_rate_limit" assert subtensor.weights_rate_limit(netuid=netuid) == 0 From 
116e2001704d11949ce4e5b047a3290dc7a858b9 Mon Sep 17 00:00:00 2001 From: opendansor Date: Thu, 31 Oct 2024 21:35:17 -0700 Subject: [PATCH 33/58] Fix unit test, add prepare values for emmit on commit. --- bittensor/core/extrinsics/set_weights.py | 4 +++- bittensor/core/subtensor.py | 3 +-- bittensor/utils/weight_utils.py | 6 ++++-- tests/e2e_tests/test_incentive.py | 1 + tests/e2e_tests/test_reveal_weights.py | 4 ++-- tests/unit_tests/test_subtensor.py | 9 +++++++-- 6 files changed, 18 insertions(+), 9 deletions(-) diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index 46cf15f2b3..6855c53622 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -48,11 +48,13 @@ def do_set_weights( version_key: int = version_as_int, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, + period: int = 5, ) -> tuple[bool, Optional[dict]]: # (success, error_message) """ Internal method to send a transaction to the Bittensor blockchain, setting weights for specified neurons. This method constructs and submits the transaction, handling retries and blockchain communication. Args: + period (int): Period dictates how long the extrinsic will stay as part of waiting pool self (bittensor.core.subtensor.Subtensor): Subtensor interface wallet (bittensor_wallet.Wallet): The wallet associated with the neuron setting the weights. uids (list[int]): List of neuron UIDs for which weights are being set. 
@@ -100,7 +102,7 @@ def make_substrate_call_with_retry(extrinsic): extrinsic = self.substrate.create_signed_extrinsic( call=call, keypair=wallet.hotkey, - era={"period": 5}, + era={"period": period}, ) return make_substrate_call_with_retry(extrinsic) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index a1bab4d1ff..6c34ab823b 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -1891,8 +1891,7 @@ def commit_weights( sleep_interval=self.subprocess_sleep_interval, ) - if isinstance(weights, list) and all(isinstance(w, float) for w in weights): - uids, weights = convert_weights_and_uids_for_emit(uids, weights) # type: ignore + uids, weights = convert_weights_and_uids_for_emit(uids, weights) # type: ignore # Generate the hash of the weights commit_hash = generate_weight_hash( diff --git a/bittensor/utils/weight_utils.py b/bittensor/utils/weight_utils.py index f004af446c..67ccfd32e5 100644 --- a/bittensor/utils/weight_utils.py +++ b/bittensor/utils/weight_utils.py @@ -195,8 +195,10 @@ def convert_weights_and_uids_for_emit( weight_vals (list[int]): Weights as a list. """ # Checks. 
- weights = weights.tolist() - uids = uids.tolist() + if isinstance(weights, (np.ndarray, torch.Tensor)): + weights = weights.tolist() + if isinstance(uids, (np.ndarray, torch.Tensor)): + uids = uids.tolist() if min(weights) < 0: raise ValueError(f"Passed weight is negative cannot exist on chain {weights}") if min(uids) < 0: diff --git a/tests/e2e_tests/test_incentive.py b/tests/e2e_tests/test_incentive.py index 3e309f4f64..c456e97647 100644 --- a/tests/e2e_tests/test_incentive.py +++ b/tests/e2e_tests/test_incentive.py @@ -160,6 +160,7 @@ async def test_incentive(local_chain): version_key=0, wait_for_inclusion=True, wait_for_finalization=True, + period=25, ) logging.info("Alice neuron set weights successfully") diff --git a/tests/e2e_tests/test_reveal_weights.py b/tests/e2e_tests/test_reveal_weights.py index 5bd54c8936..e20222f133 100644 --- a/tests/e2e_tests/test_reveal_weights.py +++ b/tests/e2e_tests/test_reveal_weights.py @@ -413,8 +413,8 @@ async def test_set_and_reveal_batch_weights(local_chain): success, message = subtensor.set_weights( alice_wallet, netuid, - uids=weight_uids, - weights=weight_vals, + uids=uids, + weights=weights, wait_for_inclusion=True, wait_for_finalization=True, ) diff --git a/tests/unit_tests/test_subtensor.py b/tests/unit_tests/test_subtensor.py index 6d8fb1ff5f..10bfd2be7a 100644 --- a/tests/unit_tests/test_subtensor.py +++ b/tests/unit_tests/test_subtensor.py @@ -29,6 +29,7 @@ from bittensor.core.subtensor import Subtensor, logging from bittensor.utils import u16_normalized_float, u64_normalized_float from bittensor.utils.balance import Balance +from bittensor.utils.weight_utils import convert_weights_and_uids_for_emit U16_MAX = 65535 U64_MAX = 18446744073709551615 @@ -1922,12 +1923,16 @@ def test_commit_weights(subtensor, mocker): max_retries=max_retries, ) + weight_uids, weight_vals = convert_weights_and_uids_for_emit( + uids=uids, weights=weights + ) + # Asserts mocked_generate_weight_hash.assert_called_once_with( 
address=fake_wallet.hotkey.ss58_address, netuid=netuid, - uids=list(uids), - values=list(weights), + uids=list(weight_uids), + values=list(weight_vals), salt=list(salt), version_key=settings.version_as_int, ) From 70246e0ccb3aa2427650f91d5749df53a7c23ab4 Mon Sep 17 00:00:00 2001 From: opendansor Date: Fri, 1 Nov 2024 09:59:18 -0700 Subject: [PATCH 34/58] Dont initialize subprocess for unit test. --- bittensor/core/subtensor.py | 2 +- tests/unit_tests/test_subtensor.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 6c34ab823b..835bf4b97e 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -1907,7 +1907,7 @@ def commit_weights( while retries < max_retries and not success: try: - if subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS): + if self.subprocess_initialization and subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS): curr_block = self.get_current_block() commit_weights_process( self, diff --git a/tests/unit_tests/test_subtensor.py b/tests/unit_tests/test_subtensor.py index 10bfd2be7a..c3abc91618 100644 --- a/tests/unit_tests/test_subtensor.py +++ b/tests/unit_tests/test_subtensor.py @@ -1910,6 +1910,7 @@ def test_commit_weights(subtensor, mocker): ) # Call + subtensor.subprocess_initialization = False result = subtensor.commit_weights( wallet=fake_wallet, netuid=netuid, From 26c988f6f8478523cbb6997bb4fc7b12bfbb6011 Mon Sep 17 00:00:00 2001 From: opendansor Date: Fri, 1 Nov 2024 11:34:07 -0700 Subject: [PATCH 35/58] Refactor subprocess initialization condition. Update the conditional statement for subprocess initialization in `subtensor.py` for clarity. Adjust the unit tests in `test_set_weights.py` to patch `get_subnet_hyperparameters` method to return an object with `commit_reveal_weights_enabled` set to `False`. 
--- bittensor/core/subtensor.py | 5 ++++- tests/unit_tests/extrinsics/test_set_weights.py | 7 ++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 835bf4b97e..89bbbf5e95 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -1907,7 +1907,10 @@ def commit_weights( while retries < max_retries and not success: try: - if self.subprocess_initialization and subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS): + if ( + self.subprocess_initialization + and subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS) + ): curr_block = self.get_current_block() commit_weights_process( self, diff --git a/tests/unit_tests/extrinsics/test_set_weights.py b/tests/unit_tests/extrinsics/test_set_weights.py index 9c32fc9bdf..7c517e9564 100644 --- a/tests/unit_tests/extrinsics/test_set_weights.py +++ b/tests/unit_tests/extrinsics/test_set_weights.py @@ -87,7 +87,12 @@ def test_set_weights_extrinsic( ): uids_tensor = torch.tensor(uids, dtype=torch.int64) weights_tensor = torch.tensor(weights, dtype=torch.float32) - with patch( + # Patch subtensor.get_subnet_hyperparameters to return an object with commit_reveal_weights_enabled=False + with patch.object( + mock_subtensor, + "get_subnet_hyperparameters", + return_value=type("obj", (object,), {"commit_reveal_weights_enabled": False}), + ), patch( "bittensor.utils.weight_utils.convert_weights_and_uids_for_emit", return_value=(uids_tensor, weights_tensor), ), patch("rich.prompt.Confirm.ask", return_value=user_accepts), patch( From 4182475a77c8282674fa588307d53c1c9120d013 Mon Sep 17 00:00:00 2001 From: opendansor Date: Fri, 1 Nov 2024 11:44:06 -0700 Subject: [PATCH 36/58] Lint --- bittensor/core/extrinsics/set_weights.py | 3 +-- bittensor/core/subtensor.py | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index 6855c53622..9edd9b3165 
100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -29,7 +29,6 @@ from bittensor.utils.btlogging import logging from bittensor.utils.networking import ensure_connected from bittensor.utils.registration import torch, use_torch -from bittensor.utils.weight_utils import convert_weights_and_uids_for_emit # For annotation purposes if TYPE_CHECKING: @@ -202,7 +201,7 @@ def set_weights_extrinsic( except Exception as e: bt_console.print(f":cross_mark: [red]Failed[/red]: error:{e}") logging.debug(str(e)) - return False, str(e) + return False, str(e) else: # First convert types. if use_torch(): diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 89bbbf5e95..ff134cedcc 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -23,7 +23,6 @@ import argparse import copy import socket -import time from typing import Union, Optional, TypedDict, Any import numpy as np From 50309bc6158753cc056e6942429515ca071baee2 Mon Sep 17 00:00:00 2001 From: opendansor Date: Fri, 1 Nov 2024 16:04:02 -0700 Subject: [PATCH 37/58] Refactor subprocess initialization and update network defaults. Change the network parameter to self.chain_endpoint in subprocess initialization for better encapsulation. Clarify the commit_reveal_weights_interval docstring and update the default network address in argument parsing. --- bittensor/core/chain_data/subnet_hyperparameters.py | 2 +- bittensor/core/subtensor.py | 2 +- bittensor/utils/subprocess/commit_reveal.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bittensor/core/chain_data/subnet_hyperparameters.py b/bittensor/core/chain_data/subnet_hyperparameters.py index 6cd8b1ace9..df21b12a4c 100644 --- a/bittensor/core/chain_data/subnet_hyperparameters.py +++ b/bittensor/core/chain_data/subnet_hyperparameters.py @@ -32,7 +32,7 @@ class SubnetHyperparameters: max_validators (int): Maximum number of validators. 
adjustment_alpha (int): Alpha value for adjustments. difficulty (int): Difficulty level. - commit_reveal_weights_interval (int): Periods for commit-reveal weights. + commit_reveal_weights_interval (int): Intervals (tempo intervals) for commit-reveal weights. commit_reveal_weights_enabled (bool): Flag indicating if commit-reveal weights are enabled. alpha_high (int): High value of alpha. alpha_low (int): Low value of alpha. diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index ff134cedcc..2d39e079c0 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -214,7 +214,7 @@ def __init__( if subprocess_initialization: subprocess_utils.start_if_existing_commits( - network=network, sleep_interval=subprocess_sleep_interval + network=self.chain_endpoint, sleep_interval=subprocess_sleep_interval ) self.subprocess_initialization = subprocess_initialization diff --git a/bittensor/utils/subprocess/commit_reveal.py b/bittensor/utils/subprocess/commit_reveal.py index ee1b387946..829361a3b8 100644 --- a/bittensor/utils/subprocess/commit_reveal.py +++ b/bittensor/utils/subprocess/commit_reveal.py @@ -771,7 +771,7 @@ def main(args: argparse.Namespace): parser.add_argument( "--network", type=str, - default="wss://entrypoint-finney.opentensor.ai:443", + default="wss://test.finney.opentensor.ai:443", help="Subtensor network address", ) parser.add_argument( From 87a2ab295b0138a46173582e65efe07c51c0a275 Mon Sep 17 00:00:00 2001 From: opendansor Date: Sun, 3 Nov 2024 22:29:32 -0800 Subject: [PATCH 38/58] Refactor logging and subprocess management Remove redundant `read_commit_reveal_logs` function. Enhance `is_table_empty` to handle table existence check. Add error handling and logging improvements for subprocess management. 
--- bittensor/utils/subprocess_utils.py | 106 ++++++++++++---------------- 1 file changed, 47 insertions(+), 59 deletions(-) diff --git a/bittensor/utils/subprocess_utils.py b/bittensor/utils/subprocess_utils.py index d084a5c8ee..18331bd221 100644 --- a/bittensor/utils/subprocess_utils.py +++ b/bittensor/utils/subprocess_utils.py @@ -113,57 +113,39 @@ def check_message_in_log(file_path: str, message: str) -> bool: return False message = "commit_reveal subprocess is ready" - return check_message_in_log(stdout_log, message) or check_message_in_log( - stderr_log, message - ) - - -def read_commit_reveal_logs(): - """ - Read and print the last 50 lines of logs from the most recent subprocess log. - """ - try: - stdout_log, stderr_log = get_cr_log_files() - except RuntimeError as e: - print(str(e)) - return - - def read_last_n_lines(file_path: str, n: int) -> list: - """Reads the last N lines from a file.""" - with open(file_path, "r") as file: - return file.readlines()[-n:] - - if os.path.exists(stdout_log): - print("----- STDOUT LOG -----") - print("".join(read_last_n_lines(stdout_log, 50))) - else: - print(f"STDOUT log file not found at {stdout_log}") - - if os.path.exists(stderr_log): - print("----- STDERR LOG -----") - print("".join(read_last_n_lines(stderr_log, 50))) - else: - print(f"STDERR log file not found at {stderr_log}") + return check_message_in_log(stdout_log, message) def is_table_empty(table_name: str) -> bool: """ - Checks if a table in the database is empty. + Checks if a table in the database exists and is empty. Args: table_name (str): The name of the table to check. Returns: - bool: True if the table is empty, False otherwise. + bool: True if the table does not exist or is empty, False otherwise. 
""" try: - columns, rows = read_table(table_name) - if not rows: - print(f"Table '{table_name}' is empty.") - return True - else: - print(f"Table '{table_name}' is not empty.") - return False + with DB() as (conn, cursor): + # Check if table exists + cursor.execute( + "SELECT name FROM sqlite_master WHERE type='table' AND name=?", (table_name,) + ) + table_exists = cursor.fetchone() + if not table_exists: + print(f"Table '{table_name}' does not exist.") + return True + + # Check if table is empty + cursor.execute(f"SELECT COUNT(*) FROM {table_name}") + count = cursor.fetchone()[0] + if count == 0: + print(f"Table '{table_name}' is empty.") + return True + else: + print(f"Table '{table_name}' is not empty.") + return False except Exception as e: print(f"Error checking if table '{table_name}' is empty: {e}") return False @@ -174,6 +156,8 @@ def start_if_existing_commits( ): # check if table is empty if not is_table_empty("commits"): + # Stop then restart in case there are updates to the code + stop_commit_reveal_subprocess() start_commit_reveal_subprocess(network, sleep_interval) else: print( @@ -218,25 +202,29 @@ def start_commit_reveal_subprocess( if sleep_interval: args.extend(["--sleep-interval", str(sleep_interval)]) - # Create a new subprocess - process = subprocess.Popen( - args=args, - stdout=stdout_file, - stderr=stderr_file, - preexec_fn=os.setsid, - env=env, - ) - print(f"Subprocess '{PROCESS_NAME}' started with PID {process.pid}.") - - attempt_count = 0 - - while not is_commit_reveal_subprocess_ready() and attempt_count < 5: - time.sleep(3) - print("Waiting for commit_reveal subprocess to be ready.") - attempt_count += 1 - - if attempt_count >= 5: - print("Max attempts reached. 
Subprocess may not be ready.") + try: + # Create a new subprocess + process = subprocess.Popen( + args=args, + stdout=open(stdout_log, "a"), # Redirect subprocess stdout to log file + stderr=open(stderr_log, "a"), # Redirect subprocess stderr to log file + preexec_fn=os.setsid, + env=env, + ) + print(f"Subprocess '{PROCESS_NAME}' started with PID {process.pid}.") + + attempt_count = 0 + while not is_commit_reveal_subprocess_ready() and attempt_count < 5: + time.sleep(5) + print( + f"Waiting for commit_reveal subprocess to be ready. Attempt {attempt_count + 1}..." + ) + attempt_count += 1 + + if attempt_count >= 5: + print("Max attempts reached. Subprocess may not be ready.") + except Exception as e: + print(f"Failed to start subprocess '{PROCESS_NAME}': {e}") else: print(f"Subprocess '{PROCESS_NAME}' is already running.") From 5aa979ceced339c059adab05f1766cb109988284 Mon Sep 17 00:00:00 2001 From: Roman <167799377+roman-opentensor@users.noreply.github.com> Date: Mon, 4 Nov 2024 09:03:54 -0800 Subject: [PATCH 39/58] remove unused prometheus extrinsic (#2378) --- bittensor/core/extrinsics/prometheus.py | 187 ------------------ bittensor/core/subtensor.py | 37 ---- .../unit_tests/extrinsics/test_prometheus.py | 167 ---------------- tests/unit_tests/test_subtensor.py | 154 --------------- 4 files changed, 545 deletions(-) delete mode 100644 bittensor/core/extrinsics/prometheus.py delete mode 100644 tests/unit_tests/extrinsics/test_prometheus.py diff --git a/bittensor/core/extrinsics/prometheus.py b/bittensor/core/extrinsics/prometheus.py deleted file mode 100644 index a6ab1cfb16..0000000000 --- a/bittensor/core/extrinsics/prometheus.py +++ /dev/null @@ -1,187 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2024 Opentensor Foundation -# -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the 
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. -# -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -import json -from typing import Optional, TYPE_CHECKING - -from retry import retry - -from bittensor.core.extrinsics.utils import submit_extrinsic -from bittensor.core.settings import version_as_int, bt_console -from bittensor.utils import networking as net, format_error_message -from bittensor.utils.btlogging import logging -from bittensor.utils.networking import ensure_connected - -# For annotation purposes -if TYPE_CHECKING: - from bittensor_wallet import Wallet - from bittensor.core.subtensor import Subtensor - from bittensor.core.types import PrometheusServeCallParams - - -# Chain call for `prometheus_extrinsic` -@ensure_connected -def do_serve_prometheus( - self: "Subtensor", - wallet: "Wallet", - call_params: "PrometheusServeCallParams", - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, -) -> tuple[bool, Optional[dict]]: - """ - Sends a serve prometheus extrinsic to the chain. - - Args: - self (bittensor.core.subtensor.Subtensor): Bittensor subtensor object - wallet (bittensor_wallet.Wallet): Wallet object. - call_params (bittensor.core.types.PrometheusServeCallParams): Prometheus serve call parameters. 
- wait_for_inclusion (bool): If ``true``, waits for inclusion. - wait_for_finalization (bool): If ``true``, waits for finalization. - - Returns: - success (bool): ``True`` if serve prometheus was successful. - error (Optional[str]): Error message if serve prometheus failed, ``None`` otherwise. - """ - - @retry(delay=1, tries=3, backoff=2, max_delay=4) - def make_substrate_call_with_retry(): - call = self.substrate.compose_call( - call_module="SubtensorModule", - call_function="serve_prometheus", - call_params=call_params, - ) - extrinsic = self.substrate.create_signed_extrinsic( - call=call, keypair=wallet.hotkey - ) - response = submit_extrinsic( - substrate=self.substrate, - extrinsic=extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - if wait_for_inclusion or wait_for_finalization: - response.process_events() - if response.is_success: - return True, None - else: - return False, response.error_message - else: - return True, None - - return make_substrate_call_with_retry() - - -def prometheus_extrinsic( - subtensor: "Subtensor", - wallet: "Wallet", - port: int, - netuid: int, - ip: int = None, - wait_for_inclusion: bool = False, - wait_for_finalization=True, -) -> bool: - """Subscribes a Bittensor endpoint to the Subtensor chain. - - Args: - subtensor (bittensor.core.subtensor.Subtensor): Bittensor subtensor object. - wallet (bittensor_wallet.Wallet): Bittensor wallet object. - ip (str): Endpoint host port i.e., ``192.122.31.4``. - port (int): Endpoint port number i.e., `9221`. - netuid (int): Network `uid` to serve on. - wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. - wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. 
- - Returns: - success (bool): Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. - """ - - # Get external ip - if ip is None: - try: - external_ip = net.get_external_ip() - bt_console.print( - f":white_heavy_check_mark: [green]Found external ip: {external_ip}[/green]" - ) - logging.success(prefix="External IP", suffix="{external_ip}") - except Exception as e: - raise RuntimeError( - f"Unable to attain your external ip. Check your internet connection. error: {e}" - ) from e - else: - external_ip = ip - - call_params: "PrometheusServeCallParams" = { - "version": version_as_int, - "ip": net.ip_to_int(external_ip), - "port": port, - "ip_type": net.ip_version(external_ip), - } - - with bt_console.status(":satellite: Checking Prometheus..."): - neuron = subtensor.get_neuron_for_pubkey_and_subnet( - wallet.hotkey.ss58_address, netuid=netuid - ) - neuron_up_to_date = not neuron.is_null and call_params == { - "version": neuron.prometheus_info.version, - "ip": net.ip_to_int(neuron.prometheus_info.ip), - "port": neuron.prometheus_info.port, - "ip_type": neuron.prometheus_info.ip_type, - } - - if neuron_up_to_date: - bt_console.print( - f":white_heavy_check_mark: [green]Prometheus already Served[/green]\n" - f"[green not bold]- Status: [/green not bold] |" - f"[green not bold] ip: [/green not bold][white not bold]{neuron.prometheus_info.ip}[/white not bold] |" - f"[green not bold] ip_type: [/green not bold][white not bold]{neuron.prometheus_info.ip_type}[/white not bold] |" - f"[green not bold] port: [/green not bold][white not bold]{neuron.prometheus_info.port}[/white not bold] | " - f"[green not bold] version: [/green not bold][white not bold]{neuron.prometheus_info.version}[/white not bold] |" - ) - - bt_console.print( - f":white_heavy_check_mark: [white]Prometheus already served.[/white]" - ) - return True - - # Add netuid, not in prometheus_info - call_params["netuid"] = netuid 
- - with bt_console.status( - f":satellite: Serving prometheus on: [white]{subtensor.network}:{netuid}[/white] ..." - ): - success, error_message = do_serve_prometheus( - self=subtensor, - wallet=wallet, - call_params=call_params, - wait_for_finalization=wait_for_finalization, - wait_for_inclusion=wait_for_inclusion, - ) - - if wait_for_inclusion or wait_for_finalization: - if success is True: - json_ = json.dumps(call_params, indent=4, sort_keys=True) - bt_console.print( - f":white_heavy_check_mark: [green]Served prometheus[/green]\n [bold white]{json_}[/bold white]" - ) - return True - else: - bt_console.print( - f":cross_mark: [red]Failed[/red]: {format_error_message(error_message)}" - ) - return False - else: - return True diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index ac6c46bc46..3ca0dc146d 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -52,10 +52,6 @@ commit_weights_extrinsic, reveal_weights_extrinsic, ) -from bittensor.core.extrinsics.prometheus import ( - do_serve_prometheus, - prometheus_extrinsic, -) from bittensor.core.extrinsics.registration import ( burned_register_extrinsic, register_extrinsic, @@ -1269,37 +1265,6 @@ def make_substrate_call_with_retry(): return NeuronInfo.from_vec_u8(result) - # Community uses this method - def serve_prometheus( - self, - wallet: "Wallet", - port: int, - netuid: int, - wait_for_inclusion: bool = False, - wait_for_finalization: bool = True, - ) -> bool: - """ - Serves Prometheus metrics by submitting an extrinsic to a blockchain network via the specified wallet. The function allows configuring whether to wait for the transaction's inclusion in a block and its finalization. - - Args: - wallet (bittensor_wallet.Wallet): Bittensor wallet instance used for submitting the extrinsic. - port (int): The port number on which Prometheus metrics are served. - netuid (int): The unique identifier of the subnetwork. 
- wait_for_inclusion (bool): If True, waits for the transaction to be included in a block. Defaults to ``False``. - wait_for_finalization (bool): If True, waits for the transaction to be finalized. Defaults to ``True``. - - Returns: - bool: Returns True if the Prometheus extrinsic is successfully processed, otherwise False. - """ - return prometheus_extrinsic( - self, - wallet=wallet, - port=port, - netuid=netuid, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - # Community uses this method def get_subnet_hyperparameters( self, netuid: int, block: Optional[int] = None @@ -2047,7 +2012,5 @@ def make_substrate_call_with_retry(encoded_hotkey_: list[int]): return DelegateInfo.from_vec_u8(result) - # Subnet 27 uses this method - _do_serve_prometheus = do_serve_prometheus # Subnet 27 uses this method name _do_serve_axon = do_serve_axon diff --git a/tests/unit_tests/extrinsics/test_prometheus.py b/tests/unit_tests/extrinsics/test_prometheus.py deleted file mode 100644 index dbcfed1e47..0000000000 --- a/tests/unit_tests/extrinsics/test_prometheus.py +++ /dev/null @@ -1,167 +0,0 @@ -# The MIT License (MIT) -# Copyright © 2024 Opentensor Foundation -# -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. -# -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - -from unittest.mock import MagicMock, patch - -import pytest -from bittensor_wallet import Wallet - -from bittensor.core.extrinsics.prometheus import ( - prometheus_extrinsic, -) -from bittensor.core.subtensor import Subtensor -from bittensor.core.settings import version_as_int - - -# Mocking the bittensor and networking modules -@pytest.fixture -def mock_bittensor(): - with patch("bittensor.core.subtensor.Subtensor") as mock: - yield mock - - -@pytest.fixture -def mock_wallet(): - with patch("bittensor_wallet.Wallet") as mock: - yield mock - - -@pytest.fixture -def mock_net(): - with patch("bittensor.utils.networking") as mock: - yield mock - - -@pytest.mark.parametrize( - "ip, port, netuid, wait_for_inclusion, wait_for_finalization, expected_result, test_id", - [ - (None, 9221, 0, False, True, True, "happy-path-default-ip"), - ("192.168.0.1", 9221, 0, False, True, True, "happy-path-custom-ip"), - (None, 9221, 0, True, False, True, "happy-path-wait-for-inclusion"), - (None, 9221, 0, False, False, True, "happy-path-no-waiting"), - ], -) -def test_prometheus_extrinsic_happy_path( - mock_bittensor, - mock_wallet, - mock_net, - ip, - port, - netuid, - wait_for_inclusion, - wait_for_finalization, - expected_result, - test_id, -): - # Arrange - subtensor = MagicMock(spec=Subtensor) - subtensor.network = "test_network" - subtensor.substrate = MagicMock() - wallet = MagicMock(spec=Wallet) - mock_net.get_external_ip.return_value = "192.168.0.1" - mock_net.ip_to_int.return_value = 3232235521 # IP in integer form - mock_net.ip_version.return_value = 4 - neuron = MagicMock() - neuron.is_null = False - neuron.prometheus_info.version = version_as_int - neuron.prometheus_info.ip = 3232235521 - 
neuron.prometheus_info.port = port - neuron.prometheus_info.ip_type = 4 - subtensor.get_neuron_for_pubkey_and_subnet.return_value = neuron - subtensor._do_serve_prometheus.return_value = (True, None) - - # Act - result = prometheus_extrinsic( - subtensor=subtensor, - wallet=wallet, - ip=ip, - port=port, - netuid=netuid, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - # Assert - assert result == expected_result, f"Test ID: {test_id}" - - -# Edge cases -@pytest.mark.parametrize( - "ip, port, netuid, test_id", - [ - ("0.0.0.0", 0, 0, "edge-case-min-values"), - ("255.255.255.255", 65535, 2147483647, "edge-case-max-values"), - ], -) -def test_prometheus_extrinsic_edge_cases( - mock_bittensor, mock_wallet, mock_net, ip, port, netuid, test_id -): - # Arrange - subtensor = MagicMock(spec=Subtensor) - subtensor.network = "test_network" - subtensor.substrate = MagicMock() - wallet = MagicMock(spec=Wallet) - mock_net.get_external_ip.return_value = ip - mock_net.ip_to_int.return_value = 3232235521 # IP in integer form - mock_net.ip_version.return_value = 4 - neuron = MagicMock() - neuron.is_null = True - subtensor.get_neuron_for_pubkey_and_subnet.return_value = neuron - subtensor._do_serve_prometheus.return_value = (True, None) - - # Act - result = prometheus_extrinsic( - subtensor=subtensor, - wallet=wallet, - ip=ip, - port=port, - netuid=netuid, - wait_for_inclusion=False, - wait_for_finalization=True, - ) - - # Assert - assert result is True, f"Test ID: {test_id}" - - -# Error cases -def test_prometheus_extrinsic_error_cases(mock_bittensor, mock_wallet, mocker): - # Arrange - subtensor = MagicMock(spec=Subtensor) - subtensor.network = "test_network" - subtensor.substrate = MagicMock() - subtensor.substrate.websocket.sock.getsockopt.return_value = 0 - wallet = MagicMock(spec=Wallet) - neuron = MagicMock() - neuron.is_null = True - subtensor.get_neuron_for_pubkey_and_subnet.return_value = neuron - 
subtensor._do_serve_prometheus.return_value = (True,) - - with mocker.patch( - "bittensor.utils.networking.get_external_ip", side_effect=RuntimeError - ): - # Act & Assert - with pytest.raises(RuntimeError): - prometheus_extrinsic( - subtensor=subtensor, - wallet=wallet, - ip=None, - port=9221, - netuid=1, - wait_for_inclusion=False, - wait_for_finalization=True, - ) diff --git a/tests/unit_tests/test_subtensor.py b/tests/unit_tests/test_subtensor.py index 6d8fb1ff5f..a818f22c55 100644 --- a/tests/unit_tests/test_subtensor.py +++ b/tests/unit_tests/test_subtensor.py @@ -1406,160 +1406,6 @@ def test_neuron_for_uid_success(subtensor, mocker): assert result == mocked_neuron_from_vec_u8.return_value -def test_do_serve_prometheus_is_success(subtensor, mocker): - """Successful do_serve_prometheus call.""" - # Prep - fake_wallet = mocker.MagicMock() - fake_call_params = mocker.MagicMock() - fake_wait_for_inclusion = True - fake_wait_for_finalization = True - - subtensor.substrate.submit_extrinsic.return_value.is_success = True - - # Call - result = subtensor._do_serve_prometheus( - wallet=fake_wallet, - call_params=fake_call_params, - wait_for_inclusion=fake_wait_for_inclusion, - wait_for_finalization=fake_wait_for_finalization, - ) - - # Asserts - subtensor.substrate.compose_call.assert_called_once_with( - call_module="SubtensorModule", - call_function="serve_prometheus", - call_params=fake_call_params, - ) - - subtensor.substrate.create_signed_extrinsic.assert_called_once_with( - call=subtensor.substrate.compose_call.return_value, - keypair=fake_wallet.hotkey, - ) - - subtensor.substrate.submit_extrinsic.assert_called_once_with( - subtensor.substrate.create_signed_extrinsic.return_value, - wait_for_inclusion=fake_wait_for_inclusion, - wait_for_finalization=fake_wait_for_finalization, - ) - - subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once() - assert result == (True, None) - - -def test_do_serve_prometheus_is_not_success(subtensor, 
mocker): - """Unsuccessful do_serve_axon call.""" - # Prep - fake_wallet = mocker.MagicMock() - fake_call_params = mocker.MagicMock() - fake_wait_for_inclusion = True - fake_wait_for_finalization = True - - subtensor.substrate.submit_extrinsic.return_value.is_success = None - - # Call - result = subtensor._do_serve_prometheus( - wallet=fake_wallet, - call_params=fake_call_params, - wait_for_inclusion=fake_wait_for_inclusion, - wait_for_finalization=fake_wait_for_finalization, - ) - - # Asserts - subtensor.substrate.compose_call.assert_called_once_with( - call_module="SubtensorModule", - call_function="serve_prometheus", - call_params=fake_call_params, - ) - - subtensor.substrate.create_signed_extrinsic.assert_called_once_with( - call=subtensor.substrate.compose_call.return_value, - keypair=fake_wallet.hotkey, - ) - - subtensor.substrate.submit_extrinsic.assert_called_once_with( - subtensor.substrate.create_signed_extrinsic.return_value, - wait_for_inclusion=fake_wait_for_inclusion, - wait_for_finalization=fake_wait_for_finalization, - ) - - subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once() - assert result == ( - False, - subtensor.substrate.submit_extrinsic.return_value.error_message, - ) - - -def test_do_serve_prometheus_no_waits(subtensor, mocker): - """Unsuccessful do_serve_axon call.""" - # Prep - fake_wallet = mocker.MagicMock() - fake_call_params = mocker.MagicMock() - fake_wait_for_inclusion = False - fake_wait_for_finalization = False - - # Call - result = subtensor._do_serve_prometheus( - wallet=fake_wallet, - call_params=fake_call_params, - wait_for_inclusion=fake_wait_for_inclusion, - wait_for_finalization=fake_wait_for_finalization, - ) - - # Asserts - subtensor.substrate.compose_call.assert_called_once_with( - call_module="SubtensorModule", - call_function="serve_prometheus", - call_params=fake_call_params, - ) - - subtensor.substrate.create_signed_extrinsic.assert_called_once_with( - 
call=subtensor.substrate.compose_call.return_value, - keypair=fake_wallet.hotkey, - ) - - subtensor.substrate.submit_extrinsic.assert_called_once_with( - subtensor.substrate.create_signed_extrinsic.return_value, - wait_for_inclusion=fake_wait_for_inclusion, - wait_for_finalization=fake_wait_for_finalization, - ) - assert result == (True, None) - - -def test_serve_prometheus(subtensor, mocker): - """Test serve_prometheus function successful call.""" - # Preps - fake_wallet = mocker.MagicMock() - fake_port = 1234 - fake_netuid = 1 - wait_for_inclusion = True - wait_for_finalization = False - - mocked_prometheus_extrinsic = mocker.patch.object( - subtensor_module, "prometheus_extrinsic" - ) - - # Call - result = subtensor.serve_prometheus( - fake_wallet, - fake_port, - fake_netuid, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - # Asserts - mocked_prometheus_extrinsic.assert_called_once_with( - subtensor, - wallet=fake_wallet, - port=fake_port, - netuid=fake_netuid, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - assert result == mocked_prometheus_extrinsic.return_value - - def test_do_serve_axon_is_success(subtensor, mocker): """Successful do_serve_axon call.""" # Prep From ffbc2d373f99a43fd6b2ca43fba96af0308b30da Mon Sep 17 00:00:00 2001 From: Roman <167799377+roman-opentensor@users.noreply.github.com> Date: Mon, 4 Nov 2024 09:40:35 -0800 Subject: [PATCH 40/58] Replace rich.console to btlogging.loggin (#2377) * replace `rich.console` to `btlogging.logging` * update requirements * use whole path import * fix some logging * fix registration.py * ruff * del prometheus.py * fix review comments --- bittensor/core/extrinsics/registration.py | 247 ++++++++---------- bittensor/core/extrinsics/root.py | 128 ++++----- bittensor/core/extrinsics/serving.py | 7 +- bittensor/core/extrinsics/set_weights.py | 67 +++-- bittensor/core/extrinsics/transfer.py | 95 ++++--- 
bittensor/core/metagraph.py | 9 +- bittensor/core/settings.py | 30 --- bittensor/core/subtensor.py | 4 +- bittensor/utils/btlogging/format.py | 4 + bittensor/utils/btlogging/loggingmachine.py | 13 +- bittensor/utils/registration.py | 18 +- requirements/prod.txt | 1 - scripts/environments/apple_m1_environment.yml | 1 - tests/helpers/__init__.py | 1 - tests/helpers/helpers.py | 55 +--- .../test_subtensor_integration.py | 24 +- 16 files changed, 285 insertions(+), 419 deletions(-) diff --git a/bittensor/core/extrinsics/registration.py b/bittensor/core/extrinsics/registration.py index 2528368094..8f7f3292b9 100644 --- a/bittensor/core/extrinsics/registration.py +++ b/bittensor/core/extrinsics/registration.py @@ -22,7 +22,6 @@ from retry import retry from rich.prompt import Confirm -from bittensor.core.settings import bt_console from bittensor.utils import format_error_message from bittensor.utils.btlogging import logging from bittensor.utils.networking import ensure_connected @@ -142,24 +141,22 @@ def register_extrinsic( Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. """ if not subtensor.subnet_exists(netuid): - bt_console.print( - ":cross_mark: [red]Failed[/red]: error: [bold white]subnet:{}[/bold white] does not exist.".format( - netuid - ) + logging.error( + f":cross_mark: Failed: Subnet {netuid} does not exist." ) return False - with bt_console.status( - f":satellite: Checking Account on [bold]subnet:{netuid}[/bold]..." - ): - neuron = subtensor.get_neuron_for_pubkey_and_subnet( - wallet.hotkey.ss58_address, netuid=netuid + logging.info( + f":satellite: Checking Account on subnet {netuid}..." + ) + neuron = subtensor.get_neuron_for_pubkey_and_subnet( + wallet.hotkey.ss58_address, netuid=netuid + ) + if not neuron.is_null: + logging.debug( + f"Wallet {wallet} is already registered on {neuron.netuid} with {neuron.uid}." 
) - if not neuron.is_null: - logging.debug( - f"Wallet {wallet} is already registered on {neuron.netuid} with {neuron.uid}" - ) - return True + return True if prompt: if not Confirm.ask( @@ -178,14 +175,14 @@ def register_extrinsic( # Attempt rolling registration. attempts = 1 while True: - bt_console.print( - ":satellite: Registering...({}/{})".format(attempts, max_allowed_attempts) + logging.info( + f":satellite: Registering... ({attempts}/{max_allowed_attempts})" ) # Solve latest POW. if cuda: if not torch.cuda.is_available(): if prompt: - bt_console.print("CUDA is not available.") + logging.info("CUDA is not available.") return False pow_result: Optional[POWSolution] = create_pow( subtensor, @@ -218,73 +215,71 @@ def register_extrinsic( netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address ) if is_registered: - bt_console.print( - f":white_heavy_check_mark: [green]Already registered on netuid:{netuid}[/green]" + logging.info( + f":white_heavy_check_mark: Already registered on netuid: {netuid}." 
) return True # pow successful, proceed to submit pow to chain for registration else: - with bt_console.status(":satellite: Submitting POW..."): - # check if pow result is still valid - while not pow_result.is_stale(subtensor=subtensor): - result: tuple[bool, Optional[str]] = _do_pow_register( - self=subtensor, - netuid=netuid, - wallet=wallet, - pow_result=pow_result, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - success, err_msg = result - - if not success: - # Look error here - # https://github.com/opentensor/subtensor/blob/development/pallets/subtensor/src/errors.rs - if "HotKeyAlreadyRegisteredInSubNet" in err_msg: - bt_console.print( - f":white_heavy_check_mark: [green]Already Registered on [bold]subnet:{netuid}[/bold][/green]" - ) - return True + logging.info(":satellite: Submitting POW...") + # check if pow result is still valid + while not pow_result.is_stale(subtensor=subtensor): + result: tuple[bool, Optional[str]] = _do_pow_register( + self=subtensor, + netuid=netuid, + wallet=wallet, + pow_result=pow_result, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + success, err_msg = result + + if not success: + # Look error here + # https://github.com/opentensor/subtensor/blob/development/pallets/subtensor/src/errors.rs + if "HotKeyAlreadyRegisteredInSubNet" in err_msg: + logging.info( + f":white_heavy_check_mark: Already Registered on subnet {netuid}." 
+ ) + return True - bt_console.print(f":cross_mark: [red]Failed[/red]: {err_msg}") - time.sleep(0.5) + logging.error(f":cross_mark: Failed: {err_msg}") + time.sleep(0.5) - # Successful registration, final check for neuron and pubkey + # Successful registration, final check for neuron and pubkey + else: + logging.info(":satellite: Checking Balance...") + is_registered = subtensor.is_hotkey_registered( + hotkey_ss58=wallet.hotkey.ss58_address, + netuid=netuid, + ) + if is_registered: + logging.info( + ":white_heavy_check_mark: Registered" + ) + return True else: - bt_console.print(":satellite: Checking Balance...") - is_registered = subtensor.is_hotkey_registered( - hotkey_ss58=wallet.hotkey.ss58_address, - netuid=netuid, + # neuron not found, try again + logging.error( + ":cross_mark: Unknown error. Neuron not found." ) - if is_registered: - bt_console.print( - ":white_heavy_check_mark: [green]Registered[/green]" - ) - return True - else: - # neuron not found, try again - bt_console.print( - ":cross_mark: [red]Unknown error. Neuron not found.[/red]" - ) - continue - else: - # Exited loop because pow is no longer valid. - bt_console.print("[red]POW is stale.[/red]") - # Try again. - continue + continue + else: + # Exited loop because pow is no longer valid. + logging.error("POW is stale.") + # Try again. + continue if attempts < max_allowed_attempts: # Failed registration, retry pow attempts += 1 - bt_console.print( - ":satellite: Failed registration, retrying pow ...({}/{})".format( - attempts, max_allowed_attempts - ) + logging.info( + f":satellite: Failed registration, retrying pow ... ({attempts}/{max_allowed_attempts})" ) else: # Failed to register after max attempts. - bt_console.print("[red]No more attempts.[/red]") + logging.error("No more attempts.") return False @@ -370,82 +365,70 @@ def burned_register_extrinsic( success (bool): Flag is ``true`` if extrinsic was finalized or uncluded in the block. 
If we did not wait for finalization / inclusion, the response is ``true``. """ if not subtensor.subnet_exists(netuid): - bt_console.print( - ":cross_mark: [red]Failed[/red]: error: [bold white]subnet:{}[/bold white] does not exist.".format( - netuid - ) + logging.error( + f":cross_mark: Failed error: subnet {netuid} does not exist." ) return False try: wallet.unlock_coldkey() except KeyFileError: - bt_console.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" + logging.error( + ":cross_mark: Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid." ) return False - with bt_console.status( - f":satellite: Checking Account on [bold]subnet:{netuid}[/bold]..." - ): - neuron = subtensor.get_neuron_for_pubkey_and_subnet( - wallet.hotkey.ss58_address, netuid=netuid - ) - - old_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) - - recycle_amount = subtensor.recycle(netuid=netuid) - if not neuron.is_null: - bt_console.print( - ":white_heavy_check_mark: [green]Already Registered[/green]:\n" - "uid: [bold white]{}[/bold white]\n" - "netuid: [bold white]{}[/bold white]\n" - "hotkey: [bold white]{}[/bold white]\n" - "coldkey: [bold white]{}[/bold white]".format( - neuron.uid, neuron.netuid, neuron.hotkey, neuron.coldkey - ) - ) - return True + logging.info( + f":satellite: Checking Account on subnet {netuid} ..." 
+ ) + neuron = subtensor.get_neuron_for_pubkey_and_subnet( + wallet.hotkey.ss58_address, netuid=netuid + ) + + old_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) + + recycle_amount = subtensor.recycle(netuid=netuid) + if not neuron.is_null: + logging.info(":white_heavy_check_mark: Already Registered") + logging.info(f"\t\tuid: {neuron.uid}") + logging.info(f"\t\tnetuid: {neuron.netuid}") + logging.info(f"\t\thotkey: {neuron.hotkey}") + logging.info(f"\t\tcoldkey: {neuron.coldkey}") + return True if prompt: # Prompt user for confirmation. if not Confirm.ask(f"Recycle {recycle_amount} to register on subnet:{netuid}?"): return False - with bt_console.status(":satellite: Recycling TAO for Registration..."): - success, err_msg = _do_burned_register( - self=subtensor, - netuid=netuid, - wallet=wallet, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, + logging.info(":satellite: Recycling TAO for Registration...") + success, err_msg = _do_burned_register( + self=subtensor, + netuid=netuid, + wallet=wallet, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + + if not success: + logging.error(f":cross_mark: Failed: {err_msg}") + time.sleep(0.5) + return False + # Successful registration, final check for neuron and pubkey + else: + logging.info(":satellite: Checking Balance...") + block = subtensor.get_current_block() + new_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address, block=block) + + logging.info( + f"Balance: {old_balance} :arrow_right: {new_balance}" ) - - if not success: - bt_console.print(f":cross_mark: [red]Failed[/red]: {err_msg}") - time.sleep(0.5) - return False - # Successful registration, final check for neuron and pubkey + is_registered = subtensor.is_hotkey_registered( + netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address + ) + if is_registered: + logging.info(":white_heavy_check_mark: Registered") + return True else: - 
bt_console.print(":satellite: Checking Balance...") - block = subtensor.get_current_block() - new_balance = subtensor.get_balance( - wallet.coldkeypub.ss58_address, block=block - ) - - bt_console.print( - "Balance:\n [blue]{}[/blue] :arrow_right: [green]{}[/green]".format( - old_balance, new_balance - ) - ) - is_registered = subtensor.is_hotkey_registered( - netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address - ) - if is_registered: - bt_console.print(":white_heavy_check_mark: [green]Registered[/green]") - return True - else: - # neuron not found, try again - bt_console.print( - ":cross_mark: [red]Unknown error. Neuron not found.[/red]" - ) - return False + # neuron not found, try again + logging.error(":cross_mark: Unknown error. Neuron not found.") + return False diff --git a/bittensor/core/extrinsics/root.py b/bittensor/core/extrinsics/root.py index 1fd7e7b26e..129e852777 100644 --- a/bittensor/core/extrinsics/root.py +++ b/bittensor/core/extrinsics/root.py @@ -7,7 +7,7 @@ from retry import retry from rich.prompt import Confirm -from bittensor.core.settings import bt_console, version_as_int +from bittensor.core.settings import version_as_int from bittensor.utils import format_error_message, weight_utils from bittensor.utils.btlogging import logging from bittensor.utils.networking import ensure_connected @@ -80,8 +80,8 @@ def root_register_extrinsic( try: wallet.unlock_coldkey() except KeyFileError: - bt_console.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" + logging.error( + "Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid." 
) return False @@ -89,8 +89,8 @@ def root_register_extrinsic( netuid=0, hotkey_ss58=wallet.hotkey.ss58_address ) if is_registered: - bt_console.print( - ":white_heavy_check_mark: [green]Already registered on root network.[/green]" + logging.info( + ":white_heavy_check_mark: Already registered on root network." ) return True @@ -99,30 +99,28 @@ def root_register_extrinsic( if not Confirm.ask("Register to root network?"): return False - with bt_console.status(":satellite: Registering to root network..."): - success, err_msg = _do_root_register( - wallet=wallet, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) + logging.info(":satellite: Registering to root network...") + success, err_msg = _do_root_register( + wallet=wallet, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) - if not success: - bt_console.print(f":cross_mark: [red]Failed[/red]: {err_msg}") - time.sleep(0.5) + if not success: + logging.error(f":cross_mark: Failed: {err_msg}") + time.sleep(0.5) - # Successful registration, final check for neuron and pubkey + # Successful registration, final check for neuron and pubkey + else: + is_registered = subtensor.is_hotkey_registered( + netuid=0, hotkey_ss58=wallet.hotkey.ss58_address + ) + if is_registered: + logging.success(":white_heavy_check_mark: Registered") + return True else: - is_registered = subtensor.is_hotkey_registered( - netuid=0, hotkey_ss58=wallet.hotkey.ss58_address - ) - if is_registered: - bt_console.print(":white_heavy_check_mark: [green]Registered[/green]") - return True - else: - # neuron not found, try again - bt_console.print( - ":cross_mark: [red]Unknown error. Neuron not found.[/red]" - ) + # neuron not found, try again + logging.error(":cross_mark: Unknown error. 
Neuron not found.") @ensure_connected @@ -222,8 +220,8 @@ def set_root_weights_extrinsic( try: wallet.unlock_coldkey() except KeyFileError: - bt_console.print( - ":cross_mark: [red]Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid[/red]:[bold white]\n [/bold white]" + logging.error( + ":cross_mark: Keyfile is corrupt, non-writable, non-readable or the password used to decrypt is invalid." ) return False @@ -252,8 +250,8 @@ def set_root_weights_extrinsic( formatted_weights = weight_utils.normalize_max_weight( x=weights, limit=max_weight_limit ) - bt_console.print( - f"\nRaw Weights -> Normalized weights: \n\t{weights} -> \n\t{formatted_weights}\n" + logging.info( + f"Raw Weights -> Normalized weights: {weights} -> {formatted_weights}" ) # Ask before moving on. @@ -265,46 +263,36 @@ def set_root_weights_extrinsic( ): return False - with bt_console.status( - ":satellite: Setting root weights on [white]{}[/white] ...".format( - subtensor.network + logging.info( + f":satellite: Setting root weights on {subtensor.network} ..." 
+ ) + try: + weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( + netuids, weights ) - ): - try: - weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( - netuids, weights - ) - success, error_message = _do_set_root_weights( - wallet=wallet, - netuid=0, - uids=weight_uids, - vals=weight_vals, - version_key=version_key, - wait_for_finalization=wait_for_finalization, - wait_for_inclusion=wait_for_inclusion, - ) + success, error_message = _do_set_root_weights( + wallet=wallet, + netuid=0, + uids=weight_uids, + vals=weight_vals, + version_key=version_key, + wait_for_finalization=wait_for_finalization, + wait_for_inclusion=wait_for_inclusion, + ) + + if not wait_for_finalization and not wait_for_inclusion: + return True - bt_console.print(success, error_message) - - if not wait_for_finalization and not wait_for_inclusion: - return True - - if success is True: - bt_console.print(":white_heavy_check_mark: [green]Finalized[/green]") - logging.success( - prefix="Set weights", - suffix="Finalized: " + str(success), - ) - return True - else: - bt_console.print(f":cross_mark: [red]Failed[/red]: {error_message}") - logging.warning( - prefix="Set weights", - suffix="Failed: " + str(error_message), - ) - return False - - except Exception as e: - bt_console.print(":cross_mark: [red]Failed[/red]: error:{}".format(e)) - logging.warning(prefix="Set weights", suffix="Failed: " + str(e)) + if success is True: + logging.info(":white_heavy_check_mark: Finalized") + logging.success(f"Set weights {str(success)}") + return True + else: + logging.error( + f":cross_mark: Failed set weights. {str(error_message)}" + ) return False + + except Exception as e: + logging.error(f":cross_mark: Failed set weights. 
{str(e)}") + return False diff --git a/bittensor/core/extrinsics/serving.py b/bittensor/core/extrinsics/serving.py index 490f9c268e..ac712cd8cb 100644 --- a/bittensor/core/extrinsics/serving.py +++ b/bittensor/core/extrinsics/serving.py @@ -23,7 +23,7 @@ from bittensor.core.errors import MetadataError from bittensor.core.extrinsics.utils import submit_extrinsic -from bittensor.core.settings import version_as_int, bt_console +from bittensor.core.settings import version_as_int from bittensor.utils import format_error_message, networking as net from bittensor.utils.btlogging import logging from bittensor.utils.networking import ensure_connected @@ -219,10 +219,9 @@ def serve_axon_extrinsic( if axon.external_ip is None: try: external_ip = net.get_external_ip() - bt_console.print( - f":white_heavy_check_mark: [green]Found external ip: {external_ip}[/green]" + logging.success( + f":white_heavy_check_mark: Found external ip: {external_ip}" ) - logging.success(prefix="External IP", suffix=f"{external_ip}") except Exception as e: raise RuntimeError( f"Unable to attain your external ip. Check your internet connection. error: {e}" diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index 7680061c5b..98f4c16917 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -24,7 +24,7 @@ from rich.prompt import Confirm from bittensor.core.extrinsics.utils import submit_extrinsic -from bittensor.core.settings import bt_console, version_as_int +from bittensor.core.settings import version_as_int from bittensor.utils import format_error_message, weight_utils from bittensor.utils.btlogging import logging from bittensor.utils.networking import ensure_connected @@ -157,38 +157,33 @@ def set_weights_extrinsic( ): return False, "Prompt refused." - with bt_console.status( - f":satellite: Setting weights on [white]{subtensor.network}[/white] ..." 
- ): - try: - success, error_message = do_set_weights( - self=subtensor, - wallet=wallet, - netuid=netuid, - uids=weight_uids, - vals=weight_vals, - version_key=version_key, - wait_for_finalization=wait_for_finalization, - wait_for_inclusion=wait_for_inclusion, - ) - - if not wait_for_finalization and not wait_for_inclusion: - return True, "Not waiting for finalization or inclusion." - - if success is True: - bt_console.print(":white_heavy_check_mark: [green]Finalized[/green]") - logging.success( - msg=str(success), - prefix="Set weights", - suffix="Finalized: ", - ) - return True, "Successfully set weights and Finalized." - else: - error_message = format_error_message(error_message) - logging.error(error_message) - return False, error_message - - except Exception as e: - bt_console.print(f":cross_mark: [red]Failed[/red]: error:{e}") - logging.debug(str(e)) - return False, str(e) + logging.info( + f":satellite: Setting weights on {subtensor.network} ..." + ) + try: + success, error_message = do_set_weights( + self=subtensor, + wallet=wallet, + netuid=netuid, + uids=weight_uids, + vals=weight_vals, + version_key=version_key, + wait_for_finalization=wait_for_finalization, + wait_for_inclusion=wait_for_inclusion, + ) + + if not wait_for_finalization and not wait_for_inclusion: + return True, "Not waiting for finalization or inclusion." + + if success is True: + logging.success(f"Finalized! Set weights: {str(success)}") + return True, "Successfully set weights and Finalized." 
+ else: + error_message = format_error_message(error_message) + logging.error(error_message) + return False, error_message + + except Exception as e: + logging.error(f":cross_mark: Failed.: Error: {e}") + logging.debug(str(e)) + return False, str(e) diff --git a/bittensor/core/extrinsics/transfer.py b/bittensor/core/extrinsics/transfer.py index 896fecbf96..aaa2795583 100644 --- a/bittensor/core/extrinsics/transfer.py +++ b/bittensor/core/extrinsics/transfer.py @@ -21,13 +21,14 @@ from rich.prompt import Confirm from bittensor.core.extrinsics.utils import submit_extrinsic -from bittensor.core.settings import bt_console, NETWORK_EXPLORER_MAP +from bittensor.core.settings import NETWORK_EXPLORER_MAP from bittensor.utils import ( get_explorer_url_for_network, format_error_message, is_valid_bittensor_address_or_public_key, ) from bittensor.utils.balance import Balance +from bittensor.utils.btlogging import logging from bittensor.utils.networking import ensure_connected # For annotation purposes @@ -121,9 +122,7 @@ def transfer_extrinsic( """ # Validate destination address. if not is_valid_bittensor_address_or_public_key(dest): - bt_console.print( - f":cross_mark: [red]Invalid destination address[/red]:[bold white]\n {dest}[/bold white]" - ) + logging.error(f"Invalid destination address: {dest}") return False if isinstance(dest, bytes): @@ -140,15 +139,15 @@ def transfer_extrinsic( transfer_balance = amount # Check balance. - with bt_console.status(":satellite: Checking Balance..."): - account_balance = subtensor.get_balance(wallet.coldkey.ss58_address) - # check existential deposit. - existential_deposit = subtensor.get_existential_deposit() - - with bt_console.status(":satellite: Transferring..."): - fee = subtensor.get_transfer_fee( - wallet=wallet, dest=dest, value=transfer_balance.rao - ) + logging.info(":satellite: Checking Balance...") + account_balance = subtensor.get_balance(wallet.coldkey.ss58_address) + # check existential deposit. 
+ existential_deposit = subtensor.get_existential_deposit() + + logging.info(":satellite: Transferring...") + fee = subtensor.get_transfer_fee( + wallet=wallet, dest=dest, value=transfer_balance.rao + ) if not keep_alive: # Check if the transfer should keep_alive the account @@ -156,12 +155,10 @@ def transfer_extrinsic( # Check if we have enough balance. if account_balance < (transfer_balance + fee + existential_deposit): - bt_console.print( - ":cross_mark: [red]Not enough balance[/red]:[bold white]\n" - f" balance: {account_balance}\n" - f" amount: {transfer_balance}\n" - f" for fee: {fee}[/bold white]" - ) + logging.error(":cross_mark: Not enough balance:") + logging.info(f"\t\tBalance: \t{account_balance}") + logging.info(f"\t\tAmount: \t{transfer_balance}") + logging.info(f"\t\tFor fee: \t{fee}") return False # Ask before moving on. @@ -175,41 +172,41 @@ def transfer_extrinsic( ): return False - with bt_console.status(":satellite: Transferring..."): - success, block_hash, error_message = do_transfer( - self=subtensor, - wallet=wallet, - dest=dest, - transfer_balance=transfer_balance, - wait_for_finalization=wait_for_finalization, - wait_for_inclusion=wait_for_inclusion, - ) + logging.info(":satellite: Transferring...") + success, block_hash, error_message = do_transfer( + self=subtensor, + wallet=wallet, + dest=dest, + transfer_balance=transfer_balance, + wait_for_finalization=wait_for_finalization, + wait_for_inclusion=wait_for_inclusion, + ) - if success: - bt_console.print(":white_heavy_check_mark: [green]Finalized[/green]") - bt_console.print(f"[green]Block Hash: {block_hash}[/green]") + if success: + logging.success(":white_heavy_check_mark: Finalized") + logging.info(f"Block Hash: {block_hash}") - explorer_urls = get_explorer_url_for_network( - subtensor.network, block_hash, NETWORK_EXPLORER_MAP + explorer_urls = get_explorer_url_for_network( + subtensor.network, block_hash, NETWORK_EXPLORER_MAP + ) + if explorer_urls != {} and explorer_urls: + 
logging.info( + f"Opentensor Explorer Link: {explorer_urls.get('opentensor')}" ) - if explorer_urls != {} and explorer_urls: - bt_console.print( - f"[green]Opentensor Explorer Link: {explorer_urls.get('opentensor')}[/green]" - ) - bt_console.print( - f"[green]Taostats Explorer Link: {explorer_urls.get('taostats')}[/green]" - ) - else: - bt_console.print( - f":cross_mark: [red]Failed[/red]: {format_error_message(error_message)}" + logging.info( + f"Taostats Explorer Link: {explorer_urls.get('taostats')}" ) + else: + logging.error( + f":cross_mark: Failed: {format_error_message(error_message)}" + ) if success: - with bt_console.status(":satellite: Checking Balance..."): - new_balance = subtensor.get_balance(wallet.coldkey.ss58_address) - bt_console.print( - f"Balance:\n [blue]{account_balance}[/blue] :arrow_right: [green]{new_balance}[/green]" - ) - return True + logging.info(":satellite: Checking Balance...") + new_balance = subtensor.get_balance(wallet.coldkey.ss58_address) + logging.success( + f"Balance: {account_balance} :arrow_right: {new_balance}" + ) + return True return False diff --git a/bittensor/core/metagraph.py b/bittensor/core/metagraph.py index 208eaa6b9f..75e8d947c9 100644 --- a/bittensor/core/metagraph.py +++ b/bittensor/core/metagraph.py @@ -1249,12 +1249,11 @@ def load_from_path(self, dir_path: str) -> "Metagraph": with open(graph_filename, "rb") as graph_file: state_dict = pickle.load(graph_file) except pickle.UnpicklingError: - settings.bt_console.print( + logging.info( "Unable to load file. Attempting to restore metagraph using torch." ) - settings.bt_console.print( - ":warning:[yellow]Warning:[/yellow] This functionality exists to load " - "metagraph state from legacy saves, but will not be supported in the future." + logging.warning( + ":warning: This functionality exists to load metagraph state from legacy saves, but will not be supported in the future." 
) try: import torch as real_torch @@ -1264,7 +1263,7 @@ def load_from_path(self, dir_path: str) -> "Metagraph": state_dict[key] = state_dict[key].detach().numpy() del real_torch except (RuntimeError, ImportError): - settings.bt_console.print("Unable to load file. It may be corrupted.") + logging.error("Unable to load file. It may be corrupted.") raise self.n = state_dict["n"] diff --git a/bittensor/core/settings.py b/bittensor/core/settings.py index fe2a707b4e..8413b5329f 100644 --- a/bittensor/core/settings.py +++ b/bittensor/core/settings.py @@ -23,36 +23,6 @@ from pathlib import Path from munch import munchify -from rich.console import Console -from rich.traceback import install - -# Rich console. -__console__ = Console() -__use_console__ = True - -# Remove overdue locals in debug training. -install(show_locals=False) - - -def turn_console_off(): - global __use_console__ - global __console__ - from io import StringIO - - __use_console__ = False - __console__ = Console(file=StringIO(), stderr=False) - - -def turn_console_on(): - global __use_console__ - global __console__ - __use_console__ = True - __console__ = Console() - - -turn_console_off() - -bt_console = __console__ HOME_DIR = Path.home() diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 3ca0dc146d..3e3c61b017 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -1735,9 +1735,7 @@ def get_transfer_fee( call=call, keypair=wallet.coldkeypub ) except Exception as e: - settings.bt_console.print( - f":cross_mark: [red]Failed to get payment info[/red]:[bold white]\n {e}[/bold white]" - ) + logging.error(f"Failed to get payment info. 
{e}") payment_info = {"partialFee": int(2e7)} # assume 0.02 Tao fee = Balance.from_rao(payment_info["partialFee"]) diff --git a/bittensor/utils/btlogging/format.py b/bittensor/utils/btlogging/format.py index 1aa505c82c..9e279a3b26 100644 --- a/bittensor/utils/btlogging/format.py +++ b/bittensor/utils/btlogging/format.py @@ -54,6 +54,8 @@ def _success(self, message: str, *args, **kws): ":white_heavy_check_mark:": "✅", ":cross_mark:": "❌", ":satellite:": "🛰️", + ":warning:": "⚠️", + ":arrow_right:": "➡️", } @@ -64,6 +66,8 @@ def _success(self, message: str, *args, **kws): "": Style.RESET_ALL, "": Fore.GREEN, "": Style.RESET_ALL, + "": Fore.MAGENTA, + "": Style.RESET_ALL, } diff --git a/bittensor/utils/btlogging/loggingmachine.py b/bittensor/utils/btlogging/loggingmachine.py index abc4758bf8..66d7cc7595 100644 --- a/bittensor/utils/btlogging/loggingmachine.py +++ b/bittensor/utils/btlogging/loggingmachine.py @@ -49,7 +49,8 @@ def _concat_message(msg="", prefix="", suffix=""): """Concatenates a message with optional prefix and suffix.""" - msg = f"{f'{prefix} - ' if prefix else ''}{msg}{f' - {suffix}' if suffix else ''}" + empty_pref_suf = [None, ""] + msg = f"{f'{prefix} - ' if prefix not in empty_pref_suf else ''}{msg}{f' - {suffix}' if suffix not in empty_pref_suf else ''}" return msg @@ -443,27 +444,27 @@ def info(self, msg="", prefix="", suffix="", *args, **kwargs): def success(self, msg="", prefix="", suffix="", *args, **kwargs): """Wraps success message with prefix and suffix.""" - msg = f"{prefix} - {msg} - {suffix}" + msg = _concat_message(msg, prefix, suffix) self._logger.success(msg, *args, **kwargs) def warning(self, msg="", prefix="", suffix="", *args, **kwargs): """Wraps warning message with prefix and suffix.""" - msg = f"{prefix} - {msg} - {suffix}" + msg = _concat_message(msg, prefix, suffix) self._logger.warning(msg, *args, **kwargs) def error(self, msg="", prefix="", suffix="", *args, **kwargs): """Wraps error message with prefix and suffix.""" - msg 
= f"{prefix} - {msg} - {suffix}" + msg = _concat_message(msg, prefix, suffix) self._logger.error(msg, *args, **kwargs) def critical(self, msg="", prefix="", suffix="", *args, **kwargs): """Wraps critical message with prefix and suffix.""" - msg = f"{prefix} - {msg} - {suffix}" + msg = _concat_message(msg, prefix, suffix) self._logger.critical(msg, *args, **kwargs) def exception(self, msg="", prefix="", suffix="", *args, **kwargs): """Wraps exception message with prefix and suffix.""" - msg = f"{prefix} - {msg} - {suffix}" + msg = _concat_message(msg, prefix, suffix) self._logger.exception(msg, *args, **kwargs) def on(self): diff --git a/bittensor/utils/registration.py b/bittensor/utils/registration.py index 46c39d3d40..4dd6d8ec67 100644 --- a/bittensor/utils/registration.py +++ b/bittensor/utils/registration.py @@ -30,12 +30,12 @@ from queue import Empty, Full from typing import Any, Callable, Optional, Union, TYPE_CHECKING -import backoff import numpy from Crypto.Hash import keccak +from retry import retry from rich import console as rich_console, status as rich_status +from rich.console import Console -from bittensor.core.settings import bt_console from bittensor.utils.btlogging import logging from bittensor.utils.formatting import get_human_readable, millify from bittensor.utils.register_cuda import solve_cuda @@ -488,12 +488,16 @@ class RegistrationStatistics: class RegistrationStatisticsLogger: """Logs statistics for a registration.""" - console: rich_console.Console status: Optional[rich_status.Status] def __init__( - self, console: rich_console.Console, output_in_place: bool = True + self, + console: Optional[rich_console.Console] = None, + output_in_place: bool = True, ) -> None: + if console is None: + console = Console() + self.console = console if output_in_place: @@ -649,7 +653,7 @@ def _solve_for_difficulty_fast( start_time_perpetual = time.time() - logger = RegistrationStatisticsLogger(bt_console, output_in_place) + logger = 
RegistrationStatisticsLogger(output_in_place=output_in_place) logger.start() solution = None @@ -735,7 +739,7 @@ def _solve_for_difficulty_fast( return solution -@backoff.on_exception(backoff.constant, Exception, interval=1, max_tries=3) +@retry(Exception, tries=3, delay=1) def _get_block_with_retry( subtensor: "Subtensor", netuid: int ) -> tuple[int, int, bytes]: @@ -953,7 +957,7 @@ def _solve_for_difficulty_fast_cuda( start_time_perpetual = time.time() - logger = RegistrationStatisticsLogger(bt_console, output_in_place) + logger = RegistrationStatisticsLogger(output_in_place=output_in_place) logger.start() hash_rates = [0] * n_samples # The last n true hash_rates diff --git a/requirements/prod.txt b/requirements/prod.txt index bed65e9d2e..17c73f6f25 100644 --- a/requirements/prod.txt +++ b/requirements/prod.txt @@ -1,7 +1,6 @@ wheel setuptools~=70.0.0 aiohttp~=3.9 -backoff bittensor-cli bt-decode colorama~=0.4.6 diff --git a/scripts/environments/apple_m1_environment.yml b/scripts/environments/apple_m1_environment.yml index 25824aa64e..7d949c7e4e 100644 --- a/scripts/environments/apple_m1_environment.yml +++ b/scripts/environments/apple_m1_environment.yml @@ -126,7 +126,6 @@ dependencies: - argparse==1.4.0 - arrow==1.2.3 - async-timeout==4.0.2 - - backoff==2.1.0 - blinker==1.6.2 - cachetools==4.2.4 - certifi==2024.2.2 diff --git a/tests/helpers/__init__.py b/tests/helpers/__init__.py index f876d249bd..3c6badb91c 100644 --- a/tests/helpers/__init__.py +++ b/tests/helpers/__init__.py @@ -18,7 +18,6 @@ import os from .helpers import ( # noqa: F401 CLOSE_IN_VALUE, - MockConsole, __mock_wallet_factory__, ) from bittensor_wallet.mock.wallet_mock import ( # noqa: F401 diff --git a/tests/helpers/helpers.py b/tests/helpers/helpers.py index 417bd643b3..41109ee5e6 100644 --- a/tests/helpers/helpers.py +++ b/tests/helpers/helpers.py @@ -22,14 +22,11 @@ from bittensor_wallet.mock.wallet_mock import get_mock_hotkey from bittensor_wallet.mock.wallet_mock import get_mock_wallet 
-from rich.console import Console -from rich.text import Text - from bittensor.utils.balance import Balance from bittensor.core.chain_data import AxonInfo, NeuronInfo, PrometheusInfo -def __mock_wallet_factory__(*args, **kwargs) -> _MockWallet: +def __mock_wallet_factory__(*_, **__) -> _MockWallet: """Returns a mock wallet object.""" mock_wallet = get_mock_wallet() @@ -118,53 +115,3 @@ def get_mock_neuron_by_uid(uid: int, **kwargs) -> NeuronInfo: return get_mock_neuron( uid=uid, hotkey=get_mock_hotkey(uid), coldkey=get_mock_coldkey(uid), **kwargs ) - - -class MockStatus: - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - pass - - def start(self): - pass - - def stop(self): - pass - - def update(self, *args, **kwargs): - MockConsole().print(*args, **kwargs) - - -class MockConsole: - """ - Mocks the console object for status and print. - Captures the last print output as a string. - """ - - captured_print = None - - def status(self, *args, **kwargs): - return MockStatus() - - def print(self, *args, **kwargs): - console = Console( - width=1000, no_color=True, markup=False - ) # set width to 1000 to avoid truncation - console.begin_capture() - console.print(*args, **kwargs) - self.captured_print = console.end_capture() - - def clear(self, *args, **kwargs): - pass - - @staticmethod - def remove_rich_syntax(text: str) -> str: - """ - Removes rich syntax from the given text. - Removes markup and ansi syntax. 
- """ - output_no_syntax = Text.from_ansi(Text.from_markup(text).plain).plain - - return output_no_syntax diff --git a/tests/integration_tests/test_subtensor_integration.py b/tests/integration_tests/test_subtensor_integration.py index 552e5ab993..bacb340f2c 100644 --- a/tests/integration_tests/test_subtensor_integration.py +++ b/tests/integration_tests/test_subtensor_integration.py @@ -30,7 +30,6 @@ from bittensor.utils.mock import MockSubtensor from tests.helpers import ( get_mock_coldkey, - MockConsole, get_mock_keypair, get_mock_wallet, ) @@ -52,12 +51,6 @@ def setUp(self): @classmethod def setUpClass(cls) -> None: - # mock rich console status - mock_console = MockConsole() - cls._mock_console_patcher = patch( - "bittensor.core.settings.bt_console", mock_console - ) - cls._mock_console_patcher.start() # Keeps the same mock network for all tests. This stops the network from being re-setup for each test. cls._mock_subtensor = MockSubtensor() cls._do_setup_subnet() @@ -69,10 +62,6 @@ def _do_setup_subnet(cls): # Setup the mock subnet 3 cls._mock_subtensor.create_subnet(netuid=3) - @classmethod - def tearDownClass(cls) -> None: - cls._mock_console_patcher.stop() - def test_network_overrides(self): """Tests that the network overrides the chain_endpoint.""" # Argument importance: chain_endpoint (arg) > network (arg) > config.subtensor.chain_endpoint > config.subtensor.network @@ -284,15 +273,10 @@ def test_registration_multiprocessed_already_registered(self): ) self.subtensor._do_pow_register = MagicMock(return_value=(True, None)) - with patch("bittensor.core.settings.bt_console") as mock_set_status: - # Need to patch the console status to avoid opening a parallel live display - mock_set_status.__enter__ = MagicMock(return_value=True) - mock_set_status.__exit__ = MagicMock(return_value=True) - - # should return True - assert self.subtensor.register( - wallet=wallet, netuid=3, num_processes=3, update_interval=5 - ) + # should return True + assert 
* update async_substrate_interface.py to use Optional where acceptable, without breaking logic
bittensor/core/extrinsics/root.py | 4 +- bittensor/core/extrinsics/serving.py | 8 +- bittensor/core/extrinsics/set_weights.py | 4 +- bittensor/core/extrinsics/transfer.py | 2 +- bittensor/core/settings.py | 17 +- bittensor/core/subtensor.py | 2 +- bittensor/utils/__init__.py | 115 +- bittensor/utils/async_substrate_interface.py | 2742 +++++++++++++++++ bittensor/utils/delegates_details.py | 43 + bittensor/utils/deprecated.py | 2 + requirements/prod.txt | 2 + tests/e2e_tests/conftest.py | 2 +- tests/e2e_tests/test_axon.py | 2 +- tests/e2e_tests/test_commit_weights.py | 13 +- tests/e2e_tests/test_dendrite.py | 25 +- tests/e2e_tests/test_liquid_alpha.py | 11 +- tests/e2e_tests/test_metagraph.py | 13 +- tests/e2e_tests/test_subtensor_functions.py | 8 +- tests/e2e_tests/utils/chain_interactions.py | 2 +- tests/unit_tests/extrinsics/test_init.py | 13 +- 27 files changed, 6255 insertions(+), 69 deletions(-) create mode 100644 bittensor/core/async_subtensor.py create mode 100644 bittensor/core/extrinsics/async_registration.py create mode 100644 bittensor/core/extrinsics/async_root.py create mode 100644 bittensor/core/extrinsics/async_transfer.py create mode 100644 bittensor/utils/async_substrate_interface.py create mode 100644 bittensor/utils/delegates_details.py diff --git a/bittensor/core/async_subtensor.py b/bittensor/core/async_subtensor.py new file mode 100644 index 0000000000..aa2b65fb30 --- /dev/null +++ b/bittensor/core/async_subtensor.py @@ -0,0 +1,1222 @@ +import asyncio +from typing import Optional, Any, Union, TypedDict, Iterable + +import aiohttp +import numpy as np +import scalecodec +import typer +from bittensor_wallet import Wallet +from bittensor_wallet.utils import SS58_FORMAT +from rich.prompt import Confirm +from scalecodec import GenericCall +from scalecodec.base import RuntimeConfiguration +from scalecodec.type_registry import load_type_registry_preset +from substrateinterface.exceptions import SubstrateRequestException + +from 
bittensor.core.chain_data import ( + DelegateInfo, + custom_rpc_type_registry, + StakeInfo, + NeuronInfoLite, + NeuronInfo, + SubnetHyperparameters, + decode_account_id, +) +from bittensor.core.extrinsics.async_registration import register_extrinsic +from bittensor.core.extrinsics.async_root import ( + set_root_weights_extrinsic, + root_register_extrinsic, +) +from bittensor.core.extrinsics.async_transfer import transfer_extrinsic +from bittensor.core.settings import ( + TYPE_REGISTRY, + DEFAULTS, + NETWORK_MAP, + DELEGATES_DETAILS_URL, + DEFAULT_NETWORK, +) +from bittensor.utils import ( + ss58_to_vec_u8, + format_error_message, + decode_hex_identity_dict, + validate_chain_endpoint, +) +from bittensor.utils.async_substrate_interface import ( + AsyncSubstrateInterface, + TimeoutException, +) +from bittensor.utils.balance import Balance +from bittensor.utils.btlogging import logging +from bittensor.utils.delegates_details import DelegatesDetails + + +class ParamWithTypes(TypedDict): + name: str # Name of the parameter. + type: str # ScaleType string of the parameter. + + +class ProposalVoteData: + index: int + threshold: int + ayes: list[str] + nays: list[str] + end: int + + def __init__(self, proposal_dict: dict) -> None: + self.index = proposal_dict["index"] + self.threshold = proposal_dict["threshold"] + self.ayes = self.decode_ss58_tuples(proposal_dict["ayes"]) + self.nays = self.decode_ss58_tuples(proposal_dict["nays"]) + self.end = proposal_dict["end"] + + @staticmethod + def decode_ss58_tuples(line: tuple): + """Decodes a tuple of ss58 addresses formatted as bytes tuples.""" + return [decode_account_id(line[x][0]) for x in range(len(line))] + + +class AsyncSubtensor: + """Thin layer for interacting with Substrate Interface. 
Mostly a collection of frequently-used calls.""" + + def __init__(self, network: str = DEFAULT_NETWORK): + if network in NETWORK_MAP: + self.chain_endpoint = NETWORK_MAP[network] + self.network = network + if network == "local": + logging.warning( + "[yellow]Warning[/yellow]: Verify your local subtensor is running on port 9944." + ) + else: + is_valid, _ = validate_chain_endpoint(network) + if is_valid: + self.chain_endpoint = network + if network in NETWORK_MAP.values(): + self.network = next( + key for key, value in NETWORK_MAP.items() if value == network + ) + else: + self.network = "custom" + else: + logging.info( + f"Network not specified or not valid. Using default chain endpoint: {NETWORK_MAP[DEFAULTS.subtensor.network]}." + ) + logging.info( + "You can set this for commands with the --network flag, or by setting this in the config." + ) + self.chain_endpoint = NETWORK_MAP[DEFAULTS.subtensor.network] + self.network = DEFAULTS.subtensor.network + + self.substrate = AsyncSubstrateInterface( + chain_endpoint=self.chain_endpoint, + ss58_format=SS58_FORMAT, + type_registry=TYPE_REGISTRY, + chain_name="Bittensor", + ) + + def __str__(self): + return f"Network: {self.network}, Chain: {self.chain_endpoint}" + + async def __aenter__(self): + logging.info( + f"Connecting to Substrate: {self}..." + ) + try: + async with self.substrate: + return self + except TimeoutException: + logging.error( + f"Error: Timeout occurred connecting to substrate. 
Verify your chain and network settings: {self}" + ) + raise typer.Exit(code=1) + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.substrate.close() + + async def encode_params( + self, + call_definition: list["ParamWithTypes"], + params: Union[list[Any], dict[str, Any]], + ) -> str: + """Returns a hex encoded string of the params using their types.""" + param_data = scalecodec.ScaleBytes(b"") + + for i, param in enumerate(call_definition["params"]): # type: ignore + scale_obj = await self.substrate.create_scale_object(param["type"]) + if isinstance(params, list): + param_data += scale_obj.encode(params[i]) + else: + if param["name"] not in params: + raise ValueError(f"Missing param {param['name']} in params dict.") + + param_data += scale_obj.encode(params[param["name"]]) + + return param_data.to_hex() + + async def get_all_subnet_netuids( + self, block_hash: Optional[str] = None + ) -> list[int]: + """ + Retrieves the list of all subnet unique identifiers (netuids) currently present in the Bittensor network. + + :param block_hash: The hash of the block to retrieve the subnet unique identifiers from. + :return: A list of subnet netuids. + + This function provides a comprehensive view of the subnets within the Bittensor network, + offering insights into its diversity and scale. + """ + result = await self.substrate.query_map( + module="SubtensorModule", + storage_function="NetworksAdded", + block_hash=block_hash, + reuse_block_hash=True, + ) + return ( + [] + if result is None or not hasattr(result, "records") + else [netuid async for netuid, exists in result if exists] + ) + + async def is_hotkey_delegate( + self, + hotkey_ss58: str, + block_hash: Optional[str] = None, + reuse_block: Optional[bool] = False, + ) -> bool: + """ + Determines whether a given hotkey (public key) is a delegate on the Bittensor network. This function + checks if the neuron associated with the hotkey is part of the network's delegation system. 
+ + :param hotkey_ss58: The SS58 address of the neuron's hotkey. + :param block_hash: The hash of the blockchain block number for the query. + :param reuse_block: Whether to reuse the last-used block hash. + + :return: `True` if the hotkey is a delegate, `False` otherwise. + + Being a delegate is a significant status within the Bittensor network, indicating a neuron's + involvement in consensus and governance processes. + """ + delegates = await self.get_delegates( + block_hash=block_hash, reuse_block=reuse_block + ) + return hotkey_ss58 in [info.hotkey_ss58 for info in delegates] + + async def get_delegates( + self, block_hash: Optional[str] = None, reuse_block: Optional[bool] = False + ) -> list[DelegateInfo]: + """ + Fetches all delegates on the chain + + :param block_hash: hash of the blockchain block number for the query. + :param reuse_block: whether to reuse the last-used block hash. + + :return: List of DelegateInfo objects, or an empty list if there are no delegates. + """ + hex_bytes_result = await self.query_runtime_api( + runtime_api="DelegateInfoRuntimeApi", + method="get_delegates", + params=[], + block_hash=block_hash, + ) + if hex_bytes_result is not None: + try: + bytes_result = bytes.fromhex(hex_bytes_result[2:]) + except ValueError: + bytes_result = bytes.fromhex(hex_bytes_result) + + return DelegateInfo.list_from_vec_u8(bytes_result) + else: + return [] + + async def get_stake_info_for_coldkey( + self, + coldkey_ss58: str, + block_hash: Optional[str] = None, + reuse_block: bool = False, + ) -> list[StakeInfo]: + """ + Retrieves stake information associated with a specific coldkey. This function provides details + about the stakes held by an account, including the staked amounts and associated delegates. + + :param coldkey_ss58: The ``SS58`` address of the account's coldkey. + :param block_hash: The hash of the blockchain block number for the query. + :param reuse_block: Whether to reuse the last-used block hash. 
+ + :return: A list of StakeInfo objects detailing the stake allocations for the account. + + Stake information is vital for account holders to assess their investment and participation + in the network's delegation and consensus processes. + """ + encoded_coldkey = ss58_to_vec_u8(coldkey_ss58) + + hex_bytes_result = await self.query_runtime_api( + runtime_api="StakeInfoRuntimeApi", + method="get_stake_info_for_coldkey", + params=[encoded_coldkey], + block_hash=block_hash, + reuse_block=reuse_block, + ) + + if hex_bytes_result is None: + return [] + + try: + bytes_result = bytes.fromhex(hex_bytes_result[2:]) + except ValueError: + bytes_result = bytes.fromhex(hex_bytes_result) + + return StakeInfo.list_from_vec_u8(bytes_result) + + async def get_stake_for_coldkey_and_hotkey( + self, hotkey_ss58: str, coldkey_ss58: str, block_hash: Optional[str] + ) -> Balance: + """ + Retrieves stake information associated with a specific coldkey and hotkey. + :param hotkey_ss58: the hotkey SS58 address to query + :param coldkey_ss58: the coldkey SS58 address to query + :param block_hash: the hash of the blockchain block number for the query. + :return: Stake Balance for the given coldkey and hotkey + """ + _result = await self.substrate.query( + module="SubtensorModule", + storage_function="Stake", + params=[hotkey_ss58, coldkey_ss58], + block_hash=block_hash, + ) + return Balance.from_rao(_result or 0) + + async def query_runtime_api( + self, + runtime_api: str, + method: str, + params: Optional[Union[list[list[int]], dict[str, int]]], + block_hash: Optional[str] = None, + reuse_block: Optional[bool] = False, + ) -> Optional[str]: + """ + Queries the runtime API of the Bittensor blockchain, providing a way to interact with the underlying + runtime and retrieve data encoded in Scale Bytes format. This function is essential for advanced users + who need to interact with specific runtime methods and decode complex data types. 
+ + :param runtime_api: The name of the runtime API to query. + :param method: The specific method within the runtime API to call. + :param params: The parameters to pass to the method call. + :param block_hash: The hash of the blockchain block number at which to perform the query. + :param reuse_block: Whether to reuse the last-used block hash. + + :return: The Scale Bytes encoded result from the runtime API call, or ``None`` if the call fails. + + This function enables access to the deeper layers of the Bittensor blockchain, allowing for detailed + and specific interactions with the network's runtime environment. + """ + call_definition = TYPE_REGISTRY["runtime_api"][runtime_api]["methods"][method] + + data = ( + "0x" + if params is None + else await self.encode_params( + call_definition=call_definition, params=params + ) + ) + api_method = f"{runtime_api}_{method}" + + json_result = await self.substrate.rpc_request( + method="state_call", + params=[api_method, data, block_hash] if block_hash else [api_method, data], + ) + + if json_result is None: + return None + + return_type = call_definition["type"] + + as_scale_bytes = scalecodec.ScaleBytes(json_result["result"]) # type: ignore + + rpc_runtime_config = RuntimeConfiguration() + rpc_runtime_config.update_type_registry(load_type_registry_preset("legacy")) + rpc_runtime_config.update_type_registry(custom_rpc_type_registry) + + obj = rpc_runtime_config.create_scale_object(return_type, as_scale_bytes) + if obj.data.to_hex() == "0x0400": # RPC returned None result + return None + + return obj.decode() + + async def get_balance( + self, + *addresses: str, + block_hash: Optional[str] = None, + reuse_block: bool = False, + ) -> dict[str, Balance]: + """ + Retrieves the balance for given coldkey(s) + :param addresses: coldkey addresses(s) + :param block_hash: the block hash, optional + :param reuse_block: Whether to reuse the last-used block hash when retrieving info. 
+ :return: dict of {address: Balance objects} + """ + calls = [ + ( + await self.substrate.create_storage_key( + "System", "Account", [address], block_hash=block_hash + ) + ) + for address in addresses + ] + batch_call = await self.substrate.query_multi(calls, block_hash=block_hash) + results = {} + for item in batch_call: + value = item[1] or {"data": {"free": 0}} + results.update({item[0].params[0]: Balance(value["data"]["free"])}) + return results + + async def get_total_stake_for_coldkey( + self, + *ss58_addresses, + block_hash: Optional[str] = None, + reuse_block: bool = False, + ) -> dict[str, Balance]: + """ + Returns the total stake held on a coldkey. + + :param ss58_addresses: The SS58 address(es) of the coldkey(s) + :param block_hash: The hash of the block number to retrieve the stake from. + :param reuse_block: Whether to reuse the last-used block hash when retrieving info. + + :return: {address: Balance objects} + """ + calls = [ + ( + await self.substrate.create_storage_key( + "SubtensorModule", + "TotalColdkeyStake", + [address], + block_hash=block_hash, + ) + ) + for address in ss58_addresses + ] + batch_call = await self.substrate.query_multi(calls, block_hash=block_hash) + results = {} + for item in batch_call: + results.update({item[0].params[0]: Balance.from_rao(item[1] or 0)}) + return results + + async def get_total_stake_for_hotkey( + self, + *ss58_addresses, + block_hash: Optional[str] = None, + reuse_block: bool = False, + ) -> dict[str, Balance]: + """ + Returns the total stake held on a hotkey. + + :param ss58_addresses: The SS58 address(es) of the hotkey(s) + :param block_hash: The hash of the block number to retrieve the stake from. + :param reuse_block: Whether to reuse the last-used block hash when retrieving info. 
+ + :return: {address: Balance objects} + """ + results = await self.substrate.query_multiple( + params=[s for s in ss58_addresses], + module="SubtensorModule", + storage_function="TotalHotkeyStake", + block_hash=block_hash, + reuse_block_hash=reuse_block, + ) + return {k: Balance.from_rao(r or 0) for (k, r) in results.items()} + + async def get_netuids_for_hotkey( + self, + hotkey_ss58: str, + block_hash: Optional[str] = None, + reuse_block: bool = False, + ) -> list[int]: + """ + Retrieves a list of subnet UIDs (netuids) for which a given hotkey is a member. This function + identifies the specific subnets within the Bittensor network where the neuron associated with + the hotkey is active. + + :param hotkey_ss58: The ``SS58`` address of the neuron's hotkey. + :param block_hash: The hash of the blockchain block number at which to perform the query. + :param reuse_block: Whether to reuse the last-used block hash when retrieving info. + + :return: A list of netuids where the neuron is a member. + """ + + result = await self.substrate.query_map( + module="SubtensorModule", + storage_function="IsNetworkMember", + params=[hotkey_ss58], + block_hash=block_hash, + reuse_block_hash=reuse_block, + ) + return ( + [record[0] async for record in result if record[1]] + if result and hasattr(result, "records") + else [] + ) + + async def subnet_exists( + self, netuid: int, block_hash: Optional[str] = None, reuse_block: bool = False + ) -> bool: + """ + Checks if a subnet with the specified unique identifier (netuid) exists within the Bittensor network. + + :param netuid: The unique identifier of the subnet. + :param block_hash: The hash of the blockchain block number at which to check the subnet existence. + :param reuse_block: Whether to reuse the last-used block hash. + + :return: `True` if the subnet exists, `False` otherwise. 
+ + This function is critical for verifying the presence of specific subnets in the network, + enabling a deeper understanding of the network's structure and composition. + """ + result = await self.substrate.query( + module="SubtensorModule", + storage_function="NetworksAdded", + params=[netuid], + block_hash=block_hash, + reuse_block_hash=reuse_block, + ) + return result + + async def get_hyperparameter( + self, + param_name: str, + netuid: int, + block_hash: Optional[str] = None, + reuse_block: bool = False, + ) -> Optional[Any]: + """ + Retrieves a specified hyperparameter for a specific subnet. + + :param param_name: The name of the hyperparameter to retrieve. + :param netuid: The unique identifier of the subnet. + :param block_hash: The hash of blockchain block number for the query. + :param reuse_block: Whether to reuse the last-used block hash. + + :return: The value of the specified hyperparameter if the subnet exists, or None + """ + if not await self.subnet_exists(netuid, block_hash): + print("subnet does not exist") + return None + + result = await self.substrate.query( + module="SubtensorModule", + storage_function=param_name, + params=[netuid], + block_hash=block_hash, + reuse_block_hash=reuse_block, + ) + + if result is None: + return None + + return result + + async def filter_netuids_by_registered_hotkeys( + self, + all_netuids: Iterable[int], + filter_for_netuids: Iterable[int], + all_hotkeys: Iterable[Wallet], + block_hash: Optional[str] = None, + reuse_block: bool = False, + ) -> list[int]: + """ + Filters a given list of all netuids for certain specified netuids and hotkeys + + :param all_netuids: A list of netuids to filter. + :param filter_for_netuids: A subset of all_netuids to filter from the main list + :param all_hotkeys: Hotkeys to filter from the main list + :param block_hash: hash of the blockchain block number at which to perform the query. + :param reuse_block: whether to reuse the last-used blockchain hash when retrieving info. 
+ + :return: the filtered list of netuids. + """ + netuids_with_registered_hotkeys = [ + item + for sublist in await asyncio.gather( + *[ + self.get_netuids_for_hotkey( + wallet.hotkey.ss58_address, + reuse_block=reuse_block, + block_hash=block_hash, + ) + for wallet in all_hotkeys + ] + ) + for item in sublist + ] + + if not filter_for_netuids: + all_netuids = netuids_with_registered_hotkeys + + else: + filtered_netuids = [ + netuid for netuid in all_netuids if netuid in filter_for_netuids + ] + + registered_hotkeys_filtered = [ + netuid + for netuid in netuids_with_registered_hotkeys + if netuid in filter_for_netuids + ] + + # Combine both filtered lists + all_netuids = filtered_netuids + registered_hotkeys_filtered + + return list(set(all_netuids)) + + async def get_existential_deposit( + self, block_hash: Optional[str] = None, reuse_block: bool = False + ) -> Balance: + """ + Retrieves the existential deposit amount for the Bittensor blockchain. The existential deposit + is the minimum amount of TAO required for an account to exist on the blockchain. Accounts with + balances below this threshold can be reaped to conserve network resources. + + :param block_hash: Block hash at which to query the deposit amount. If `None`, the current block is used. + :param reuse_block: Whether to reuse the last-used blockchain block hash. + + :return: The existential deposit amount + + The existential deposit is a fundamental economic parameter in the Bittensor network, ensuring + efficient use of storage and preventing the proliferation of dust accounts. 
+ """ + result = await self.substrate.get_constant( + module_name="Balances", + constant_name="ExistentialDeposit", + block_hash=block_hash, + reuse_block_hash=reuse_block, + ) + + if result is None: + raise Exception("Unable to retrieve existential deposit amount.") + + return Balance.from_rao(result) + + async def neurons( + self, netuid: int, block_hash: Optional[str] = None + ) -> list[NeuronInfo]: + """ + Retrieves a list of all neurons within a specified subnet of the Bittensor network. This function + provides a snapshot of the subnet's neuron population, including each neuron's attributes and network + interactions. + + :param netuid: The unique identifier of the subnet. + :param block_hash: The hash of the blockchain block number for the query. + + :return: A list of NeuronInfo objects detailing each neuron's characteristics in the subnet. + + Understanding the distribution and status of neurons within a subnet is key to comprehending the + network's decentralized structure and the dynamics of its consensus and governance processes. + """ + neurons_lite, weights, bonds = await asyncio.gather( + self.neurons_lite(netuid=netuid, block_hash=block_hash), + self.weights(netuid=netuid, block_hash=block_hash), + self.bonds(netuid=netuid, block_hash=block_hash), + ) + + weights_as_dict = {uid: w for uid, w in weights} + bonds_as_dict = {uid: b for uid, b in bonds} + + neurons = [ + NeuronInfo.from_weights_bonds_and_neuron_lite( + neuron_lite, weights_as_dict, bonds_as_dict + ) + for neuron_lite in neurons_lite + ] + + return neurons + + async def neurons_lite( + self, netuid: int, block_hash: Optional[str] = None, reuse_block: bool = False + ) -> list[NeuronInfoLite]: + """ + Retrieves a list of neurons in a 'lite' format from a specific subnet of the Bittensor network. + This function provides a streamlined view of the neurons, focusing on key attributes such as stake + and network participation. + + :param netuid: The unique identifier of the subnet. 
+ :param block_hash: The hash of the blockchain block number for the query. + :param reuse_block: Whether to reuse the last-used blockchain block hash. + + :return: A list of simplified neuron information for the subnet. + + This function offers a quick overview of the neuron population within a subnet, facilitating + efficient analysis of the network's decentralized structure and neuron dynamics. + """ + hex_bytes_result = await self.query_runtime_api( + runtime_api="NeuronInfoRuntimeApi", + method="get_neurons_lite", + params=[ + netuid + ], # TODO check to see if this can accept more than one at a time + block_hash=block_hash, + reuse_block=reuse_block, + ) + + if hex_bytes_result is None: + return [] + + try: + bytes_result = bytes.fromhex(hex_bytes_result[2:]) + except ValueError: + bytes_result = bytes.fromhex(hex_bytes_result) + + return NeuronInfoLite.list_from_vec_u8(bytes_result) + + async def neuron_for_uid( + self, uid: Optional[int], netuid: int, block_hash: Optional[str] = None + ) -> NeuronInfo: + """ + Retrieves detailed information about a specific neuron identified by its unique identifier (UID) + within a specified subnet (netuid) of the Bittensor network. This function provides a comprehensive + view of a neuron's attributes, including its stake, rank, and operational status. + + + :param uid: The unique identifier of the neuron. + :param netuid: The unique identifier of the subnet. + :param block_hash: The hash of the blockchain block number for the query. + + :return: Detailed information about the neuron if found, a null neuron otherwise + + This function is crucial for analyzing individual neurons' contributions and status within a specific + subnet, offering insights into their roles in the network's consensus and validation mechanisms. 
+ """ + if uid is None: + return NeuronInfo.get_null_neuron() + + params = [netuid, uid, block_hash] if block_hash else [netuid, uid] + json_body = await self.substrate.rpc_request( + method="neuronInfo_getNeuron", + params=params, # custom rpc method + ) + + if not (result := json_body.get("result", None)): + return NeuronInfo.get_null_neuron() + + bytes_result = bytes(result) + return NeuronInfo.from_vec_u8(bytes_result) + + async def get_delegated( + self, + coldkey_ss58: str, + block_hash: Optional[str] = None, + reuse_block: bool = False, + ) -> list[tuple[DelegateInfo, Balance]]: + """ + Retrieves a list of delegates and their associated stakes for a given coldkey. This function + identifies the delegates that a specific account has staked tokens on. + + :param coldkey_ss58: The `SS58` address of the account's coldkey. + :param block_hash: The hash of the blockchain block number for the query. + :param reuse_block: Whether to reuse the last-used blockchain block hash. + + :return: A list of tuples, each containing a delegate's information and staked amount. + + This function is important for account holders to understand their stake allocations and their + involvement in the network's delegation and consensus mechanisms. + """ + + block_hash = ( + block_hash + if block_hash + else (self.substrate.last_block_hash if reuse_block else None) + ) + encoded_coldkey = ss58_to_vec_u8(coldkey_ss58) + json_body = await self.substrate.rpc_request( + method="delegateInfo_getDelegated", + params=([block_hash, encoded_coldkey] if block_hash else [encoded_coldkey]), + ) + + if not (result := json_body.get("result")): + return [] + + return DelegateInfo.delegated_list_from_vec_u8(bytes(result)) + + async def query_identity( + self, + key: str, + block_hash: Optional[str] = None, + reuse_block: bool = False, + ) -> dict: + """ + Queries the identity of a neuron on the Bittensor blockchain using the given key. 
This function retrieves + detailed identity information about a specific neuron, which is a crucial aspect of the network's decentralized + identity and governance system. + + Note: + See the `Bittensor CLI documentation `_ for supported identity + parameters. + + :param key: The key used to query the neuron's identity, typically the neuron's SS58 address. + :param block_hash: The hash of the blockchain block number at which to perform the query. + :param reuse_block: Whether to reuse the last-used blockchain block hash. + + :return: An object containing the identity information of the neuron if found, ``None`` otherwise. + + The identity information can include various attributes such as the neuron's stake, rank, and other + network-specific details, providing insights into the neuron's role and status within the Bittensor network. + """ + + def decode_hex_identity_dict_(info_dictionary): + for k, v in info_dictionary.items(): + if isinstance(v, dict): + item = next(iter(v.values())) + else: + item = v + if isinstance(item, tuple) and item: + if len(item) > 1: + try: + info_dictionary[k] = ( + bytes(item).hex(sep=" ", bytes_per_sep=2).upper() + ) + except UnicodeDecodeError: + print(f"Could not decode: {k}: {item}") + else: + try: + info_dictionary[k] = bytes(item[0]).decode("utf-8") + except UnicodeDecodeError: + print(f"Could not decode: {k}: {item}") + else: + info_dictionary[k] = item + + return info_dictionary + + identity_info = await self.substrate.query( + module="Registry", + storage_function="IdentityOf", + params=[key], + block_hash=block_hash, + reuse_block_hash=reuse_block, + ) + try: + return decode_hex_identity_dict_(identity_info["info"]) + except TypeError: + return {} + + async def weights( + self, netuid: int, block_hash: Optional[str] = None + ) -> list[tuple[int, list[tuple[int, int]]]]: + """ + Retrieves the weight distribution set by neurons within a specific subnet of the Bittensor network. 
+ This function maps each neuron's UID to the weights it assigns to other neurons, reflecting the + network's trust and value assignment mechanisms. + + Args: + :param netuid: The network UID of the subnet to query. + :param block_hash: The hash of the blockchain block for the query. + + :return: A list of tuples mapping each neuron's UID to its assigned weights. + + The weight distribution is a key factor in the network's consensus algorithm and the ranking of neurons, + influencing their influence and reward allocation within the subnet. + """ + # TODO look into seeing if we can speed this up with storage query + w_map_encoded = await self.substrate.query_map( + module="SubtensorModule", + storage_function="Weights", + params=[netuid], + block_hash=block_hash, + ) + w_map = [(uid, w or []) async for uid, w in w_map_encoded] + + return w_map + + async def bonds( + self, netuid: int, block_hash: Optional[str] = None + ) -> list[tuple[int, list[tuple[int, int]]]]: + """ + Retrieves the bond distribution set by neurons within a specific subnet of the Bittensor network. + Bonds represent the investments or commitments made by neurons in one another, indicating a level + of trust and perceived value. This bonding mechanism is integral to the network's market-based approach + to measuring and rewarding machine intelligence. + + :param netuid: The network UID of the subnet to query. + :param block_hash: The hash of the blockchain block number for the query. + + :return: list of tuples mapping each neuron's UID to its bonds with other neurons. + + Understanding bond distributions is crucial for analyzing the trust dynamics and market behavior + within the subnet. It reflects how neurons recognize and invest in each other's intelligence and + contributions, supporting diverse and niche systems within the Bittensor ecosystem. 
+ """ + b_map_encoded = await self.substrate.query_map( + module="SubtensorModule", + storage_function="Bonds", + params=[netuid], + block_hash=block_hash, + ) + b_map = [(uid, b) async for uid, b in b_map_encoded] + + return b_map + + async def does_hotkey_exist( + self, + hotkey_ss58: str, + block_hash: Optional[str] = None, + reuse_block: bool = False, + ) -> bool: + """ + Returns true if the hotkey is known by the chain and there are accounts. + + :param hotkey_ss58: The SS58 address of the hotkey. + :param block_hash: The hash of the block number to check the hotkey against. + :param reuse_block: Whether to reuse the last-used blockchain hash. + + :return: `True` if the hotkey is known by the chain and there are accounts, `False` otherwise. + """ + _result = await self.substrate.query( + module="SubtensorModule", + storage_function="Owner", + params=[hotkey_ss58], + block_hash=block_hash, + reuse_block_hash=reuse_block, + ) + result = decode_account_id(_result[0]) + return_val = ( + False + if result is None + else result != "5C4hrfjw9DjXZTzV3MwzrrAr9P1MJhSrvWGWqi1eSuyUpnhM" + ) + return return_val + + async def get_hotkey_owner( + self, hotkey_ss58: str, block_hash: str + ) -> Optional[str]: + hk_owner_query = await self.substrate.query( + module="SubtensorModule", + storage_function="Owner", + params=[hotkey_ss58], + block_hash=block_hash, + ) + val = decode_account_id(hk_owner_query[0]) + if val: + exists = await self.does_hotkey_exist(hotkey_ss58, block_hash=block_hash) + else: + exists = False + hotkey_owner = val if exists else None + return hotkey_owner + + async def sign_and_send_extrinsic( + self, + call: GenericCall, + wallet: Wallet, + wait_for_inclusion: bool = True, + wait_for_finalization: bool = False, + ) -> tuple[bool, str]: + """ + Helper method to sign and submit an extrinsic call to chain. 
+ + :param call: a prepared Call object + :param wallet: the wallet whose coldkey will be used to sign the extrinsic + :param wait_for_inclusion: whether to wait until the extrinsic call is included on the chain + :param wait_for_finalization: whether to wait until the extrinsic call is finalized on the chain + + :return: (success, error message) + """ + extrinsic = await self.substrate.create_signed_extrinsic( + call=call, keypair=wallet.coldkey + ) # sign with coldkey + try: + response = await self.substrate.submit_extrinsic( + extrinsic, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + # We only wait here if we expect finalization. + if not wait_for_finalization and not wait_for_inclusion: + return True, "" + await response.process_events() + if await response.is_success: + return True, "" + else: + return False, format_error_message( + await response.error_message, substrate=self.substrate + ) + except SubstrateRequestException as e: + return False, format_error_message(e, substrate=self.substrate) + + async def get_children(self, hotkey, netuid) -> tuple[bool, list, str]: + """ + This method retrieves the children of a given hotkey and netuid. It queries the SubtensorModule's ChildKeys + storage function to get the children and formats them before returning as a tuple. + + :param hotkey: The hotkey value. + :param netuid: The netuid value. 
    async def get_subnet_hyperparameters(
        self, netuid: int, block_hash: Optional[str] = None
    ) -> Optional[Union[list, SubnetHyperparameters]]:
        """
        Retrieves the hyperparameters for a specific subnet within the Bittensor network. These hyperparameters
        define the operational settings and rules governing the subnet's behavior.

        :param netuid: The network UID of the subnet to query.
        :param block_hash: The hash of the blockchain block number for the query.

        :return: The subnet's hyperparameters, or an empty list if the runtime call returned no data.

        Understanding the hyperparameters is crucial for comprehending how subnets are configured and
        managed, and how they interact with the network's consensus and incentive mechanisms.
        """
        hex_bytes_result = await self.query_runtime_api(
            runtime_api="SubnetInfoRuntimeApi",
            method="get_subnet_hyperparams",
            params=[netuid],
            block_hash=block_hash,
        )

        if hex_bytes_result is None:
            # No data available for this subnet at the requested block.
            return []

        # The runtime API returns a hex string, optionally "0x"-prefixed.
        if hex_bytes_result.startswith("0x"):
            bytes_result = bytes.fromhex(hex_bytes_result[2:])
        else:
            bytes_result = bytes.fromhex(hex_bytes_result)

        return SubnetHyperparameters.from_vec_u8(bytes_result)

    async def get_vote_data(
        self,
        proposal_hash: str,
        block_hash: Optional[str] = None,
        reuse_block: bool = False,
    ) -> Optional["ProposalVoteData"]:
        """
        Retrieves the voting data for a specific proposal on the Bittensor blockchain. This data includes
        information about how senate members have voted on the proposal.

        :param proposal_hash: The hash of the proposal for which voting data is requested.
        :param block_hash: The hash of the blockchain block number to query the voting data.
        :param reuse_block: Whether to reuse the last-used blockchain block hash.

        :return: An object containing the proposal's voting data, or `None` if not found.

        This function is important for tracking and understanding the decision-making processes within
        the Bittensor network, particularly how proposals are received and acted upon by the governing body.
        """
        vote_data = await self.substrate.query(
            module="Triumvirate",
            storage_function="Voting",
            params=[proposal_hash],
            block_hash=block_hash,
            reuse_block_hash=reuse_block,
        )
        if vote_data is None:
            return None
        else:
            return ProposalVoteData(vote_data)

    async def get_delegate_identities(
        self, block_hash: Optional[str] = None
    ) -> dict[str, DelegatesDetails]:
        """
        Fetches delegates identities from the chain and GitHub. Preference is given to chain data, and missing info
        is filled-in by the info from GitHub. At some point, we want to totally move away from fetching this info
        from GitHub, but chain data is still limited in that regard.

        Args:
            block_hash: the hash of the blockchain block for the query

        Returns: {ss58: DelegatesDetails, ...}

        """
        timeout = aiohttp.ClientTimeout(10.0)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            # Fetch on-chain identities and the GitHub registry concurrently.
            identities_info, response = await asyncio.gather(
                self.substrate.query_map(
                    module="Registry",
                    storage_function="IdentityOf",
                    block_hash=block_hash,
                ),
                session.get(DELEGATES_DETAILS_URL),
            )

            all_delegates_details = {
                decode_account_id(ss58_address[0]): DelegatesDetails.from_chain_data(
                    decode_hex_identity_dict(identity["info"])
                )
                for ss58_address, identity in identities_info
            }

            if response.ok:
                all_delegates: dict[str, Any] = await response.json(content_type=None)

                # Chain data wins; GitHub only fills fields the chain left empty.
                for delegate_hotkey, delegate_details in all_delegates.items():
                    delegate_info = all_delegates_details.setdefault(
                        delegate_hotkey,
                        DelegatesDetails(
                            display=delegate_details.get("name", ""),
                            web=delegate_details.get("url", ""),
                            additional=delegate_details.get("description", ""),
                            pgp_fingerprint=delegate_details.get("fingerprint", ""),
                        ),
                    )
                    delegate_info.display = (
                        delegate_info.display or delegate_details.get("name", "")
                    )
                    delegate_info.web = delegate_info.web or delegate_details.get(
                        "url", ""
                    )
                    delegate_info.additional = (
                        delegate_info.additional
                        or delegate_details.get("description", "")
                    )
                    delegate_info.pgp_fingerprint = (
                        delegate_info.pgp_fingerprint
                        or delegate_details.get("fingerprint", "")
                    )

        return all_delegates_details
+ + Args: + block_hash: the hash of the blockchain block for the query + + Returns: {ss58: DelegatesDetails, ...} + + """ + timeout = aiohttp.ClientTimeout(10.0) + async with aiohttp.ClientSession(timeout=timeout) as session: + identities_info, response = await asyncio.gather( + self.substrate.query_map( + module="Registry", + storage_function="IdentityOf", + block_hash=block_hash, + ), + session.get(DELEGATES_DETAILS_URL), + ) + + all_delegates_details = { + decode_account_id(ss58_address[0]): DelegatesDetails.from_chain_data( + decode_hex_identity_dict(identity["info"]) + ) + for ss58_address, identity in identities_info + } + + if response.ok: + all_delegates: dict[str, Any] = await response.json(content_type=None) + + for delegate_hotkey, delegate_details in all_delegates.items(): + delegate_info = all_delegates_details.setdefault( + delegate_hotkey, + DelegatesDetails( + display=delegate_details.get("name", ""), + web=delegate_details.get("url", ""), + additional=delegate_details.get("description", ""), + pgp_fingerprint=delegate_details.get("fingerprint", ""), + ), + ) + delegate_info.display = ( + delegate_info.display or delegate_details.get("name", "") + ) + delegate_info.web = delegate_info.web or delegate_details.get( + "url", "" + ) + delegate_info.additional = ( + delegate_info.additional + or delegate_details.get("description", "") + ) + delegate_info.pgp_fingerprint = ( + delegate_info.pgp_fingerprint + or delegate_details.get("fingerprint", "") + ) + + return all_delegates_details + + async def is_hotkey_registered(self, netuid: int, hotkey_ss58: str) -> bool: + """Checks to see if the hotkey is registered on a given netuid""" + _result = await self.substrate.query( + module="SubtensorModule", + storage_function="Uids", + params=[netuid, hotkey_ss58], + ) + if _result is not None: + return True + else: + return False + + # extrinsics + + async def transfer( + self, + wallet: Wallet, + destination: str, + amount: float, + transfer_all: bool, + 
prompt: bool, + ): + """Transfer token of amount to destination.""" + return await transfer_extrinsic( + self, + wallet, + destination, + Balance.from_tao(amount), + transfer_all, + prompt=prompt, + ) + + async def register(self, wallet: Wallet, prompt: bool): + """Register neuron by recycling some TAO.""" + logging.info( + f"Registering on netuid 0 on network: {self.network}" + ) + + # Check current recycle amount + logging.info("Fetching recycle amount & balance.") + recycle_call, balance_ = await asyncio.gather( + self.get_hyperparameter(param_name="Burn", netuid=0, reuse_block=True), + self.get_balance(wallet.coldkeypub.ss58_address, reuse_block=True), + ) + current_recycle = Balance.from_rao(int(recycle_call)) + try: + balance: Balance = balance_[wallet.coldkeypub.ss58_address] + except TypeError as e: + logging.error(f"Unable to retrieve current recycle. {e}") + return False + except KeyError: + logging.error("Unable to retrieve current balance.") + return False + + # Check balance is sufficient + if balance < current_recycle: + logging.error( + f"Insufficient balance {balance} to register neuron. 
Current recycle is {current_recycle} TAO" + ) + return False + + if prompt: + if not Confirm.ask( + f"Your balance is: [bold green]{balance}[/bold green]\n" + f"The cost to register by recycle is [bold red]{current_recycle}[/bold red]\n" + f"Do you want to continue?", + default=False, + ): + return False + + return await root_register_extrinsic( + self, + wallet, + wait_for_inclusion=True, + wait_for_finalization=True, + prompt=prompt, + ) + + async def pow_register( + self: "AsyncSubtensor", + wallet: Wallet, + netuid, + processors, + update_interval, + output_in_place, + verbose, + use_cuda, + dev_id, + threads_per_block, + ): + """Register neuron.""" + return await register_extrinsic( + subtensor=self, + wallet=wallet, + netuid=netuid, + prompt=True, + tpb=threads_per_block, + update_interval=update_interval, + num_processes=processors, + cuda=use_cuda, + dev_id=dev_id, + output_in_place=output_in_place, + log_verbose=verbose, + ) + + async def set_weights( + self, + wallet: "Wallet", + netuids: list[int], + weights: list[float], + prompt: bool, + ): + """Set weights for root network.""" + netuids_ = np.array(netuids, dtype=np.int64) + weights_ = np.array(weights, dtype=np.float32) + logging.info(f"Setting weights in network: {self.network}") + # Run the set weights operation. 
+ return await set_root_weights_extrinsic( + subtensor=self, + wallet=wallet, + netuids=netuids_, + weights=weights_, + version_key=0, + prompt=prompt, + wait_for_finalization=True, + wait_for_inclusion=True, + ) diff --git a/bittensor/core/chain_data/__init__.py b/bittensor/core/chain_data/__init__.py index 9ad1e38881..68936a6b5f 100644 --- a/bittensor/core/chain_data/__init__.py +++ b/bittensor/core/chain_data/__init__.py @@ -17,6 +17,6 @@ from .stake_info import StakeInfo from .subnet_hyperparameters import SubnetHyperparameters from .subnet_info import SubnetInfo -from .utils import custom_rpc_type_registry +from .utils import custom_rpc_type_registry, decode_account_id, process_stake_data ProposalCallData = GenericCall diff --git a/bittensor/core/extrinsics/async_registration.py b/bittensor/core/extrinsics/async_registration.py new file mode 100644 index 0000000000..4da7785b1b --- /dev/null +++ b/bittensor/core/extrinsics/async_registration.py @@ -0,0 +1,1609 @@ +import asyncio +import binascii +import functools +import hashlib +import io +import math +import multiprocessing as mp +import os +import random +import subprocess +import time +import typing +from contextlib import redirect_stdout +from dataclasses import dataclass +from datetime import timedelta +from multiprocessing import Process, Event, Lock, Array, Value, Queue +from multiprocessing.queues import Queue as Queue_Type +from queue import Empty, Full +from typing import Optional + +import backoff +import numpy as np +from Crypto.Hash import keccak +from bittensor_wallet import Wallet +from bittensor_wallet.errors import KeyFileError +from rich.console import Console +from rich.prompt import Confirm +from rich.status import Status +from substrateinterface.exceptions import SubstrateRequestException + +from bittensor.core.chain_data import NeuronInfo +from bittensor.utils import format_error_message +from bittensor.utils.btlogging import logging +from bittensor.utils.formatting import millify, 
# TODO: compare and remove existing code (bittensor.utils.registration)


def use_torch() -> bool:
    """Force the use of torch over numpy for certain operations.

    :return: `True` only when the environment variable ``USE_TORCH`` is exactly "1".
    """
    # Idiom fix: return the comparison directly instead of
    # `True if ... else False`.
    return os.getenv("USE_TORCH") == "1"


def legacy_torch_api_compat(func: typing.Callable):
    """
    Convert function operating on numpy Input&Output to legacy torch Input&Output API if `use_torch()` is True.

    :param func: Function with numpy Input/Output to be decorated.

    :return: Decorated function
    """

    @functools.wraps(func)
    def decorated(*args, **kwargs):
        if use_torch():
            # if argument is a Torch tensor, convert it to numpy
            args = [
                arg.cpu().numpy() if isinstance(arg, torch.Tensor) else arg
                for arg in args
            ]
            kwargs = {
                key: value.cpu().numpy() if isinstance(value, torch.Tensor) else value
                for key, value in kwargs.items()
            }
        ret = func(*args, **kwargs)
        if use_torch():
            # if return value is a numpy array, convert it to Torch tensor
            if isinstance(ret, np.ndarray):
                ret = torch.from_numpy(ret)
        return ret

    return decorated


@functools.cache
def _get_real_torch():
    """Return the real ``torch`` module if importable, else ``None`` (result cached)."""
    try:
        import torch as _real_torch
    except ImportError:
        _real_torch = None
    return _real_torch


def log_no_torch_error():
    """Log a hint telling the user how to install torch."""
    logging.info(
        "This command requires torch. You can install torch with `pip install torch` and run the command again."
    )


@dataclass
class POWSolution:
    """A solution to the registration PoW problem."""

    nonce: int
    block_number: int
    difficulty: int
    seal: bytes

    async def is_stale(self, subtensor: "AsyncSubtensor") -> bool:
        """Returns True if the POW is stale.
        This means the block the POW was solved for is more than 3 blocks behind the current block.
        """
        current_block = await subtensor.substrate.get_block_number(None)
        return self.block_number < current_block - 3
+ """ + current_block = await subtensor.substrate.get_block_number(None) + return self.block_number < current_block - 3 + + +@dataclass +class RegistrationStatistics: + """Statistics for a registration.""" + + time_spent_total: float + rounds_total: int + time_average: float + time_spent: float + hash_rate_perpetual: float + hash_rate: float + difficulty: int + block_number: int + block_hash: str + + +class RegistrationStatisticsLogger: + """Logs statistics for a registration.""" + + console: Console + status: Optional[Status] + + def __init__( + self, console_: Optional["Console"] = None, output_in_place: bool = True + ) -> None: + if console_ is None: + console_ = Console() + self.console = console_ + + if output_in_place: + self.status = self.console.status("Solving") + else: + self.status = None + + def start(self) -> None: + if self.status is not None: + self.status.start() + + def stop(self) -> None: + if self.status is not None: + self.status.stop() + + @classmethod + def get_status_message( + cls, stats: RegistrationStatistics, verbose: bool = False + ) -> str: + """ + Provides a message of the current status of the block solving as a str for a logger or stdout + """ + message = ( + "Solving\n" + + f"Time Spent (total): [bold white]{timedelta(seconds=stats.time_spent_total)}[/bold white]\n" + + ( + f"Time Spent This Round: {timedelta(seconds=stats.time_spent)}\n" + + f"Time Spent Average: {timedelta(seconds=stats.time_average)}\n" + if verbose + else "" + ) + + f"Registration Difficulty: [bold white]{millify(stats.difficulty)}[/bold white]\n" + + f"Iters (Inst/Perp): [bold white]{get_human_readable(stats.hash_rate, 'H')}/s / " + + f"{get_human_readable(stats.hash_rate_perpetual, 'H')}/s[/bold white]\n" + + f"Block Number: [bold white]{stats.block_number}[/bold white]\n" + + f"Block Hash: [bold white]{stats.block_hash.encode('utf-8')}[/bold white]\n" + ) + return message + + def update(self, stats: RegistrationStatistics, verbose: bool = False) -> None: + 
""" + Passes the current status to the logger + """ + if self.status is not None: + self.status.update(self.get_status_message(stats, verbose=verbose)) + else: + self.console.log(self.get_status_message(stats, verbose=verbose)) + + +class _SolverBase(Process): + """ + A process that solves the registration PoW problem. + + :param proc_num: The number of the process being created. + :param num_proc: The total number of processes running. + :param update_interval: The number of nonces to try to solve before checking for a new block. + :param finished_queue: The queue to put the process number when a process finishes each update_interval. + Used for calculating the average time per update_interval across all processes. + :param solution_queue: The queue to put the solution the process has found during the pow solve. + :param stop_event: The event to set by the main process when all the solver processes should stop. + The solver process will check for the event after each update_interval. + The solver process will stop when the event is set. + Used to stop the solver processes when a solution is found. + :param curr_block: The array containing this process's current block hash. + The main process will set the array to the new block hash when a new block is finalized in the + network. The solver process will get the new block hash from this array when newBlockEvent is set + :param curr_block_num: The value containing this process's current block number. + The main process will set the value to the new block number when a new block is finalized in + the network. The solver process will get the new block number from this value when + new_block_event is set. + :param curr_diff: The array containing this process's current difficulty. The main process will set the array to + the new difficulty when a new block is finalized in the network. The solver process will get the + new difficulty from this array when newBlockEvent is set. 
+ :param check_block: The lock to prevent this process from getting the new block data while the main process is + updating the data. + :param limit: The limit of the pow solve for a valid solution. + + :var new_block_event: The event to set by the main process when a new block is finalized in the network. + The solver process will check for the event after each update_interval. + The solver process will get the new block hash and difficulty and start solving for a new + nonce. + """ + + proc_num: int + num_proc: int + update_interval: int + finished_queue: Queue_Type + solution_queue: Queue_Type + new_block_event: Event + stop_event: Event + hotkey_bytes: bytes + curr_block: Array + curr_block_num: Value + curr_diff: Array + check_block: Lock + limit: int + + def __init__( + self, + proc_num, + num_proc, + update_interval, + finished_queue, + solution_queue, + stop_event, + curr_block, + curr_block_num, + curr_diff, + check_block, + limit, + ): + Process.__init__(self, daemon=True) + self.proc_num = proc_num + self.num_proc = num_proc + self.update_interval = update_interval + self.finished_queue = finished_queue + self.solution_queue = solution_queue + self.new_block_event = Event() + self.new_block_event.clear() + self.curr_block = curr_block + self.curr_block_num = curr_block_num + self.curr_diff = curr_diff + self.check_block = check_block + self.stop_event = stop_event + self.limit = limit + + def run(self): + raise NotImplementedError("_SolverBase is an abstract class") + + @staticmethod + def create_shared_memory() -> tuple[Array, Value, Array]: + """Creates shared memory for the solver processes to use.""" + curr_block = Array("h", 32, lock=True) # byte array + curr_block_num = Value("i", 0, lock=True) # int + curr_diff = Array("Q", [0, 0], lock=True) # [high, low] + + return curr_block, curr_block_num, curr_diff + + +class _Solver(_SolverBase): + """ + Performs POW Solution + """ + + def run(self): + block_number: int + block_and_hotkey_hash_bytes: bytes 
+ block_difficulty: int + nonce_limit = int(math.pow(2, 64)) - 1 + + # Start at random nonce + nonce_start = random.randint(0, nonce_limit) + nonce_end = nonce_start + self.update_interval + while not self.stop_event.is_set(): + if self.new_block_event.is_set(): + with self.check_block: + block_number = self.curr_block_num.value + block_and_hotkey_hash_bytes = bytes(self.curr_block) + block_difficulty = _registration_diff_unpack(self.curr_diff) + + self.new_block_event.clear() + + # Do a block of nonces + solution = _solve_for_nonce_block( + nonce_start, + nonce_end, + block_and_hotkey_hash_bytes, + block_difficulty, + self.limit, + block_number, + ) + if solution is not None: + self.solution_queue.put(solution) + + try: + # Send time + self.finished_queue.put_nowait(self.proc_num) + except Full: + pass + + nonce_start = random.randint(0, nonce_limit) + nonce_start = nonce_start % nonce_limit + nonce_end = nonce_start + self.update_interval + + +class _CUDASolver(_SolverBase): + """ + Performs POW Solution using CUDA + """ + + dev_id: int + tpb: int + + def __init__( + self, + proc_num, + num_proc, + update_interval, + finished_queue, + solution_queue, + stop_event, + curr_block, + curr_block_num, + curr_diff, + check_block, + limit, + dev_id: int, + tpb: int, + ): + super().__init__( + proc_num, + num_proc, + update_interval, + finished_queue, + solution_queue, + stop_event, + curr_block, + curr_block_num, + curr_diff, + check_block, + limit, + ) + self.dev_id = dev_id + self.tpb = tpb + + def run(self): + block_number: int = 0 # dummy value + block_and_hotkey_hash_bytes: bytes = b"0" * 32 # dummy value + block_difficulty: int = int(math.pow(2, 64)) - 1 # dummy value + nonce_limit = int(math.pow(2, 64)) - 1 # U64MAX + + # Start at random nonce + nonce_start = random.randint(0, nonce_limit) + while not self.stop_event.is_set(): + if self.new_block_event.is_set(): + with self.check_block: + block_number = self.curr_block_num.value + block_and_hotkey_hash_bytes = 
class LazyLoadedTorch:
    """Proxy that resolves the real ``torch`` module on first attribute access.

    Truthy only when torch is importable; attribute access with torch missing
    logs an install hint and raises ImportError.
    """

    def __bool__(self):
        return bool(_get_real_torch())

    def __getattr__(self, name):
        if real_torch := _get_real_torch():
            return getattr(real_torch, name)
        else:
            log_no_torch_error()
            raise ImportError("torch not installed")


if typing.TYPE_CHECKING:
    import torch
else:
    torch = LazyLoadedTorch()


class MaxSuccessException(Exception):
    """
    Raised when the POW Solver has reached the max number of successful solutions
    """


class MaxAttemptsException(Exception):
    """
    Raised when the POW Solver has reached the max number of attempts
    """


async def is_hotkey_registered(
    subtensor: "AsyncSubtensor", netuid: int, hotkey_ss58: str
) -> bool:
    """Checks to see if the hotkey is registered on a given netuid"""
    _result = await subtensor.substrate.query(
        module="SubtensorModule",
        storage_function="Uids",
        params=[netuid, hotkey_ss58],
    )
    # A non-None Uids entry means the hotkey holds a UID on this subnet.
    return _result is not None


async def register_extrinsic(
    subtensor: "AsyncSubtensor",
    wallet: "Wallet",
    netuid: int,
    wait_for_inclusion: bool = False,
    wait_for_finalization: bool = True,
    prompt: bool = False,
    max_allowed_attempts: int = 3,
    output_in_place: bool = True,
    cuda: bool = False,
    dev_id: typing.Union[list[int], int] = 0,
    tpb: int = 256,
    num_processes: Optional[int] = None,
    update_interval: Optional[int] = None,
    log_verbose: bool = False,
) -> bool:
    """Registers the wallet to the chain.

    :param subtensor: initialized AsyncSubtensor object to use for chain interactions
    :param wallet: Bittensor wallet object.
    :param netuid: The ``netuid`` of the subnet to register on.
    :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, or returns
        `False` if the extrinsic fails to enter the block within the timeout.
    :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`,
        or returns `False` if the extrinsic fails to be finalized within the timeout.
    :param prompt: If `True`, the call waits for confirmation from the user before proceeding.
    :param max_allowed_attempts: Maximum number of attempts to register the wallet.
    :param output_in_place: Whether the POW solving should be outputted to the console as it goes along.
    :param cuda: If `True`, the wallet should be registered using CUDA device(s).
    :param dev_id: The CUDA device id to use, or a list of device ids.
    :param tpb: The number of threads per block (CUDA).
    :param num_processes: The number of processes to use to register.
    :param update_interval: The number of nonces to solve between updates.
    :param log_verbose: If `True`, the registration process will log more information.

    :return: `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion,
        the response is `True`.
    """

    async def get_neuron_for_pubkey_and_subnet():
        # Resolve this wallet's UID on the subnet and fetch the full neuron;
        # falls back to the null neuron when the wallet is not registered.
        uid = await subtensor.substrate.query(
            "SubtensorModule", "Uids", [netuid, wallet.hotkey.ss58_address]
        )
        if uid is None:
            return NeuronInfo.get_null_neuron()

        params = [netuid, uid]
        json_body = await subtensor.substrate.rpc_request(
            method="neuronInfo_getNeuron",
            params=params,
        )

        if not (result := json_body.get("result", None)):
            return NeuronInfo.get_null_neuron()

        return NeuronInfo.from_vec_u8(bytes(result))

    logging.debug("Checking subnet status")
    if not await subtensor.subnet_exists(netuid):
        logging.error(f":cross_mark: Failed error: subnet {netuid} does not exist.")
        return False

    logging.info(f":satellite: Checking Account on subnet {netuid} ...")
    neuron = await get_neuron_for_pubkey_and_subnet()
    if not neuron.is_null:
        logging.debug(
            f"Wallet {wallet} is already registered on subnet {neuron.netuid} with uid{neuron.uid}."
        )
        return True

    if prompt:
        if not Confirm.ask(
            f"Continue Registration?\n"
            f" hotkey ({wallet.hotkey_str}):\t[bold white]{wallet.hotkey.ss58_address}[/bold white]\n"
            f" coldkey ({wallet.name}):\t[bold white]{wallet.coldkeypub.ss58_address}[/bold white]\n"
            f" network:\t\t[bold white]{subtensor.network}[/bold white]"
        ):
            return False

    if not torch:
        log_no_torch_error()
        return False

    # Attempt rolling registration.
    attempts = 1
    pow_result: Optional[POWSolution]
    while True:
        logging.info(f":satellite: Registering... ({attempts}/{max_allowed_attempts})")
        # Solve latest POW.
        if cuda:
            if not torch.cuda.is_available():
                if prompt:
                    logging.info("CUDA is not available.")
                return False
            pow_result = await create_pow(
                subtensor,
                wallet,
                netuid,
                output_in_place,
                cuda=cuda,
                dev_id=dev_id,
                tpb=tpb,
                num_processes=num_processes,
                update_interval=update_interval,
                log_verbose=log_verbose,
            )
        else:
            pow_result = await create_pow(
                subtensor,
                wallet,
                netuid,
                output_in_place,
                cuda=cuda,
                num_processes=num_processes,
                update_interval=update_interval,
                log_verbose=log_verbose,
            )

        # pow failed
        if not pow_result:
            # might be registered already on this subnet
            is_registered = await is_hotkey_registered(
                subtensor, netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address
            )
            if is_registered:
                logging.error(
                    f":white_heavy_check_mark: Already registered on netuid: {netuid}"
                )
                return True

        # pow successful, proceed to submit pow to chain for registration
        else:
            logging.info(":satellite: Submitting POW...")
            # check if pow result is still valid
            while not await pow_result.is_stale(subtensor=subtensor):
                call = await subtensor.substrate.compose_call(
                    call_module="SubtensorModule",
                    call_function="register",
                    call_params={
                        "netuid": netuid,
                        "block_number": pow_result.block_number,
                        "nonce": pow_result.nonce,
                        "work": [int(byte_) for byte_ in pow_result.seal],
                        "hotkey": wallet.hotkey.ss58_address,
                        "coldkey": wallet.coldkeypub.ss58_address,
                    },
                )
                extrinsic = await subtensor.substrate.create_signed_extrinsic(
                    call=call, keypair=wallet.hotkey
                )
                response = await subtensor.substrate.submit_extrinsic(
                    extrinsic,
                    wait_for_inclusion=wait_for_inclusion,
                    wait_for_finalization=wait_for_finalization,
                )
                if not wait_for_finalization and not wait_for_inclusion:
                    success, err_msg = True, ""
                else:
                    await response.process_events()
                    success = await response.is_success
                    if not success:
                        success, err_msg = (
                            False,
                            format_error_message(
                                await response.error_message,
                                substrate=subtensor.substrate,
                            ),
                        )
                        # Look error here
                        # https://github.com/opentensor/subtensor/blob/development/pallets/subtensor/src/errors.rs
                        if "HotKeyAlreadyRegisteredInSubNet" in err_msg:
                            logging.info(
                                f":white_heavy_check_mark: Already Registered on subnet: {netuid}."
                            )
                            return True
                        logging.error(f":cross_mark: Failed: {err_msg}")
                        await asyncio.sleep(0.5)

                # Successful registration, final check for neuron and pubkey
                if success:
                    logging.info(":satellite: Checking Registration status...")
                    is_registered = await is_hotkey_registered(
                        subtensor,
                        netuid=netuid,
                        hotkey_ss58=wallet.hotkey.ss58_address,
                    )
                    if is_registered:
                        logging.success(":white_heavy_check_mark: Registered")
                        return True
                    else:
                        # neuron not found, try again
                        logging.error(":cross_mark: Unknown error. Neuron not found.")
                        continue
            else:
                # Exited loop because pow is no longer valid.
                logging.error("POW is stale.")
                # Try again.
                continue

        if attempts < max_allowed_attempts:
            # Failed registration, retry pow
            attempts += 1
            logging.error(
                f":satellite: Failed registration, retrying pow ... ({attempts}/{max_allowed_attempts})"
            )
        else:
            # Failed to register after max attempts.
            logging.error("No more attempts.")
            return False


async def run_faucet_extrinsic(
    subtensor: "AsyncSubtensor",
    wallet: Wallet,
    wait_for_inclusion: bool = False,
    wait_for_finalization: bool = True,
    prompt: bool = False,
    max_allowed_attempts: int = 3,
    output_in_place: bool = True,
    cuda: bool = False,
    dev_id: int = 0,
    tpb: int = 256,
    num_processes: Optional[int] = None,
    update_interval: Optional[int] = None,
    log_verbose: bool = False,
    max_successes: int = 3,
) -> tuple[bool, str]:
    r"""Runs a continual POW to get a faucet of TAO on the test net.

    :param subtensor: The subtensor interface object used to run the extrinsic
    :param wallet: Bittensor wallet object.
    :param prompt: If `True`, the call waits for confirmation from the user before proceeding.
    :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`,
        or returns `False` if the extrinsic fails to enter the block within the timeout.
    :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`,
        or returns `False` if the extrinsic fails to be finalized within the timeout.
    :param max_allowed_attempts: Maximum number of attempts to register the wallet.
    :param output_in_place: Whether to output logging data as the process runs.
    :param cuda: If `True`, the wallet should be registered using CUDA device(s).
    :param dev_id: The CUDA device id to use
    :param tpb: The number of threads per block (CUDA).
    :param num_processes: The number of processes to use to register.
    :param update_interval: The number of nonces to solve between updates.
    :param log_verbose: If `True`, the registration process will log more information.
    :param max_successes: The maximum number of successful faucet runs for the wallet.

    :return: `True` if extrinsic was finalized or included in the block. If we did not wait for
        finalization/inclusion, the response is also `True`
    """
    if prompt:
        if not Confirm.ask(
            "Run Faucet?\n"
            f" wallet name: [bold white]{wallet.name}[/bold white]\n"
            f" coldkey: [bold white]{wallet.coldkeypub.ss58_address}[/bold white]\n"
            f" network: [bold white]{subtensor}[/bold white]"
        ):
            return False, ""

    if not torch:
        log_no_torch_error()
        return False, "Requires torch"

    # Unlock coldkey
    try:
        wallet.unlock_coldkey()
    except KeyFileError:
        return False, "There was an error unlocking your coldkey"

    # Get previous balance.
    old_balance = await subtensor.get_balance(wallet.coldkeypub.ss58_address)

    # Attempt rolling registration.
    attempts = 1
    successes = 1
    while True:
        try:
            pow_result = None
            while pow_result is None or await pow_result.is_stale(subtensor=subtensor):
                # Solve latest POW.
                if cuda:
                    if not torch.cuda.is_available():
                        if prompt:
                            logging.error("CUDA is not available.")
                        return False, "CUDA is not available."
                    pow_result: Optional[POWSolution] = await create_pow(
                        subtensor,
                        wallet,
                        -1,
                        output_in_place,
                        cuda=cuda,
                        dev_id=dev_id,
                        tpb=tpb,
                        num_processes=num_processes,
                        update_interval=update_interval,
                        log_verbose=log_verbose,
                    )
                else:
                    pow_result: Optional[POWSolution] = await create_pow(
                        subtensor,
                        wallet,
                        -1,
                        output_in_place,
                        cuda=cuda,
                        num_processes=num_processes,
                        update_interval=update_interval,
                        log_verbose=log_verbose,
                    )
            call = await subtensor.substrate.compose_call(
                call_module="SubtensorModule",
                call_function="faucet",
                call_params={
                    "block_number": pow_result.block_number,
                    "nonce": pow_result.nonce,
                    "work": [int(byte_) for byte_ in pow_result.seal],
                },
            )
            extrinsic = await subtensor.substrate.create_signed_extrinsic(
                call=call, keypair=wallet.coldkey
            )
            response = await subtensor.substrate.submit_extrinsic(
                extrinsic,
                wait_for_inclusion=wait_for_inclusion,
                wait_for_finalization=wait_for_finalization,
            )

            # process if registration successful, try again if pow is still valid
            await response.process_events()
            if not await response.is_success:
                logging.error(
                    f":cross_mark: Failed: {format_error_message(await response.error_message, subtensor.substrate)}"
                )
                if attempts == max_allowed_attempts:
                    raise MaxAttemptsException
                attempts += 1
                # Wait a bit before trying again.
                # Bug fix: `time.sleep(1)` would block the event loop inside a
                # coroutine; use asyncio.sleep instead.
                await asyncio.sleep(1)

            # Successful registration
            else:
                new_balance = await subtensor.get_balance(
                    wallet.coldkeypub.ss58_address
                )
                logging.info(
                    f"Balance: {old_balance[wallet.coldkeypub.ss58_address]} :arrow_right: {new_balance[wallet.coldkeypub.ss58_address]}"
                )
                old_balance = new_balance

                if successes == max_successes:
                    raise MaxSuccessException

                attempts = 1  # Reset attempts on success
                successes += 1

        except KeyboardInterrupt:
            return True, "Done"

        except MaxSuccessException:
            # Bug fix: report the configured limit rather than a hardcoded 3.
            return True, f"Max successes reached: {max_successes}"

        except MaxAttemptsException:
            return False, f"Max attempts reached: {max_allowed_attempts}"
+ :param prompt: If `True`, the call waits for confirmation from the user before proceeding. + :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, + or returns `False` if the extrinsic fails to enter the block within the timeout. + :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`, + or returns `False` if the extrinsic fails to be finalized within the timeout. + :param max_allowed_attempts: Maximum number of attempts to register the wallet. + :param output_in_place: Whether to output logging data as the process runs. + :param cuda: If `True`, the wallet should be registered using CUDA device(s). + :param dev_id: The CUDA device id to use + :param tpb: The number of threads per block (CUDA). + :param num_processes: The number of processes to use to register. + :param update_interval: The number of nonces to solve between updates. + :param log_verbose: If `True`, the registration process will log more information. + :param max_successes: The maximum number of successful faucet runs for the wallet. + + :return: `True` if extrinsic was finalized or included in the block. If we did not wait for + finalization/inclusion, the response is also `True` + """ + if prompt: + if not Confirm.ask( + "Run Faucet?\n" + f" wallet name: [bold white]{wallet.name}[/bold white]\n" + f" coldkey: [bold white]{wallet.coldkeypub.ss58_address}[/bold white]\n" + f" network: [bold white]{subtensor}[/bold white]" + ): + return False, "" + + if not torch: + log_no_torch_error() + return False, "Requires torch" + + # Unlock coldkey + try: + wallet.unlock_coldkey() + except KeyFileError: + return False, "There was an error unlocking your coldkey" + + # Get previous balance. + old_balance = await subtensor.get_balance(wallet.coldkeypub.ss58_address) + + # Attempt rolling registration. 
+ attempts = 1 + successes = 1 + while True: + try: + pow_result = None + while pow_result is None or await pow_result.is_stale(subtensor=subtensor): + # Solve latest POW. + if cuda: + if not torch.cuda.is_available(): + if prompt: + logging.error("CUDA is not available.") + return False, "CUDA is not available." + pow_result: Optional[POWSolution] = await create_pow( + subtensor, + wallet, + -1, + output_in_place, + cuda=cuda, + dev_id=dev_id, + tpb=tpb, + num_processes=num_processes, + update_interval=update_interval, + log_verbose=log_verbose, + ) + else: + pow_result: Optional[POWSolution] = await create_pow( + subtensor, + wallet, + -1, + output_in_place, + cuda=cuda, + num_processes=num_processes, + update_interval=update_interval, + log_verbose=log_verbose, + ) + call = await subtensor.substrate.compose_call( + call_module="SubtensorModule", + call_function="faucet", + call_params={ + "block_number": pow_result.block_number, + "nonce": pow_result.nonce, + "work": [int(byte_) for byte_ in pow_result.seal], + }, + ) + extrinsic = await subtensor.substrate.create_signed_extrinsic( + call=call, keypair=wallet.coldkey + ) + response = await subtensor.substrate.submit_extrinsic( + extrinsic, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + + # process if registration successful, try again if pow is still valid + await response.process_events() + if not await response.is_success: + logging.error( + f":cross_mark: Failed: {format_error_message(await response.error_message, subtensor.substrate)}" + ) + if attempts == max_allowed_attempts: + raise MaxAttemptsException + attempts += 1 + # Wait a bit before trying again + time.sleep(1) + + # Successful registration + else: + new_balance = await subtensor.get_balance( + wallet.coldkeypub.ss58_address + ) + logging.info( + f"Balance: {old_balance[wallet.coldkeypub.ss58_address]} :arrow_right: {new_balance[wallet.coldkeypub.ss58_address]}" + ) + old_balance = new_balance + + if 
successes == max_successes: + raise MaxSuccessException + + attempts = 1 # Reset attempts on success + successes += 1 + + except KeyboardInterrupt: + return True, "Done" + + except MaxSuccessException: + return True, f"Max successes reached: {3}" + + except MaxAttemptsException: + return False, f"Max attempts reached: {max_allowed_attempts}" + + +async def _check_for_newest_block_and_update( + subtensor: "AsyncSubtensor", + netuid: int, + old_block_number: int, + hotkey_bytes: bytes, + curr_diff: Array, + curr_block: Array, + curr_block_num: Value, + update_curr_block: typing.Callable, + check_block: Lock, + solvers: list[_Solver], + curr_stats: RegistrationStatistics, +) -> int: + """ + Checks for a new block and updates the current block information if a new block is found. + + :param subtensor: The subtensor object to use for getting the current block. + :param netuid: The netuid to use for retrieving the difficulty. + :param old_block_number: The old block number to check against. + :param hotkey_bytes: The bytes of the hotkey's pubkey. + :param curr_diff: The current difficulty as a multiprocessing array. + :param curr_block: Where the current block is stored as a multiprocessing array. + :param curr_block_num: Where the current block number is stored as a multiprocessing value. + :param update_curr_block: A function that updates the current block. + :param check_block: A mp lock that is used to check for a new block. + :param solvers: A list of solvers to update the current block for. + :param curr_stats: The current registration statistics to update. + + :return: The current block number. 
+ """ + block_number = await subtensor.substrate.get_block_number(None) + if block_number != old_block_number: + old_block_number = block_number + # update block information + block_number, difficulty, block_hash = await _get_block_with_retry( + subtensor=subtensor, netuid=netuid + ) + block_bytes = bytes.fromhex(block_hash[2:]) + + update_curr_block( + curr_diff, + curr_block, + curr_block_num, + block_number, + block_bytes, + difficulty, + hotkey_bytes, + check_block, + ) + # Set new block events for each solver + + for worker in solvers: + worker.new_block_event.set() + + # update stats + curr_stats.block_number = block_number + curr_stats.block_hash = block_hash + curr_stats.difficulty = difficulty + + return old_block_number + + +async def _block_solver( + subtensor: "AsyncSubtensor", + wallet: Wallet, + num_processes: int, + netuid: int, + dev_id: list[int], + tpb: int, + update_interval: int, + curr_block, + curr_block_num, + curr_diff, + n_samples, + alpha_, + output_in_place, + log_verbose, + cuda: bool, +): + """ + Shared code used by the Solvers to solve the POW solution + """ + limit = int(math.pow(2, 256)) - 1 + + # Establish communication queues + ## See the _Solver class for more information on the queues. 
+ stop_event = Event() + stop_event.clear() + + solution_queue = Queue() + finished_queues = [Queue() for _ in range(num_processes)] + check_block = Lock() + + hotkey_bytes = ( + wallet.coldkeypub.public_key if netuid == -1 else wallet.hotkey.public_key + ) + + if cuda: + ## Create a worker per CUDA device + num_processes = len(dev_id) + solvers = [ + _CUDASolver( + i, + num_processes, + update_interval, + finished_queues[i], + solution_queue, + stop_event, + curr_block, + curr_block_num, + curr_diff, + check_block, + limit, + dev_id[i], + tpb, + ) + for i in range(num_processes) + ] + else: + # Start consumers + solvers = [ + _Solver( + i, + num_processes, + update_interval, + finished_queues[i], + solution_queue, + stop_event, + curr_block, + curr_block_num, + curr_diff, + check_block, + limit, + ) + for i in range(num_processes) + ] + + # Get first block + block_number, difficulty, block_hash = await _get_block_with_retry( + subtensor=subtensor, netuid=netuid + ) + + block_bytes = bytes.fromhex(block_hash[2:]) + old_block_number = block_number + # Set to current block + _update_curr_block( + curr_diff, + curr_block, + curr_block_num, + block_number, + block_bytes, + difficulty, + hotkey_bytes, + check_block, + ) + + # Set new block events for each solver to start at the initial block + for worker in solvers: + worker.new_block_event.set() + + for worker in solvers: + worker.start() # start the solver processes + + start_time = time.time() # time that the registration started + time_last = start_time # time that the last work blocks completed + + curr_stats = RegistrationStatistics( + time_spent_total=0.0, + time_average=0.0, + rounds_total=0, + time_spent=0.0, + hash_rate_perpetual=0.0, + hash_rate=0.0, + difficulty=difficulty, + block_number=block_number, + block_hash=block_hash, + ) + + start_time_perpetual = time.time() + + logger = RegistrationStatisticsLogger(output_in_place=output_in_place) + logger.start() + + solution = None + + hash_rates = [0] * 
n_samples # The last n true hash_rates + weights = [alpha_**i for i in range(n_samples)] # weights decay by alpha + + timeout = 0.15 if cuda else 0.15 + while netuid == -1 or not await is_hotkey_registered( + subtensor, netuid, wallet.hotkey.ss58_address + ): + # Wait until a solver finds a solution + try: + solution = solution_queue.get(block=True, timeout=timeout) + if solution is not None: + break + except Empty: + # No solution found, try again + pass + + # check for new block + old_block_number = await _check_for_newest_block_and_update( + subtensor=subtensor, + netuid=netuid, + hotkey_bytes=hotkey_bytes, + old_block_number=old_block_number, + curr_diff=curr_diff, + curr_block=curr_block, + curr_block_num=curr_block_num, + curr_stats=curr_stats, + update_curr_block=_update_curr_block, + check_block=check_block, + solvers=solvers, + ) + + num_time = 0 + for finished_queue in finished_queues: + try: + finished_queue.get(timeout=0.1) + num_time += 1 + + except Empty: + continue + + time_now = time.time() # get current time + time_since_last = time_now - time_last # get time since last work block(s) + if num_time > 0 and time_since_last > 0.0: + # create EWMA of the hash_rate to make measure more robust + + if cuda: + hash_rate_ = (num_time * tpb * update_interval) / time_since_last + else: + hash_rate_ = (num_time * update_interval) / time_since_last + hash_rates.append(hash_rate_) + hash_rates.pop(0) # remove the 0th data point + curr_stats.hash_rate = sum( + [hash_rates[i] * weights[i] for i in range(n_samples)] + ) / (sum(weights)) + + # update time last to now + time_last = time_now + + curr_stats.time_average = ( + curr_stats.time_average * curr_stats.rounds_total + + curr_stats.time_spent + ) / (curr_stats.rounds_total + num_time) + curr_stats.rounds_total += num_time + + # Update stats + curr_stats.time_spent = time_since_last + new_time_spent_total = time_now - start_time_perpetual + if cuda: + curr_stats.hash_rate_perpetual = ( + curr_stats.rounds_total 
* (tpb * update_interval) + ) / new_time_spent_total + else: + curr_stats.hash_rate_perpetual = ( + curr_stats.rounds_total * update_interval + ) / new_time_spent_total + curr_stats.time_spent_total = new_time_spent_total + + # Update the logger + logger.update(curr_stats, verbose=log_verbose) + + # exited while, solution contains the nonce or wallet is registered + stop_event.set() # stop all other processes + logger.stop() + + # terminate and wait for all solvers to exit + _terminate_workers_and_wait_for_exit(solvers) + + return solution + + +async def _solve_for_difficulty_fast_cuda( + subtensor: "AsyncSubtensor", + wallet: Wallet, + netuid: int, + output_in_place: bool = True, + update_interval: int = 50_000, + tpb: int = 512, + dev_id: typing.Union[list[int], int] = 0, + n_samples: int = 10, + alpha_: float = 0.80, + log_verbose: bool = False, +) -> Optional[POWSolution]: + """ + Solves the registration fast using CUDA + + :param subtensor: The subtensor node to grab blocks + :param wallet: The wallet to register + :param netuid: The netuid of the subnet to register to. + :param output_in_place: If true, prints the output in place, otherwise prints to new lines + :param update_interval: The number of nonces to try before checking for more blocks + :param tpb: The number of threads per block. CUDA param that should match the GPU capability + :param dev_id: The CUDA device IDs to execute the registration on, either a single device or a list of devices + :param n_samples: The number of samples of the hash_rate to keep for the EWMA + :param alpha_: The alpha for the EWMA for the hash_rate calculation + :param log_verbose: If true, prints more verbose logging of the registration metrics. + + Note: The hash rate is calculated as an exponentially weighted moving average in order to make the measure more + robust. 
+ """ + if isinstance(dev_id, int): + dev_id = [dev_id] + elif dev_id is None: + dev_id = [0] + + if update_interval is None: + update_interval = 50_000 + + if not torch.cuda.is_available(): + raise Exception("CUDA not available") + + # Set mp start to use spawn so CUDA doesn't complain + with _UsingSpawnStartMethod(force=True): + curr_block, curr_block_num, curr_diff = _CUDASolver.create_shared_memory() + + solution = await _block_solver( + subtensor=subtensor, + wallet=wallet, + num_processes=None, + netuid=netuid, + dev_id=dev_id, + tpb=tpb, + update_interval=update_interval, + curr_block=curr_block, + curr_block_num=curr_block_num, + curr_diff=curr_diff, + n_samples=n_samples, + alpha_=alpha_, + output_in_place=output_in_place, + log_verbose=log_verbose, + cuda=True, + ) + + return solution + + +async def _solve_for_difficulty_fast( + subtensor, + wallet: Wallet, + netuid: int, + output_in_place: bool = True, + num_processes: Optional[int] = None, + update_interval: Optional[int] = None, + n_samples: int = 10, + alpha_: float = 0.80, + log_verbose: bool = False, +) -> Optional[POWSolution]: + """ + Solves the POW for registration using multiprocessing. + + :param subtensor: Subtensor to connect to for block information and to submit. + :param wallet: wallet to use for registration. + :param netuid: The netuid of the subnet to register to. + :param output_in_place: If true, prints the status in place. Otherwise, prints the status on a new line. + :param num_processes: Number of processes to use. + :param update_interval: Number of nonces to solve before updating block information. + :param n_samples: The number of samples of the hash_rate to keep for the EWMA + :param alpha_: The alpha for the EWMA for the hash_rate calculation + :param log_verbose: If true, prints more verbose logging of the registration metrics. + + Notes: + + - The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust. 
+ - We can also modify the update interval to do smaller blocks of work, while still updating the block information + after a different number of nonces, to increase the transparency of the process while still keeping the speed. + """ + if not num_processes: + # get the number of allowed processes for this process + num_processes = min(1, get_cpu_count()) + + if update_interval is None: + update_interval = 50_000 + + curr_block, curr_block_num, curr_diff = _Solver.create_shared_memory() + + solution = await _block_solver( + subtensor=subtensor, + wallet=wallet, + num_processes=num_processes, + netuid=netuid, + dev_id=None, + tpb=None, + update_interval=update_interval, + curr_block=curr_block, + curr_block_num=curr_block_num, + curr_diff=curr_diff, + n_samples=n_samples, + alpha_=alpha_, + output_in_place=output_in_place, + log_verbose=log_verbose, + cuda=False, + ) + + return solution + + +def _terminate_workers_and_wait_for_exit( + workers: list[typing.Union[Process, Queue_Type]], +) -> None: + for worker in workers: + if isinstance(worker, Queue_Type): + worker.join_thread() + else: + try: + worker.join(3.0) + except subprocess.TimeoutExpired: + worker.terminate() + try: + worker.close() + except ValueError: + worker.terminate() + + +# TODO verify this works with async +@backoff.on_exception(backoff.constant, Exception, interval=1, max_tries=3) +async def _get_block_with_retry( + subtensor: "AsyncSubtensor", netuid: int +) -> tuple[int, int, bytes]: + """ + Gets the current block number, difficulty, and block hash from the substrate node. + + :param subtensor: The subtensor object to use to get the block number, difficulty, and block hash. + :param netuid: The netuid of the network to get the block number, difficulty, and block hash from. + + :return: The current block number, difficulty of the subnet, block hash + + :raises Exception: If the block hash is None. + :raises ValueError: If the difficulty is None. 
+ """ + block_number = await subtensor.substrate.get_block_number(None) + block_hash = await subtensor.substrate.get_block_hash( + block_number + ) # TODO check if I need to do all this + try: + difficulty = ( + 1_000_000 + if netuid == -1 + else int( + await subtensor.get_hyperparameter( + param_name="Difficulty", netuid=netuid, block_hash=block_hash + ) + ) + ) + except TypeError: + raise ValueError("Chain error. Difficulty is None") + except SubstrateRequestException: + raise Exception( + "Network error. Could not connect to substrate to get block hash" + ) + return block_number, difficulty, block_hash + + +def _registration_diff_unpack(packed_diff: Array) -> int: + """Unpacks the packed two 32-bit integers into one 64-bit integer. Little endian.""" + return int(packed_diff[0] << 32 | packed_diff[1]) + + +def _registration_diff_pack(diff: int, packed_diff: Array): + """Packs the difficulty into two 32-bit integers. Little endian.""" + packed_diff[0] = diff >> 32 + packed_diff[1] = diff & 0xFFFFFFFF # low 32 bits + + +class _UsingSpawnStartMethod: + def __init__(self, force: bool = False): + self._old_start_method = None + self._force = force + + def __enter__(self): + self._old_start_method = mp.get_start_method(allow_none=True) + if self._old_start_method is None: + self._old_start_method = "spawn" # default to spawn + + mp.set_start_method("spawn", force=self._force) + + def __exit__(self, *args): + # restore the old start method + mp.set_start_method(self._old_start_method, force=True) + + +async def create_pow( + subtensor: "AsyncSubtensor", + wallet: Wallet, + netuid: int, + output_in_place: bool = True, + cuda: bool = False, + dev_id: typing.Union[list[int], int] = 0, + tpb: int = 256, + num_processes: int = None, + update_interval: int = None, + log_verbose: bool = False, +) -> Optional[dict[str, typing.Any]]: + """ + Creates a proof of work for the given subtensor and wallet. + + :param subtensor: The subtensor to create a proof of work for. 
+ :param wallet: The wallet to create a proof of work for. + :param netuid: The netuid for the subnet to create a proof of work for. + :param output_in_place: If true, prints the progress of the proof of work to the console + in-place. Meaning the progress is printed on the same lines. + :param cuda: If true, uses CUDA to solve the proof of work. + :param dev_id: The CUDA device id(s) to use. If cuda is true and dev_id is a list, + then multiple CUDA devices will be used to solve the proof of work. + :param tpb: The number of threads per block to use when solving the proof of work. Should be a multiple of 32. + :param num_processes: The number of processes to use when solving the proof of work. + If None, then the number of processes is equal to the number of CPU cores. + :param update_interval: The number of nonces to run before checking for a new block. + :param log_verbose: If true, prints the progress of the proof of work more verbosely. + + :return: The proof of work solution or None if the wallet is already registered or there is a different error. + + :raises ValueError: If the subnet does not exist. 
+ """ + if netuid != -1: + if not await subtensor.subnet_exists(netuid=netuid): + raise ValueError(f"Subnet {netuid} does not exist") + + if cuda: + solution: Optional[POWSolution] = await _solve_for_difficulty_fast_cuda( + subtensor, + wallet, + netuid=netuid, + output_in_place=output_in_place, + dev_id=dev_id, + tpb=tpb, + update_interval=update_interval, + log_verbose=log_verbose, + ) + else: + solution: Optional[POWSolution] = await _solve_for_difficulty_fast( + subtensor, + wallet, + netuid=netuid, + output_in_place=output_in_place, + num_processes=num_processes, + update_interval=update_interval, + log_verbose=log_verbose, + ) + + return solution + + +def _solve_for_nonce_block_cuda( + nonce_start: int, + update_interval: int, + block_and_hotkey_hash_bytes: bytes, + difficulty: int, + limit: int, + block_number: int, + dev_id: int, + tpb: int, +) -> Optional[POWSolution]: + """ + Tries to solve the POW on a CUDA device for a block of nonces (nonce_start, nonce_start + update_interval * tpb + """ + solution, seal = solve_cuda( + nonce_start, + update_interval, + tpb, + block_and_hotkey_hash_bytes, + difficulty, + limit, + dev_id, + ) + + if solution != -1: + # Check if solution is valid (i.e. not -1) + return POWSolution(solution, block_number, difficulty, seal) + + return None + + +def _solve_for_nonce_block( + nonce_start: int, + nonce_end: int, + block_and_hotkey_hash_bytes: bytes, + difficulty: int, + limit: int, + block_number: int, +) -> Optional[POWSolution]: + """ + Tries to solve the POW for a block of nonces (nonce_start, nonce_end) + """ + for nonce in range(nonce_start, nonce_end): + # Create seal. + seal = _create_seal_hash(block_and_hotkey_hash_bytes, nonce) + + # Check if seal meets difficulty + if _seal_meets_difficulty(seal, difficulty, limit): + # Found a solution, save it. 
+ return POWSolution(nonce, block_number, difficulty, seal) + + return None + + +class CUDAException(Exception): + """An exception raised when an error occurs in the CUDA environment.""" + + +def _hex_bytes_to_u8_list(hex_bytes: bytes): + hex_chunks = [int(hex_bytes[i : i + 2], 16) for i in range(0, len(hex_bytes), 2)] + return hex_chunks + + +def _create_seal_hash(block_and_hotkey_hash_bytes: bytes, nonce: int) -> bytes: + """ + Create a cryptographic seal hash from the given block and hotkey hash bytes and nonce. + + This function generates a seal hash by combining the given block and hotkey hash bytes with a nonce. + It first converts the nonce to a byte representation, then concatenates it with the first 64 hex + characters of the block and hotkey hash bytes. The result is then hashed using SHA-256 followed by + the Keccak-256 algorithm to produce the final seal hash. + + :param block_and_hotkey_hash_bytes: The combined hash bytes of the block and hotkey. + :param nonce: The nonce value used for hashing. + + :return: The resulting seal hash. 
+ """ + nonce_bytes = binascii.hexlify(nonce.to_bytes(8, "little")) + pre_seal = nonce_bytes + binascii.hexlify(block_and_hotkey_hash_bytes)[:64] + seal_sh256 = hashlib.sha256(bytearray(_hex_bytes_to_u8_list(pre_seal))).digest() + kec = keccak.new(digest_bits=256) + seal = kec.update(seal_sh256).digest() + return seal + + +def _seal_meets_difficulty(seal: bytes, difficulty: int, limit: int) -> bool: + """Determines if a seal meets the specified difficulty""" + seal_number = int.from_bytes(seal, "big") + product = seal_number * difficulty + return product < limit + + +def _hash_block_with_hotkey(block_bytes: bytes, hotkey_bytes: bytes) -> bytes: + """Hashes the block with the hotkey using Keccak-256 to get 32 bytes""" + kec = keccak.new(digest_bits=256) + kec = kec.update(bytearray(block_bytes + hotkey_bytes)) + block_and_hotkey_hash_bytes = kec.digest() + return block_and_hotkey_hash_bytes + + +def _update_curr_block( + curr_diff: Array, + curr_block: Array, + curr_block_num: Value, + block_number: int, + block_bytes: bytes, + diff: int, + hotkey_bytes: bytes, + lock: Lock, +): + """ + Update the current block data with the provided block information and difficulty. + + This function updates the current block and its difficulty in a thread-safe manner. It sets the current block + number, hashes the block with the hotkey, updates the current block bytes, and packs the difficulty. + + :param curr_diff: Shared array to store the current difficulty. + :param curr_block: Shared array to store the current block data. + :param curr_block_num: Shared value to store the current block number. + :param block_number: The block number to set as the current block number. + :param block_bytes: The block data bytes to be hashed with the hotkey. + :param diff: The difficulty value to be packed into the current difficulty array. + :param hotkey_bytes: The hotkey bytes used for hashing the block. + :param lock: A lock to ensure thread-safe updates. 
+ """ + with lock: + curr_block_num.value = block_number + # Hash the block with the hotkey + block_and_hotkey_hash_bytes = _hash_block_with_hotkey(block_bytes, hotkey_bytes) + for i in range(32): + curr_block[i] = block_and_hotkey_hash_bytes[i] + _registration_diff_pack(diff, curr_diff) + + +def get_cpu_count() -> int: + try: + return len(os.sched_getaffinity(0)) + except AttributeError: + # macOS does not have sched_getaffinity + return os.cpu_count() + + +@dataclass +class RegistrationStatistics: + """Statistics for a registration.""" + + time_spent_total: float + rounds_total: int + time_average: float + time_spent: float + hash_rate_perpetual: float + hash_rate: float + difficulty: int + block_number: int + block_hash: bytes + + +def solve_cuda( + nonce_start: np.int64, + update_interval: np.int64, + tpb: int, + block_and_hotkey_hash_bytes: bytes, + difficulty: int, + limit: int, + dev_id: int = 0, +) -> tuple[np.int64, bytes]: + """ + Solves the PoW problem using CUDA. + + :param nonce_start: Starting nonce. + :param update_interval: Number of nonces to solve before updating block information. + :param tpb: Threads per block. + :param block_and_hotkey_hash_bytes: Keccak(Bytes of the block hash + bytes of the hotkey) 64 bytes. + :param difficulty: Difficulty of the PoW problem. + :param limit: Upper limit of the nonce. + :param dev_id: The CUDA device ID + + :return: (nonce, seal) corresponding to the solution. Returns -1 for nonce if no solution is found. 
+ """ + + try: + import cubit + except ImportError: + raise ImportError("Please install cubit") + + upper = int(limit // difficulty) + + upper_bytes = upper.to_bytes(32, byteorder="little", signed=False) + + # Call cython function + # int blockSize, uint64 nonce_start, uint64 update_interval, const unsigned char[:] limit, + # const unsigned char[:] block_bytes, int dev_id + block_and_hotkey_hash_hex = binascii.hexlify(block_and_hotkey_hash_bytes)[:64] + + solution = cubit.solve_cuda( + tpb, + nonce_start, + update_interval, + upper_bytes, + block_and_hotkey_hash_hex, + dev_id, + ) # 0 is first GPU + seal = None + if solution != -1: + seal = _create_seal_hash(block_and_hotkey_hash_hex, solution) + if _seal_meets_difficulty(seal, difficulty, limit): + return solution, seal + else: + return -1, b"\x00" * 32 + + return solution, seal + + +def reset_cuda(): + """ + Resets the CUDA environment. + """ + try: + import cubit + except ImportError: + raise ImportError("Please install cubit") + + cubit.reset_cuda() + + +def log_cuda_errors() -> str: + """ + Logs any CUDA errors. 
+ """ + try: + import cubit + except ImportError: + raise ImportError("Please install cubit") + + f = io.StringIO() + with redirect_stdout(f): + cubit.log_cuda_errors() + + s = f.getvalue() + + return s diff --git a/bittensor/core/extrinsics/async_root.py b/bittensor/core/extrinsics/async_root.py new file mode 100644 index 0000000000..9e73f98a30 --- /dev/null +++ b/bittensor/core/extrinsics/async_root.py @@ -0,0 +1,245 @@ +import asyncio +import time +from typing import Union, TYPE_CHECKING + +import numpy as np +from bittensor_wallet import Wallet +from bittensor_wallet.errors import KeyFileError +from numpy.typing import NDArray +from rich.prompt import Confirm +from rich.table import Table, Column +from substrateinterface.exceptions import SubstrateRequestException + +from bittensor.utils import u16_normalized_float, format_error_message +from bittensor.utils.btlogging import logging +from bittensor.utils.weight_utils import ( + normalize_max_weight, + convert_weights_and_uids_for_emit, +) + +if TYPE_CHECKING: + from bittensor.core.async_subtensor import AsyncSubtensor + + +async def get_limits(subtensor: "AsyncSubtensor") -> tuple[int, float]: + # Get weight restrictions. + maw, mwl = await asyncio.gather( + subtensor.get_hyperparameter("MinAllowedWeights", netuid=0), + subtensor.get_hyperparameter("MaxWeightsLimit", netuid=0), + ) + min_allowed_weights = int(maw) + max_weight_limit = u16_normalized_float(int(mwl)) + return min_allowed_weights, max_weight_limit + + +async def root_register_extrinsic( + subtensor: "AsyncSubtensor", + wallet: Wallet, + wait_for_inclusion: bool = True, + wait_for_finalization: bool = True, + prompt: bool = False, +) -> bool: + """Registers the wallet to root network. + + :param subtensor: The AsyncSubtensor object + :param wallet: Bittensor wallet object. 
+ :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout. + :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout. + :param prompt: If `True`, the call waits for confirmation from the user before proceeding. + + :return: `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is `True`. + """ + + try: + wallet.unlock_coldkey() + except KeyFileError: + logging.error("Error decrypting coldkey (possibly incorrect password)") + return False + + logging.debug( + f"Checking if hotkey ({wallet.hotkey_str}) is registered on root." + ) + is_registered = await subtensor.is_hotkey_registered( + netuid=0, hotkey_ss58=wallet.hotkey.ss58_address + ) + if is_registered: + logging.error( + ":white_heavy_check_mark: Already registered on root network." 
+ ) + return True + + logging.info(":satellite: Registering to root network...") + call = await subtensor.substrate.compose_call( + call_module="SubtensorModule", + call_function="root_register", + call_params={"hotkey": wallet.hotkey.ss58_address}, + ) + success, err_msg = await subtensor.sign_and_send_extrinsic( + call, + wallet=wallet, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + + if not success: + logging.error(f":cross_mark: Failed: {err_msg}") + time.sleep(0.5) + return False + + # Successful registration, final check for neuron and pubkey + else: + uid = await subtensor.substrate.query( + module="SubtensorModule", + storage_function="Uids", + params=[0, wallet.hotkey.ss58_address], + ) + if uid is not None: + logging.info( + f":white_heavy_check_mark: Registered with UID {uid}" + ) + return True + else: + # neuron not found, try again + logging.error(":cross_mark: Unknown error. Neuron not found.") + return False + + +async def set_root_weights_extrinsic( + subtensor: "AsyncSubtensor", + wallet: Wallet, + netuids: Union[NDArray[np.int64], list[int]], + weights: Union[NDArray[np.float32], list[float]], + version_key: int = 0, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, + prompt: bool = False, +) -> bool: + """Sets the given weights and values on chain for wallet hotkey account. + + :param subtensor: The AsyncSubtensor object + :param wallet: Bittensor wallet object. + :param netuids: The `netuid` of the subnet to set weights for. + :param weights: Weights to set. These must be `float` s and must correspond to the passed `netuid` s. + :param version_key: The version key of the validator. + :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, or returns + `False` if the extrinsic fails to enter the block within the timeout. 
+ :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`, + or returns `False` if the extrinsic fails to be finalized within the timeout. + :param prompt: If `True`, the call waits for confirmation from the user before proceeding. + :return: `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, + the response is `True`. + """ + + async def _do_set_weights(): + call = await subtensor.substrate.compose_call( + call_module="SubtensorModule", + call_function="set_root_weights", + call_params={ + "dests": weight_uids, + "weights": weight_vals, + "netuid": 0, + "version_key": version_key, + "hotkey": wallet.hotkey.ss58_address, + }, + ) + # Period dictates how long the extrinsic will stay as part of waiting pool + extrinsic = await subtensor.substrate.create_signed_extrinsic( + call=call, + keypair=wallet.coldkey, + era={"period": 5}, + ) + response = await subtensor.substrate.submit_extrinsic( + extrinsic, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + # We only wait here if we expect finalization. + if not wait_for_finalization and not wait_for_inclusion: + return True, "Not waiting for finalization or inclusion." + + await response.process_events() + if await response.is_success: + return True, "Successfully set weights." + else: + return False, await response.error_message + + my_uid = await subtensor.substrate.query( + "SubtensorModule", "Uids", [0, wallet.hotkey.ss58_address] + ) + + if my_uid is None: + logging.error("Your hotkey is not registered to the root network") + return False + + try: + wallet.unlock_coldkey() + except KeyFileError: + logging.error("Error decrypting coldkey (possibly incorrect password)") + return False + + # First convert types. 
+ if isinstance(netuids, list): + netuids = np.array(netuids, dtype=np.int64) + if isinstance(weights, list): + weights = np.array(weights, dtype=np.float32) + + logging.debug("Fetching weight limits") + min_allowed_weights, max_weight_limit = await get_limits(subtensor) + + # Get non zero values. + non_zero_weight_idx = np.argwhere(weights > 0).squeeze(axis=1) + non_zero_weights = weights[non_zero_weight_idx] + if non_zero_weights.size < min_allowed_weights: + raise ValueError( + "The minimum number of weights required to set weights is {}, got {}".format( + min_allowed_weights, non_zero_weights.size + ) + ) + + # Normalize the weights to max value. + logging.info("Normalizing weights") + formatted_weights = normalize_max_weight(x=weights, limit=max_weight_limit) + logging.info( + f"Raw weights -> Normalized weights: {weights} -> {formatted_weights}" + ) + + # Ask before moving on. + if prompt: + table = Table( + Column("[dark_orange]Netuid", justify="center", style="bold green"), + Column( + "[dark_orange]Weight", justify="center", style="bold light_goldenrod2" + ), + expand=False, + show_edge=False, + ) + print("Netuid | Weight") + + for netuid, weight in zip(netuids, formatted_weights): + table.add_row(str(netuid), f"{weight:.8f}") + print(f"{netuid} | {weight}") + + if not Confirm.ask("\nDo you want to set these root weights?"): + return False + + try: + logging.info(":satellite: Setting root weights...") + weight_uids, weight_vals = convert_weights_and_uids_for_emit(netuids, weights) + + success, error_message = await _do_set_weights() + + if not wait_for_finalization and not wait_for_inclusion: + return True + + if success is True: + logging.info(":white_heavy_check_mark: Finalized") + return True + else: + fmt_err = format_error_message(error_message, subtensor.substrate) + logging.error(f":cross_mark: Failed: {fmt_err}") + return False + + except SubstrateRequestException as e: + fmt_err = format_error_message(e, subtensor.substrate) + 
logging.error(f":cross_mark: Failed: error:{fmt_err}") + return False diff --git a/bittensor/core/extrinsics/async_transfer.py b/bittensor/core/extrinsics/async_transfer.py new file mode 100644 index 0000000000..b9072ae9b8 --- /dev/null +++ b/bittensor/core/extrinsics/async_transfer.py @@ -0,0 +1,200 @@ +import asyncio +from typing import TYPE_CHECKING + +from bittensor_wallet import Wallet +from bittensor_wallet.errors import KeyFileError +from rich.prompt import Confirm +from substrateinterface.exceptions import SubstrateRequestException + +from bittensor.core.settings import NETWORK_EXPLORER_MAP +from bittensor.utils import ( + format_error_message, + get_explorer_url_for_network, + is_valid_bittensor_address_or_public_key, +) +from bittensor.utils.balance import Balance +from bittensor.utils.btlogging import logging + +if TYPE_CHECKING: + from bittensor.core.async_subtensor import AsyncSubtensor + + +async def transfer_extrinsic( + subtensor: "AsyncSubtensor", + wallet: Wallet, + destination: str, + amount: Balance, + transfer_all: bool = False, + wait_for_inclusion: bool = True, + wait_for_finalization: bool = False, + keep_alive: bool = True, + prompt: bool = False, +) -> bool: + """Transfers funds from this wallet to the destination public key address. + + :param subtensor: initialized AsyncSubtensor object used for transfer + :param wallet: Bittensor wallet object to make transfer from. + :param destination: Destination public key address (ss58_address or ed25519) of recipient. + :param amount: Amount to stake as Bittensor balance. + :param transfer_all: Whether to transfer all funds from this wallet to the destination address. + :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, + or returns `False` if the extrinsic fails to enter the block within the timeout. 
+ :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning + `True`, or returns `False` if the extrinsic fails to be finalized within the timeout. + :param keep_alive: If set, keeps the account alive by keeping the balance above the existential deposit. + :param prompt: If `True`, the call waits for confirmation from the user before proceeding. + :return: success: Flag is `True` if extrinsic was finalized or included in the block. If we did not wait for + finalization / inclusion, the response is `True`, regardless of its inclusion. + """ + + async def get_transfer_fee() -> Balance: + """ + Calculates the transaction fee for transferring tokens from a wallet to a specified destination address. + This function simulates the transfer to estimate the associated cost, taking into account the current + network conditions and transaction complexity. + """ + call = await subtensor.substrate.compose_call( + call_module="Balances", + call_function="transfer_allow_death", + call_params={"dest": destination, "value": amount.rao}, + ) + + try: + payment_info = await subtensor.substrate.get_payment_info( + call=call, keypair=wallet.coldkeypub + ) + except SubstrateRequestException as e: + payment_info = {"partialFee": int(2e7)} # assume 0.02 Tao + logging.error(f":cross_mark: Failed to get payment info:") + logging.error(f"\t\t{format_error_message(e, subtensor.substrate)}") + logging.error( + f"\t\tDefaulting to default transfer fee: {payment_info['partialFee']}" + ) + + return Balance.from_rao(payment_info["partialFee"]) + + async def do_transfer() -> tuple[bool, str, str]: + """ + Makes transfer from wallet to destination public key address. 
+ :return: success, block hash, formatted error message + """ + call = await subtensor.substrate.compose_call( + call_module="Balances", + call_function="transfer_allow_death", + call_params={"dest": destination, "value": amount.rao}, + ) + extrinsic = await subtensor.substrate.create_signed_extrinsic( + call=call, keypair=wallet.coldkey + ) + response = await subtensor.substrate.submit_extrinsic( + extrinsic, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + # We only wait here if we expect finalization. + if not wait_for_finalization and not wait_for_inclusion: + return True, "", "" + + # Otherwise continue with finalization. + await response.process_events() + if await response.is_success: + block_hash_ = response.block_hash + return True, block_hash_, "" + else: + return ( + False, + "", + format_error_message( + await response.error_message, substrate=subtensor.substrate + ), + ) + + # Validate destination address. + if not is_valid_bittensor_address_or_public_key(destination): + logging.error( + f":cross_mark: Invalid destination SS58 address:[bold white]\n {destination}[/bold white]" + ) + return False + logging.info(f"Initiating transfer on network: {subtensor.network}") + # Unlock wallet coldkey. + try: + wallet.unlock_coldkey() + except KeyFileError: + logging.error("Error decrypting coldkey (possibly incorrect password)") + return False + + # Check balance. 
+ logging.info( + f":satellite: Checking balance and fees on chain {subtensor.network}" + ) + # check existential deposit and fee + logging.debug("Fetching existential and fee") + block_hash = await subtensor.substrate.get_chain_head() + account_balance_, existential_deposit = await asyncio.gather( + subtensor.get_balance(wallet.coldkeypub.ss58_address, block_hash=block_hash), + subtensor.get_existential_deposit(block_hash=block_hash), + ) + account_balance = account_balance_[wallet.coldkeypub.ss58_address] + fee = await get_transfer_fee() + + if not keep_alive: + # Check if the transfer should keep_alive the account + existential_deposit = Balance(0) + + # Check if we have enough balance. + if transfer_all is True: + amount = account_balance - fee - existential_deposit + if amount < Balance(0): + logging.error("Not enough balance to transfer") + return False + + if account_balance < (amount + fee + existential_deposit): + logging.error(":cross_mark: Not enough balance") + logging.error(f"\t\tBalance:\t{account_balance}") + logging.error(f"\t\tAmount:\t{amount}") + logging.error(f"\t\tFor fee:\t{fee}") + return False + + # Ask before moving on. 
+ if prompt: + if not Confirm.ask( + "Do you want to transfer:[bold white]\n" + f" amount: [bright_cyan]{amount}[/bright_cyan]\n" + f" from: [light_goldenrod2]{wallet.name}[/light_goldenrod2] : [bright_magenta]{wallet.coldkey.ss58_address}\n[/bright_magenta]" + f" to: [bright_magenta]{destination}[/bright_magenta]\n for fee: [bright_cyan]{fee}[/bright_cyan]" + ): + return False + + logging.info(":satellite: Transferring...") + logging.info(f"[green]Block Hash: {block_hash}") + + if subtensor.network == "finney": + logging.debug("Fetching explorer URLs") + explorer_urls = get_explorer_url_for_network( + subtensor.network, block_hash, NETWORK_EXPLORER_MAP + ) + if explorer_urls != {} and explorer_urls: + logging.info( + f"[green]Opentensor Explorer Link: {explorer_urls.get('opentensor')}" + ) + logging.info( + f"[green]Taostats Explorer Link: {explorer_urls.get('taostats')}" + ) + else: + logging.error(f":cross_mark: Failed: {err_msg}") + + if success: + logging.info(":satellite: Checking Balance...") + new_balance = await subtensor.get_balance( + wallet.coldkeypub.ss58_address, reuse_block=False + ) + logging.info( + f"Balance: [blue]{account_balance} :arrow_right: [green]{new_balance[wallet.coldkeypub.ss58_address]}" + ) + return True + + return False diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index 5e9f2e9e19..3dcfd5b2c2 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -139,7 +139,9 @@ def commit_weights_extrinsic( logging.info(success_message) return True, success_message else: - error_message = format_error_message(error_message) + error_message = format_error_message( + error_message, substrate=subtensor.substrate + ) logging.error(f"Failed to commit weights: {error_message}") return False, error_message @@ -269,6 +271,8 @@ def reveal_weights_extrinsic( logging.info(success_message) return True, success_message else: - error_message = 
format_error_message(error_message) + error_message = format_error_message( + error_message, substrate=subtensor.substrate + ) logging.error(f"Failed to reveal weights: {error_message}") return False, error_message diff --git a/bittensor/core/extrinsics/registration.py b/bittensor/core/extrinsics/registration.py index 8f7f3292b9..ba9dc73756 100644 --- a/bittensor/core/extrinsics/registration.py +++ b/bittensor/core/extrinsics/registration.py @@ -94,7 +94,9 @@ def make_substrate_call_with_retry(): # process if registration successful, try again if pow is still valid response.process_events() if not response.is_success: - return False, format_error_message(response.error_message) + return False, format_error_message( + response.error_message, substrate=self.substrate + ) # Successful registration else: return True, None @@ -335,7 +337,9 @@ def make_substrate_call_with_retry(): # process if registration successful, try again if pow is still valid response.process_events() if not response.is_success: - return False, format_error_message(response.error_message) + return False, format_error_message( + response.error_message, substrate=self.substrate + ) # Successful registration else: return True, None diff --git a/bittensor/core/extrinsics/root.py b/bittensor/core/extrinsics/root.py index 129e852777..445d2c0b06 100644 --- a/bittensor/core/extrinsics/root.py +++ b/bittensor/core/extrinsics/root.py @@ -49,7 +49,9 @@ def make_substrate_call_with_retry(): # process if registration successful, try again if pow is still valid response.process_events() if not response.is_success: - return False, format_error_message(response.error_message) + return False, format_error_message( + response.error_message, substrate=self.substrate + ) # Successful registration else: return True, None diff --git a/bittensor/core/extrinsics/serving.py b/bittensor/core/extrinsics/serving.py index ac712cd8cb..6eb7a67b25 100644 --- a/bittensor/core/extrinsics/serving.py +++ 
b/bittensor/core/extrinsics/serving.py @@ -186,7 +186,9 @@ def serve_extrinsic( ) return True else: - logging.error(f"Failed: {format_error_message(error_message)}") + logging.error( + f"Failed: {format_error_message(error_message, substrate=subtensor.substrate)}" + ) return False else: return True @@ -298,7 +300,9 @@ def publish_metadata( if response.is_success: return True else: - raise MetadataError(format_error_message(response.error_message)) + raise MetadataError( + format_error_message(response.error_message, substrate=self.substrate) + ) # Community uses this function directly diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index 98f4c16917..904b699926 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -179,7 +179,9 @@ def set_weights_extrinsic( logging.success(f"Finalized! Set weights: {str(success)}") return True, "Successfully set weights and Finalized." else: - error_message = format_error_message(error_message) + error_message = format_error_message( + error_message, substrate=subtensor.substrate + ) logging.error(error_message) return False, error_message diff --git a/bittensor/core/extrinsics/transfer.py b/bittensor/core/extrinsics/transfer.py index aaa2795583..b68a579967 100644 --- a/bittensor/core/extrinsics/transfer.py +++ b/bittensor/core/extrinsics/transfer.py @@ -198,7 +198,7 @@ def transfer_extrinsic( ) else: logging.error( - f":cross_mark: Failed: {format_error_message(error_message)}" + f":cross_mark: Failed: {format_error_message(error_message, substrate=subtensor.substrate)}" ) if success: diff --git a/bittensor/core/settings.py b/bittensor/core/settings.py index 8413b5329f..8eee9676ad 100644 --- a/bittensor/core/settings.py +++ b/bittensor/core/settings.py @@ -30,16 +30,16 @@ WALLETS_DIR = USER_BITTENSOR_DIR / "wallets" MINERS_DIR = USER_BITTENSOR_DIR / "miners" -# Bittensor networks name -NETWORKS = ["local", "finney", "test", "archive"] 
- -DEFAULT_ENDPOINT = "wss://entrypoint-finney.opentensor.ai:443" -DEFAULT_NETWORK = NETWORKS[1] # Create dirs if they don't exist WALLETS_DIR.mkdir(parents=True, exist_ok=True) MINERS_DIR.mkdir(parents=True, exist_ok=True) +# Bittensor networks name +NETWORKS = ["finney", "test", "archive", "local"] + +DEFAULT_ENDPOINT = "wss://entrypoint-finney.opentensor.ai:443" +DEFAULT_NETWORK = NETWORKS[0] # Bittensor endpoints (Needs to use wss://) FINNEY_ENTRYPOINT = "wss://entrypoint-finney.opentensor.ai:443" @@ -47,6 +47,13 @@ ARCHIVE_ENTRYPOINT = "wss://archive.chain.opentensor.ai:443/" LOCAL_ENTRYPOINT = os.getenv("BT_SUBTENSOR_CHAIN_ENDPOINT") or "ws://127.0.0.1:9944" +NETWORK_MAP = { + NETWORKS[0]: FINNEY_ENTRYPOINT, + NETWORKS[1]: FINNEY_TEST_ENTRYPOINT, + NETWORKS[2]: ARCHIVE_ENTRYPOINT, + NETWORKS[3]: LOCAL_ENTRYPOINT, +} + # Currency Symbols Bittensor TAO_SYMBOL: str = chr(0x03C4) RAO_SYMBOL: str = chr(0x03C1) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 3e3c61b017..fcbb4147d7 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -16,7 +16,7 @@ # DEALINGS IN THE SOFTWARE. """ -The ``bittensor.core.subtensor`` module in Bittensor serves as a crucial interface for interacting with the Bittensor +The ``bittensor.core.subtensor.Subtensor`` module in Bittensor serves as a crucial interface for interacting with the Bittensor blockchain, facilitating a range of operations essential for the decentralized machine learning network. """ diff --git a/bittensor/utils/__init__.py b/bittensor/utils/__init__.py index 6239d89808..745726c264 100644 --- a/bittensor/utils/__init__.py +++ b/bittensor/utils/__init__.py @@ -15,8 +15,10 @@ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
+from urllib.parse import urlparse +import ast import hashlib -from typing import Literal, Union, Optional, TYPE_CHECKING +from typing import Any, Literal, Union, Optional, TYPE_CHECKING import scalecodec from bittensor_wallet import Keypair @@ -28,6 +30,7 @@ from .version import version_checking, check_version, VersionCheckError if TYPE_CHECKING: + from bittensor.utils.async_substrate_interface import AsyncSubstrateInterface from substrateinterface import SubstrateInterface RAOPERTAO = 1e9 @@ -142,14 +145,16 @@ def get_hash(content, encoding="utf-8"): def format_error_message( - error_message: dict, substrate: "SubstrateInterface" = None + error_message: Union[dict, Exception], + substrate: Union["AsyncSubstrateInterface", "SubstrateInterface"], ) -> str: """ Formats an error message from the Subtensor error information for use in extrinsics. Args: - error_message (dict): A dictionary containing the error information from Subtensor. - substrate (SubstrateInterface, optional): The substrate interface to use. + error_message: A dictionary containing the error information from Subtensor, or a SubstrateRequestException + containing dictionary literal args. + substrate: The initialised SubstrateInterface object to use. Returns: str: A formatted error message string. 
@@ -158,6 +163,27 @@ def format_error_message( err_type = "UnknownType" err_description = "Unknown Description" + if isinstance(error_message, Exception): + # generally gotten through SubstrateRequestException args + new_error_message = None + for arg in error_message.args: + try: + d = ast.literal_eval(arg) + if isinstance(d, dict): + if "error" in d: + new_error_message = d["error"] + break + elif all(x in d for x in ["code", "message", "data"]): + new_error_message = d + break + except ValueError: + pass + if new_error_message is None: + return_val = " ".join(error_message.args) + return f"Subtensor returned: {return_val}" + else: + error_message = new_error_message + if isinstance(error_message, dict): # subtensor error structure if ( @@ -166,14 +192,11 @@ def format_error_message( and error_message.get("data") ): err_name = "SubstrateRequestException" - err_type = error_message.get("message") - err_data = error_message.get("data") + err_type = error_message.get("message", "") + err_data = error_message.get("data", "") # subtensor custom error marker if err_data.startswith("Custom error:") and substrate: - if not substrate.metadata: - substrate.get_metadata() - if substrate.metadata: try: pallet = substrate.metadata.get_metadata_pallet( @@ -185,8 +208,10 @@ def format_error_message( err_type = error_dict.get("message", err_type) err_docs = error_dict.get("docs", []) err_description = err_docs[0] if err_docs else err_description - except Exception: - logging.error("Substrate pallets data unavailable.") + except (AttributeError, IndexError): + logging.error( + "Substrate pallets data unavailable. This is usually caused by an uninitialized substrate." + ) else: err_description = err_data @@ -277,3 +302,71 @@ def is_valid_bittensor_address_or_public_key(address: Union[str, bytes]) -> bool else: # Invalid address type return False + + +def decode_hex_identity_dict(info_dictionary) -> dict[str, Any]: + """ + Decodes hex-encoded strings in a dictionary. 
+ + This function traverses the given dictionary, identifies hex-encoded strings, and decodes them into readable strings. It handles nested dictionaries and lists within the dictionary. + + Args: + info_dictionary (dict): The dictionary containing hex-encoded strings to decode. + + Returns: + dict: The dictionary with decoded strings. + + Examples: + input_dict = { + ... "name": {"value": "0x6a6f686e"}, + ... "additional": [ + ... [{"data": "0x64617461"}] + ... ] + ... } + decode_hex_identity_dict(input_dict) + {'name': 'john', 'additional': [('data', 'data')]} + """ + + def get_decoded(data: str) -> str: + """Decodes a hex-encoded string.""" + try: + return bytes.fromhex(data[2:]).decode() + except UnicodeDecodeError: + print(f"Could not decode: {key}: {item}") + + for key, value in info_dictionary.items(): + if isinstance(value, dict): + item = list(value.values())[0] + if isinstance(item, str) and item.startswith("0x"): + try: + info_dictionary[key] = get_decoded(item) + except UnicodeDecodeError: + print(f"Could not decode: {key}: {item}") + else: + info_dictionary[key] = item + if key == "additional": + additional = [] + for item in value: + additional.append( + tuple( + get_decoded(data=next(iter(sub_item.values()))) + for sub_item in item + ) + ) + info_dictionary[key] = additional + + return info_dictionary + + +def validate_chain_endpoint(endpoint_url: str) -> tuple[bool, str]: + """Validates if the provided endpoint URL is a valid WebSocket URL.""" + parsed = urlparse(endpoint_url) + if parsed.scheme not in ("ws", "wss"): + return False, ( + f"Invalid URL or network name provided: [bright_cyan]({endpoint_url})[/bright_cyan].\n" + "Allowed network names are [bright_cyan]finney, test, local[/bright_cyan]. 
" + "Valid chain endpoints should use the scheme [bright_cyan]`ws` or `wss`[/bright_cyan].\n" + ) + if not parsed.netloc: + return False, "Invalid URL passed as the endpoint" + return True, "" diff --git a/bittensor/utils/async_substrate_interface.py b/bittensor/utils/async_substrate_interface.py new file mode 100644 index 0000000000..de0547e7b5 --- /dev/null +++ b/bittensor/utils/async_substrate_interface.py @@ -0,0 +1,2742 @@ +import asyncio +import json +import random +from collections import defaultdict +from dataclasses import dataclass +from hashlib import blake2b +from typing import Optional, Any, Union, Callable, Awaitable, cast + +import websockets +from async_property import async_property +from bittensor_wallet import Keypair +from bt_decode import PortableRegistry, decode as decode_by_type_string, MetadataV15 +from scalecodec import GenericExtrinsic +from scalecodec.base import ScaleBytes, ScaleType, RuntimeConfigurationObject +from scalecodec.type_registry import load_type_registry_preset +from scalecodec.types import GenericCall +from substrateinterface.exceptions import ( + SubstrateRequestException, + ExtrinsicNotFound, + BlockNotFound, +) +from substrateinterface.storage import StorageKey + +ResultHandler = Callable[[dict, Any], Awaitable[tuple[dict, bool]]] + + +class TimeoutException(Exception): + pass + + +def timeout_handler(signum, frame): + raise TimeoutException("Operation timed out") + + +class ExtrinsicReceipt: + """ + Object containing information of submitted extrinsic. Block hash where extrinsic is included is required + when retrieving triggered events or determine if extrinsic was successful + """ + + def __init__( + self, + substrate: "AsyncSubstrateInterface", + extrinsic_hash: Optional[str] = None, + block_hash: Optional[str] = None, + block_number: Optional[int] = None, + extrinsic_idx: Optional[int] = None, + finalized=None, + ): + """ + Object containing information of submitted extrinsic. 
Block hash where extrinsic is included is required + when retrieving triggered events or determine if extrinsic was successful + + Parameters + ---------- + substrate + extrinsic_hash + block_hash + finalized + """ + self.substrate = substrate + self.extrinsic_hash = extrinsic_hash + self.block_hash = block_hash + self.block_number = block_number + self.finalized = finalized + + self.__extrinsic_idx = extrinsic_idx + self.__extrinsic = None + + self.__triggered_events: Optional[list] = None + self.__is_success: Optional[bool] = None + self.__error_message = None + self.__weight = None + self.__total_fee_amount = None + + async def get_extrinsic_identifier(self) -> str: + """ + Returns the on-chain identifier for this extrinsic in format "[block_number]-[extrinsic_idx]" e.g. 134324-2 + Returns + ------- + str + """ + if self.block_number is None: + if self.block_hash is None: + raise ValueError( + "Cannot create extrinsic identifier: block_hash is not set" + ) + + self.block_number = await self.substrate.get_block_number(self.block_hash) + + if self.block_number is None: + raise ValueError( + "Cannot create extrinsic identifier: unknown block_hash" + ) + + return f"{self.block_number}-{await self.extrinsic_idx}" + + async def retrieve_extrinsic(self): + if not self.block_hash: + raise ValueError( + "ExtrinsicReceipt can't retrieve events because it's unknown which block_hash it is " + "included, manually set block_hash or use `wait_for_inclusion` when sending extrinsic" + ) + # Determine extrinsic idx + + block = await self.substrate.get_block(block_hash=self.block_hash) + + extrinsics = block["extrinsics"] + + if len(extrinsics) > 0: + if self.__extrinsic_idx is None: + self.__extrinsic_idx = self.__get_extrinsic_index( + block_extrinsics=extrinsics, extrinsic_hash=self.extrinsic_hash + ) + + if self.__extrinsic_idx >= len(extrinsics): + raise ExtrinsicNotFound() + + self.__extrinsic = extrinsics[self.__extrinsic_idx] + + @async_property + async def 
extrinsic_idx(self) -> int: + """ + Retrieves the index of this extrinsic in containing block + + Returns + ------- + int + """ + if self.__extrinsic_idx is None: + await self.retrieve_extrinsic() + return self.__extrinsic_idx + + @async_property + async def triggered_events(self) -> list: + """ + Gets triggered events for submitted extrinsic. block_hash where extrinsic is included is required, manually + set block_hash or use `wait_for_inclusion` when submitting extrinsic + + Returns + ------- + list + """ + if self.__triggered_events is None: + if not self.block_hash: + raise ValueError( + "ExtrinsicReceipt can't retrieve events because it's unknown which block_hash it is " + "included, manually set block_hash or use `wait_for_inclusion` when sending extrinsic" + ) + + if await self.extrinsic_idx is None: + await self.retrieve_extrinsic() + + self.__triggered_events = [] + + for event in await self.substrate.get_events(block_hash=self.block_hash): + if event["extrinsic_idx"] == await self.extrinsic_idx: + self.__triggered_events.append(event) + + return cast(list, self.__triggered_events) + + async def process_events(self): + if await self.triggered_events: + self.__total_fee_amount = 0 + + # Process fees + has_transaction_fee_paid_event = False + + for event in await self.triggered_events: + if ( + event["event"]["module_id"] == "TransactionPayment" + and event["event"]["event_id"] == "TransactionFeePaid" + ): + self.__total_fee_amount = event["event"]["attributes"]["actual_fee"] + has_transaction_fee_paid_event = True + + # Process other events + for event in await self.triggered_events: + # Check events + if ( + event["event"]["module_id"] == "System" + and event["event"]["event_id"] == "ExtrinsicSuccess" + ): + self.__is_success = True + self.__error_message = None + + if "dispatch_info" in event["event"]["attributes"]: + self.__weight = event["event"]["attributes"]["dispatch_info"][ + "weight" + ] + else: + # Backwards compatibility + self.__weight = 
event["event"]["attributes"]["weight"] + + elif ( + event["event"]["module_id"] == "System" + and event["event"]["event_id"] == "ExtrinsicFailed" + ): + self.__is_success = False + + dispatch_info = event["event"]["attributes"]["dispatch_info"] + dispatch_error = event["event"]["attributes"]["dispatch_error"] + + self.__weight = dispatch_info["weight"] + + if "Module" in dispatch_error: + module_index = dispatch_error["Module"][0]["index"] + error_index = int.from_bytes( + bytes(dispatch_error["Module"][0]["error"]), + byteorder="little", + signed=False, + ) + + if isinstance(error_index, str): + # Actual error index is first u8 in new [u8; 4] format + error_index = int(error_index[2:4], 16) + module_error = self.substrate.metadata.get_module_error( + module_index=module_index, error_index=error_index + ) + self.__error_message = { + "type": "Module", + "name": module_error.name, + "docs": module_error.docs, + } + elif "BadOrigin" in dispatch_error: + self.__error_message = { + "type": "System", + "name": "BadOrigin", + "docs": "Bad origin", + } + elif "CannotLookup" in dispatch_error: + self.__error_message = { + "type": "System", + "name": "CannotLookup", + "docs": "Cannot lookup", + } + elif "Other" in dispatch_error: + self.__error_message = { + "type": "System", + "name": "Other", + "docs": "Unspecified error occurred", + } + + elif not has_transaction_fee_paid_event: + if ( + event["event"]["module_id"] == "Treasury" + and event["event"]["event_id"] == "Deposit" + ): + self.__total_fee_amount += event["event"]["attributes"]["value"] + elif ( + event["event"]["module_id"] == "Balances" + and event["event"]["event_id"] == "Deposit" + ): + self.__total_fee_amount += event.value["attributes"]["amount"] + + @async_property + async def is_success(self) -> bool: + """ + Returns `True` if `ExtrinsicSuccess` event is triggered, `False` in case of `ExtrinsicFailed` + In case of False `error_message` will contain more details about the error + + + Returns + ------- + 
bool + """ + if self.__is_success is None: + await self.process_events() + + return cast(bool, self.__is_success) + + @async_property + async def error_message(self) -> Optional[dict]: + """ + Returns the error message if the extrinsic failed in format e.g.: + + `{'type': 'System', 'name': 'BadOrigin', 'docs': 'Bad origin'}` + + Returns + ------- + dict + """ + if self.__error_message is None: + if await self.is_success: + return None + await self.process_events() + return self.__error_message + + @async_property + async def weight(self) -> Union[int, dict]: + """ + Contains the actual weight when executing this extrinsic + + Returns + ------- + int (WeightV1) or dict (WeightV2) + """ + if self.__weight is None: + await self.process_events() + return self.__weight + + @async_property + async def total_fee_amount(self) -> int: + """ + Contains the total fee costs deducted when executing this extrinsic. This includes fee for the validator ( + (`Balances.Deposit` event) and the fee deposited for the treasury (`Treasury.Deposit` event) + + Returns + ------- + int + """ + if self.__total_fee_amount is None: + await self.process_events() + return cast(int, self.__total_fee_amount) + + # Helper functions + @staticmethod + def __get_extrinsic_index(block_extrinsics: list, extrinsic_hash: str) -> int: + """ + Returns the index of a provided extrinsic + """ + for idx, extrinsic in enumerate(block_extrinsics): + if ( + extrinsic.extrinsic_hash + and f"0x{extrinsic.extrinsic_hash.hex()}" == extrinsic_hash + ): + return idx + raise ExtrinsicNotFound() + + # Backwards compatibility methods + def __getitem__(self, item): + return getattr(self, item) + + def __iter__(self): + for item in self.__dict__.items(): + yield item + + def get(self, name): + return self[name] + + +class QueryMapResult: + def __init__( + self, + records: list, + page_size: int, + substrate: "AsyncSubstrateInterface", + module: Optional[str] = None, + storage_function: Optional[str] = None, + params: 
Optional[list] = None, + block_hash: Optional[str] = None, + last_key: Optional[str] = None, + max_results: Optional[int] = None, + ignore_decoding_errors: bool = False, + ): + self.records = records + self.page_size = page_size + self.module = module + self.storage_function = storage_function + self.block_hash = block_hash + self.substrate = substrate + self.last_key = last_key + self.max_results = max_results + self.params = params + self.ignore_decoding_errors = ignore_decoding_errors + self.loading_complete = False + self._buffer = iter(self.records) # Initialize the buffer with initial records + + async def retrieve_next_page(self, start_key) -> list: + result = await self.substrate.query_map( + module=self.module, + storage_function=self.storage_function, + params=self.params, + page_size=self.page_size, + block_hash=self.block_hash, + start_key=start_key, + max_results=self.max_results, + ignore_decoding_errors=self.ignore_decoding_errors, + ) + + # Update last key from new result set to use as offset for next page + self.last_key = result.last_key + return result.records + + def __aiter__(self): + return self + + async def __anext__(self): + try: + # Try to get the next record from the buffer + return next(self._buffer) + except StopIteration: + # If no more records in the buffer, try to fetch the next page + if self.loading_complete: + raise StopAsyncIteration + + next_page = await self.retrieve_next_page(self.last_key) + if not next_page: + self.loading_complete = True + raise StopAsyncIteration + + # Update the buffer with the newly fetched records + self._buffer = iter(next_page) + return next(self._buffer) + + def __getitem__(self, item): + return self.records[item] + + +@dataclass +class Preprocessed: + queryable: str + method: str + params: list + value_scale_type: str + storage_item: ScaleType + + +class RuntimeCache: + blocks: dict[int, "Runtime"] + block_hashes: dict[str, "Runtime"] + + def __init__(self): + self.blocks = {} + self.block_hashes = 
{} + + def add_item( + self, block: Optional[int], block_hash: Optional[str], runtime: "Runtime" + ): + if block is not None: + self.blocks[block] = runtime + if block_hash is not None: + self.block_hashes[block_hash] = runtime + + def retrieve( + self, block: Optional[int] = None, block_hash: Optional[str] = None + ) -> Optional["Runtime"]: + if block is not None: + return self.blocks.get(block) + elif block_hash is not None: + return self.block_hashes.get(block_hash) + else: + return None + + +class Runtime: + block_hash: str + block_id: int + runtime_version = None + transaction_version = None + cache_region = None + metadata = None + type_registry_preset = None + + def __init__(self, chain, runtime_config, metadata, type_registry): + self.runtime_config = RuntimeConfigurationObject() + self.config = {} + self.chain = chain + self.type_registry = type_registry + self.runtime_config = runtime_config + self.metadata = metadata + + @property + def implements_scaleinfo(self) -> bool: + """ + Returns True if current runtime implementation a `PortableRegistry` (`MetadataV14` and higher) + """ + if self.metadata: + return self.metadata.portable_registry is not None + else: + return False + + def reload_type_registry( + self, use_remote_preset: bool = True, auto_discover: bool = True + ): + """ + Reload type registry and preset used to instantiate the SubstrateInterface object. 
Useful to periodically apply + changes in type definitions when a runtime upgrade occurred + + Parameters + ---------- + use_remote_preset: When True preset is downloaded from Github master, otherwise use files from local installed + scalecodec package + auto_discover + + Returns + ------- + + """ + self.runtime_config.clear_type_registry() + + self.runtime_config.implements_scale_info = self.implements_scaleinfo + + # Load metadata types in runtime configuration + self.runtime_config.update_type_registry(load_type_registry_preset(name="core")) + self.apply_type_registry_presets( + use_remote_preset=use_remote_preset, auto_discover=auto_discover + ) + + def apply_type_registry_presets( + self, + use_remote_preset: bool = True, + auto_discover: bool = True, + ): + """ + Applies type registry presets to the runtime + :param use_remote_preset: bool, whether to use presets from remote + :param auto_discover: bool, whether to use presets from local installed scalecodec package + """ + if self.type_registry_preset is not None: + # Load type registry according to preset + type_registry_preset_dict = load_type_registry_preset( + name=self.type_registry_preset, use_remote_preset=use_remote_preset + ) + + if not type_registry_preset_dict: + raise ValueError( + f"Type registry preset '{self.type_registry_preset}' not found" + ) + + elif auto_discover: + # Try to auto discover type registry preset by chain name + type_registry_name = self.chain.lower().replace(" ", "-") + try: + type_registry_preset_dict = load_type_registry_preset( + type_registry_name + ) + self.type_registry_preset = type_registry_name + except ValueError: + type_registry_preset_dict = None + + else: + type_registry_preset_dict = None + + if type_registry_preset_dict: + # Load type registries in runtime configuration + if self.implements_scaleinfo is False: + # Only runtime with no embedded types in metadata need the default set of explicit defined types + self.runtime_config.update_type_registry( + 
load_type_registry_preset( + "legacy", use_remote_preset=use_remote_preset + ) + ) + + if self.type_registry_preset != "legacy": + self.runtime_config.update_type_registry(type_registry_preset_dict) + + if self.type_registry: + # Load type registries in runtime configuration + self.runtime_config.update_type_registry(self.type_registry) + + +class RequestManager: + RequestResults = dict[Union[str, int], list[Union[ScaleType, dict]]] + + def __init__(self, payloads): + self.response_map = {} + self.responses = defaultdict(lambda: {"complete": False, "results": []}) + self.payloads_count = len(payloads) + + def add_request(self, item_id: int, request_id: Any): + """ + Adds an outgoing request to the responses map for later retrieval + """ + self.response_map[item_id] = request_id + + def overwrite_request(self, item_id: int, request_id: Any): + """ + Overwrites an existing request in the responses map with a new request_id. This is used + for multipart responses that generate a subscription id we need to watch, rather than the initial + request_id. + """ + self.response_map[request_id] = self.response_map.pop(item_id) + return request_id + + def add_response(self, item_id: int, response: dict, complete: bool): + """ + Maps a response to the request for later retrieval + """ + request_id = self.response_map[item_id] + self.responses[request_id]["results"].append(response) + self.responses[request_id]["complete"] = complete + + @property + def is_complete(self) -> bool: + """ + Returns whether all requests in the manager have completed + """ + return ( + all(info["complete"] for info in self.responses.values()) + and len(self.responses) == self.payloads_count + ) + + def get_results(self) -> RequestResults: + """ + Generates a dictionary mapping the requests initiated to the responses received. 
+ """ + return { + request_id: info["results"] for request_id, info in self.responses.items() + } + + +class Websocket: + def __init__( + self, + ws_url: str, + max_subscriptions=1024, + max_connections=100, + shutdown_timer=5, + options: Optional[dict] = None, + ): + """ + Websocket manager object. Allows for the use of a single websocket connection by multiple + calls. + + :param ws_url: Websocket URL to connect to + :param max_subscriptions: Maximum number of subscriptions per websocket connection + :param max_connections: Maximum number of connections total + :param shutdown_timer: Number of seconds to shut down websocket connection after last use + """ + # TODO allow setting max concurrent connections and rpc subscriptions per connection + # TODO reconnection logic + self.ws_url = ws_url + self.ws: Optional[websockets.WebSocketClientProtocol] = None + self.id = 0 + self.max_subscriptions = max_subscriptions + self.max_connections = max_connections + self.shutdown_timer = shutdown_timer + self._received = {} + self._in_use = 0 + self._receiving_task = None + self._attempts = 0 + self._initialized = False + self._lock = asyncio.Lock() + self._exit_task = None + self._open_subscriptions = 0 + self._options = options if options else {} + + async def __aenter__(self): + async with self._lock: + self._in_use += 1 + if self._exit_task: + self._exit_task.cancel() + if not self._initialized: + self._initialized = True + await self._connect() + self._receiving_task = asyncio.create_task(self._start_receiving()) + return self + + async def _connect(self): + self.ws = await asyncio.wait_for( + websockets.connect(self.ws_url, **self._options), timeout=10 + ) + + async def __aexit__(self, exc_type, exc_val, exc_tb): + async with self._lock: + self._in_use -= 1 + if self._exit_task is not None: + self._exit_task.cancel() + try: + await self._exit_task + except asyncio.CancelledError: + pass + if self._in_use == 0 and self.ws is not None: + self.id = 0 + 
self._open_subscriptions = 0 + self._exit_task = asyncio.create_task(self._exit_with_timer()) + + async def _exit_with_timer(self): + """ + Allows for graceful shutdown of websocket connection after specified number of seconds, allowing + for reuse of the websocket connection. + """ + try: + await asyncio.sleep(self.shutdown_timer) + await self.shutdown() + except asyncio.CancelledError: + pass + + async def shutdown(self): + async with self._lock: + try: + self._receiving_task.cancel() + await self._receiving_task + await self.ws.close() + except (AttributeError, asyncio.CancelledError): + pass + self.ws = None + self._initialized = False + self._receiving_task = None + self.id = 0 + + async def _recv(self) -> None: + try: + response = json.loads( + await cast(websockets.WebSocketClientProtocol, self.ws).recv() + ) + async with self._lock: + self._open_subscriptions -= 1 + if "id" in response: + self._received[response["id"]] = response + elif "params" in response: + self._received[response["params"]["subscription"]] = response + else: + raise KeyError(response) + except websockets.ConnectionClosed: + raise + except KeyError as e: + raise e + + async def _start_receiving(self): + try: + while True: + await self._recv() + except asyncio.CancelledError: + pass + except websockets.ConnectionClosed: + # TODO try reconnect, but only if it's needed + raise + + async def send(self, payload: dict) -> int: + """ + Sends a payload to the websocket connection. 
+ + :param payload: payload, generate a payload with the AsyncSubstrateInterface.make_payload method + """ + async with self._lock: + original_id = self.id + self.id += 1 + self._open_subscriptions += 1 + try: + await self.ws.send(json.dumps({**payload, **{"id": original_id}})) + return original_id + except websockets.ConnectionClosed: + raise + + async def retrieve(self, item_id: int) -> Optional[dict]: + """ + Retrieves a single item from received responses dict queue + + :param item_id: id of the item to retrieve + + :return: retrieved item + """ + while True: + async with self._lock: + if item_id in self._received: + return self._received.pop(item_id) + await asyncio.sleep(0.1) + + +class AsyncSubstrateInterface: + runtime = None + registry: Optional[PortableRegistry] = None + + def __init__( + self, + chain_endpoint: str, + use_remote_preset=False, + auto_discover=True, + auto_reconnect=True, + ss58_format=None, + type_registry=None, + chain_name=None, + ): + """ + The asyncio-compatible version of the subtensor interface commands we use in bittensor + """ + self.chain_endpoint = chain_endpoint + self.__chain = chain_name + self.ws = Websocket( + chain_endpoint, + options={ + "max_size": 2**32, + "read_limit": 2**16, + "write_limit": 2**16, + }, + ) + self._lock = asyncio.Lock() + self.last_block_hash: Optional[str] = None + self.config = { + "use_remote_preset": use_remote_preset, + "auto_discover": auto_discover, + "auto_reconnect": auto_reconnect, + "rpc_methods": None, + "strict_scale_decode": True, + } + self.initialized = False + self._forgettable_task = None + self.ss58_format = ss58_format + self.type_registry = type_registry + self.runtime_cache = RuntimeCache() + self.block_id: Optional[int] = None + self.runtime_version = None + self.runtime_config = RuntimeConfigurationObject() + self.__metadata_cache = {} + self.type_registry_preset = None + self.transaction_version = None + self.metadata = None + self.metadata_version_hex = "0x0f000000" # v15 + + 
async def __aenter__(self): + await self.initialize() + + async def initialize(self): + """ + Initialize the connection to the chain. + """ + async with self._lock: + if not self.initialized: + if not self.__chain: + chain = await self.rpc_request("system_chain", []) + self.__chain = chain.get("result") + self.reload_type_registry() + await asyncio.gather(self.load_registry(), self.init_runtime(None)) + self.initialized = True + + async def __aexit__(self, exc_type, exc_val, exc_tb): + pass + + @property + def chain(self): + """ + Returns the substrate chain currently associated with object + """ + return self.__chain + + async def get_storage_item(self, module: str, storage_function: str): + if not self.metadata: + await self.init_runtime() + metadata_pallet = self.metadata.get_metadata_pallet(module) + storage_item = metadata_pallet.get_storage_function(storage_function) + return storage_item + + async def _get_current_block_hash( + self, block_hash: Optional[str], reuse: bool + ) -> Optional[str]: + if block_hash: + self.last_block_hash = block_hash + return block_hash + elif reuse: + if self.last_block_hash: + return self.last_block_hash + return block_hash + + async def load_registry(self): + metadata_rpc_result = await self.rpc_request( + "state_call", + ["Metadata_metadata_at_version", self.metadata_version_hex], + ) + metadata_option_hex_str = metadata_rpc_result["result"] + metadata_option_bytes = bytes.fromhex(metadata_option_hex_str[2:]) + metadata_v15 = MetadataV15.decode_from_metadata_option(metadata_option_bytes) + self.registry = PortableRegistry.from_metadata_v15(metadata_v15) + + async def decode_scale( + self, type_string, scale_bytes: bytes, return_scale_obj=False + ): + """ + Helper function to decode arbitrary SCALE-bytes (e.g. 0x02000000) according to given RUST type_string + (e.g. BlockNumber). 
The relevant versioning information of the type (if defined) will be applied if block_hash + is set + + Parameters + ---------- + type_string + scale_bytes + block_hash + return_scale_obj: if True the SCALE object itself is returned, otherwise the serialized dict value of the object + + Returns + ------- + + """ + if scale_bytes == b"\x00": + obj = None + else: + obj = decode_by_type_string(type_string, self.registry, scale_bytes) + return obj + + async def init_runtime( + self, block_hash: Optional[str] = None, block_id: Optional[int] = None + ) -> Runtime: + """ + This method is used by all other methods that deals with metadata and types defined in the type registry. + It optionally retrieves the block_hash when block_id is given and sets the applicable metadata for that + block_hash. Also, it applies all the versioned types at the time of the block_hash. + + Because parsing of metadata and type registry is quite heavy, the result will be cached per runtime id. + In the future there could be support for caching backends like Redis to make this cache more persistent. 
+
+        :param block_hash: optional block hash, should not be specified if block_id is
+        :param block_id: optional block id, should not be specified if block_hash is
+
+        :returns: Runtime object
+        """
+
+        async def get_runtime(block_hash, block_id) -> Runtime:
+            # Check if runtime state already set to current block
+            if (block_hash and block_hash == self.last_block_hash) or (
+                block_id and block_id == self.block_id
+            ):
+                return Runtime(
+                    self.chain,
+                    self.runtime_config,
+                    self.metadata,
+                    self.type_registry,
+                )
+
+            if block_id is not None:
+                block_hash = await self.get_block_hash(block_id)
+
+            if not block_hash:
+                block_hash = await self.get_chain_head()
+
+            self.last_block_hash = block_hash
+            self.block_id = block_id
+
+            # In fact calls and storage functions are decoded against runtime of previous block, therefore retrieve
+            # metadata and apply type registry of runtime of parent block
+            block_header = await self.rpc_request(
+                "chain_getHeader", [self.last_block_hash]
+            )
+
+            if block_header["result"] is None:
+                raise SubstrateRequestException(
+                    f'Block not found for "{self.last_block_hash}"'
+                )
+
+            parent_block_hash: str = block_header["result"]["parentHash"]
+
+            if (
+                parent_block_hash
+                == "0x0000000000000000000000000000000000000000000000000000000000000000"
+            ):
+                runtime_block_hash = self.last_block_hash
+            else:
+                runtime_block_hash = parent_block_hash
+
+            runtime_info = await self.get_block_runtime_version(
+                block_hash=runtime_block_hash
+            )
+
+            if runtime_info is None:
+                raise SubstrateRequestException(
+                    f"No runtime information for block '{block_hash}'"
+                )
+
+            # Check if runtime state already set to current block
+            if runtime_info.get("specVersion") == self.runtime_version:
+                return Runtime(
+                    self.chain,
+                    self.runtime_config,
+                    self.metadata,
+                    self.type_registry,
+                )
+
+            self.runtime_version = runtime_info.get("specVersion")
+            self.transaction_version = runtime_info.get("transactionVersion")
+
+            if not self.metadata:
+                if self.runtime_version in 
self.__metadata_cache: + # Get metadata from cache + # self.debug_message('Retrieved metadata for {} from memory'.format(self.runtime_version)) + self.metadata = self.__metadata_cache[self.runtime_version] + else: + self.metadata = await self.get_block_metadata( + block_hash=runtime_block_hash, decode=True + ) + # self.debug_message('Retrieved metadata for {} from Substrate node'.format(self.runtime_version)) + + # Update metadata cache + self.__metadata_cache[self.runtime_version] = self.metadata + + # Update type registry + self.reload_type_registry(use_remote_preset=False, auto_discover=True) + + if self.implements_scaleinfo: + # self.debug_message('Add PortableRegistry from metadata to type registry') + self.runtime_config.add_portable_registry(self.metadata) + + # Set active runtime version + self.runtime_config.set_active_spec_version_id(self.runtime_version) + + # Check and apply runtime constants + ss58_prefix_constant = await self.get_constant( + "System", "SS58Prefix", block_hash=block_hash + ) + + if ss58_prefix_constant: + self.ss58_format = ss58_prefix_constant + + # Set runtime compatibility flags + try: + _ = self.runtime_config.create_scale_object( + "sp_weights::weight_v2::Weight" + ) + self.config["is_weight_v2"] = True + self.runtime_config.update_type_registry_types( + {"Weight": "sp_weights::weight_v2::Weight"} + ) + except NotImplementedError: + self.config["is_weight_v2"] = False + self.runtime_config.update_type_registry_types({"Weight": "WeightV1"}) + return Runtime( + self.chain, + self.runtime_config, + self.metadata, + self.type_registry, + ) + + if block_id and block_hash: + raise ValueError("Cannot provide block_hash and block_id at the same time") + + if not (runtime := self.runtime_cache.retrieve(block_id, block_hash)): + runtime = await get_runtime(block_hash, block_id) + self.runtime_cache.add_item(block_id, block_hash, runtime) + return runtime + + def reload_type_registry( + self, use_remote_preset: bool = True, auto_discover: 
bool = True
+    ):
+        """
+        Reload type registry and preset used to instantiate the SubstrateInterface object. Useful to periodically apply
+        changes in type definitions when a runtime upgrade occurred
+
+        Parameters
+        ----------
+        use_remote_preset: When True preset is downloaded from Github master, otherwise use files from local installed scalecodec package
+        auto_discover
+
+        Returns
+        -------
+
+        """
+        self.runtime_config.clear_type_registry()
+
+        self.runtime_config.implements_scale_info = self.implements_scaleinfo
+
+        # Load metadata types in runtime configuration
+        self.runtime_config.update_type_registry(load_type_registry_preset(name="core"))
+        self.apply_type_registry_presets(
+            use_remote_preset=use_remote_preset, auto_discover=auto_discover
+        )
+
+    def apply_type_registry_presets(
+        self, use_remote_preset: bool = True, auto_discover: bool = True
+    ):
+        if self.type_registry_preset is not None:
+            # Load type registry according to preset
+            type_registry_preset_dict = load_type_registry_preset(
+                name=self.type_registry_preset, use_remote_preset=use_remote_preset
+            )
+
+            if not type_registry_preset_dict:
+                raise ValueError(
+                    f"Type registry preset '{self.type_registry_preset}' not found"
+                )
+
+        elif auto_discover:
+            # Try to auto discover type registry preset by chain name
+            type_registry_name = self.chain.lower().replace(" ", "-")
+            try:
+                type_registry_preset_dict = load_type_registry_preset(
+                    type_registry_name
+                )
+                # self.debug_message(f"Auto set type_registry_preset to {type_registry_name} ...")
+                self.type_registry_preset = type_registry_name
+            except ValueError:
+                type_registry_preset_dict = None
+
+        else:
+            type_registry_preset_dict = None
+
+        if type_registry_preset_dict:
+            # Load type registries in runtime configuration
+            if self.implements_scaleinfo is False:
+                # Only runtime with no embedded types in metadata need the default set of explicit defined types
+                self.runtime_config.update_type_registry(
+                    load_type_registry_preset(
+                        "legacy", 
use_remote_preset=use_remote_preset + ) + ) + + if self.type_registry_preset != "legacy": + self.runtime_config.update_type_registry(type_registry_preset_dict) + + if self.type_registry: + # Load type registries in runtime configuration + self.runtime_config.update_type_registry(self.type_registry) + + @property + def implements_scaleinfo(self) -> Optional[bool]: + """ + Returns True if current runtime implementation a `PortableRegistry` (`MetadataV14` and higher) + + Returns + ------- + bool + """ + if self.metadata: + return self.metadata.portable_registry is not None + else: + return None + + async def create_storage_key( + self, + pallet: str, + storage_function: str, + params: Optional[list] = None, + block_hash: str = None, + ) -> StorageKey: + """ + Create a `StorageKey` instance providing storage function details. See `subscribe_storage()`. + + Parameters + ---------- + pallet: name of pallet + storage_function: name of storage function + params: Optional list of parameters in case of a Mapped storage function + + Returns + ------- + StorageKey + """ + await self.init_runtime(block_hash=block_hash) + + return StorageKey.create_from_storage_function( + pallet, + storage_function, + params, + runtime_config=self.runtime_config, + metadata=self.metadata, + ) + + async def _get_block_handler( + self, + block_hash: str, + ignore_decoding_errors: bool = False, + include_author: bool = False, + header_only: bool = False, + finalized_only: bool = False, + subscription_handler: Optional[Callable] = None, + ): + try: + await self.init_runtime(block_hash=block_hash) + except BlockNotFound: + return None + + async def decode_block(block_data, block_data_hash=None): + if block_data: + if block_data_hash: + block_data["header"]["hash"] = block_data_hash + + if type(block_data["header"]["number"]) is str: + # Convert block number from hex (backwards compatibility) + block_data["header"]["number"] = int( + block_data["header"]["number"], 16 + ) + + extrinsic_cls = 
self.runtime_config.get_decoder_class("Extrinsic") + + if "extrinsics" in block_data: + for idx, extrinsic_data in enumerate(block_data["extrinsics"]): + extrinsic_decoder = extrinsic_cls( + data=ScaleBytes(extrinsic_data), + metadata=self.metadata, + runtime_config=self.runtime_config, + ) + try: + extrinsic_decoder.decode(check_remaining=True) + block_data["extrinsics"][idx] = extrinsic_decoder + + except Exception as e: + if not ignore_decoding_errors: + raise + block_data["extrinsics"][idx] = None + + for idx, log_data in enumerate(block_data["header"]["digest"]["logs"]): + if type(log_data) is str: + # Convert digest log from hex (backwards compatibility) + try: + log_digest_cls = self.runtime_config.get_decoder_class( + "sp_runtime::generic::digest::DigestItem" + ) + + if log_digest_cls is None: + raise NotImplementedError( + "No decoding class found for 'DigestItem'" + ) + + log_digest = log_digest_cls(data=ScaleBytes(log_data)) + log_digest.decode( + check_remaining=self.config.get("strict_scale_decode") + ) + + block_data["header"]["digest"]["logs"][idx] = log_digest + + if include_author and "PreRuntime" in log_digest.value: + if self.implements_scaleinfo: + engine = bytes(log_digest[1][0]) + # Retrieve validator set + parent_hash = block_data["header"]["parentHash"] + validator_set = await self.query( + "Session", "Validators", block_hash=parent_hash + ) + + if engine == b"BABE": + babe_predigest = ( + self.runtime_config.create_scale_object( + type_string="RawBabePreDigest", + data=ScaleBytes( + bytes(log_digest[1][1]) + ), + ) + ) + + babe_predigest.decode( + check_remaining=self.config.get( + "strict_scale_decode" + ) + ) + + rank_validator = babe_predigest[1].value[ + "authority_index" + ] + + block_author = validator_set[rank_validator] + block_data["author"] = block_author.value + + elif engine == b"aura": + aura_predigest = ( + self.runtime_config.create_scale_object( + type_string="RawAuraPreDigest", + data=ScaleBytes( + bytes(log_digest[1][1]) + 
), + ) + ) + + aura_predigest.decode(check_remaining=True) + + rank_validator = aura_predigest.value[ + "slot_number" + ] % len(validator_set) + + block_author = validator_set[rank_validator] + block_data["author"] = block_author.value + else: + raise NotImplementedError( + f"Cannot extract author for engine {log_digest.value['PreRuntime'][0]}" + ) + else: + if ( + log_digest.value["PreRuntime"]["engine"] + == "BABE" + ): + validator_set = await self.query( + "Session", + "Validators", + block_hash=block_hash, + ) + rank_validator = log_digest.value["PreRuntime"][ + "data" + ]["authority_index"] + + block_author = validator_set.elements[ + rank_validator + ] + block_data["author"] = block_author.value + else: + raise NotImplementedError( + f"Cannot extract author for engine {log_digest.value['PreRuntime']['engine']}" + ) + + except Exception: + if not ignore_decoding_errors: + raise + block_data["header"]["digest"]["logs"][idx] = None + + return block_data + + if callable(subscription_handler): + rpc_method_prefix = "Finalized" if finalized_only else "New" + + async def result_handler(message, update_nr, subscription_id): + new_block = await decode_block({"header": message["params"]["result"]}) + + subscription_result = subscription_handler( + new_block, update_nr, subscription_id + ) + + if subscription_result is not None: + # Handler returned end result: unsubscribe from further updates + self._forgettable_task = asyncio.create_task( + self.rpc_request( + f"chain_unsubscribe{rpc_method_prefix}Heads", + [subscription_id], + ) + ) + + return subscription_result + + result = await self._make_rpc_request( + [ + self.make_payload( + "_get_block_handler", + f"chain_subscribe{rpc_method_prefix}Heads", + [], + ) + ], + result_handler=result_handler, + ) + + return result + + else: + if header_only: + response = await self.rpc_request("chain_getHeader", [block_hash]) + return await decode_block( + {"header": response["result"]}, block_data_hash=block_hash + ) + + else: + 
response = await self.rpc_request("chain_getBlock", [block_hash]) + return await decode_block( + response["result"]["block"], block_data_hash=block_hash + ) + + async def get_block( + self, + block_hash: Optional[str] = None, + block_number: Optional[int] = None, + ignore_decoding_errors: bool = False, + include_author: bool = False, + finalized_only: bool = False, + ) -> Optional[dict]: + """ + Retrieves a block and decodes its containing extrinsics and log digest items. If `block_hash` and `block_number` + is omitted the chain tip will be retrieve, or the finalized head if `finalized_only` is set to true. + + Either `block_hash` or `block_number` should be set, or both omitted. + + Parameters + ---------- + block_hash: the hash of the block to be retrieved + block_number: the block number to retrieved + ignore_decoding_errors: When set this will catch all decoding errors, set the item to None and continue decoding + include_author: This will retrieve the block author from the validator set and add to the result + finalized_only: when no `block_hash` or `block_number` is set, this will retrieve the finalized head + + Returns + ------- + A dict containing the extrinsic and digest logs data + """ + if block_hash and block_number: + raise ValueError("Either block_hash or block_number should be be set") + + if block_number is not None: + block_hash = await self.get_block_hash(block_number) + + if block_hash is None: + return + + if block_hash and finalized_only: + raise ValueError( + "finalized_only cannot be True when block_hash is provided" + ) + + if block_hash is None: + # Retrieve block hash + if finalized_only: + block_hash = await self.get_chain_finalised_head() + else: + block_hash = await self.get_chain_head() + + return await self._get_block_handler( + block_hash=block_hash, + ignore_decoding_errors=ignore_decoding_errors, + header_only=False, + include_author=include_author, + ) + + async def get_events(self, block_hash: Optional[str] = None) -> list: + """ 
+ Convenience method to get events for a certain block (storage call for module 'System' and function 'Events') + + Parameters + ---------- + block_hash + + Returns + ------- + list + """ + + def convert_event_data(data): + # Extract phase information + phase_key, phase_value = next(iter(data["phase"].items())) + try: + extrinsic_idx = phase_value[0] + except IndexError: + extrinsic_idx = None + + # Extract event details + module_id, event_data = next(iter(data["event"].items())) + event_id, attributes_data = next(iter(event_data[0].items())) + + # Convert class and pays_fee dictionaries to their string equivalents if they exist + attributes = attributes_data + if isinstance(attributes, dict): + for key, value in attributes.items(): + if isinstance(value, dict): + # Convert nested single-key dictionaries to their keys as strings + sub_key = next(iter(value.keys())) + if value[sub_key] == (): + attributes[key] = sub_key + + # Create the converted dictionary + converted = { + "phase": phase_key, + "extrinsic_idx": extrinsic_idx, + "event": { + "module_id": module_id, + "event_id": event_id, + "attributes": attributes, + }, + "topics": list(data["topics"]), # Convert topics tuple to a list + } + + return converted + + events = [] + + if not block_hash: + block_hash = await self.get_chain_head() + + storage_obj = await self.query( + module="System", storage_function="Events", block_hash=block_hash + ) + if storage_obj: + for item in list(storage_obj): + # print("item!", item) + events.append(convert_event_data(item)) + # events += list(storage_obj) + return events + + async def get_block_runtime_version(self, block_hash: str) -> dict: + """ + Retrieve the runtime version id of given block_hash + """ + response = await self.rpc_request("state_getRuntimeVersion", [block_hash]) + return response.get("result") + + async def get_block_metadata( + self, block_hash: Optional[str] = None, decode: bool = True + ) -> Union[dict, ScaleType]: + """ + A pass-though to existing 
JSONRPC method `state_getMetadata`. + + Parameters + ---------- + block_hash + decode: True for decoded version + + Returns + ------- + + """ + params = None + if decode and not self.runtime_config: + raise ValueError( + "Cannot decode runtime configuration without a supplied runtime_config" + ) + + if block_hash: + params = [block_hash] + response = await self.rpc_request("state_getMetadata", params) + + if "error" in response: + raise SubstrateRequestException(response["error"]["message"]) + + if response.get("result") and decode: + metadata_decoder = self.runtime_config.create_scale_object( + "MetadataVersioned", data=ScaleBytes(response.get("result")) + ) + metadata_decoder.decode() + + return metadata_decoder + + return response + + async def _preprocess( + self, + query_for: Optional[list], + block_hash: Optional[str], + storage_function: str, + module: str, + ) -> Preprocessed: + """ + Creates a Preprocessed data object for passing to `_make_rpc_request` + """ + params = query_for if query_for else [] + # Search storage call in metadata + metadata_pallet = self.metadata.get_metadata_pallet(module) + + if not metadata_pallet: + raise SubstrateRequestException(f'Pallet "{module}" not found') + + storage_item = metadata_pallet.get_storage_function(storage_function) + + if not metadata_pallet or not storage_item: + raise SubstrateRequestException( + f'Storage function "{module}.{storage_function}" not found' + ) + + # SCALE type string of value + param_types = storage_item.get_params_type_string() + value_scale_type = storage_item.get_value_type_string() + + if len(params) != len(param_types): + raise ValueError( + f"Storage function requires {len(param_types)} parameters, {len(params)} given" + ) + + storage_key = StorageKey.create_from_storage_function( + module, + storage_item.value["name"], + params, + runtime_config=self.runtime_config, + metadata=self.metadata, + ) + method = "state_getStorageAt" + return Preprocessed( + str(query_for), + method, + 
[storage_key.to_hex(), block_hash], + value_scale_type, + storage_item, + ) + + async def _process_response( + self, + response: dict, + subscription_id: Union[int, str], + value_scale_type: Optional[str] = None, + storage_item: Optional[ScaleType] = None, + runtime: Optional[Runtime] = None, + result_handler: Optional[ResultHandler] = None, + ) -> tuple[Union[ScaleType, dict], bool]: + """ + Processes the RPC call response by decoding it, returning it as is, or setting a handler for subscriptions, + depending on the specific call. + + :param response: the RPC call response + :param subscription_id: the subscription id for subscriptions, used only for subscriptions with a result handler + :param value_scale_type: Scale Type string used for decoding ScaleBytes results + :param storage_item: The ScaleType object used for decoding ScaleBytes results + :param runtime: the runtime object, used for decoding ScaleBytes results + :param result_handler: the result handler coroutine used for handling longer-running subscriptions + + :return: (decoded response, completion) + """ + result: Union[dict, ScaleType] = response + if value_scale_type and isinstance(storage_item, ScaleType): + if not runtime: + async with self._lock: + runtime = Runtime( + self.chain, + self.runtime_config, + self.metadata, + self.type_registry, + ) + if response.get("result") is not None: + query_value = response.get("result") + elif storage_item.value["modifier"] == "Default": + # Fallback to default value of storage function if no result + query_value = storage_item.value_object["default"].value_object + else: + # No result is interpreted as an Option<...> result + value_scale_type = f"Option<{value_scale_type}>" + query_value = storage_item.value_object["default"].value_object + if isinstance(query_value, str): + q = bytes.fromhex(query_value[2:]) + elif isinstance(query_value, bytearray): + q = bytes(query_value) + else: + q = query_value + obj = await self.decode_scale(value_scale_type, q, 
True) + result = obj + if asyncio.iscoroutinefunction(result_handler): + # For multipart responses as a result of subscriptions. + message, bool_result = await result_handler(response, subscription_id) + return message, bool_result + return result, True + + async def _make_rpc_request( + self, + payloads: list[dict], + value_scale_type: Optional[str] = None, + storage_item: Optional[ScaleType] = None, + runtime: Optional[Runtime] = None, + result_handler: Optional[ResultHandler] = None, + ) -> RequestManager.RequestResults: + request_manager = RequestManager(payloads) + + subscription_added = False + + async with self.ws as ws: + for item in payloads: + item_id = await ws.send(item["payload"]) + request_manager.add_request(item_id, item["id"]) + + while True: + for item_id in request_manager.response_map.keys(): + if ( + item_id not in request_manager.responses + or asyncio.iscoroutinefunction(result_handler) + ): + if response := await ws.retrieve(item_id): + if ( + asyncio.iscoroutinefunction(result_handler) + and not subscription_added + ): + # handles subscriptions, overwrites the previous mapping of {item_id : payload_id} + # with {subscription_id : payload_id} + try: + item_id = request_manager.overwrite_request( + item_id, response["result"] + ) + except KeyError: + raise SubstrateRequestException(str(response)) + decoded_response, complete = await self._process_response( + response, + item_id, + value_scale_type, + storage_item, + runtime, + result_handler, + ) + request_manager.add_response( + item_id, decoded_response, complete + ) + if ( + asyncio.iscoroutinefunction(result_handler) + and not subscription_added + ): + subscription_added = True + break + + if request_manager.is_complete: + break + + return request_manager.get_results() + + @staticmethod + def make_payload(id_: str, method: str, params: list) -> dict: + """ + Creates a payload for making an rpc_request with _make_rpc_request + + :param id_: a unique name you would like to give to this 
request + :param method: the method in the RPC request + :param params: the params in the RPC request + + :return: the payload dict + """ + return { + "id": id_, + "payload": {"jsonrpc": "2.0", "method": method, "params": params}, + } + + async def rpc_request( + self, + method: str, + params: Optional[list], + block_hash: Optional[str] = None, + reuse_block_hash: bool = False, + ) -> Any: + """ + Makes an RPC request to the subtensor. Use this only if ``self.query`` and ``self.query_multiple`` and + ``self.query_map`` do not meet your needs. + + :param method: str the method in the RPC request + :param params: list of the params in the RPC request + :param block_hash: optional str, the hash of the block — only supply this if not supplying the block + hash in the params, and not reusing the block hash + :param reuse_block_hash: optional bool, whether to reuse the block hash in the params — only mark as True + if not supplying the block hash in the params, or via the `block_hash` parameter + + :return: the response from the RPC request + """ + block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash) + params = params or [] + payload_id = f"{method}{random.randint(0, 7000)}" + payloads = [ + self.make_payload( + payload_id, + method, + params + [block_hash] if block_hash else params, + ) + ] + runtime = Runtime( + self.chain, + self.runtime_config, + self.metadata, + self.type_registry, + ) + result = await self._make_rpc_request(payloads, runtime=runtime) + if "error" in result[payload_id][0]: + raise SubstrateRequestException(result[payload_id][0]["error"]["message"]) + if "result" in result[payload_id][0]: + return result[payload_id][0] + else: + raise SubstrateRequestException(result[payload_id][0]) + + async def get_block_hash(self, block_id: int) -> str: + return (await self.rpc_request("chain_getBlockHash", [block_id]))["result"] + + async def get_chain_head(self) -> str: + result = await self._make_rpc_request( + [ + self.make_payload( + 
"rpc_request", + "chain_getHead", + [], + ) + ], + runtime=Runtime( + self.chain, + self.runtime_config, + self.metadata, + self.type_registry, + ), + ) + self.last_block_hash = result["rpc_request"][0]["result"] + return result["rpc_request"][0]["result"] + + async def compose_call( + self, + call_module: str, + call_function: str, + call_params: Optional[dict] = None, + block_hash: Optional[str] = None, + ) -> GenericCall: + """ + Composes a call payload which can be used in an extrinsic. + + :param call_module: Name of the runtime module e.g. Balances + :param call_function: Name of the call function e.g. transfer + :param call_params: This is a dict containing the params of the call. e.g. + `{'dest': 'EaG2CRhJWPb7qmdcJvy3LiWdh26Jreu9Dx6R1rXxPmYXoDk', 'value': 1000000000000}` + :param block_hash: Use metadata at given block_hash to compose call + + :return: A composed call + """ + if call_params is None: + call_params = {} + + await self.init_runtime(block_hash=block_hash) + + call = self.runtime_config.create_scale_object( + type_string="Call", metadata=self.metadata + ) + + call.encode( + { + "call_module": call_module, + "call_function": call_function, + "call_args": call_params, + } + ) + + return call + + async def query_multiple( + self, + params: list, + storage_function: str, + module: str, + block_hash: Optional[str] = None, + reuse_block_hash: bool = False, + ) -> dict[str, ScaleType]: + """ + Queries the subtensor. 
Only use this when making multiple queries, else use ``self.query`` + """ + # By allowing for specifying the block hash, users, if they have multiple query types they want + # to do, can simply query the block hash first, and then pass multiple query_subtensor calls + # into an asyncio.gather, with the specified block hash + block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash) + if block_hash: + self.last_block_hash = block_hash + runtime = await self.init_runtime(block_hash=block_hash) + preprocessed: tuple[Preprocessed] = await asyncio.gather( + *[ + self._preprocess([x], block_hash, storage_function, module) + for x in params + ] + ) + all_info = [ + self.make_payload(item.queryable, item.method, item.params) + for item in preprocessed + ] + # These will always be the same throughout the preprocessed list, so we just grab the first one + value_scale_type = preprocessed[0].value_scale_type + storage_item = preprocessed[0].storage_item + + responses = await self._make_rpc_request( + all_info, value_scale_type, storage_item, runtime + ) + return { + param: responses[p.queryable][0] for (param, p) in zip(params, preprocessed) + } + + async def query_multi( + self, storage_keys: list[StorageKey], block_hash: Optional[str] = None + ) -> list: + """ + Query multiple storage keys in one request. 
+ + Example: + + ``` + storage_keys = [ + substrate.create_storage_key( + "System", "Account", ["F4xQKRUagnSGjFqafyhajLs94e7Vvzvr8ebwYJceKpr8R7T"] + ), + substrate.create_storage_key( + "System", "Account", ["GSEX8kR4Kz5UZGhvRUCJG93D5hhTAoVZ5tAe6Zne7V42DSi"] + ) + ] + + result = substrate.query_multi(storage_keys) + ``` + + Parameters + ---------- + storage_keys: list of StorageKey objects + block_hash: Optional block_hash of state snapshot + + Returns + ------- + list of `(storage_key, scale_obj)` tuples + """ + + await self.init_runtime(block_hash=block_hash) + + # Retrieve corresponding value + response = await self.rpc_request( + "state_queryStorageAt", [[s.to_hex() for s in storage_keys], block_hash] + ) + + if "error" in response: + raise SubstrateRequestException(response["error"]["message"]) + + result = [] + + storage_key_map = {s.to_hex(): s for s in storage_keys} + + for result_group in response["result"]: + for change_storage_key, change_data in result_group["changes"]: + # Decode result for specified storage_key + storage_key = storage_key_map[change_storage_key] + if change_data is None: + change_data = b"\x00" + else: + change_data = bytes.fromhex(change_data[2:]) + result.append( + ( + storage_key, + await self.decode_scale( + storage_key.value_scale_type, change_data + ), + ) + ) + + return result + + async def create_scale_object( + self, + type_string: str, + data: Optional[ScaleBytes] = None, + block_hash: Optional[str] = None, + **kwargs, + ) -> "ScaleType": + """ + Convenience method to create a SCALE object of type `type_string`, this will initialize the runtime + automatically at moment of `block_hash`, or chain tip if omitted. 
+ + :param type_string: str Name of SCALE type to create + :param data: ScaleBytes Optional ScaleBytes to decode + :param block_hash: Optional block hash for moment of decoding, when omitted the chain tip will be used + :param kwargs: keyword args for the Scale Type constructor + + :return: The created Scale Type object + """ + runtime = await self.init_runtime(block_hash=block_hash) + if "metadata" not in kwargs: + kwargs["metadata"] = runtime.metadata + + return runtime.runtime_config.create_scale_object( + type_string, data=data, **kwargs + ) + + async def generate_signature_payload( + self, + call: GenericCall, + era=None, + nonce: int = 0, + tip: int = 0, + tip_asset_id: Optional[int] = None, + include_call_length: bool = False, + ) -> ScaleBytes: + # Retrieve genesis hash + genesis_hash = await self.get_block_hash(0) + + if not era: + era = "00" + + if era == "00": + # Immortal extrinsic + block_hash = genesis_hash + else: + # Determine mortality of extrinsic + era_obj = self.runtime_config.create_scale_object("Era") + + if isinstance(era, dict) and "current" not in era and "phase" not in era: + raise ValueError( + 'The era dict must contain either "current" or "phase" element to encode a valid era' + ) + + era_obj.encode(era) + block_hash = await self.get_block_hash( + block_id=era_obj.birth(era.get("current")) + ) + + # Create signature payload + signature_payload = self.runtime_config.create_scale_object( + "ExtrinsicPayloadValue" + ) + + # Process signed extensions in metadata + if "signed_extensions" in self.metadata[1][1]["extrinsic"]: + # Base signature payload + signature_payload.type_mapping = [["call", "CallBytes"]] + + # Add signed extensions to payload + signed_extensions = self.metadata.get_signed_extensions() + + if "CheckMortality" in signed_extensions: + signature_payload.type_mapping.append( + ["era", signed_extensions["CheckMortality"]["extrinsic"]] + ) + + if "CheckEra" in signed_extensions: + signature_payload.type_mapping.append( + 
["era", signed_extensions["CheckEra"]["extrinsic"]] + ) + + if "CheckNonce" in signed_extensions: + signature_payload.type_mapping.append( + ["nonce", signed_extensions["CheckNonce"]["extrinsic"]] + ) + + if "ChargeTransactionPayment" in signed_extensions: + signature_payload.type_mapping.append( + ["tip", signed_extensions["ChargeTransactionPayment"]["extrinsic"]] + ) + + if "ChargeAssetTxPayment" in signed_extensions: + signature_payload.type_mapping.append( + ["asset_id", signed_extensions["ChargeAssetTxPayment"]["extrinsic"]] + ) + + if "CheckMetadataHash" in signed_extensions: + signature_payload.type_mapping.append( + ["mode", signed_extensions["CheckMetadataHash"]["extrinsic"]] + ) + + if "CheckSpecVersion" in signed_extensions: + signature_payload.type_mapping.append( + [ + "spec_version", + signed_extensions["CheckSpecVersion"]["additional_signed"], + ] + ) + + if "CheckTxVersion" in signed_extensions: + signature_payload.type_mapping.append( + [ + "transaction_version", + signed_extensions["CheckTxVersion"]["additional_signed"], + ] + ) + + if "CheckGenesis" in signed_extensions: + signature_payload.type_mapping.append( + [ + "genesis_hash", + signed_extensions["CheckGenesis"]["additional_signed"], + ] + ) + + if "CheckMortality" in signed_extensions: + signature_payload.type_mapping.append( + [ + "block_hash", + signed_extensions["CheckMortality"]["additional_signed"], + ] + ) + + if "CheckEra" in signed_extensions: + signature_payload.type_mapping.append( + ["block_hash", signed_extensions["CheckEra"]["additional_signed"]] + ) + + if "CheckMetadataHash" in signed_extensions: + signature_payload.type_mapping.append( + [ + "metadata_hash", + signed_extensions["CheckMetadataHash"]["additional_signed"], + ] + ) + + if include_call_length: + length_obj = self.runtime_config.create_scale_object("Bytes") + call_data = str(length_obj.encode(str(call.data))) + + else: + call_data = str(call.data) + + payload_dict = { + "call": call_data, + "era": era, + "nonce": 
nonce, + "tip": tip, + "spec_version": self.runtime_version, + "genesis_hash": genesis_hash, + "block_hash": block_hash, + "transaction_version": self.transaction_version, + "asset_id": {"tip": tip, "asset_id": tip_asset_id}, + "metadata_hash": None, + "mode": "Disabled", + } + + signature_payload.encode(payload_dict) + + if signature_payload.data.length > 256: + return ScaleBytes( + data=blake2b(signature_payload.data.data, digest_size=32).digest() + ) + + return signature_payload.data + + async def create_signed_extrinsic( + self, + call: GenericCall, + keypair: Keypair, + era: Optional[dict] = None, + nonce: Optional[int] = None, + tip: int = 0, + tip_asset_id: Optional[int] = None, + signature: Optional[Union[bytes, str]] = None, + ) -> "GenericExtrinsic": + """ + Creates an extrinsic signed by given account details + + :param call: GenericCall to create extrinsic for + :param keypair: Keypair used to sign the extrinsic + :param era: Specify mortality in blocks in follow format: + {'period': [amount_blocks]} If omitted the extrinsic is immortal + :param nonce: nonce to include in extrinsics, if omitted the current nonce is retrieved on-chain + :param tip: The tip for the block author to gain priority during network congestion + :param tip_asset_id: Optional asset ID with which to pay the tip + :param signature: Optionally provide signature if externally signed + + :return: The signed Extrinsic + """ + await self.init_runtime() + + # Check requirements + if not isinstance(call, GenericCall): + raise TypeError("'call' must be of type Call") + + # Check if extrinsic version is supported + if self.metadata[1][1]["extrinsic"]["version"] != 4: # type: ignore + raise NotImplementedError( + f"Extrinsic version {self.metadata[1][1]['extrinsic']['version']} not supported" # type: ignore + ) + + # Retrieve nonce + if nonce is None: + nonce = await self.get_account_nonce(keypair.ss58_address) or 0 + + # Process era + if era is None: + era = "00" + else: + if 
isinstance(era, dict) and "current" not in era and "phase" not in era: + # Retrieve current block id + era["current"] = await self.get_block_number( + await self.get_chain_finalised_head() + ) + + if signature is not None: + if isinstance(signature, str) and signature[0:2] == "0x": + signature = bytes.fromhex(signature[2:]) + + # Check if signature is a MultiSignature and contains signature version + if len(signature) == 65: + signature_version = signature[0] + signature = signature[1:] + else: + signature_version = keypair.crypto_type + + else: + # Create signature payload + signature_payload = await self.generate_signature_payload( + call=call, era=era, nonce=nonce, tip=tip, tip_asset_id=tip_asset_id + ) + + # Set Signature version to crypto type of keypair + signature_version = keypair.crypto_type + + # Sign payload + signature = keypair.sign(signature_payload) + + # Create extrinsic + extrinsic = self.runtime_config.create_scale_object( + type_string="Extrinsic", metadata=self.metadata + ) + + value = { + "account_id": f"0x{keypair.public_key.hex()}", + "signature": f"0x{signature.hex()}", + "call_function": call.value["call_function"], + "call_module": call.value["call_module"], + "call_args": call.value["call_args"], + "nonce": nonce, + "era": era, + "tip": tip, + "asset_id": {"tip": tip, "asset_id": tip_asset_id}, + "mode": "Disabled", + } + + # Check if ExtrinsicSignature is MultiSignature, otherwise omit signature_version + signature_cls = self.runtime_config.get_decoder_class("ExtrinsicSignature") + if issubclass(signature_cls, self.runtime_config.get_decoder_class("Enum")): + value["signature_version"] = signature_version + + extrinsic.encode(value) + + return extrinsic + + async def get_chain_finalised_head(self): + """ + A pass-though to existing JSONRPC method `chain_getFinalizedHead` + + Returns + ------- + + """ + response = await self.rpc_request("chain_getFinalizedHead", []) + + if response is not None: + if "error" in response: + raise 
SubstrateRequestException(response["error"]["message"]) + + return response.get("result") + + async def runtime_call( + self, + api: str, + method: str, + params: Optional[Union[list, dict]] = None, + block_hash: Optional[str] = None, + ) -> ScaleType: + """ + Calls a runtime API method + + :param api: Name of the runtime API e.g. 'TransactionPaymentApi' + :param method: Name of the method e.g. 'query_fee_details' + :param params: List of parameters needed to call the runtime API + :param block_hash: Hash of the block at which to make the runtime API call + + :return: ScaleType from the runtime call + """ + await self.init_runtime() + + if params is None: + params = {} + + try: + runtime_call_def = self.runtime_config.type_registry["runtime_api"][api][ + "methods" + ][method] + runtime_api_types = self.runtime_config.type_registry["runtime_api"][ + api + ].get("types", {}) + except KeyError: + raise ValueError(f"Runtime API Call '{api}.{method}' not found in registry") + + if isinstance(params, list) and len(params) != len(runtime_call_def["params"]): + raise ValueError( + f"Number of parameter provided ({len(params)}) does not " + f"match definition {len(runtime_call_def['params'])}" + ) + + # Add runtime API types to registry + self.runtime_config.update_type_registry_types(runtime_api_types) + runtime = Runtime( + self.chain, + self.runtime_config, + self.metadata, + self.type_registry, + ) + + # Encode params + param_data = ScaleBytes(bytes()) + for idx, param in enumerate(runtime_call_def["params"]): + scale_obj = runtime.runtime_config.create_scale_object(param["type"]) + if isinstance(params, list): + param_data += scale_obj.encode(params[idx]) + else: + if param["name"] not in params: + raise ValueError(f"Runtime Call param '{param['name']}' is missing") + + param_data += scale_obj.encode(params[param["name"]]) + + # RPC request + result_data = await self.rpc_request( + "state_call", [f"{api}_{method}", str(param_data), block_hash] + ) + + # Decode result + 
# TODO update this to use bt-decode + result_obj = runtime.runtime_config.create_scale_object( + runtime_call_def["type"] + ) + result_obj.decode( + ScaleBytes(result_data["result"]), + check_remaining=self.config.get("strict_scale_decode"), + ) + + return result_obj + + async def get_account_nonce(self, account_address: str) -> int: + """ + Returns current nonce for given account address + + :param account_address: SS58 formatted address + + :return: Nonce for given account address + """ + nonce_obj = await self.runtime_call( + "AccountNonceApi", "account_nonce", [account_address] + ) + return nonce_obj.value + + async def get_metadata_constant(self, module_name, constant_name, block_hash=None): + """ + Retrieves the details of a constant for given module name, call function name and block_hash + (or chaintip if block_hash is omitted) + + Parameters + ---------- + module_name + constant_name + block_hash + + Returns + ------- + MetadataModuleConstants + """ + + # await self.init_runtime(block_hash=block_hash) + + for module in self.metadata.pallets: + if module_name == module.name and module.constants: + for constant in module.constants: + if constant_name == constant.value["name"]: + return constant + + async def get_constant( + self, + module_name: str, + constant_name: str, + block_hash: Optional[str] = None, + reuse_block_hash: bool = False, + ) -> "ScaleType": + """ + Returns the decoded `ScaleType` object of the constant for given module name, call function name and block_hash + (or chaintip if block_hash is omitted) + + Parameters + ---------- + :param module_name: Name of the module to query + :param constant_name: Name of the constant to query + :param block_hash: Hash of the block at which to make the runtime API call + :param reuse_block_hash: Reuse last-used block hash if set to true + + :return: ScaleType from the runtime call + """ + block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash) + constant = await 
self.get_metadata_constant( + module_name, constant_name, block_hash=block_hash + ) + if constant: + # Decode to ScaleType + return await self.decode_scale( + constant.type, + bytes(constant.constant_value), + return_scale_obj=True, + ) + else: + return None + + async def get_payment_info( + self, call: GenericCall, keypair: Keypair + ) -> dict[str, Any]: + """ + Retrieves fee estimation via RPC for given extrinsic + + Parameters + ---------- + call: Call object to estimate fees for + keypair: Keypair of the sender, does not have to include private key because no valid signature is required + + Returns + ------- + Dict with payment info + + E.g. `{'class': 'normal', 'partialFee': 151000000, 'weight': {'ref_time': 143322000}}` + + """ + + # Check requirements + if not isinstance(call, GenericCall): + raise TypeError("'call' must be of type Call") + + if not isinstance(keypair, Keypair): + raise TypeError("'keypair' must be of type Keypair") + + # No valid signature is required for fee estimation + signature = "0x" + "00" * 64 + + # Create extrinsic + extrinsic = await self.create_signed_extrinsic( + call=call, keypair=keypair, signature=signature + ) + extrinsic_len = self.runtime_config.create_scale_object("u32") + extrinsic_len.encode(len(extrinsic.data)) + + result = await self.runtime_call( + "TransactionPaymentApi", "query_info", [extrinsic, extrinsic_len] + ) + + return result.value + + async def query( + self, + module: str, + storage_function: str, + params: Optional[list] = None, + block_hash: Optional[str] = None, + raw_storage_key: Optional[bytes] = None, + subscription_handler=None, + reuse_block_hash: bool = False, + ) -> Union["ScaleType"]: + """ + Queries subtensor. This should only be used when making a single request. 
For multiple requests, + you should use ``self.query_multiple`` + """ + block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash) + if block_hash: + self.last_block_hash = block_hash + runtime = await self.init_runtime(block_hash=block_hash) + preprocessed: Preprocessed = await self._preprocess( + params, block_hash, storage_function, module + ) + payload = [ + self.make_payload( + preprocessed.queryable, preprocessed.method, preprocessed.params + ) + ] + value_scale_type = preprocessed.value_scale_type + storage_item = preprocessed.storage_item + + responses = await self._make_rpc_request( + payload, + value_scale_type, + storage_item, + runtime, + result_handler=subscription_handler, + ) + return responses[preprocessed.queryable][0] + + async def query_map( + self, + module: str, + storage_function: str, + params: Optional[list] = None, + block_hash: Optional[str] = None, + max_results: Optional[int] = None, + start_key: Optional[str] = None, + page_size: int = 100, + ignore_decoding_errors: bool = False, + reuse_block_hash: bool = False, + ) -> "QueryMapResult": + """ + Iterates over all key-pairs located at the given module and storage_function. The storage + item must be a map. + + Example: + + ``` + result = await substrate.query_map('System', 'Account', max_results=100) + + async for account, account_info in result: + print(f"Free balance of account '{account.value}': {account_info.value['data']['free']}") + ``` + + Note: it is important that you do not use `for x in result.records`, as this will sidestep possible + pagination. You must do `async for x in result`. + + :param module: The module name in the metadata, e.g. System or Balances. + :param storage_function: The storage function name, e.g. Account or Locks. + :param params: The input parameters in case of for example a `DoubleMap` storage function + :param block_hash: Optional block hash for result at given block, when left to None the chain tip will be used. 
+ :param max_results: the maximum of results required, if set the query will stop fetching results when number is + reached + :param start_key: The storage key used as offset for the results, for pagination purposes + :param page_size: The results are fetched from the node RPC in chunks of this size + :param ignore_decoding_errors: When set this will catch all decoding errors, set the item to None and continue + decoding + :param reuse_block_hash: use True if you wish to make the query using the last-used block hash. Do not mark True + if supplying a block_hash + + :return: QueryMapResult object + """ + params = params or [] + block_hash = await self._get_current_block_hash(block_hash, reuse_block_hash) + if block_hash: + self.last_block_hash = block_hash + runtime = await self.init_runtime(block_hash=block_hash) + + metadata_pallet = runtime.metadata.get_metadata_pallet(module) + if not metadata_pallet: + raise ValueError(f'Pallet "{module}" not found') + storage_item = metadata_pallet.get_storage_function(storage_function) + + if not metadata_pallet or not storage_item: + raise ValueError( + f'Storage function "{module}.{storage_function}" not found' + ) + + value_type = storage_item.get_value_type_string() + param_types = storage_item.get_params_type_string() + key_hashers = storage_item.get_param_hashers() + + # Check MapType conditions + if len(param_types) == 0: + raise ValueError("Given storage function is not a map") + if len(params) > len(param_types) - 1: + raise ValueError( + f"Storage function map can accept max {len(param_types) - 1} parameters, {len(params)} given" + ) + + # Generate storage key prefix + storage_key = StorageKey.create_from_storage_function( + module, + storage_item.value["name"], + params, + runtime_config=runtime.runtime_config, + metadata=runtime.metadata, + ) + prefix = storage_key.to_hex() + + if not start_key: + start_key = prefix + + # Make sure if the max result is smaller than the page size, adjust the page size + if 
max_results is not None and max_results < page_size: + page_size = max_results + + # Retrieve storage keys + response = await self.rpc_request( + method="state_getKeysPaged", + params=[prefix, page_size, start_key, block_hash], + ) + + if "error" in response: + raise SubstrateRequestException(response["error"]["message"]) + + result_keys = response.get("result") + + result = [] + last_key = None + + def concat_hash_len(key_hasher: str) -> int: + """ + Helper function to avoid if statements + """ + if key_hasher == "Blake2_128Concat": + return 16 + elif key_hasher == "Twox64Concat": + return 8 + elif key_hasher == "Identity": + return 0 + else: + raise ValueError("Unsupported hash type") + + if len(result_keys) > 0: + last_key = result_keys[-1] + + # Retrieve corresponding value + response = await self.rpc_request( + method="state_queryStorageAt", params=[result_keys, block_hash] + ) + + if "error" in response: + raise SubstrateRequestException(response["error"]["message"]) + + for result_group in response["result"]: + for item in result_group["changes"]: + try: + # Determine type string + key_type_string = [] + for n in range(len(params), len(param_types)): + key_type_string.append( + f"[u8; {concat_hash_len(key_hashers[n])}]" + ) + key_type_string.append(param_types[n]) + + item_key_obj = await self.decode_scale( + type_string=f"({', '.join(key_type_string)})", + scale_bytes=bytes.fromhex(item[0][len(prefix) :]), + return_scale_obj=True, + ) + + # strip key_hashers to use as item key + if len(param_types) - len(params) == 1: + item_key = item_key_obj[1] + else: + item_key = tuple( + item_key_obj[key + 1] + for key in range(len(params), len(param_types) + 1, 2) + ) + + except Exception as _: + if not ignore_decoding_errors: + raise + item_key = None + + try: + try: + item_bytes = bytes.fromhex(item[1][2:]) + except ValueError: + item_bytes = bytes.fromhex(item[1]) + + item_value = await self.decode_scale( + type_string=value_type, + scale_bytes=item_bytes, + 
return_scale_obj=True, + ) + except Exception as _: + if not ignore_decoding_errors: + raise + item_value = None + + result.append([item_key, item_value]) + + return QueryMapResult( + records=result, + page_size=page_size, + module=module, + storage_function=storage_function, + params=params, + block_hash=block_hash, + substrate=self, + last_key=last_key, + max_results=max_results, + ignore_decoding_errors=ignore_decoding_errors, + ) + + async def submit_extrinsic( + self, + extrinsic: GenericExtrinsic, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, + ) -> "ExtrinsicReceipt": + """ + Submit an extrinsic to the connected node, with the possibility to wait until the extrinsic is included + in a block and/or the block is finalized. The receipt returned provided information about the block and + triggered events + + Parameters + ---------- + extrinsic: Extrinsic The extrinsic to be sent to the network + wait_for_inclusion: wait until extrinsic is included in a block (only works for websocket connections) + wait_for_finalization: wait until extrinsic is finalized (only works for websocket connections) + + Returns + ------- + ExtrinsicReceipt + + """ + + # Check requirements + if not isinstance(extrinsic, GenericExtrinsic): + raise TypeError("'extrinsic' must be of type Extrinsics") + + async def result_handler(message: dict, subscription_id) -> tuple[dict, bool]: + """ + Result handler function passed as an arg to _make_rpc_request as the result_handler + to handle the results of the extrinsic rpc call, which are multipart, and require + subscribing to the message + + :param message: message received from the rpc call + :param subscription_id: subscription id received from the initial rpc call for the subscription + + :returns: tuple containing the dict of the block info for the subscription, and bool for whether + the subscription is completed. 
+ """ + # Check if extrinsic is included and finalized + if "params" in message and isinstance(message["params"]["result"], dict): + # Convert result enum to lower for backwards compatibility + message_result = { + k.lower(): v for k, v in message["params"]["result"].items() + } + + if "finalized" in message_result and wait_for_finalization: + # Created as a task because we don't actually care about the result + self._forgettable_task = asyncio.create_task( + self.rpc_request("author_unwatchExtrinsic", [subscription_id]) + ) + return { + "block_hash": message_result["finalized"], + "extrinsic_hash": "0x{}".format(extrinsic.extrinsic_hash.hex()), + "finalized": True, + }, True + elif ( + "inblock" in message_result + and wait_for_inclusion + and not wait_for_finalization + ): + # Created as a task because we don't actually care about the result + self._forgettable_task = asyncio.create_task( + self.rpc_request("author_unwatchExtrinsic", [subscription_id]) + ) + return { + "block_hash": message_result["inblock"], + "extrinsic_hash": "0x{}".format(extrinsic.extrinsic_hash.hex()), + "finalized": False, + }, True + return message, False + + if wait_for_inclusion or wait_for_finalization: + responses = ( + await self._make_rpc_request( + [ + self.make_payload( + "rpc_request", + "author_submitAndWatchExtrinsic", + [str(extrinsic.data)], + ) + ], + result_handler=result_handler, + ) + )["rpc_request"] + response = next( + (r for r in responses if "block_hash" in r and "extrinsic_hash" in r), + None, + ) + + if not response: + raise SubstrateRequestException(responses) + + # Also, this will be a multipart response, so maybe should change to everything after the first response? + # The following code implies this will be a single response after the initial subscription id. 
+ result = ExtrinsicReceipt( + substrate=self, + extrinsic_hash=response["extrinsic_hash"], + block_hash=response["block_hash"], + finalized=response["finalized"], + ) + + else: + response = await self.rpc_request( + "author_submitExtrinsic", [str(extrinsic.data)] + ) + + if "result" not in response: + raise SubstrateRequestException(response.get("error")) + + result = ExtrinsicReceipt(substrate=self, extrinsic_hash=response["result"]) + + return result + + async def get_metadata_call_function( + self, + module_name: str, + call_function_name: str, + block_hash: Optional[str] = None, + ) -> Optional[list]: + """ + Retrieves a list of all call functions in metadata active for given block_hash (or chaintip if block_hash + is omitted) + + :param module_name: name of the module + :param call_function_name: name of the call function + :param block_hash: optional block hash + + :return: list of call functions + """ + runtime = await self.init_runtime(block_hash=block_hash) + + for pallet in runtime.metadata.pallets: + if pallet.name == module_name and pallet.calls: + for call in pallet.calls: + if call.name == call_function_name: + return call + return None + + async def get_block_number(self, block_hash: Optional[str] = None) -> int: + """Async version of `substrateinterface.base.get_block_number` method.""" + response = await self.rpc_request("chain_getHeader", [block_hash]) + + if "error" in response: + raise SubstrateRequestException(response["error"]["message"]) + + elif "result" in response: + if response["result"]: + return int(response["result"]["number"], 16) + + async def close(self): + """ + Closes the substrate connection, and the websocket connection. 
+ """ + try: + await self.ws.shutdown() + except AttributeError: + pass diff --git a/bittensor/utils/delegates_details.py b/bittensor/utils/delegates_details.py new file mode 100644 index 0000000000..88a5633e76 --- /dev/null +++ b/bittensor/utils/delegates_details.py @@ -0,0 +1,43 @@ +from dataclasses import dataclass +from typing import Any, Optional + + +@dataclass +class DelegatesDetails: + display: str + additional: list[tuple[str, str]] + web: str + legal: Optional[str] = None + riot: Optional[str] = None + email: Optional[str] = None + pgp_fingerprint: Optional[str] = None + image: Optional[str] = None + twitter: Optional[str] = None + + @classmethod + def from_chain_data(cls, data: dict[str, Any]) -> "DelegatesDetails": + def decode(key: str, default: Optional[str] = ""): + try: + if isinstance(data.get(key), dict): + value = next(data.get(key).values()) + return bytes(value[0]).decode("utf-8") + elif isinstance(data.get(key), int): + return data.get(key) + elif isinstance(data.get(key), tuple): + return bytes(data.get(key)[0]).decode("utf-8") + else: + return default + except (UnicodeDecodeError, TypeError): + return default + + return cls( + display=decode("display"), + additional=decode("additional", []), + web=decode("web"), + legal=decode("legal"), + riot=decode("riot"), + email=decode("email"), + pgp_fingerprint=decode("pgp_fingerprint", None), + image=decode("image"), + twitter=decode("twitter"), + ) diff --git a/bittensor/utils/deprecated.py b/bittensor/utils/deprecated.py index 146e8395d0..124c0daac9 100644 --- a/bittensor/utils/deprecated.py +++ b/bittensor/utils/deprecated.py @@ -45,6 +45,7 @@ from bittensor_wallet import Keypair # noqa: F401 from bittensor.core import settings +from bittensor.core.async_subtensor import AsyncSubtensor from bittensor.core.axon import Axon from bittensor.core.chain_data import ( # noqa: F401 AxonInfo, @@ -116,6 +117,7 @@ from bittensor.utils.subnets import SubnetsAPI # noqa: F401 # Backwards compatibility with 
previous bittensor versions. +async_subtensor = AsyncSubtensor axon = Axon config = Config dendrite = Dendrite diff --git a/requirements/prod.txt b/requirements/prod.txt index 17c73f6f25..bb8e243948 100644 --- a/requirements/prod.txt +++ b/requirements/prod.txt @@ -1,6 +1,8 @@ wheel setuptools~=70.0.0 aiohttp~=3.9 +async-property==0.2.2 +backoff bittensor-cli bt-decode colorama~=0.4.6 diff --git a/tests/e2e_tests/conftest.py b/tests/e2e_tests/conftest.py index 59170c9512..4a7b2ccf62 100644 --- a/tests/e2e_tests/conftest.py +++ b/tests/e2e_tests/conftest.py @@ -8,7 +8,7 @@ import pytest from substrateinterface import SubstrateInterface -from bittensor import logging +from bittensor.utils.btlogging import logging from tests.e2e_tests.utils.e2e_test_utils import ( clone_or_update_templates, install_templates, diff --git a/tests/e2e_tests/test_axon.py b/tests/e2e_tests/test_axon.py index 853719f85d..b5d18c5729 100644 --- a/tests/e2e_tests/test_axon.py +++ b/tests/e2e_tests/test_axon.py @@ -4,8 +4,8 @@ import pytest import bittensor -from bittensor import logging from bittensor.utils import networking +from bittensor.utils.btlogging import logging from tests.e2e_tests.utils.chain_interactions import register_neuron, register_subnet from tests.e2e_tests.utils.e2e_test_utils import ( setup_wallet, diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py index ca9b0a0a2c..962a061a9a 100644 --- a/tests/e2e_tests/test_commit_weights.py +++ b/tests/e2e_tests/test_commit_weights.py @@ -3,8 +3,9 @@ import numpy as np import pytest -import bittensor -from bittensor import logging +from bittensor.core.subtensor import Subtensor +from bittensor.utils.balance import Balance +from bittensor.utils.btlogging import logging from bittensor.utils.weight_utils import convert_weights_and_uids_for_emit from tests.e2e_tests.utils.chain_interactions import ( add_stake, @@ -48,7 +49,7 @@ async def test_commit_and_reveal_weights(local_chain): ), "Unable to 
register Alice as a neuron" # Stake to become to top neuron after the first epoch - add_stake(local_chain, alice_wallet, bittensor.Balance.from_tao(100_000)) + add_stake(local_chain, alice_wallet, Balance.from_tao(100_000)) # Enable commit_reveal on the subnet assert sudo_set_hyperparameter_bool( @@ -59,7 +60,7 @@ async def test_commit_and_reveal_weights(local_chain): netuid, ), "Unable to enable commit reveal on the subnet" - subtensor = bittensor.Subtensor(network="ws://localhost:9945") + subtensor = Subtensor(network="ws://localhost:9945") assert subtensor.get_subnet_hyperparameters( netuid=netuid ).commit_reveal_weights_enabled, "Failed to enable commit/reveal" @@ -73,7 +74,7 @@ async def test_commit_and_reveal_weights(local_chain): return_error_message=True, ) - subtensor = bittensor.Subtensor(network="ws://localhost:9945") + subtensor = Subtensor(network="ws://localhost:9945") assert ( subtensor.get_subnet_hyperparameters( netuid=netuid @@ -92,7 +93,7 @@ async def test_commit_and_reveal_weights(local_chain): call_params={"netuid": netuid, "weights_set_rate_limit": "0"}, return_error_message=True, ) - subtensor = bittensor.Subtensor(network="ws://localhost:9945") + subtensor = Subtensor(network="ws://localhost:9945") assert ( subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 ), "Failed to set weights_rate_limit" diff --git a/tests/e2e_tests/test_dendrite.py b/tests/e2e_tests/test_dendrite.py index e075326ca5..279e151346 100644 --- a/tests/e2e_tests/test_dendrite.py +++ b/tests/e2e_tests/test_dendrite.py @@ -3,20 +3,21 @@ import pytest -import bittensor -from bittensor import logging, Subtensor - -from tests.e2e_tests.utils.e2e_test_utils import ( - setup_wallet, - template_path, - templates_repo, -) +from bittensor.core.metagraph import Metagraph +from bittensor.core.subtensor import Subtensor +from bittensor.utils.balance import Balance +from bittensor.utils.btlogging import logging from tests.e2e_tests.utils.chain_interactions 
import ( register_neuron, register_subnet, add_stake, wait_epoch, ) +from tests.e2e_tests.utils.e2e_test_utils import ( + setup_wallet, + template_path, + templates_repo, +) @pytest.mark.asyncio @@ -56,7 +57,7 @@ async def test_dendrite(local_chain): local_chain, bob_wallet, netuid ), f"Neuron wasn't registered to subnet {netuid}" - metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945") + metagraph = Metagraph(netuid=netuid, network="ws://localhost:9945") subtensor = Subtensor(network="ws://localhost:9945") # Assert one neuron is Bob @@ -69,10 +70,10 @@ async def test_dendrite(local_chain): assert neuron.stake.tao == 0 # Stake to become to top neuron after the first epoch - assert add_stake(local_chain, bob_wallet, bittensor.Balance.from_tao(10_000)) + assert add_stake(local_chain, bob_wallet, Balance.from_tao(10_000)) # Refresh metagraph - metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945") + metagraph = Metagraph(netuid=netuid, network="ws://localhost:9945") old_neuron = metagraph.neurons[0] # Assert stake is 10000 @@ -121,7 +122,7 @@ async def test_dendrite(local_chain): await wait_epoch(subtensor, netuid=netuid) # Refresh metagraph - metagraph = bittensor.Metagraph(netuid=netuid, network="ws://localhost:9945") + metagraph = Metagraph(netuid=netuid, network="ws://localhost:9945") # Refresh validator neuron updated_neuron = metagraph.neurons[0] diff --git a/tests/e2e_tests/test_liquid_alpha.py b/tests/e2e_tests/test_liquid_alpha.py index d73162fbb4..4725704f61 100644 --- a/tests/e2e_tests/test_liquid_alpha.py +++ b/tests/e2e_tests/test_liquid_alpha.py @@ -1,5 +1,6 @@ -import bittensor -from bittensor import logging +from bittensor.core.subtensor import Subtensor +from bittensor.utils.balance import Balance +from bittensor.utils.btlogging import logging from tests.e2e_tests.utils.chain_interactions import ( add_stake, register_neuron, @@ -49,10 +50,10 @@ def test_liquid_alpha(local_chain): ), "Unable to register 
Alice as a neuron" # Stake to become to top neuron after the first epoch - add_stake(local_chain, alice_wallet, bittensor.Balance.from_tao(100_000)) + add_stake(local_chain, alice_wallet, Balance.from_tao(100_000)) # Assert liquid alpha is disabled - subtensor = bittensor.Subtensor(network="ws://localhost:9945") + subtensor = Subtensor(network="ws://localhost:9945") assert ( subtensor.get_subnet_hyperparameters(netuid=netuid).liquid_alpha_enabled is False @@ -118,7 +119,7 @@ def test_liquid_alpha(local_chain): alpha_high_too_high = u16_max + 1 # One more than the max acceptable value call_params = liquid_alpha_call_params(netuid, f"6553, {alpha_high_too_high}") try: - result, error_message = sudo_set_hyperparameter_values( + sudo_set_hyperparameter_values( local_chain, alice_wallet, call_function="sudo_set_alpha_values", diff --git a/tests/e2e_tests/test_metagraph.py b/tests/e2e_tests/test_metagraph.py index ff16dde369..8999b30358 100644 --- a/tests/e2e_tests/test_metagraph.py +++ b/tests/e2e_tests/test_metagraph.py @@ -1,7 +1,8 @@ import time -import bittensor -from bittensor import logging +from bittensor.core.subtensor import Subtensor +from bittensor.utils.balance import Balance +from bittensor.utils.btlogging import logging from tests.e2e_tests.utils.chain_interactions import ( add_stake, register_neuron, @@ -64,7 +65,7 @@ def test_metagraph(local_chain): ).serialize(), "Subnet wasn't created successfully" # Initialize metagraph - subtensor = bittensor.Subtensor(network="ws://localhost:9945") + subtensor = Subtensor(network="ws://localhost:9945") metagraph = subtensor.metagraph(netuid=1) # Assert metagraph is empty @@ -129,17 +130,17 @@ def test_metagraph(local_chain): # Test staking with low balance assert not add_stake( - local_chain, dave_wallet, bittensor.Balance.from_tao(10_000) + local_chain, dave_wallet, Balance.from_tao(10_000) ), "Low balance stake should fail" # Add stake by Bob assert add_stake( - local_chain, bob_wallet, 
bittensor.Balance.from_tao(10_000) + local_chain, bob_wallet, Balance.from_tao(10_000) ), "Failed to add stake for Bob" # Assert stake is added after updating metagraph metagraph.sync(subtensor=subtensor) - assert metagraph.neurons[0].stake == bittensor.Balance.from_tao( + assert metagraph.neurons[0].stake == Balance.from_tao( 10_000 ), "Bob's stake not updated in metagraph" diff --git a/tests/e2e_tests/test_subtensor_functions.py b/tests/e2e_tests/test_subtensor_functions.py index 32d0f6e14d..ffa7b716ee 100644 --- a/tests/e2e_tests/test_subtensor_functions.py +++ b/tests/e2e_tests/test_subtensor_functions.py @@ -3,8 +3,8 @@ import pytest -import bittensor -from bittensor import logging +from bittensor.core.subtensor import Subtensor +from bittensor.utils.btlogging import logging from tests.e2e_tests.utils.chain_interactions import ( register_neuron, register_subnet, @@ -31,7 +31,7 @@ async def test_subtensor_extrinsics(local_chain): AssertionError: If any of the checks or verifications fail """ netuid = 1 - subtensor = bittensor.Subtensor(network="ws://localhost:9945") + subtensor = Subtensor(network="ws://localhost:9945") # Subnets 0 and 3 are bootstrapped from the start assert subtensor.get_subnets() == [0, 3] @@ -139,7 +139,7 @@ async def test_subtensor_extrinsics(local_chain): await asyncio.sleep( 5 ) # wait for 5 seconds for the metagraph and subtensor to refresh with latest data - subtensor = bittensor.Subtensor(network="ws://localhost:9945") + subtensor = Subtensor(network="ws://localhost:9945") # Verify neuron info is updated after running as a validator neuron_info = subtensor.get_neuron_for_pubkey_and_subnet( diff --git a/tests/e2e_tests/utils/chain_interactions.py b/tests/e2e_tests/utils/chain_interactions.py index aad53812c8..20e4a65dea 100644 --- a/tests/e2e_tests/utils/chain_interactions.py +++ b/tests/e2e_tests/utils/chain_interactions.py @@ -6,7 +6,7 @@ import asyncio from typing import Union, Optional, TYPE_CHECKING -from bittensor import logging 
+from bittensor.utils.btlogging import logging # for typing purposes if TYPE_CHECKING: diff --git a/tests/unit_tests/extrinsics/test_init.py b/tests/unit_tests/extrinsics/test_init.py index 8a2480a9b9..8ff60d2de6 100644 --- a/tests/unit_tests/extrinsics/test_init.py +++ b/tests/unit_tests/extrinsics/test_init.py @@ -1,9 +1,10 @@ """Tests for bittensor/extrinsics/__ini__ module.""" from bittensor.utils import format_error_message +from tests.unit_tests.extrinsics.test_commit_weights import subtensor -def test_format_error_message_with_right_error_message(): +def test_format_error_message_with_right_error_message(mocker): """Verify that error message from extrinsic response parses correctly.""" # Prep fake_error_message = { @@ -13,7 +14,7 @@ def test_format_error_message_with_right_error_message(): } # Call - result = format_error_message(fake_error_message) + result = format_error_message(fake_error_message, substrate=mocker.MagicMock()) # Assertions @@ -22,13 +23,13 @@ def test_format_error_message_with_right_error_message(): assert "Some error description." 
in result -def test_format_error_message_with_empty_error_message(): +def test_format_error_message_with_empty_error_message(mocker): """Verify that empty error message from extrinsic response parses correctly.""" # Prep fake_error_message = {} # Call - result = format_error_message(fake_error_message) + result = format_error_message(fake_error_message, substrate=mocker.MagicMock()) # Assertions @@ -37,13 +38,13 @@ def test_format_error_message_with_empty_error_message(): assert "Unknown Description" in result -def test_format_error_message_with_wrong_type_error_message(): +def test_format_error_message_with_wrong_type_error_message(mocker): """Verify that error message from extrinsic response with wrong type parses correctly.""" # Prep fake_error_message = None # Call - result = format_error_message(fake_error_message) + result = format_error_message(fake_error_message, substrate=mocker.MagicMock()) # Assertions From f72a66b0986cfc343b9e88fe7efa92f5d872fa8b Mon Sep 17 00:00:00 2001 From: Roman Date: Mon, 4 Nov 2024 14:32:40 -0800 Subject: [PATCH 42/58] async_transfer.py: remove prompt, refactoring --- bittensor/core/extrinsics/async_transfer.py | 43 ++++++++------------- 1 file changed, 16 insertions(+), 27 deletions(-) diff --git a/bittensor/core/extrinsics/async_transfer.py b/bittensor/core/extrinsics/async_transfer.py index b9072ae9b8..8d68684d16 100644 --- a/bittensor/core/extrinsics/async_transfer.py +++ b/bittensor/core/extrinsics/async_transfer.py @@ -3,7 +3,6 @@ from bittensor_wallet import Wallet from bittensor_wallet.errors import KeyFileError -from rich.prompt import Confirm from substrateinterface.exceptions import SubstrateRequestException from bittensor.core.settings import NETWORK_EXPLORER_MAP @@ -23,28 +22,26 @@ async def transfer_extrinsic( subtensor: "AsyncSubtensor", wallet: Wallet, destination: str, - amount: Balance, + amount: "Balance", transfer_all: bool = False, wait_for_inclusion: bool = True, wait_for_finalization: bool = False, 
keep_alive: bool = True, - prompt: bool = False, ) -> bool: """Transfers funds from this wallet to the destination public key address. - :param subtensor: initialized AsyncSubtensor object used for transfer - :param wallet: Bittensor wallet object to make transfer from. - :param destination: Destination public key address (ss58_address or ed25519) of recipient. - :param amount: Amount to stake as Bittensor balance. - :param transfer_all: Whether to transfer all funds from this wallet to the destination address. - :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, - or returns `False` if the extrinsic fails to enter the block within the timeout. - :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning - `True`, or returns `False` if the extrinsic fails to be finalized within the timeout. - :param keep_alive: If set, keeps the account alive by keeping the balance above the existential deposit. - :param prompt: If `True`, the call waits for confirmation from the user before proceeding. - :return: success: Flag is `True` if extrinsic was finalized or included in the block. If we did not wait for - finalization / inclusion, the response is `True`, regardless of its inclusion. + Args: + subtensor (bittensor.core.async_subtensor.AsyncSubtensor): initialized AsyncSubtensor object used for transfer + wallet (bittensor_wallet.Wallet): Bittensor wallet object to make transfer from. + destination (str): Destination public key address (ss58_address or ed25519) of recipient. + amount (bittensor.utils.balance.Balance): Amount to stake as Bittensor balance. + transfer_all (bool): Whether to transfer all funds from this wallet to the destination address. + wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout. 
+ wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout. + keep_alive (bool): If set, keeps the account alive by keeping the balance above the existential deposit. + + Returns: + success (bool): Flag is `True` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is `True`, regardless of its inclusion. """ async def get_transfer_fee() -> Balance: @@ -76,7 +73,9 @@ async def get_transfer_fee() -> Balance: async def do_transfer() -> tuple[bool, str, str]: """ Makes transfer from wallet to destination public key address. - :return: success, block hash, formatted error message + + Returns: + success, block hash, formatted error message """ call = await subtensor.substrate.compose_call( call_module="Balances", @@ -155,16 +154,6 @@ async def do_transfer() -> tuple[bool, str, str]: logging.error(f"\t\tFor fee:\t{fee}") return False - # Ask before moving on. - if prompt: - if not Confirm.ask( - "Do you want to transfer:[bold white]\n" - f" amount: [bright_cyan]{amount}[/bright_cyan]\n" - f" from: [light_goldenrod2]{wallet.name}[/light_goldenrod2] : [bright_magenta]{wallet.coldkey.ss58_address}\n[/bright_magenta]" - f" to: [bright_magenta]{destination}[/bright_magenta]\n for fee: [bright_cyan]{fee}[/bright_cyan]" - ): - return False - logging.info(":satellite: Transferring... 
Date: Mon, 4 Nov 2024 15:13:13 -0800 Subject: [PATCH 43/58] async_registration.py --- .../core/extrinsics/async_registration.py | 381 ++++++++---------- 1 file changed, 167 insertions(+), 214 deletions(-) diff --git a/bittensor/core/extrinsics/async_registration.py b/bittensor/core/extrinsics/async_registration.py index 4da7785b1b..8ab8cef80f 100644 --- a/bittensor/core/extrinsics/async_registration.py +++ b/bittensor/core/extrinsics/async_registration.py @@ -9,14 +9,13 @@ import random import subprocess import time -import typing from contextlib import redirect_stdout from dataclasses import dataclass from datetime import timedelta from multiprocessing import Process, Event, Lock, Array, Value, Queue from multiprocessing.queues import Queue as Queue_Type from queue import Empty, Full -from typing import Optional +from typing import Optional, Union, TYPE_CHECKING, Callable, Any import backoff import numpy as np @@ -24,7 +23,6 @@ from bittensor_wallet import Wallet from bittensor_wallet.errors import KeyFileError from rich.console import Console -from rich.prompt import Confirm from rich.status import Status from substrateinterface.exceptions import SubstrateRequestException @@ -33,7 +31,7 @@ from bittensor.utils.btlogging import logging from bittensor.utils.formatting import millify, get_human_readable -if typing.TYPE_CHECKING: +if TYPE_CHECKING: from bittensor.core.async_subtensor import AsyncSubtensor @@ -45,13 +43,15 @@ def use_torch() -> bool: return True if os.getenv("USE_TORCH") == "1" else False -def legacy_torch_api_compat(func: typing.Callable): +def legacy_torch_api_compat(func: Callable): """ Convert function operating on numpy Input&Output to legacy torch Input&Output API if `use_torch()` is True. - :param func: Function with numpy Input/Output to be decorated. + Args: + func: Function with numpy Input/Output to be decorated. 
- :return: Decorated function + Returns: + Decorated function """ @functools.wraps(func) @@ -101,7 +101,9 @@ class POWSolution: seal: bytes async def is_stale(self, subtensor: "AsyncSubtensor") -> bool: - """Returns True if the POW is stale. + """ + Returns True if the POW is stale. + This means the block the POW is solved for is within 3 blocks of the current block. """ current_block = await subtensor.substrate.get_block_number(None) @@ -153,9 +155,7 @@ def stop(self) -> None: def get_status_message( cls, stats: RegistrationStatistics, verbose: bool = False ) -> str: - """ - Provides a message of the current status of the block solving as a str for a logger or stdout - """ + """Provides a message of the current status of the block solving as a str for a logger or stdout.""" message = ( "Solving\n" + f"Time Spent (total): [bold white]{timedelta(seconds=stats.time_spent_total)}[/bold white]\n" @@ -174,9 +174,7 @@ def get_status_message( return message def update(self, stats: RegistrationStatistics, verbose: bool = False) -> None: - """ - Passes the current status to the logger - """ + """Passes the current status to the logger.""" if self.status is not None: self.status.update(self.get_status_message(stats, verbose=verbose)) else: @@ -187,34 +185,21 @@ class _SolverBase(Process): """ A process that solves the registration PoW problem. - :param proc_num: The number of the process being created. - :param num_proc: The total number of processes running. - :param update_interval: The number of nonces to try to solve before checking for a new block. - :param finished_queue: The queue to put the process number when a process finishes each update_interval. - Used for calculating the average time per update_interval across all processes. - :param solution_queue: The queue to put the solution the process has found during the pow solve. - :param stop_event: The event to set by the main process when all the solver processes should stop. 
- The solver process will check for the event after each update_interval. - The solver process will stop when the event is set. - Used to stop the solver processes when a solution is found. - :param curr_block: The array containing this process's current block hash. - The main process will set the array to the new block hash when a new block is finalized in the - network. The solver process will get the new block hash from this array when newBlockEvent is set - :param curr_block_num: The value containing this process's current block number. - The main process will set the value to the new block number when a new block is finalized in - the network. The solver process will get the new block number from this value when - new_block_event is set. - :param curr_diff: The array containing this process's current difficulty. The main process will set the array to - the new difficulty when a new block is finalized in the network. The solver process will get the - new difficulty from this array when newBlockEvent is set. - :param check_block: The lock to prevent this process from getting the new block data while the main process is - updating the data. - :param limit: The limit of the pow solve for a valid solution. - - :var new_block_event: The event to set by the main process when a new block is finalized in the network. - The solver process will check for the event after each update_interval. - The solver process will get the new block hash and difficulty and start solving for a new - nonce. + Args: + proc_num: The number of the process being created. + num_proc: The total number of processes running. + update_interval: The number of nonces to try to solve before checking for a new block. + finished_queue: The queue to put the process number when a process finishes each update_interval. Used for calculating the average time per update_interval across all processes. + solution_queue: The queue to put the solution the process has found during the pow solve. 
+ stop_event: The event to set by the main process when all the solver processes should stop. The solver process will check for the event after each update_interval. The solver process will stop when the event is set. Used to stop the solver processes when a solution is found. + curr_block: The array containing this process's current block hash. The main process will set the array to the new block hash when a new block is finalized in the network. The solver process will get the new block hash from this array when newBlockEvent is set + curr_block_num: The value containing this process's current block number. The main process will set the value to the new block number when a new block is finalized in the network. The solver process will get the new block number from this value when new_block_event is set. + curr_diff: The array containing this process's current difficulty. The main process will set the array to the new difficulty when a new block is finalized in the network. The solver process will get the new difficulty from this array when newBlockEvent is set. + check_block: The lock to prevent this process from getting the new block data while the main process is updating the data. + limit: The limit of the pow solve for a valid solution. + + Returns: + new_block_event: The event to set by the main process when a new block is finalized in the network. The solver process will check for the event after each update_interval. The solver process will get the new block hash and difficulty and start solving for a new nonce. 
""" proc_num: int @@ -274,9 +259,7 @@ def create_shared_memory() -> tuple[Array, Value, Array]: class _Solver(_SolverBase): - """ - Performs POW Solution - """ + """Performs POW Solution.""" def run(self): block_number: int @@ -320,9 +303,7 @@ def run(self): class _CUDASolver(_SolverBase): - """ - Performs POW Solution using CUDA - """ + """Performs POW Solution using CUDA.""" dev_id: int tpb: int @@ -414,22 +395,18 @@ def __getattr__(self, name): raise ImportError("torch not installed") -if typing.TYPE_CHECKING: +if TYPE_CHECKING: import torch else: torch = LazyLoadedTorch() class MaxSuccessException(Exception): - """ - Raised when the POW Solver has reached the max number of successful solutions - """ + """Raised when the POW Solver has reached the max number of successful solutions.""" class MaxAttemptsException(Exception): - """ - Raised when the POW Solver has reached the max number of attempts - """ + """Raised when the POW Solver has reached the max number of attempts.""" async def is_hotkey_registered( @@ -453,11 +430,10 @@ async def register_extrinsic( netuid: int, wait_for_inclusion: bool = False, wait_for_finalization: bool = True, - prompt: bool = False, max_allowed_attempts: int = 3, output_in_place: bool = True, cuda: bool = False, - dev_id: typing.Union[list[int], int] = 0, + dev_id: Union[list[int], int] = 0, tpb: int = 256, num_processes: Optional[int] = None, update_interval: Optional[int] = None, @@ -465,25 +441,23 @@ async def register_extrinsic( ) -> bool: """Registers the wallet to the chain. - :param subtensor: initialized AsyncSubtensor object to use for chain interactions - :param wallet: Bittensor wallet object. - :param netuid: The ``netuid`` of the subnet to register on. - :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, or returns - `False` if the extrinsic fails to enter the block within the timeout. 
- :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`, - or returns `False` if the extrinsic fails to be finalized within the timeout. - :param prompt: If `True`, the call waits for confirmation from the user before proceeding. - :param max_allowed_attempts: Maximum number of attempts to register the wallet. - :param output_in_place: Whether the POW solving should be outputted to the console as it goes along. - :param cuda: If `True`, the wallet should be registered using CUDA device(s). - :param dev_id: The CUDA device id to use, or a list of device ids. - :param tpb: The number of threads per block (CUDA). - :param num_processes: The number of processes to use to register. - :param update_interval: The number of nonces to solve between updates. - :param log_verbose: If `True`, the registration process will log more information. - - :return: `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, - the response is `True`. + Args: + subtensor (bittensor.core.async_subtensor.AsyncSubtensor): initialized AsyncSubtensor object to use for chain interactions + wallet (bittensor_wallet.Wallet): Bittensor wallet object. + netuid (int): The ``netuid`` of the subnet to register on. + wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout. + wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout. + max_allowed_attempts (int): Maximum number of attempts to register the wallet. + output_in_place (bool): Whether the POW solving should be outputted to the console as it goes along. + cuda (bool): If `True`, the wallet should be registered using CUDA device(s). 
+ dev_id: The CUDA device id to use, or a list of device ids. + tpb: The number of threads per block (CUDA). + num_processes: The number of processes to use to register. + update_interval: The number of nonces to solve between updates. + log_verbose: If `True`, the registration process will log more information. + + Returns: + `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is `True`. """ async def get_neuron_for_pubkey_and_subnet(): @@ -521,15 +495,6 @@ async def get_neuron_for_pubkey_and_subnet(): ) return True - if prompt: - if not Confirm.ask( - f"Continue Registration?\n" - f" hotkey ({wallet.hotkey_str}):\t[bold white]{wallet.hotkey.ss58_address}[/bold white]\n" - f" coldkey ({wallet.name}):\t[bold white]{wallet.coldkeypub.ss58_address}[/bold white]\n" - f" network:\t\t[bold white]{subtensor.network}[/bold white]" - ): - return False - if not torch: log_no_torch_error() return False @@ -544,8 +509,6 @@ async def get_neuron_for_pubkey_and_subnet(): # Solve latest POW. if cuda: if not torch.cuda.is_available(): - if prompt: - logging.info("CUDA is not available.") return False pow_result = await create_pow( subtensor, @@ -674,7 +637,6 @@ async def run_faucet_extrinsic( wallet: Wallet, wait_for_inclusion: bool = False, wait_for_finalization: bool = True, - prompt: bool = False, max_allowed_attempts: int = 3, output_in_place: bool = True, cuda: bool = False, @@ -685,36 +647,26 @@ async def run_faucet_extrinsic( log_verbose: bool = False, max_successes: int = 3, ) -> tuple[bool, str]: - r"""Runs a continual POW to get a faucet of TAO on the test net. - - :param subtensor: The subtensor interface object used to run the extrinsic - :param wallet: Bittensor wallet object. - :param prompt: If `True`, the call waits for confirmation from the user before proceeding. 
- :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, - or returns `False` if the extrinsic fails to enter the block within the timeout. - :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`, - or returns `False` if the extrinsic fails to be finalized within the timeout. - :param max_allowed_attempts: Maximum number of attempts to register the wallet. - :param output_in_place: Whether to output logging data as the process runs. - :param cuda: If `True`, the wallet should be registered using CUDA device(s). - :param dev_id: The CUDA device id to use - :param tpb: The number of threads per block (CUDA). - :param num_processes: The number of processes to use to register. - :param update_interval: The number of nonces to solve between updates. - :param log_verbose: If `True`, the registration process will log more information. - :param max_successes: The maximum number of successful faucet runs for the wallet. - - :return: `True` if extrinsic was finalized or included in the block. If we did not wait for - finalization/inclusion, the response is also `True` + """Runs a continual POW to get a faucet of TAO on the test net. + + Args: + subtensor: The subtensor interface object used to run the extrinsic + wallet: Bittensor wallet object. + wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout. + wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout. + max_allowed_attempts: Maximum number of attempts to register the wallet. + output_in_place: Whether to output logging data as the process runs. + cuda: If `True`, the wallet should be registered using CUDA device(s). 
+ dev_id: The CUDA device id to use + tpb: The number of threads per block (CUDA). + num_processes: The number of processes to use to register. + update_interval: The number of nonces to solve between updates. + log_verbose: If `True`, the registration process will log more information. + max_successes: The maximum number of successful faucet runs for the wallet. + + Returns: + `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is also `True` """ - if prompt: - if not Confirm.ask( - "Run Faucet?\n" - f" wallet name: [bold white]{wallet.name}[/bold white]\n" - f" coldkey: [bold white]{wallet.coldkeypub.ss58_address}[/bold white]\n" - f" network: [bold white]{subtensor}[/bold white]" - ): - return False, "" if not torch: log_no_torch_error() @@ -739,8 +691,6 @@ async def run_faucet_extrinsic( # Solve latest POW. if cuda: if not torch.cuda.is_available(): - if prompt: - logging.error("CUDA is not available.") return False, "CUDA is not available." pow_result: Optional[POWSolution] = await create_pow( subtensor, @@ -829,27 +779,29 @@ async def _check_for_newest_block_and_update( curr_diff: Array, curr_block: Array, curr_block_num: Value, - update_curr_block: typing.Callable, + update_curr_block: "Callable", check_block: Lock, solvers: list[_Solver], - curr_stats: RegistrationStatistics, + curr_stats: "RegistrationStatistics", ) -> int: """ Checks for a new block and updates the current block information if a new block is found. - :param subtensor: The subtensor object to use for getting the current block. - :param netuid: The netuid to use for retrieving the difficulty. - :param old_block_number: The old block number to check against. - :param hotkey_bytes: The bytes of the hotkey's pubkey. - :param curr_diff: The current difficulty as a multiprocessing array. - :param curr_block: Where the current block is stored as a multiprocessing array. 
- :param curr_block_num: Where the current block number is stored as a multiprocessing value. - :param update_curr_block: A function that updates the current block. - :param check_block: A mp lock that is used to check for a new block. - :param solvers: A list of solvers to update the current block for. - :param curr_stats: The current registration statistics to update. - - :return: The current block number. + Args: + subtensor: The subtensor object to use for getting the current block. + netuid: The netuid to use for retrieving the difficulty. + old_block_number: The old block number to check against. + hotkey_bytes: The bytes of the hotkey's pubkey. + curr_diff: The current difficulty as a multiprocessing array. + curr_block: Where the current block is stored as a multiprocessing array. + curr_block_num: Where the current block number is stored as a multiprocessing value. + update_curr_block: A function that updates the current block. + check_block: A mp lock that is used to check for a new block. + solvers: A list of solvers to update the current block for. + curr_stats: The current registration statistics to update. + + Returns: + The current block number. """ block_number = await subtensor.substrate.get_block_number(None) if block_number != old_block_number: @@ -900,13 +852,11 @@ async def _block_solver( log_verbose, cuda: bool, ): - """ - Shared code used by the Solvers to solve the POW solution - """ + """Shared code used by the Solvers to solve the POW solution.""" limit = int(math.pow(2, 256)) - 1 # Establish communication queues - ## See the _Solver class for more information on the queues. + # See the _Solver class for more information on the queues. 
stop_event = Event() stop_event.clear() @@ -919,7 +869,7 @@ async def _block_solver( ) if cuda: - ## Create a worker per CUDA device + # Create a worker per CUDA device num_processes = len(dev_id) solvers = [ _CUDASolver( @@ -1103,27 +1053,28 @@ async def _solve_for_difficulty_fast_cuda( output_in_place: bool = True, update_interval: int = 50_000, tpb: int = 512, - dev_id: typing.Union[list[int], int] = 0, + dev_id: Union[list[int], int] = 0, n_samples: int = 10, alpha_: float = 0.80, log_verbose: bool = False, -) -> Optional[POWSolution]: +) -> Optional["POWSolution"]: """ Solves the registration fast using CUDA - :param subtensor: The subtensor node to grab blocks - :param wallet: The wallet to register - :param netuid: The netuid of the subnet to register to. - :param output_in_place: If true, prints the output in place, otherwise prints to new lines - :param update_interval: The number of nonces to try before checking for more blocks - :param tpb: The number of threads per block. CUDA param that should match the GPU capability - :param dev_id: The CUDA device IDs to execute the registration on, either a single device or a list of devices - :param n_samples: The number of samples of the hash_rate to keep for the EWMA - :param alpha_: The alpha for the EWMA for the hash_rate calculation - :param log_verbose: If true, prints more verbose logging of the registration metrics. - - Note: The hash rate is calculated as an exponentially weighted moving average in order to make the measure more - robust. + Args: + subtensor: The subtensor node to grab blocks + wallet: The wallet to register + netuid: The netuid of the subnet to register to. + output_in_place: If true, prints the output in place, otherwise prints to new lines + update_interval: The number of nonces to try before checking for more blocks + tpb: The number of threads per block. 
CUDA param that should match the GPU capability + dev_id: The CUDA device IDs to execute the registration on, either a single device or a list of devices + n_samples: The number of samples of the hash_rate to keep for the EWMA + alpha_: The alpha for the EWMA for the hash_rate calculation + log_verbose: If true, prints more verbose logging of the registration metrics. + + Note: + The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust. """ if isinstance(dev_id, int): dev_id = [dev_id] @@ -1175,21 +1126,20 @@ async def _solve_for_difficulty_fast( """ Solves the POW for registration using multiprocessing. - :param subtensor: Subtensor to connect to for block information and to submit. - :param wallet: wallet to use for registration. - :param netuid: The netuid of the subnet to register to. - :param output_in_place: If true, prints the status in place. Otherwise, prints the status on a new line. - :param num_processes: Number of processes to use. - :param update_interval: Number of nonces to solve before updating block information. - :param n_samples: The number of samples of the hash_rate to keep for the EWMA - :param alpha_: The alpha for the EWMA for the hash_rate calculation - :param log_verbose: If true, prints more verbose logging of the registration metrics. + Args: + subtensor: Subtensor to connect to for block information and to submit. + wallet: wallet to use for registration. + netuid: The netuid of the subnet to register to. + output_in_place: If true, prints the status in place. Otherwise, prints the status on a new line. + num_processes: Number of processes to use. + update_interval: Number of nonces to solve before updating block information. + n_samples: The number of samples of the hash_rate to keep for the EWMA + alpha_: The alpha for the EWMA for the hash_rate calculation + log_verbose: If true, prints more verbose logging of the registration metrics. 
Notes: - - - The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust. - - We can also modify the update interval to do smaller blocks of work, while still updating the block information - after a different number of nonces, to increase the transparency of the process while still keeping the speed. + The hash rate is calculated as an exponentially weighted moving average in order to make the measure more robust. + We can also modify the update interval to do smaller blocks of work, while still updating the block information after a different number of nonces, to increase the transparency of the process while still keeping the speed. """ if not num_processes: # get the number of allowed processes for this process @@ -1222,7 +1172,7 @@ async def _solve_for_difficulty_fast( def _terminate_workers_and_wait_for_exit( - workers: list[typing.Union[Process, Queue_Type]], + workers: list[Union[Process, Queue_Type]], ) -> None: for worker in workers: if isinstance(worker, Queue_Type): @@ -1246,13 +1196,16 @@ async def _get_block_with_retry( """ Gets the current block number, difficulty, and block hash from the substrate node. - :param subtensor: The subtensor object to use to get the block number, difficulty, and block hash. - :param netuid: The netuid of the network to get the block number, difficulty, and block hash from. + Args: + subtensor: The subtensor object to use to get the block number, difficulty, and block hash. + netuid: The netuid of the network to get the block number, difficulty, and block hash from. - :return: The current block number, difficulty of the subnet, block hash + Returns: + The current block number, difficulty of the subnet, block hash - :raises Exception: If the block hash is None. - :raises ValueError: If the difficulty is None. + Raises: + Exception: If the block hash is None. + ValueError: If the difficulty is None. 
""" block_number = await subtensor.substrate.get_block_number(None) block_hash = await subtensor.substrate.get_block_hash( @@ -1311,32 +1264,32 @@ async def create_pow( netuid: int, output_in_place: bool = True, cuda: bool = False, - dev_id: typing.Union[list[int], int] = 0, + dev_id: Union[list[int], int] = 0, tpb: int = 256, num_processes: int = None, update_interval: int = None, log_verbose: bool = False, -) -> Optional[dict[str, typing.Any]]: +) -> Optional[dict[str, Any]]: """ Creates a proof of work for the given subtensor and wallet. - :param subtensor: The subtensor to create a proof of work for. - :param wallet: The wallet to create a proof of work for. - :param netuid: The netuid for the subnet to create a proof of work for. - :param output_in_place: If true, prints the progress of the proof of work to the console - in-place. Meaning the progress is printed on the same lines. - :param cuda: If true, uses CUDA to solve the proof of work. - :param dev_id: The CUDA device id(s) to use. If cuda is true and dev_id is a list, - then multiple CUDA devices will be used to solve the proof of work. - :param tpb: The number of threads per block to use when solving the proof of work. Should be a multiple of 32. - :param num_processes: The number of processes to use when solving the proof of work. - If None, then the number of processes is equal to the number of CPU cores. - :param update_interval: The number of nonces to run before checking for a new block. - :param log_verbose: If true, prints the progress of the proof of work more verbosely. - - :return: The proof of work solution or None if the wallet is already registered or there is a different error. - - :raises ValueError: If the subnet does not exist. + Args: + subtensor: The subtensor to create a proof of work for. + wallet: The wallet to create a proof of work for. + netuid: The netuid for the subnet to create a proof of work for. 
+ output_in_place: If true, prints the progress of the proof of work to the console in-place. Meaning the progress is printed on the same lines. + cuda: If true, uses CUDA to solve the proof of work. + dev_id: The CUDA device id(s) to use. If cuda is true and dev_id is a list, then multiple CUDA devices will be used to solve the proof of work. + tpb: The number of threads per block to use when solving the proof of work. Should be a multiple of 32. + num_processes: The number of processes to use when solving the proof of work. If None, then the number of processes is equal to the number of CPU cores. + update_interval: The number of nonces to run before checking for a new block. + log_verbose: If true, prints the progress of the proof of work more verbosely. + + Returns: + The proof of work solution or None if the wallet is already registered or there is a different error. + + Raises: + ValueError: If the subnet does not exist. """ if netuid != -1: if not await subtensor.subnet_exists(netuid=netuid): @@ -1425,7 +1378,7 @@ class CUDAException(Exception): def _hex_bytes_to_u8_list(hex_bytes: bytes): - hex_chunks = [int(hex_bytes[i : i + 2], 16) for i in range(0, len(hex_bytes), 2)] + hex_chunks = [int(hex_bytes[i: i + 2], 16) for i in range(0, len(hex_bytes), 2)] return hex_chunks @@ -1434,14 +1387,14 @@ def _create_seal_hash(block_and_hotkey_hash_bytes: bytes, nonce: int) -> bytes: Create a cryptographic seal hash from the given block and hotkey hash bytes and nonce. This function generates a seal hash by combining the given block and hotkey hash bytes with a nonce. - It first converts the nonce to a byte representation, then concatenates it with the first 64 hex - characters of the block and hotkey hash bytes. The result is then hashed using SHA-256 followed by - the Keccak-256 algorithm to produce the final seal hash. + It first converts the nonce to a byte representation, then concatenates it with the first 64 hex characters of the block and hotkey hash bytes. 
The result is then hashed using SHA-256 followed by the Keccak-256 algorithm to produce the final seal hash. - :param block_and_hotkey_hash_bytes: The combined hash bytes of the block and hotkey. - :param nonce: The nonce value used for hashing. + Args: + block_and_hotkey_hash_bytes (bytes): The combined hash bytes of the block and hotkey. + nonce (int): The nonce value used for hashing. - :return: The resulting seal hash. + Returns: + The resulting seal hash. """ nonce_bytes = binascii.hexlify(nonce.to_bytes(8, "little")) pre_seal = nonce_bytes + binascii.hexlify(block_and_hotkey_hash_bytes)[:64] @@ -1482,14 +1435,14 @@ def _update_curr_block( This function updates the current block and its difficulty in a thread-safe manner. It sets the current block number, hashes the block with the hotkey, updates the current block bytes, and packs the difficulty. - :param curr_diff: Shared array to store the current difficulty. - :param curr_block: Shared array to store the current block data. - :param curr_block_num: Shared value to store the current block number. - :param block_number: The block number to set as the current block number. - :param block_bytes: The block data bytes to be hashed with the hotkey. - :param diff: The difficulty value to be packed into the current difficulty array. - :param hotkey_bytes: The hotkey bytes used for hashing the block. - :param lock: A lock to ensure thread-safe updates. + curr_diff: Shared array to store the current difficulty. + curr_block: Shared array to store the current block data. + curr_block_num: Shared value to store the current block number. + block_number: The block number to set as the current block number. + block_bytes: The block data bytes to be hashed with the hotkey. + diff: The difficulty value to be packed into the current difficulty array. + hotkey_bytes: The hotkey bytes used for hashing the block. + lock: A lock to ensure thread-safe updates. 
""" with lock: curr_block_num.value = block_number @@ -1535,13 +1488,13 @@ def solve_cuda( """ Solves the PoW problem using CUDA. - :param nonce_start: Starting nonce. - :param update_interval: Number of nonces to solve before updating block information. - :param tpb: Threads per block. - :param block_and_hotkey_hash_bytes: Keccak(Bytes of the block hash + bytes of the hotkey) 64 bytes. - :param difficulty: Difficulty of the PoW problem. - :param limit: Upper limit of the nonce. - :param dev_id: The CUDA device ID + nonce_start: Starting nonce. + update_interval: Number of nonces to solve before updating block information. + tpb: Threads per block. + block_and_hotkey_hash_bytes: Keccak(Bytes of the block hash + bytes of the hotkey) 64 bytes. + difficulty: Difficulty of the PoW problem. + limit: Upper limit of the nonce. + dev_id: The CUDA device ID :return: (nonce, seal) corresponding to the solution. Returns -1 for nonce if no solution is found. """ From 33eda2d6d4f7ea3c3349ba7c10b84fe82a503043 Mon Sep 17 00:00:00 2001 From: Roman Date: Mon, 4 Nov 2024 16:26:40 -0800 Subject: [PATCH 44/58] commit_weights.py --- bittensor/core/extrinsics/commit_weights.py | 22 +++++---------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index 3dcfd5b2c2..c53a527ea2 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -20,7 +20,6 @@ from typing import Optional, TYPE_CHECKING from retry import retry -from rich.prompt import Confirm from bittensor.core.extrinsics.utils import submit_extrinsic from bittensor.utils import format_error_message @@ -33,7 +32,7 @@ from bittensor.core.subtensor import Subtensor -# # Chain call for `commit_weights_extrinsic` +# Chain call for `commit_weights_extrinsic` @ensure_connected def do_commit_weights( self: "Subtensor", @@ -101,11 +100,10 @@ def commit_weights_extrinsic( commit_hash: 
str, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, ) -> tuple[bool, str]: """ Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet. - This function is a wrapper around the `do_commit_weights` method, handling user prompts and error messages. + This function is a wrapper around the `do_commit_weights` method. Args: subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for blockchain interaction. @@ -114,16 +112,12 @@ def commit_weights_extrinsic( commit_hash (str): The hash of the neuron's weights to be committed. wait_for_inclusion (bool): Waits for the transaction to be included in a block. wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. - prompt (bool): If ``True``, prompts for user confirmation before proceeding. Returns: - tuple[bool, str]: ``True`` if the weight commitment is successful, False otherwise. And `msg`, a string - value describing the success or potential error. + tuple[bool, str]: ``True`` if the weight commitment is successful, False otherwise. And `msg`, a string value describing the success or potential error. This function provides a user-friendly interface for committing weights to the Bittensor blockchain, ensuring proper error handling and user interaction when required. """ - if prompt and not Confirm.ask(f"Would you like to commit weights?"): - return False, "User cancelled the operation." success, error_message = do_commit_weights( self=subtensor, @@ -226,11 +220,10 @@ def reveal_weights_extrinsic( version_key: int, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, ) -> tuple[bool, str]: """ Reveals the weights for a specific subnet on the Bittensor blockchain using the provided wallet. - This function is a wrapper around the `_do_reveal_weights` method, handling user prompts and error messages. 
+ This function is a wrapper around the `_do_reveal_weights` method. Args: subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for blockchain interaction. @@ -242,18 +235,13 @@ def reveal_weights_extrinsic( version_key (int): Version key for compatibility with the network. wait_for_inclusion (bool): Waits for the transaction to be included in a block. wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. - prompt (bool): If ``True``, prompts for user confirmation before proceeding. Returns: - tuple[bool, str]: ``True`` if the weight revelation is successful, False otherwise. And `msg`, a string - value describing the success or potential error. + tuple[bool, str]: ``True`` if the weight revelation is successful, False otherwise. And `msg`, a string value describing the success or potential error. This function provides a user-friendly interface for revealing weights on the Bittensor blockchain, ensuring proper error handling and user interaction when required. """ - if prompt and not Confirm.ask(f"Would you like to reveal weights?"): - return False, "User cancelled the operation." 
- success, error_message = do_reveal_weights( self=subtensor, wallet=wallet, From 9dd4a0f5d759de8f3d9e70056200496359fa91cd Mon Sep 17 00:00:00 2001 From: Roman Date: Mon, 4 Nov 2024 16:49:08 -0800 Subject: [PATCH 45/58] set_weights.py --- bittensor/core/extrinsics/set_weights.py | 11 -------- bittensor/core/subtensor.py | 1 - .../unit_tests/extrinsics/test_set_weights.py | 26 ++----------------- tests/unit_tests/test_subtensor.py | 3 --- 4 files changed, 2 insertions(+), 39 deletions(-) diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index 904b699926..725a1d2442 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -21,7 +21,6 @@ import numpy as np from numpy.typing import NDArray from retry import retry -from rich.prompt import Confirm from bittensor.core.extrinsics.utils import submit_extrinsic from bittensor.core.settings import version_as_int @@ -114,7 +113,6 @@ def set_weights_extrinsic( version_key: int = 0, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, ) -> tuple[bool, str]: """Sets the given weights and values on chain for wallet hotkey account. @@ -127,7 +125,6 @@ def set_weights_extrinsic( version_key (int): The version key of the validator. wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. Returns: tuple[bool, str]: A tuple containing a success flag and an optional response message. @@ -149,14 +146,6 @@ def set_weights_extrinsic( uids, weights ) - # Ask before moving on. 
- if prompt: - if not Confirm.ask( - f"Do you want to set weights:\n[bold white] weights: {[float(v / 65535) for v in weight_vals]}\n" - f"uids: {weight_uids}[/bold white ]?" - ): - return False, "Prompt refused." - logging.info( f":satellite: Setting weights on {subtensor.network} ..." ) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index fcbb4147d7..0394bbb755 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -895,7 +895,6 @@ def set_weights( version_key=version_key, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, - prompt=prompt, ) except Exception as e: logging.error(f"Error setting weights: {e}") diff --git a/tests/unit_tests/extrinsics/test_set_weights.py b/tests/unit_tests/extrinsics/test_set_weights.py index 9c32fc9bdf..0cd663f0b7 100644 --- a/tests/unit_tests/extrinsics/test_set_weights.py +++ b/tests/unit_tests/extrinsics/test_set_weights.py @@ -28,7 +28,7 @@ def mock_wallet(): @pytest.mark.parametrize( - "uids, weights, version_key, wait_for_inclusion, wait_for_finalization, prompt, user_accepts, expected_success, expected_message", + "uids, weights, version_key, wait_for_inclusion, wait_for_finalization, expected_success, expected_message", [ ( [1, 2], @@ -37,8 +37,6 @@ def mock_wallet(): True, False, True, - True, - True, "Successfully set weights and Finalized.", ), ( @@ -47,8 +45,6 @@ def mock_wallet(): 0, False, False, - False, - True, True, "Not waiting for finalization or inclusion.", ), @@ -58,18 +54,14 @@ def mock_wallet(): 0, True, False, - True, - True, False, "Subtensor returned `UnknownError(UnknownType)` error. 
This means: `Unknown Description`.", ), - ([1, 2], [0.5, 0.5], 0, True, True, True, False, False, "Prompt refused."), ], ids=[ "happy-flow", "not-waiting-finalization-inclusion", "error-flow", - "prompt-refused", ], ) def test_set_weights_extrinsic( @@ -80,8 +72,6 @@ def test_set_weights_extrinsic( version_key, wait_for_inclusion, wait_for_finalization, - prompt, - user_accepts, expected_success, expected_message, ): @@ -90,7 +80,7 @@ def test_set_weights_extrinsic( with patch( "bittensor.utils.weight_utils.convert_weights_and_uids_for_emit", return_value=(uids_tensor, weights_tensor), - ), patch("rich.prompt.Confirm.ask", return_value=user_accepts), patch( + ), patch( "bittensor.core.extrinsics.set_weights.do_set_weights", return_value=(expected_success, "Mock error message"), ) as mock_do_set_weights: @@ -103,22 +93,10 @@ def test_set_weights_extrinsic( version_key=version_key, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, - prompt=prompt, ) assert result == expected_success, f"Test {expected_message} failed." assert message == expected_message, f"Test {expected_message} failed." 
- if user_accepts is not False: - mock_do_set_weights.assert_called_once_with( - self=mock_subtensor, - wallet=mock_wallet, - netuid=123, - uids=uids_tensor, - vals=weights_tensor, - version_key=version_key, - wait_for_finalization=wait_for_finalization, - wait_for_inclusion=wait_for_inclusion, - ) def test_do_set_weights_is_success(mock_subtensor, mocker): diff --git a/tests/unit_tests/test_subtensor.py b/tests/unit_tests/test_subtensor.py index a818f22c55..75c40b9fa7 100644 --- a/tests/unit_tests/test_subtensor.py +++ b/tests/unit_tests/test_subtensor.py @@ -1132,7 +1132,6 @@ def test_set_weights(subtensor, mocker): fake_weights = [0.4, 0.6] fake_wait_for_inclusion = False fake_wait_for_finalization = False - fake_prompt = False fake_max_retries = 5 expected_result = (True, None) @@ -1159,7 +1158,6 @@ def test_set_weights(subtensor, mocker): version_key=settings.version_as_int, wait_for_inclusion=fake_wait_for_inclusion, wait_for_finalization=fake_wait_for_finalization, - prompt=fake_prompt, max_retries=fake_max_retries, ) @@ -1180,7 +1178,6 @@ def test_set_weights(subtensor, mocker): version_key=settings.version_as_int, wait_for_inclusion=fake_wait_for_inclusion, wait_for_finalization=fake_wait_for_finalization, - prompt=fake_prompt, ) assert result == expected_result From f7f36c47afe68763fa88e68a33b790b28f2891af Mon Sep 17 00:00:00 2001 From: Roman Date: Mon, 4 Nov 2024 19:36:37 -0800 Subject: [PATCH 46/58] remove all prompts --- bittensor/core/async_subtensor.py | 18 +-- bittensor/core/config.py | 12 -- .../core/extrinsics/async_registration.py | 2 +- bittensor/core/extrinsics/async_root.py | 61 ++++------ bittensor/core/extrinsics/async_transfer.py | 2 +- bittensor/core/extrinsics/registration.py | 31 +----- bittensor/core/extrinsics/root.py | 19 ---- bittensor/core/extrinsics/serving.py | 13 --- bittensor/core/extrinsics/transfer.py | 14 --- bittensor/core/subtensor.py | 23 ---- tests/e2e_tests/test_axon.py | 1 - tests/e2e_tests/test_dendrite.py | 1 - 
tests/e2e_tests/test_incentive.py | 2 - tests/e2e_tests/test_subtensor_functions.py | 1 - tests/e2e_tests/test_transfer.py | 1 - .../extrinsics/test_registration.py | 63 +++++------ tests/unit_tests/extrinsics/test_root.py | 86 ++++---------- tests/unit_tests/extrinsics/test_serving.py | 105 ++++++++---------- tests/unit_tests/test_subtensor.py | 9 -- 19 files changed, 120 insertions(+), 344 deletions(-) diff --git a/bittensor/core/async_subtensor.py b/bittensor/core/async_subtensor.py index aa2b65fb30..3b08d280bf 100644 --- a/bittensor/core/async_subtensor.py +++ b/bittensor/core/async_subtensor.py @@ -7,7 +7,6 @@ import typer from bittensor_wallet import Wallet from bittensor_wallet.utils import SS58_FORMAT -from rich.prompt import Confirm from scalecodec import GenericCall from scalecodec.base import RuntimeConfiguration from scalecodec.type_registry import load_type_registry_preset @@ -1113,7 +1112,6 @@ async def transfer( destination: str, amount: float, transfer_all: bool, - prompt: bool, ): """Transfer token of amount to destination.""" return await transfer_extrinsic( @@ -1122,10 +1120,9 @@ async def transfer( destination, Balance.from_tao(amount), transfer_all, - prompt=prompt, ) - async def register(self, wallet: Wallet, prompt: bool): + async def register(self, wallet: Wallet): """Register neuron by recycling some TAO.""" logging.info( f"Registering on netuid 0 on network: {self.network}" @@ -1154,21 +1151,11 @@ async def register(self, wallet: Wallet, prompt: bool): ) return False - if prompt: - if not Confirm.ask( - f"Your balance is: [bold green]{balance}[/bold green]\n" - f"The cost to register by recycle is [bold red]{current_recycle}[/bold red]\n" - f"Do you want to continue?", - default=False, - ): - return False - return await root_register_extrinsic( self, wallet, wait_for_inclusion=True, wait_for_finalization=True, - prompt=prompt, ) async def pow_register( @@ -1188,7 +1175,6 @@ async def pow_register( subtensor=self, wallet=wallet, 
netuid=netuid, - prompt=True, tpb=threads_per_block, update_interval=update_interval, num_processes=processors, @@ -1203,7 +1189,6 @@ async def set_weights( wallet: "Wallet", netuids: list[int], weights: list[float], - prompt: bool, ): """Set weights for root network.""" netuids_ = np.array(netuids, dtype=np.int64) @@ -1216,7 +1201,6 @@ async def set_weights( netuids=netuids_, weights=weights_, version_key=0, - prompt=prompt, wait_for_finalization=True, wait_for_inclusion=True, ) diff --git a/bittensor/core/config.py b/bittensor/core/config.py index 5027bbecb5..f38aff20e6 100644 --- a/bittensor/core/config.py +++ b/bittensor/core/config.py @@ -97,18 +97,6 @@ def __init__( # this can fail if --no_version_checking has already been added. pass - try: - parser.add_argument( - "--no_prompt", - dest="no_prompt", - action="store_true", - help="Set ``true`` to stop cli from prompting the user.", - default=False, - ) - except Exception: - # this can fail if --no_version_checking has already been added. - pass - # Get args from argv if not passed in. 
if args is None: args = sys.argv[1:] diff --git a/bittensor/core/extrinsics/async_registration.py b/bittensor/core/extrinsics/async_registration.py index 8ab8cef80f..3739087b10 100644 --- a/bittensor/core/extrinsics/async_registration.py +++ b/bittensor/core/extrinsics/async_registration.py @@ -1378,7 +1378,7 @@ class CUDAException(Exception): def _hex_bytes_to_u8_list(hex_bytes: bytes): - hex_chunks = [int(hex_bytes[i: i + 2], 16) for i in range(0, len(hex_bytes), 2)] + hex_chunks = [int(hex_bytes[i : i + 2], 16) for i in range(0, len(hex_bytes), 2)] return hex_chunks diff --git a/bittensor/core/extrinsics/async_root.py b/bittensor/core/extrinsics/async_root.py index 9e73f98a30..f93ce4501e 100644 --- a/bittensor/core/extrinsics/async_root.py +++ b/bittensor/core/extrinsics/async_root.py @@ -6,8 +6,6 @@ from bittensor_wallet import Wallet from bittensor_wallet.errors import KeyFileError from numpy.typing import NDArray -from rich.prompt import Confirm -from rich.table import Table, Column from substrateinterface.exceptions import SubstrateRequestException from bittensor.utils import u16_normalized_float, format_error_message @@ -34,20 +32,20 @@ async def get_limits(subtensor: "AsyncSubtensor") -> tuple[int, float]: async def root_register_extrinsic( subtensor: "AsyncSubtensor", - wallet: Wallet, + wallet: "Wallet", wait_for_inclusion: bool = True, wait_for_finalization: bool = True, - prompt: bool = False, ) -> bool: """Registers the wallet to root network. - :param subtensor: The AsyncSubtensor object - :param wallet: Bittensor wallet object. - :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout. - :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout. 
- :param prompt: If `True`, the call waits for confirmation from the user before proceeding. + Args: + subtensor (bittensor.core.async_subtensor.AsyncSubtensor): The AsyncSubtensor object + wallet (bittensor_wallet.Wallet): Bittensor wallet object. + wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout. + wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout. - :return: `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is `True`. + Returns: + `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is `True`. """ try: @@ -112,22 +110,20 @@ async def set_root_weights_extrinsic( version_key: int = 0, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, ) -> bool: """Sets the given weights and values on chain for wallet hotkey account. - :param subtensor: The AsyncSubtensor object - :param wallet: Bittensor wallet object. - :param netuids: The `netuid` of the subnet to set weights for. - :param weights: Weights to set. These must be `float` s and must correspond to the passed `netuid` s. - :param version_key: The version key of the validator. - :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, or returns - `False` if the extrinsic fails to enter the block within the timeout. - :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`, - or returns `False` if the extrinsic fails to be finalized within the timeout. - :param prompt: If `True`, the call waits for confirmation from the user before proceeding. 
- :return: `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, - the response is `True`. + Args: + subtensor (bittensor.core.async_subtensor.AsyncSubtensor): The AsyncSubtensor object + wallet (bittensor_wallet.Wallet): Bittensor wallet object. + netuids (Union[NDArray[np.int64], list[int]]): The `netuid` of the subnet to set weights for. + weights (Union[NDArray[np.float32], list[float]]): Weights to set. These must be `float` s and must correspond to the passed `netuid` s. + version_key (int): The version key of the validator. + wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout. + wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout. + + Returns: + `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is `True`. """ async def _do_set_weights(): @@ -203,25 +199,6 @@ async def _do_set_weights(): f"Raw weights -> Normalized weights: {weights} -> {formatted_weights}" ) - # Ask before moving on. 
- if prompt: - table = Table( - Column("[dark_orange]Netuid", justify="center", style="bold green"), - Column( - "[dark_orange]Weight", justify="center", style="bold light_goldenrod2" - ), - expand=False, - show_edge=False, - ) - print("Netuid | Weight") - - for netuid, weight in zip(netuids, formatted_weights): - table.add_row(str(netuid), f"{weight:.8f}") - print(f"{netuid} | {weight}") - - if not Confirm.ask("\nDo you want to set these root weights?"): - return False - try: logging.info(":satellite: Setting root weights...") weight_uids, weight_vals = convert_weights_and_uids_for_emit(netuids, weights) diff --git a/bittensor/core/extrinsics/async_transfer.py b/bittensor/core/extrinsics/async_transfer.py index 8d68684d16..81b8ea7137 100644 --- a/bittensor/core/extrinsics/async_transfer.py +++ b/bittensor/core/extrinsics/async_transfer.py @@ -39,7 +39,7 @@ async def transfer_extrinsic( wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout. wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout. keep_alive (bool): If set, keeps the account alive by keeping the balance above the existential deposit. - + Returns: success (bool): Flag is `True` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is `True`, regardless of its inclusion. 
""" diff --git a/bittensor/core/extrinsics/registration.py b/bittensor/core/extrinsics/registration.py index ba9dc73756..15df860d9b 100644 --- a/bittensor/core/extrinsics/registration.py +++ b/bittensor/core/extrinsics/registration.py @@ -20,7 +20,6 @@ from bittensor_wallet.errors import KeyFileError from retry import retry -from rich.prompt import Confirm from bittensor.utils import format_error_message from bittensor.utils.btlogging import logging @@ -43,7 +42,7 @@ def _do_pow_register( self: "Subtensor", netuid: int, wallet: "Wallet", - pow_result: POWSolution, + pow_result: "POWSolution", wait_for_inclusion: bool = False, wait_for_finalization: bool = True, ) -> tuple[bool, Optional[str]]: @@ -53,14 +52,12 @@ def _do_pow_register( netuid (int): The subnet to register on. wallet (bittensor.wallet): The wallet to register. pow_result (POWSolution): The PoW result to register. - wait_for_inclusion (bool): If ``True``, waits for the extrinsic to be included in a block. - Default to `False`. + wait_for_inclusion (bool): If ``True``, waits for the extrinsic to be included in a block. Default to `False`. wait_for_finalization (bool): If ``True``, waits for the extrinsic to be finalized. Default to `True`. Returns: success (bool): ``True`` if the extrinsic was included in a block. - error (Optional[str]): ``None`` on success or not waiting for inclusion/finalization, otherwise the error - message. + error (Optional[str]): ``None`` on success or not waiting for inclusion/finalization, otherwise the error message. """ @retry(delay=1, tries=3, backoff=2, max_delay=4) @@ -110,7 +107,6 @@ def register_extrinsic( netuid: int, wait_for_inclusion: bool = False, wait_for_finalization: bool = True, - prompt: bool = False, max_allowed_attempts: int = 3, output_in_place: bool = True, cuda: bool = False, @@ -128,7 +124,6 @@ def register_extrinsic( netuid (int): The ``netuid`` of the subnet to register on. 
wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. max_allowed_attempts (int): Maximum number of attempts to register the wallet. output_in_place (bool): If true, prints the progress of the proof of work to the console in-place. Meaning the progress is printed on the same lines. Defaults to `True`. cuda (bool): If ``true``, the wallet should be registered using CUDA device(s). @@ -160,16 +155,6 @@ def register_extrinsic( ) return True - if prompt: - if not Confirm.ask( - "Continue Registration?\n hotkey: [bold white]{}[/bold white]\n coldkey: [bold white]{}[/bold white]\n network: [bold white]{}[/bold white]".format( - wallet.hotkey.ss58_address, - wallet.coldkeypub.ss58_address, - subtensor.network, - ) - ): - return False - if not torch: log_no_torch_error() return False @@ -183,8 +168,6 @@ def register_extrinsic( # Solve latest POW. if cuda: if not torch.cuda.is_available(): - if prompt: - logging.info("CUDA is not available.") return False pow_result: Optional[POWSolution] = create_pow( subtensor, @@ -353,7 +336,6 @@ def burned_register_extrinsic( netuid: int, wait_for_inclusion: bool = False, wait_for_finalization: bool = True, - prompt: bool = False, ) -> bool: """Registers the wallet to chain by recycling TAO. @@ -363,7 +345,6 @@ def burned_register_extrinsic( netuid (int): The ``netuid`` of the subnet to register on. wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. 
wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. Returns: success (bool): Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. @@ -390,7 +371,6 @@ def burned_register_extrinsic( old_balance = subtensor.get_balance(wallet.coldkeypub.ss58_address) - recycle_amount = subtensor.recycle(netuid=netuid) if not neuron.is_null: logging.info(":white_heavy_check_mark: Already Registered") logging.info(f"\t\tuid: {neuron.uid}") @@ -399,11 +379,6 @@ def burned_register_extrinsic( logging.info(f"\t\tcoldkey: {neuron.coldkey}") return True - if prompt: - # Prompt user for confirmation. - if not Confirm.ask(f"Recycle {recycle_amount} to register on subnet:{netuid}?"): - return False - logging.info(":satellite: Recycling TAO for Registration...") success, err_msg = _do_burned_register( self=subtensor, diff --git a/bittensor/core/extrinsics/root.py b/bittensor/core/extrinsics/root.py index 445d2c0b06..de72212146 100644 --- a/bittensor/core/extrinsics/root.py +++ b/bittensor/core/extrinsics/root.py @@ -5,7 +5,6 @@ from bittensor_wallet.errors import KeyFileError from numpy.typing import NDArray from retry import retry -from rich.prompt import Confirm from bittensor.core.settings import version_as_int from bittensor.utils import format_error_message, weight_utils @@ -64,7 +63,6 @@ def root_register_extrinsic( wallet: "Wallet", wait_for_inclusion: bool = False, wait_for_finalization: bool = True, - prompt: bool = False, ) -> bool: """Registers the wallet to root network. @@ -73,7 +71,6 @@ def root_register_extrinsic( wallet (bittensor_wallet.Wallet): Bittensor wallet object. 
wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. Default is ``False``. wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. Default is ``True``. - prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. Default is ``False``. Returns: success (bool): Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. @@ -96,11 +93,6 @@ def root_register_extrinsic( ) return True - if prompt: - # Prompt user for confirmation. - if not Confirm.ask("Register to root network?"): - return False - logging.info(":satellite: Registering to root network...") success, err_msg = _do_root_register( wallet=wallet, @@ -201,7 +193,6 @@ def set_root_weights_extrinsic( version_key: int = 0, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, ) -> bool: """Sets the given weights and values on chain for wallet hotkey account. @@ -213,7 +204,6 @@ def set_root_weights_extrinsic( version_key (int): The version key of the validator. Default is ``0``. wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. Default is ``False``. wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. Default is ``False``. - prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. Default is ``False``. 
Returns: success (bool): Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. @@ -256,15 +246,6 @@ def set_root_weights_extrinsic( f"Raw Weights -> Normalized weights: {weights} -> {formatted_weights}" ) - # Ask before moving on. - if prompt: - if not Confirm.ask( - "Do you want to set the following root weights?:\n[bold white] weights: {}\n uids: {}[/bold white ]?".format( - formatted_weights, netuids - ) - ): - return False - logging.info( f":satellite: Setting root weights on {subtensor.network} ..." ) diff --git a/bittensor/core/extrinsics/serving.py b/bittensor/core/extrinsics/serving.py index 6eb7a67b25..b4ce249719 100644 --- a/bittensor/core/extrinsics/serving.py +++ b/bittensor/core/extrinsics/serving.py @@ -15,11 +15,9 @@ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. -import json from typing import Optional, TYPE_CHECKING from retry import retry -from rich.prompt import Confirm from bittensor.core.errors import MetadataError from bittensor.core.extrinsics.utils import submit_extrinsic @@ -100,7 +98,6 @@ def serve_extrinsic( placeholder2: int = 0, wait_for_inclusion: bool = False, wait_for_finalization=True, - prompt: bool = False, ) -> bool: """Subscribes a Bittensor endpoint to the subtensor chain. @@ -115,7 +112,6 @@ def serve_extrinsic( placeholder2 (int): A placeholder for future use. wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. 
Returns: success (bool): Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. @@ -159,15 +155,6 @@ def serve_extrinsic( ) return True - if prompt: - output = params.copy() - output["coldkey"] = wallet.coldkeypub.ss58_address - output["hotkey"] = wallet.hotkey.ss58_address - if not Confirm.ask( - f"Do you want to serve axon:\n [bold white]{json.dumps(output, indent=4, sort_keys=True)}[/bold white]" - ): - return False - logging.debug( f"Serving axon with: AxonInfo({wallet.hotkey.ss58_address},{ip}:{port}) -> {subtensor.network}:{netuid}" ) diff --git a/bittensor/core/extrinsics/transfer.py b/bittensor/core/extrinsics/transfer.py index b68a579967..d2fba617d2 100644 --- a/bittensor/core/extrinsics/transfer.py +++ b/bittensor/core/extrinsics/transfer.py @@ -18,7 +18,6 @@ from typing import Optional, Union, TYPE_CHECKING from retry import retry -from rich.prompt import Confirm from bittensor.core.extrinsics.utils import submit_extrinsic from bittensor.core.settings import NETWORK_EXPLORER_MAP @@ -103,7 +102,6 @@ def transfer_extrinsic( wait_for_inclusion: bool = True, wait_for_finalization: bool = False, keep_alive: bool = True, - prompt: bool = False, ) -> bool: """Transfers funds from this wallet to the destination public key address. @@ -115,7 +113,6 @@ def transfer_extrinsic( wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. keep_alive (bool): If set, keeps the account alive by keeping the balance above the existential deposit. - prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. 
Returns: success (bool): Flag is ``true`` if extrinsic was finalized or uncluded in the block. If we did not wait for finalization / inclusion, the response is ``true``. @@ -161,17 +158,6 @@ def transfer_extrinsic( logging.info(f"\t\tFor fee: \t{fee}") return False - # Ask before moving on. - if prompt: - if not Confirm.ask( - "Do you want to transfer:[bold white]\n" - f" amount: {transfer_balance}\n" - f" from: {wallet.name}:{wallet.coldkey.ss58_address}\n" - f" to: {dest}\n" - f" for fee: {fee}[/bold white]" - ): - return False - logging.info(":satellite: Transferring...") success, block_hash, error_message = do_transfer( self=subtensor, diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 0394bbb755..c7b53e5e6e 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -852,7 +852,6 @@ def set_weights( version_key: int = settings.version_as_int, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, max_retries: int = 5, ) -> tuple[bool, str]: """ @@ -866,7 +865,6 @@ def set_weights( version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version.``. wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``. wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``. - prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``. max_retries (int): The number of maximum attempts to set weights. Default is ``5``. Returns: @@ -912,7 +910,6 @@ def root_set_weights( version_key: int = 0, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, ) -> bool: """ Sets the weights for neurons on the root network. This action is crucial for defining the influence and interactions of neurons at the root level of the Bittensor network. 
@@ -924,7 +921,6 @@ def root_set_weights( version_key (int, optional): Version key for compatibility with the network. Default is ``0``. wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. Defaults to ``False``. wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. Defaults to ``False``. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. Defaults to ``False``. Returns: bool: ``True`` if the setting of root-level weights is successful, False otherwise. @@ -939,7 +935,6 @@ def root_set_weights( version_key=version_key, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, - prompt=prompt, ) def register( @@ -948,7 +943,6 @@ def register( netuid: int, wait_for_inclusion: bool = False, wait_for_finalization: bool = True, - prompt: bool = False, max_allowed_attempts: int = 3, output_in_place: bool = True, cuda: bool = False, @@ -968,7 +962,6 @@ def register( netuid (int): The unique identifier of the subnet. wait_for_inclusion (bool): Waits for the transaction to be included in a block. Defaults to `False`. wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Defaults to `True`. - prompt (bool): If ``True``, prompts for user confirmation before proceeding. max_allowed_attempts (int): Maximum number of attempts to register the wallet. output_in_place (bool): If true, prints the progress of the proof of work to the console in-place. Meaning the progress is printed on the same lines. Defaults to `True`. cuda (bool): If ``true``, the wallet should be registered using CUDA device(s). Defaults to `False`. 
@@ -990,7 +983,6 @@ def register( netuid=netuid, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, - prompt=prompt, max_allowed_attempts=max_allowed_attempts, output_in_place=output_in_place, cuda=cuda, @@ -1006,7 +998,6 @@ def root_register( wallet: "Wallet", wait_for_inclusion: bool = False, wait_for_finalization: bool = True, - prompt: bool = False, ) -> bool: """ Registers the neuron associated with the wallet on the root network. This process is integral for participating in the highest layer of decision-making and governance within the Bittensor network. @@ -1015,7 +1006,6 @@ def root_register( wallet (bittensor.wallet): The wallet associated with the neuron to be registered on the root network. wait_for_inclusion (bool): Waits for the transaction to be included in a block. Defaults to `False`. wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Defaults to `True`. - prompt (bool): If ``True``, prompts for user confirmation before proceeding. Defaults to `False`. Returns: bool: ``True`` if the registration on the root network is successful, False otherwise. @@ -1027,7 +1017,6 @@ def root_register( wallet=wallet, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, - prompt=prompt, ) def burned_register( @@ -1036,7 +1025,6 @@ def burned_register( netuid: int, wait_for_inclusion: bool = False, wait_for_finalization: bool = True, - prompt: bool = False, ) -> bool: """ Registers a neuron on the Bittensor network by recycling TAO. This method of registration involves recycling TAO tokens, allowing them to be re-mined by performing work on the network. @@ -1046,7 +1034,6 @@ def burned_register( netuid (int): The unique identifier of the subnet. wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. Defaults to `False`. wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. 
Defaults to `True`. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. Defaults to `False`. Returns: bool: ``True`` if the registration is successful, False otherwise. @@ -1057,7 +1044,6 @@ def burned_register( netuid=netuid, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, - prompt=prompt, ) def serve_axon( @@ -1175,7 +1161,6 @@ def transfer( amount: Union["Balance", float], wait_for_inclusion: bool = True, wait_for_finalization: bool = False, - prompt: bool = False, ) -> bool: """ Executes a transfer of funds from the provided wallet to the specified destination address. This function is used to move TAO tokens within the Bittensor network, facilitating transactions between neurons. @@ -1186,7 +1171,6 @@ def transfer( amount (Union[bittensor.utils.balance.Balance, float]): The amount of TAO to be transferred. wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``True``. wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``. - prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``. Returns: transfer_extrinsic (bool): ``True`` if the transfer is successful, False otherwise. @@ -1200,7 +1184,6 @@ def transfer( amount=amount, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, - prompt=prompt, ) # Community uses this method via `bittensor.api.extrinsics.prometheus.prometheus_extrinsic` @@ -1782,7 +1765,6 @@ def commit_weights( version_key: int = settings.version_as_int, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, max_retries: int = 5, ) -> tuple[bool, str]: """ @@ -1798,7 +1780,6 @@ def commit_weights( version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version.``. 
wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``. wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``. - prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``. max_retries (int): The number of maximum attempts to commit weights. Default is ``5``. Returns: @@ -1837,7 +1818,6 @@ def commit_weights( commit_hash=commit_hash, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, - prompt=prompt, ) if success: break @@ -1859,7 +1839,6 @@ def reveal_weights( version_key: int = settings.version_as_int, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, max_retries: int = 5, ) -> tuple[bool, str]: """ @@ -1875,7 +1854,6 @@ def reveal_weights( version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version``. wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``. wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``. - prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``. max_retries (int): The number of maximum attempts to reveal weights. Default is ``5``. 
Returns: @@ -1902,7 +1880,6 @@ def reveal_weights( version_key=version_key, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, - prompt=prompt, ) if success: break diff --git a/tests/e2e_tests/test_axon.py b/tests/e2e_tests/test_axon.py index b5d18c5729..a21c4ae532 100644 --- a/tests/e2e_tests/test_axon.py +++ b/tests/e2e_tests/test_axon.py @@ -65,7 +65,6 @@ async def test_axon(local_chain): [ f"{sys.executable}", f'"{template_path}{templates_repo}/neurons/miner.py"', - "--no_prompt", "--netuid", str(netuid), "--subtensor.network", diff --git a/tests/e2e_tests/test_dendrite.py b/tests/e2e_tests/test_dendrite.py index 279e151346..24484f68d3 100644 --- a/tests/e2e_tests/test_dendrite.py +++ b/tests/e2e_tests/test_dendrite.py @@ -92,7 +92,6 @@ async def test_dendrite(local_chain): [ f"{sys.executable}", f'"{template_path}{templates_repo}/neurons/validator.py"', - "--no_prompt", "--netuid", str(netuid), "--subtensor.network", diff --git a/tests/e2e_tests/test_incentive.py b/tests/e2e_tests/test_incentive.py index 3e309f4f64..a95cf37660 100644 --- a/tests/e2e_tests/test_incentive.py +++ b/tests/e2e_tests/test_incentive.py @@ -70,7 +70,6 @@ async def test_incentive(local_chain): [ f"{sys.executable}", f'"{template_path}{templates_repo}/neurons/miner.py"', - "--no_prompt", "--netuid", str(netuid), "--subtensor.network", @@ -103,7 +102,6 @@ async def test_incentive(local_chain): [ f"{sys.executable}", f'"{template_path}{templates_repo}/neurons/validator.py"', - "--no_prompt", "--netuid", str(netuid), "--subtensor.network", diff --git a/tests/e2e_tests/test_subtensor_functions.py b/tests/e2e_tests/test_subtensor_functions.py index ffa7b716ee..d00e587fba 100644 --- a/tests/e2e_tests/test_subtensor_functions.py +++ b/tests/e2e_tests/test_subtensor_functions.py @@ -111,7 +111,6 @@ async def test_subtensor_extrinsics(local_chain): [ f"{sys.executable}", f'"{template_path}{templates_repo}/neurons/validator.py"', - "--no_prompt", "--netuid", 
str(netuid), "--subtensor.network", diff --git a/tests/e2e_tests/test_transfer.py b/tests/e2e_tests/test_transfer.py index b6be1cd6ae..62cf9723cc 100644 --- a/tests/e2e_tests/test_transfer.py +++ b/tests/e2e_tests/test_transfer.py @@ -32,7 +32,6 @@ def test_transfer(local_chain): amount=2, wait_for_finalization=True, wait_for_inclusion=True, - prompt=False, ) # Account details after transfer diff --git a/tests/unit_tests/extrinsics/test_registration.py b/tests/unit_tests/extrinsics/test_registration.py index 9a39ba7d1a..ccc68719f3 100644 --- a/tests/unit_tests/extrinsics/test_registration.py +++ b/tests/unit_tests/extrinsics/test_registration.py @@ -62,12 +62,12 @@ def mock_new_wallet(mocker): @pytest.mark.parametrize( - "subnet_exists, neuron_is_null, prompt, prompt_response, cuda_available, expected_result, test_id", + "subnet_exists, neuron_is_null, cuda_available, expected_result, test_id", [ - (False, True, True, True, True, False, "subnet-does-not-exist"), - (True, False, True, True, True, True, "neuron-already-registered"), - (True, True, True, False, True, False, "user-declines-prompt"), - (True, True, False, None, False, False, "cuda-unavailable"), + (False, True, True, False, "subnet-does-not-exist"), + (True, False, True, True, "neuron-already-registered"), + (True, True, True, True, "user-declines-prompt"), + (True, True, False, False, "cuda-unavailable"), ], ) def test_register_extrinsic_without_pow( @@ -75,23 +75,27 @@ def test_register_extrinsic_without_pow( mock_wallet, subnet_exists, neuron_is_null, - prompt, - prompt_response, cuda_available, expected_result, test_id, mocker, ): # Arrange - with mocker.patch.object( - mock_subtensor, "subnet_exists", return_value=subnet_exists - ), mocker.patch.object( - mock_subtensor, - "get_neuron_for_pubkey_and_subnet", - return_value=mocker.MagicMock(is_null=neuron_is_null), - ), mocker.patch( - "rich.prompt.Confirm.ask", return_value=prompt_response - ), mocker.patch("torch.cuda.is_available", 
return_value=cuda_available): + with ( + mocker.patch.object( + mock_subtensor, "subnet_exists", return_value=subnet_exists + ), + mocker.patch.object( + mock_subtensor, + "get_neuron_for_pubkey_and_subnet", + return_value=mocker.MagicMock(is_null=neuron_is_null), + ), + mocker.patch("torch.cuda.is_available", return_value=cuda_available), + mocker.patch( + "bittensor.utils.registration._get_block_with_retry", + return_value=(0, 0, "00ff11ee"), + ), + ): # Act result = registration.register_extrinsic( subtensor=mock_subtensor, @@ -99,7 +103,6 @@ def test_register_extrinsic_without_pow( netuid=123, wait_for_inclusion=True, wait_for_finalization=True, - prompt=prompt, max_allowed_attempts=3, output_in_place=True, cuda=True, @@ -164,7 +167,6 @@ def test_register_extrinsic_with_pow( netuid=123, wait_for_inclusion=True, wait_for_finalization=True, - prompt=False, max_allowed_attempts=3, output_in_place=True, cuda=cuda, @@ -180,16 +182,16 @@ def test_register_extrinsic_with_pow( @pytest.mark.parametrize( - "subnet_exists, neuron_is_null, recycle_success, prompt, prompt_response, is_registered, expected_result, test_id", + "subnet_exists, neuron_is_null, recycle_success, is_registered, expected_result, test_id", [ # Happy paths - (True, False, None, False, None, None, True, "neuron-not-null"), - (True, True, True, True, True, True, True, "happy-path-wallet-registered"), + (True, False, None, None, True, "neuron-not-null"), + (True, True, True, True, True, "happy-path-wallet-registered"), # Error paths - (False, True, False, False, None, None, False, "subnet-non-existence"), - (True, True, True, True, False, None, False, "prompt-declined"), - (True, True, False, True, True, False, False, "error-path-recycling-failed"), - (True, True, True, True, True, False, False, "error-path-not-registered"), + (False, True, False, None, False, "subnet-non-existence"), + (True, True, True, None, False, "prompt-declined"), + (True, True, False, False, False, 
"error-path-recycling-failed"), + (True, True, True, False, False, "error-path-not-registered"), ], ) def test_burned_register_extrinsic( @@ -198,8 +200,6 @@ def test_burned_register_extrinsic( subnet_exists, neuron_is_null, recycle_success, - prompt, - prompt_response, is_registered, expected_result, test_id, @@ -218,16 +218,9 @@ def test_burned_register_extrinsic( ), mocker.patch.object( mock_subtensor, "is_hotkey_registered", return_value=is_registered ): - mock_confirm = mocker.MagicMock(return_value=prompt_response) - registration.Confirm.ask = mock_confirm # Act result = registration.burned_register_extrinsic( - subtensor=mock_subtensor, wallet=mock_wallet, netuid=123, prompt=prompt + subtensor=mock_subtensor, wallet=mock_wallet, netuid=123 ) # Assert assert result == expected_result, f"Test failed for test_id: {test_id}" - - if prompt: - mock_confirm.assert_called_once() - else: - mock_confirm.assert_not_called() diff --git a/tests/unit_tests/extrinsics/test_root.py b/tests/unit_tests/extrinsics/test_root.py index bd37be203f..fb8f861476 100644 --- a/tests/unit_tests/extrinsics/test_root.py +++ b/tests/unit_tests/extrinsics/test_root.py @@ -18,7 +18,7 @@ def mock_wallet(mocker): @pytest.mark.parametrize( - "wait_for_inclusion, wait_for_finalization, hotkey_registered, registration_success, prompt, user_response, expected_result", + "wait_for_inclusion, wait_for_finalization, hotkey_registered, registration_success, expected_result", [ ( False, @@ -26,8 +26,6 @@ def mock_wallet(mocker): [True, None], True, True, - True, - True, ), # Already registered after attempt ( False, @@ -35,35 +33,21 @@ def mock_wallet(mocker): [False, True], True, True, - True, - True, ), # Registration succeeds with user confirmation - (False, True, [False, False], False, False, None, None), # Registration fails + (False, True, [False, False], False, None), # Registration fails ( False, True, [False, False], True, - False, - None, None, ), # Registration succeeds but neuron not found 
- ( - False, - True, - [False, False], - True, - True, - False, - False, - ), # User declines registration ], ids=[ "success-already-registered", "success-registration-succeeds", "failure-registration-failed", "failure-neuron-not-found", - "failure-prompt-declined", ], ) def test_root_register_extrinsic( @@ -73,46 +57,41 @@ def test_root_register_extrinsic( wait_for_finalization, hotkey_registered, registration_success, - prompt, - user_response, expected_result, mocker, ): # Arrange mock_subtensor.is_hotkey_registered.side_effect = hotkey_registered - with mocker.patch("rich.prompt.Confirm.ask", return_value=user_response): - # Preps - mock_register = mocker.Mock( - return_value=(registration_success, "Error registering") - ) - root._do_root_register = mock_register + # Preps + mock_register = mocker.Mock( + return_value=(registration_success, "Error registering") + ) + root._do_root_register = mock_register - # Act - result = root.root_register_extrinsic( - subtensor=mock_subtensor, - wallet=mock_wallet, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - # Assert - assert result == expected_result + # Act + result = root.root_register_extrinsic( + subtensor=mock_subtensor, + wallet=mock_wallet, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + # Assert + assert result == expected_result - if not hotkey_registered[0] and user_response: - mock_register.assert_called_once() + if not hotkey_registered[0]: + mock_register.assert_called_once() @pytest.mark.parametrize( - "wait_for_inclusion, wait_for_finalization, netuids, weights, prompt, user_response, expected_success", + "wait_for_inclusion, wait_for_finalization, netuids, weights, user_response, expected_success", [ - (True, False, [1, 2], [0.5, 0.5], True, True, True), # Success - weights set + (True, False, [1, 2], [0.5, 0.5], True, True), # Success - weights set ( False, False, [1, 2], [0.5, 0.5], - False, 
None, True, ), # Success - weights set no wait @@ -123,7 +102,6 @@ def test_root_register_extrinsic( [2000, 20], True, True, - True, ), # Success - large value to be normalized ( True, @@ -132,14 +110,12 @@ def test_root_register_extrinsic( [2000, 0], True, True, - True, ), # Success - single large value ( True, False, [1, 2], [0.5, 0.5], - True, False, False, ), # Failure - prompt declined @@ -148,7 +124,6 @@ def test_root_register_extrinsic( False, [1, 2], [0.5, 0.5], - False, None, False, ), # Failure - setting weights failed @@ -157,7 +132,6 @@ def test_root_register_extrinsic( False, [], [], - None, False, False, ), # Exception catched - ValueError 'min() arg is an empty sequence' @@ -179,7 +153,6 @@ def test_set_root_weights_extrinsic( wait_for_finalization, netuids, weights, - prompt, user_response, expected_success, mocker, @@ -190,8 +163,6 @@ def test_set_root_weights_extrinsic( ) mock_subtensor.min_allowed_weights = mocker.Mock(return_value=0) mock_subtensor.max_weight_limit = mocker.Mock(return_value=1) - mock_confirm = mocker.Mock(return_value=(expected_success, "Mock error")) - root.Confirm.ask = mock_confirm # Call result = root.set_root_weights_extrinsic( @@ -202,27 +173,21 @@ def test_set_root_weights_extrinsic( version_key=0, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, - prompt=prompt, ) # Asserts assert result == expected_success - if prompt: - mock_confirm.assert_called_once() - else: - mock_confirm.assert_not_called() @pytest.mark.parametrize( - "wait_for_inclusion, wait_for_finalization, netuids, weights, prompt, user_response, expected_success", + "wait_for_inclusion, wait_for_finalization, netuids, weights, user_response, expected_success", [ - (True, False, [1, 2], [0.5, 0.5], True, True, True), # Success - weights set + (True, False, [1, 2], [0.5, 0.5], True, True), # Success - weights set ( False, False, [1, 2], [0.5, 0.5], - False, None, True, ), # Success - weights set no wait @@ -233,7 +198,6 @@ 
def test_set_root_weights_extrinsic( [2000, 20], True, True, - True, ), # Success - large value to be normalized ( True, @@ -242,14 +206,12 @@ def test_set_root_weights_extrinsic( [2000, 0], True, True, - True, ), # Success - single large value ( True, False, [1, 2], [0.5, 0.5], - True, False, False, ), # Failure - prompt declined @@ -258,7 +220,6 @@ def test_set_root_weights_extrinsic( False, [1, 2], [0.5, 0.5], - False, None, False, ), # Failure - setting weights failed @@ -267,7 +228,6 @@ def test_set_root_weights_extrinsic( False, [], [], - None, False, False, ), # Exception catched - ValueError 'min() arg is an empty sequence' @@ -289,7 +249,6 @@ def test_set_root_weights_extrinsic_torch( wait_for_finalization, netuids, weights, - prompt, user_response, expected_success, force_legacy_torch_compatible_api, @@ -302,7 +261,6 @@ def test_set_root_weights_extrinsic_torch( wait_for_finalization, netuids, weights, - prompt, user_response, expected_success, mocker, diff --git a/tests/unit_tests/extrinsics/test_serving.py b/tests/unit_tests/extrinsics/test_serving.py index a57e32d01c..04df0de71e 100644 --- a/tests/unit_tests/extrinsics/test_serving.py +++ b/tests/unit_tests/extrinsics/test_serving.py @@ -116,27 +116,22 @@ def test_serve_extrinsic_happy_path( ): # Arrange serving.do_serve_axon = mocker.MagicMock(return_value=(True, "")) - with patch( - "bittensor.core.extrinsics.serving.Confirm.ask", - return_value=True, - ): - # Act - result = serving.serve_extrinsic( - mock_subtensor, - mock_wallet, - ip, - port, - protocol, - netuid, - placeholder1, - placeholder2, - wait_for_inclusion, - wait_for_finalization, - prompt, - ) + # Act + result = serving.serve_extrinsic( + mock_subtensor, + mock_wallet, + ip, + port, + protocol, + netuid, + placeholder1, + placeholder2, + wait_for_inclusion, + wait_for_finalization, + ) - # Assert - assert result == expected, f"Test ID: {test_id}" + # Assert + assert result == expected, f"Test ID: {test_id}" # Various edge cases @@ 
-177,27 +172,22 @@ def test_serve_extrinsic_edge_cases( ): # Arrange serving.do_serve_axon = mocker.MagicMock(return_value=(True, "")) - with patch( - "bittensor.core.extrinsics.serving.Confirm.ask", - return_value=True, - ): - # Act - result = serving.serve_extrinsic( - mock_subtensor, - mock_wallet, - ip, - port, - protocol, - netuid, - placeholder1, - placeholder2, - wait_for_inclusion, - wait_for_finalization, - prompt, - ) + # Act + result = serving.serve_extrinsic( + mock_subtensor, + mock_wallet, + ip, + port, + protocol, + netuid, + placeholder1, + placeholder2, + wait_for_inclusion, + wait_for_finalization, + ) - # Assert - assert result == expected, f"Test ID: {test_id}" + # Assert + assert result == expected, f"Test ID: {test_id}" # Various error cases @@ -238,27 +228,22 @@ def test_serve_extrinsic_error_cases( ): # Arrange serving.do_serve_axon = mocker.MagicMock(return_value=(False, "Error serving axon")) - with patch( - "bittensor.core.extrinsics.serving.Confirm.ask", - return_value=True, - ): - # Act - result = serving.serve_extrinsic( - mock_subtensor, - mock_wallet, - ip, - port, - protocol, - netuid, - placeholder1, - placeholder2, - wait_for_inclusion, - wait_for_finalization, - prompt, - ) + # Act + result = serving.serve_extrinsic( + mock_subtensor, + mock_wallet, + ip, + port, + protocol, + netuid, + placeholder1, + placeholder2, + wait_for_inclusion, + wait_for_finalization, + ) - # Assert - assert result == expected_error_message, f"Test ID: {test_id}" + # Assert + assert result == expected_error_message, f"Test ID: {test_id}" @pytest.mark.parametrize( diff --git a/tests/unit_tests/test_subtensor.py b/tests/unit_tests/test_subtensor.py index 75c40b9fa7..c889903684 100644 --- a/tests/unit_tests/test_subtensor.py +++ b/tests/unit_tests/test_subtensor.py @@ -1270,7 +1270,6 @@ def test_transfer(subtensor, mocker): fake_amount = 1.1 fake_wait_for_inclusion = True fake_wait_for_finalization = True - fake_prompt = False mocked_transfer_extrinsic = 
mocker.patch.object( subtensor_module, "transfer_extrinsic" ) @@ -1282,7 +1281,6 @@ def test_transfer(subtensor, mocker): fake_amount, fake_wait_for_inclusion, fake_wait_for_finalization, - fake_prompt, ) # Asserts @@ -1293,7 +1291,6 @@ def test_transfer(subtensor, mocker): amount=fake_amount, wait_for_inclusion=fake_wait_for_inclusion, wait_for_finalization=fake_wait_for_finalization, - prompt=fake_prompt, ) assert result == mocked_transfer_extrinsic.return_value @@ -1740,7 +1737,6 @@ def test_commit_weights(subtensor, mocker): weights = [0.4, 0.6] wait_for_inclusion = False wait_for_finalization = False - prompt = False max_retries = 5 expected_result = (True, None) @@ -1761,7 +1757,6 @@ def test_commit_weights(subtensor, mocker): version_key=settings.version_as_int, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, - prompt=prompt, max_retries=max_retries, ) @@ -1782,7 +1777,6 @@ def test_commit_weights(subtensor, mocker): commit_hash=mocked_generate_weight_hash.return_value, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, - prompt=prompt, ) assert result == expected_result @@ -1809,7 +1803,6 @@ def test_reveal_weights(subtensor, mocker): salt=salt, wait_for_inclusion=False, wait_for_finalization=False, - prompt=False, ) # Assertions @@ -1824,7 +1817,6 @@ def test_reveal_weights(subtensor, mocker): salt=salt, wait_for_inclusion=False, wait_for_finalization=False, - prompt=False, ) @@ -1852,7 +1844,6 @@ def test_reveal_weights_false(subtensor, mocker): salt=salt, wait_for_inclusion=False, wait_for_finalization=False, - prompt=False, ) # Assertion From d424cd162fbdd548944e208f18a726ff5c4237fd Mon Sep 17 00:00:00 2001 From: Roman <167799377+roman-opentensor@users.noreply.github.com> Date: Mon, 4 Nov 2024 19:37:22 -0800 Subject: [PATCH 47/58] SDK (AsyncSubtensor) Part 2 (#2380) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * create the copy of 
`bittensor/core/subtensor.py` with async suffix. * add async_substrate_interface.py * update `bittensor.utils.format_error_message` to be compatible with async_subtensor * update `bittensor.core.chain_data` * update `bittensor.core.async_subtensor.py` from btcli * add DelegatesDetails for async_subtensor * add validate_chain_endpoint for async_subtensor * update async_substrate_interface.py by Optional where acceptable and doesn't brake logic * improve settings for async_subtensor.py * fix format errors * fix annotations * add async_subtensor.py with adaptation to SDK (all methods checked and work well) * update settings.py to be compatible with async_extrinsics * add async_transfer extrinsic * add async_registration extrinsic * add async_root extrinsics * ruff * Update bittensor/core/extrinsics/async_transfer.py Co-authored-by: Benjamin Himes <37844818+thewhaleking@users.noreply.github.com> * fix comments review * avoid non-direct import within inner code (fix circular import) * del unused code * update AsyncSubtensor (add methods, fix tests) * Update bittensor/core/async_subtensor.py Co-authored-by: Paweł Polewicz * fix await properties and remove double format_error_message call * fix review comments * improve docstrings * fix --------- Co-authored-by: Benjamin Himes <37844818+thewhaleking@users.noreply.github.com> Co-authored-by: Paweł Polewicz --- bittensor/core/async_subtensor.py | 782 +++++++++++++----- bittensor/core/extrinsics/async_root.py | 92 +-- bittensor/core/extrinsics/async_weights.py | 257 ++++++ bittensor/core/extrinsics/prometheus.py | 0 bittensor/core/extrinsics/set_weights.py | 9 +- bittensor/core/settings.py | 7 + .../unit_tests/extrinsics/test_set_weights.py | 4 +- 7 files changed, 880 insertions(+), 271 deletions(-) create mode 100644 bittensor/core/extrinsics/async_weights.py create mode 100644 bittensor/core/extrinsics/prometheus.py diff --git a/bittensor/core/async_subtensor.py b/bittensor/core/async_subtensor.py index 
aa2b65fb30..508ae02439 100644 --- a/bittensor/core/async_subtensor.py +++ b/bittensor/core/async_subtensor.py @@ -7,7 +7,7 @@ import typer from bittensor_wallet import Wallet from bittensor_wallet.utils import SS58_FORMAT -from rich.prompt import Confirm +from numpy.typing import NDArray from scalecodec import GenericCall from scalecodec.base import RuntimeConfiguration from scalecodec.type_registry import load_type_registry_preset @@ -28,6 +28,10 @@ root_register_extrinsic, ) from bittensor.core.extrinsics.async_transfer import transfer_extrinsic +from bittensor.core.extrinsics.async_weights import ( + commit_weights_extrinsic, + set_weights_extrinsic, +) from bittensor.core.settings import ( TYPE_REGISTRY, DEFAULTS, @@ -35,7 +39,9 @@ DELEGATES_DETAILS_URL, DEFAULT_NETWORK, ) +from bittensor.core.settings import version_as_int from bittensor.utils import ( + torch, ss58_to_vec_u8, format_error_message, decode_hex_identity_dict, @@ -48,6 +54,7 @@ from bittensor.utils.balance import Balance from bittensor.utils.btlogging import logging from bittensor.utils.delegates_details import DelegatesDetails +from bittensor.utils.weight_utils import generate_weight_hash class ParamWithTypes(TypedDict): @@ -152,14 +159,105 @@ async def encode_params( return param_data.to_hex() - async def get_all_subnet_netuids( + async def get_current_block(self) -> int: + """ + Returns the current block number on the Bittensor blockchain. This function provides the latest block number, indicating the most recent state of the blockchain. + + Returns: + int: The current chain block number. + + Knowing the current block number is essential for querying real-time data and performing time-sensitive operations on the blockchain. It serves as a reference point for network activities and data synchronization. 
+ """ + return await self.substrate.get_block_number() + + async def get_block_hash(self, block_id: Optional[int] = None): + """ + Retrieves the hash of a specific block on the Bittensor blockchain. The block hash is a unique identifier representing the cryptographic hash of the block's content, ensuring its integrity and immutability. + + Args: + block_id (int): The block number for which the hash is to be retrieved. + + Returns: + str: The cryptographic hash of the specified block. + + The block hash is a fundamental aspect of blockchain technology, providing a secure reference to each block's data. It is crucial for verifying transactions, ensuring data consistency, and maintaining the trustworthiness of the blockchain. + """ + if block_id: + return await self.substrate.get_block_hash(block_id) + else: + return await self.substrate.get_chain_head() + + async def is_hotkey_registered_any( + self, hotkey_ss58: str, block_hash: Optional[str] = None + ) -> bool: + """ + Checks if a neuron's hotkey is registered on any subnet within the Bittensor network. + + Args: + hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. + block_hash (Optional[str]): The blockchain block_hash representation of block id. + + Returns: + bool: ``True`` if the hotkey is registered on any subnet, False otherwise. + + This function is essential for determining the network-wide presence and participation of a neuron. + """ + return len(await self.get_netuids_for_hotkey(hotkey_ss58, block_hash)) > 0 + + async def get_subnet_burn_cost( self, block_hash: Optional[str] = None - ) -> list[int]: + ) -> Optional[str]: + """ + Retrieves the burn cost for registering a new subnet within the Bittensor network. This cost represents the amount of Tao that needs to be locked or burned to establish a new subnet. + + Args: + block_hash (Optional[int]): The blockchain block_hash of the block id. + + Returns: + int: The burn cost for subnet registration. 
+ + The subnet burn cost is an important economic parameter, reflecting the network's mechanisms for controlling the proliferation of subnets and ensuring their commitment to the network's long-term viability. + """ + lock_cost = await self.query_runtime_api( + runtime_api="SubnetRegistrationRuntimeApi", + method="get_network_registration_cost", + params=[], + block_hash=block_hash, + ) + + return lock_cost + + async def get_total_subnets( + self, block_hash: Optional[str] = None + ) -> Optional[int]: + """ + Retrieves the total number of subnets within the Bittensor network as of a specific blockchain block. + + Args: + block_hash (Optional[str]): The blockchain block_hash representation of block id. + + Returns: + Optional[str]: The total number of subnets in the network. + + Understanding the total number of subnets is essential for assessing the network's growth and the extent of its decentralized infrastructure. + """ + result = await self.substrate.query( + module="SubtensorModule", + storage_function="TotalNetworks", + params=[], + block_hash=block_hash + ) + return result + + async def get_subnets(self, block_hash: Optional[str] = None) -> list[int]: """ Retrieves the list of all subnet unique identifiers (netuids) currently present in the Bittensor network. - :param block_hash: The hash of the block to retrieve the subnet unique identifiers from. - :return: A list of subnet netuids. + Args: + block_hash (Optional[str]): The hash of the block to retrieve the subnet unique identifiers from. + + Returns: + A list of subnet netuids. This function provides a comprehensive view of the subnets within the Bittensor network, offering insights into its diversity and scale. @@ -180,20 +278,20 @@ async def is_hotkey_delegate( self, hotkey_ss58: str, block_hash: Optional[str] = None, - reuse_block: Optional[bool] = False, + reuse_block: bool = False, ) -> bool: """ - Determines whether a given hotkey (public key) is a delegate on the Bittensor network. 
This function - checks if the neuron associated with the hotkey is part of the network's delegation system. + Determines whether a given hotkey (public key) is a delegate on the Bittensor network. This function checks if the neuron associated with the hotkey is part of the network's delegation system. - :param hotkey_ss58: The SS58 address of the neuron's hotkey. - :param block_hash: The hash of the blockchain block number for the query. - :param reuse_block: Whether to reuse the last-used block hash. + Args: + hotkey_ss58 (str): The SS58 address of the neuron's hotkey. + block_hash (Optional[str]): The hash of the blockchain block number for the query. + reuse_block (Optional[bool]): Whether to reuse the last-used block hash. - :return: `True` if the hotkey is a delegate, `False` otherwise. + Returns: + `True` if the hotkey is a delegate, `False` otherwise. - Being a delegate is a significant status within the Bittensor network, indicating a neuron's - involvement in consensus and governance processes. + Being a delegate is a significant status within the Bittensor network, indicating a neuron's involvement in consensus and governance processes. """ delegates = await self.get_delegates( block_hash=block_hash, reuse_block=reuse_block @@ -201,21 +299,24 @@ async def is_hotkey_delegate( return hotkey_ss58 in [info.hotkey_ss58 for info in delegates] async def get_delegates( - self, block_hash: Optional[str] = None, reuse_block: Optional[bool] = False + self, block_hash: Optional[str] = None, reuse_block: bool = False ) -> list[DelegateInfo]: """ Fetches all delegates on the chain - :param block_hash: hash of the blockchain block number for the query. - :param reuse_block: whether to reuse the last-used block hash. + Args: + block_hash (Optional[str]): hash of the blockchain block number for the query. + reuse_block (Optional[bool]): whether to reuse the last-used block hash. - :return: List of DelegateInfo objects, or an empty list if there are no delegates. 
+ Returns: + List of DelegateInfo objects, or an empty list if there are no delegates. """ hex_bytes_result = await self.query_runtime_api( runtime_api="DelegateInfoRuntimeApi", method="get_delegates", params=[], block_hash=block_hash, + reuse_block=reuse_block, ) if hex_bytes_result is not None: try: @@ -234,17 +335,17 @@ async def get_stake_info_for_coldkey( reuse_block: bool = False, ) -> list[StakeInfo]: """ - Retrieves stake information associated with a specific coldkey. This function provides details - about the stakes held by an account, including the staked amounts and associated delegates. + Retrieves stake information associated with a specific coldkey. This function provides details about the stakes held by an account, including the staked amounts and associated delegates. - :param coldkey_ss58: The ``SS58`` address of the account's coldkey. - :param block_hash: The hash of the blockchain block number for the query. - :param reuse_block: Whether to reuse the last-used block hash. + Args: + coldkey_ss58 (str): The ``SS58`` address of the account's coldkey. + block_hash (Optional[str]): The hash of the blockchain block number for the query. + reuse_block (bool): Whether to reuse the last-used block hash. - :return: A list of StakeInfo objects detailing the stake allocations for the account. + Returns: + A list of StakeInfo objects detailing the stake allocations for the account. - Stake information is vital for account holders to assess their investment and participation - in the network's delegation and consensus processes. + Stake information is vital for account holders to assess their investment and participation in the network's delegation and consensus processes. 
""" encoded_coldkey = ss58_to_vec_u8(coldkey_ss58) @@ -267,14 +368,18 @@ async def get_stake_info_for_coldkey( return StakeInfo.list_from_vec_u8(bytes_result) async def get_stake_for_coldkey_and_hotkey( - self, hotkey_ss58: str, coldkey_ss58: str, block_hash: Optional[str] + self, hotkey_ss58: str, coldkey_ss58: str, block_hash: Optional[str] = None ) -> Balance: """ Retrieves stake information associated with a specific coldkey and hotkey. - :param hotkey_ss58: the hotkey SS58 address to query - :param coldkey_ss58: the coldkey SS58 address to query - :param block_hash: the hash of the blockchain block number for the query. - :return: Stake Balance for the given coldkey and hotkey + + Args: + hotkey_ss58 (str): the hotkey SS58 address to query + coldkey_ss58 (str): the coldkey SS58 address to query + block_hash (Optional[str]): the hash of the blockchain block number for the query. + + Returns: + Stake Balance for the given coldkey and hotkey """ _result = await self.substrate.query( module="SubtensorModule", @@ -288,25 +393,24 @@ async def query_runtime_api( self, runtime_api: str, method: str, - params: Optional[Union[list[list[int]], dict[str, int]]], + params: Optional[Union[list[list[int]], dict[str, int], list[int]]], block_hash: Optional[str] = None, - reuse_block: Optional[bool] = False, + reuse_block: bool = False, ) -> Optional[str]: """ - Queries the runtime API of the Bittensor blockchain, providing a way to interact with the underlying - runtime and retrieve data encoded in Scale Bytes format. This function is essential for advanced users - who need to interact with specific runtime methods and decode complex data types. + Queries the runtime API of the Bittensor blockchain, providing a way to interact with the underlying runtime and retrieve data encoded in Scale Bytes format. This function is essential for advanced users who need to interact with specific runtime methods and decode complex data types. 
- :param runtime_api: The name of the runtime API to query. - :param method: The specific method within the runtime API to call. - :param params: The parameters to pass to the method call. - :param block_hash: The hash of the blockchain block number at which to perform the query. - :param reuse_block: Whether to reuse the last-used block hash. + Args: + runtime_api (str): The name of the runtime API to query. + method (str): The specific method within the runtime API to call. + params (Optional[Union[list[list[int]], dict[str, int]]]): The parameters to pass to the method call. + block_hash (Optional[str]): The hash of the blockchain block number at which to perform the query. + reuse_block (bool): Whether to reuse the last-used block hash. - :return: The Scale Bytes encoded result from the runtime API call, or ``None`` if the call fails. + Returns: + The Scale Bytes encoded result from the runtime API call, or ``None`` if the call fails. - This function enables access to the deeper layers of the Bittensor blockchain, allowing for detailed - and specific interactions with the network's runtime environment. + This function enables access to the deeper layers of the Bittensor blockchain, allowing for detailed and specific interactions with the network's runtime environment. 
""" call_definition = TYPE_REGISTRY["runtime_api"][runtime_api]["methods"][method] @@ -322,6 +426,7 @@ async def query_runtime_api( json_result = await self.substrate.rpc_request( method="state_call", params=[api_method, data, block_hash] if block_hash else [api_method, data], + reuse_block_hash=reuse_block, ) if json_result is None: @@ -345,14 +450,16 @@ async def get_balance( self, *addresses: str, block_hash: Optional[str] = None, - reuse_block: bool = False, ) -> dict[str, Balance]: """ Retrieves the balance for given coldkey(s) - :param addresses: coldkey addresses(s) - :param block_hash: the block hash, optional - :param reuse_block: Whether to reuse the last-used block hash when retrieving info. - :return: dict of {address: Balance objects} + + Args: + addresses (str): coldkey addresses(s). + block_hash (Optional[str]): the block hash, optional. + + Returns: + Dict of {address: Balance objects}. """ calls = [ ( @@ -369,20 +476,70 @@ async def get_balance( results.update({item[0].params[0]: Balance(value["data"]["free"])}) return results + async def get_transfer_fee( + self, wallet: "Wallet", dest: str, value: Union["Balance", float, int] + ) -> "Balance": + """ + Calculates the transaction fee for transferring tokens from a wallet to a specified destination address. This function simulates the transfer to estimate the associated cost, taking into account the current network conditions and transaction complexity. + + Args: + wallet (bittensor_wallet.Wallet): The wallet from which the transfer is initiated. + dest (str): The ``SS58`` address of the destination account. + value (Union[bittensor.utils.balance.Balance, float, int]): The amount of tokens to be transferred, specified as a Balance object, or in Tao (float) or Rao (int) units. + + Returns: + bittensor.utils.balance.Balance: The estimated transaction fee for the transfer, represented as a Balance object. 
+ + Estimating the transfer fee is essential for planning and executing token transactions, ensuring that the wallet has sufficient funds to cover both the transfer amount and the associated costs. This function provides a crucial tool for managing financial operations within the Bittensor network. + """ + if isinstance(value, float): + value = Balance.from_tao(value) + elif isinstance(value, int): + value = Balance.from_rao(value) + + if isinstance(value, Balance): + call = await self.substrate.compose_call( + call_module="Balances", + call_function="transfer_allow_death", + call_params={"dest": dest, "value": value.rao}, + ) + + try: + payment_info = await self.substrate.get_payment_info( + call=call, keypair=wallet.coldkeypub + ) + except Exception as e: + logging.error( + f":cross_mark: Failed to get payment info: {e}" + ) + payment_info = {"partialFee": int(2e7)} # assume 0.02 Tao + + fee = Balance.from_rao(payment_info["partialFee"]) + return fee + else: + fee = Balance.from_rao(int(2e7)) + logging.error( + "To calculate the transaction fee, the value must be Balance, float, or int. Received type: %s. Fee " + "is %s", + type(value), + 2e7, + ) + return fee + async def get_total_stake_for_coldkey( self, *ss58_addresses, block_hash: Optional[str] = None, - reuse_block: bool = False, ) -> dict[str, Balance]: """ Returns the total stake held on a coldkey. - :param ss58_addresses: The SS58 address(es) of the coldkey(s) - :param block_hash: The hash of the block number to retrieve the stake from. - :param reuse_block: Whether to reuse the last-used block hash when retrieving info. + Args: + ss58_addresses (tuple[str]): The SS58 address(es) of the coldkey(s) + block_hash (str): The hash of the block number to retrieve the stake from. - :return: {address: Balance objects} + Returns: + Dict in view {address: Balance objects}. """ calls = [ ( @@ -410,11 +567,13 @@ async def get_total_stake_for_hotkey( """ Returns the total stake held on a hotkey. 
- :param ss58_addresses: The SS58 address(es) of the hotkey(s) - :param block_hash: The hash of the block number to retrieve the stake from. - :param reuse_block: Whether to reuse the last-used block hash when retrieving info. + Args: + ss58_addresses (tuple[str]): The SS58 address(es) of the hotkey(s) + block_hash (str): The hash of the block number to retrieve the stake from. + reuse_block (bool): Whether to reuse the last-used block hash when retrieving info. - :return: {address: Balance objects} + Returns: + Dict {address: Balance objects}. """ results = await self.substrate.query_multiple( params=[s for s in ss58_addresses], @@ -432,15 +591,15 @@ async def get_netuids_for_hotkey( reuse_block: bool = False, ) -> list[int]: """ - Retrieves a list of subnet UIDs (netuids) for which a given hotkey is a member. This function - identifies the specific subnets within the Bittensor network where the neuron associated with - the hotkey is active. + Retrieves a list of subnet UIDs (netuids) for which a given hotkey is a member. This function identifies the specific subnets within the Bittensor network where the neuron associated with the hotkey is active. - :param hotkey_ss58: The ``SS58`` address of the neuron's hotkey. - :param block_hash: The hash of the blockchain block number at which to perform the query. - :param reuse_block: Whether to reuse the last-used block hash when retrieving info. + Args: + hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. + block_hash (Optional[str]): The hash of the blockchain block number at which to perform the query. + reuse_block (Optional[bool]): Whether to reuse the last-used block hash when retrieving info. - :return: A list of netuids where the neuron is a member. + Returns: + A list of netuids where the neuron is a member. 
""" result = await self.substrate.query_map( @@ -462,11 +621,13 @@ async def subnet_exists( """ Checks if a subnet with the specified unique identifier (netuid) exists within the Bittensor network. - :param netuid: The unique identifier of the subnet. - :param block_hash: The hash of the blockchain block number at which to check the subnet existence. - :param reuse_block: Whether to reuse the last-used block hash. + Args: + netuid (int): The unique identifier of the subnet. + block_hash (Optional[str]): The hash of the blockchain block number at which to check the subnet existence. + reuse_block (bool): Whether to reuse the last-used block hash. - :return: `True` if the subnet exists, `False` otherwise. + Returns: + `True` if the subnet exists, `False` otherwise. This function is critical for verifying the presence of specific subnets in the network, enabling a deeper understanding of the network's structure and composition. @@ -490,12 +651,14 @@ async def get_hyperparameter( """ Retrieves a specified hyperparameter for a specific subnet. - :param param_name: The name of the hyperparameter to retrieve. - :param netuid: The unique identifier of the subnet. - :param block_hash: The hash of blockchain block number for the query. - :param reuse_block: Whether to reuse the last-used block hash. + Args: + param_name (str): The name of the hyperparameter to retrieve. + netuid (int): The unique identifier of the subnet. + block_hash (Optional[str]): The hash of blockchain block number for the query. + reuse_block (bool): Whether to reuse the last-used block hash. 
- :return: The value of the specified hyperparameter if the subnet exists, or None + Returns: + The value of the specified hyperparameter if the subnet exists, or None """ if not await self.subnet_exists(netuid, block_hash): print("subnet does not exist") @@ -525,13 +688,15 @@ async def filter_netuids_by_registered_hotkeys( """ Filters a given list of all netuids for certain specified netuids and hotkeys - :param all_netuids: A list of netuids to filter. - :param filter_for_netuids: A subset of all_netuids to filter from the main list - :param all_hotkeys: Hotkeys to filter from the main list - :param block_hash: hash of the blockchain block number at which to perform the query. - :param reuse_block: whether to reuse the last-used blockchain hash when retrieving info. + Argumens: + all_netuids (Iterable[int]): A list of netuids to filter. + filter_for_netuids (Iterable[int]): A subset of all_netuids to filter from the main list + all_hotkeys (Iterable[Wallet]): Hotkeys to filter from the main list + block_hash (str): hash of the blockchain block number at which to perform the query. + reuse_block (bool): whether to reuse the last-used blockchain hash when retrieving info. - :return: the filtered list of netuids. + Returns: + The filtered list of netuids. """ netuids_with_registered_hotkeys = [ item @@ -571,17 +736,18 @@ async def get_existential_deposit( self, block_hash: Optional[str] = None, reuse_block: bool = False ) -> Balance: """ - Retrieves the existential deposit amount for the Bittensor blockchain. The existential deposit - is the minimum amount of TAO required for an account to exist on the blockchain. Accounts with - balances below this threshold can be reaped to conserve network resources. + Retrieves the existential deposit amount for the Bittensor blockchain. + The existential deposit is the minimum amount of TAO required for an account to exist on the blockchain. 
+ Accounts with balances below this threshold can be reaped to conserve network resources. - :param block_hash: Block hash at which to query the deposit amount. If `None`, the current block is used. - :param reuse_block: Whether to reuse the last-used blockchain block hash. + Args: + block_hash (str): Block hash at which to query the deposit amount. If `None`, the current block is used. + reuse_block (bool): Whether to reuse the last-used blockchain block hash. - :return: The existential deposit amount + Returns: + The existential deposit amount. - The existential deposit is a fundamental economic parameter in the Bittensor network, ensuring - efficient use of storage and preventing the proliferation of dust accounts. + The existential deposit is a fundamental economic parameter in the Bittensor network, ensuring efficient use of storage and preventing the proliferation of dust accounts. """ result = await self.substrate.get_constant( module_name="Balances", @@ -599,17 +765,17 @@ async def neurons( self, netuid: int, block_hash: Optional[str] = None ) -> list[NeuronInfo]: """ - Retrieves a list of all neurons within a specified subnet of the Bittensor network. This function - provides a snapshot of the subnet's neuron population, including each neuron's attributes and network - interactions. + Retrieves a list of all neurons within a specified subnet of the Bittensor network. + This function provides a snapshot of the subnet's neuron population, including each neuron's attributes and network interactions. - :param netuid: The unique identifier of the subnet. - :param block_hash: The hash of the blockchain block number for the query. + Args: + netuid (int): The unique identifier of the subnet. + block_hash (str): The hash of the blockchain block number for the query. - :return: A list of NeuronInfo objects detailing each neuron's characteristics in the subnet. + Returns: + A list of NeuronInfo objects detailing each neuron's characteristics in the subnet. 
- Understanding the distribution and status of neurons within a subnet is key to comprehending the - network's decentralized structure and the dynamics of its consensus and governance processes. + Understanding the distribution and status of neurons within a subnet is key to comprehending the network's decentralized structure and the dynamics of its consensus and governance processes. """ neurons_lite, weights, bonds = await asyncio.gather( self.neurons_lite(netuid=netuid, block_hash=block_hash), @@ -634,17 +800,17 @@ async def neurons_lite( ) -> list[NeuronInfoLite]: """ Retrieves a list of neurons in a 'lite' format from a specific subnet of the Bittensor network. - This function provides a streamlined view of the neurons, focusing on key attributes such as stake - and network participation. + This function provides a streamlined view of the neurons, focusing on key attributes such as stake and network participation. - :param netuid: The unique identifier of the subnet. - :param block_hash: The hash of the blockchain block number for the query. - :param reuse_block: Whether to reuse the last-used blockchain block hash. + Args: + netuid (int): The unique identifier of the subnet. + block_hash (str): The hash of the blockchain block number for the query. + reuse_block (bool): Whether to reuse the last-used blockchain block hash. - :return: A list of simplified neuron information for the subnet. + Returns: + A list of simplified neuron information for the subnet. - This function offers a quick overview of the neuron population within a subnet, facilitating - efficient analysis of the network's decentralized structure and neuron dynamics. + This function offers a quick overview of the neuron population within a subnet, facilitating efficient analysis of the network's decentralized structure and neuron dynamics. 
""" hex_bytes_result = await self.query_runtime_api( runtime_api="NeuronInfoRuntimeApi", @@ -670,19 +836,17 @@ async def neuron_for_uid( self, uid: Optional[int], netuid: int, block_hash: Optional[str] = None ) -> NeuronInfo: """ - Retrieves detailed information about a specific neuron identified by its unique identifier (UID) - within a specified subnet (netuid) of the Bittensor network. This function provides a comprehensive - view of a neuron's attributes, including its stake, rank, and operational status. - + Retrieves detailed information about a specific neuron identified by its unique identifier (UID) within a specified subnet (netuid) of the Bittensor network. This function provides a comprehensive view of a neuron's attributes, including its stake, rank, and operational status. - :param uid: The unique identifier of the neuron. - :param netuid: The unique identifier of the subnet. - :param block_hash: The hash of the blockchain block number for the query. + Args: + uid (int): The unique identifier of the neuron. + netuid (int): The unique identifier of the subnet. + block_hash (str): The hash of the blockchain block number for the query. - :return: Detailed information about the neuron if found, a null neuron otherwise + Returns: + Detailed information about the neuron if found, a null neuron otherwise - This function is crucial for analyzing individual neurons' contributions and status within a specific - subnet, offering insights into their roles in the network's consensus and validation mechanisms. + This function is crucial for analyzing individual neurons' contributions and status within a specific subnet, offering insights into their roles in the network's consensus and validation mechanisms. """ if uid is None: return NeuronInfo.get_null_neuron() @@ -706,17 +870,17 @@ async def get_delegated( reuse_block: bool = False, ) -> list[tuple[DelegateInfo, Balance]]: """ - Retrieves a list of delegates and their associated stakes for a given coldkey. 
This function - identifies the delegates that a specific account has staked tokens on. + Retrieves a list of delegates and their associated stakes for a given coldkey. This function identifies the delegates that a specific account has staked tokens on. - :param coldkey_ss58: The `SS58` address of the account's coldkey. - :param block_hash: The hash of the blockchain block number for the query. - :param reuse_block: Whether to reuse the last-used blockchain block hash. + Args: + coldkey_ss58 (str): The `SS58` address of the account's coldkey. + block_hash (Optional[str]): The hash of the blockchain block number for the query. + reuse_block (bool): Whether to reuse the last-used blockchain block hash. - :return: A list of tuples, each containing a delegate's information and staked amount. + Returns: + A list of tuples, each containing a delegate's information and staked amount. - This function is important for account holders to understand their stake allocations and their - involvement in the network's delegation and consensus mechanisms. + This function is important for account holders to understand their stake allocations and their involvement in the network's delegation and consensus mechanisms. """ block_hash = ( @@ -742,22 +906,20 @@ async def query_identity( reuse_block: bool = False, ) -> dict: """ - Queries the identity of a neuron on the Bittensor blockchain using the given key. This function retrieves - detailed identity information about a specific neuron, which is a crucial aspect of the network's decentralized - identity and governance system. + Queries the identity of a neuron on the Bittensor blockchain using the given key. This function retrieves detailed identity information about a specific neuron, which is a crucial aspect of the network's decentralized identity and governance system. - Note: - See the `Bittensor CLI documentation `_ for supported identity - parameters. 
+ Args: + key (str): The key used to query the neuron's identity, typically the neuron's SS58 address. + block_hash (str): The hash of the blockchain block number at which to perform the query. + reuse_block (bool): Whether to reuse the last-used blockchain block hash. - :param key: The key used to query the neuron's identity, typically the neuron's SS58 address. - :param block_hash: The hash of the blockchain block number at which to perform the query. - :param reuse_block: Whether to reuse the last-used blockchain block hash. + Returns: + An object containing the identity information of the neuron if found, ``None`` otherwise. - :return: An object containing the identity information of the neuron if found, ``None`` otherwise. + The identity information can include various attributes such as the neuron's stake, rank, and other network-specific details, providing insights into the neuron's role and status within the Bittensor network. - The identity information can include various attributes such as the neuron's stake, rank, and other - network-specific details, providing insights into the neuron's role and status within the Bittensor network. + Note: + See the `Bittensor CLI documentation `_ for supported identity parameters. """ def decode_hex_identity_dict_(info_dictionary): @@ -801,17 +963,16 @@ async def weights( ) -> list[tuple[int, list[tuple[int, int]]]]: """ Retrieves the weight distribution set by neurons within a specific subnet of the Bittensor network. - This function maps each neuron's UID to the weights it assigns to other neurons, reflecting the - network's trust and value assignment mechanisms. + This function maps each neuron's UID to the weights it assigns to other neurons, reflecting the network's trust and value assignment mechanisms. Args: - :param netuid: The network UID of the subnet to query. - :param block_hash: The hash of the blockchain block for the query. + netuid (int): The network UID of the subnet to query. 
+ block_hash (str): The hash of the blockchain block for the query. - :return: A list of tuples mapping each neuron's UID to its assigned weights. + Returns: + A list of tuples mapping each neuron's UID to its assigned weights. - The weight distribution is a key factor in the network's consensus algorithm and the ranking of neurons, - influencing their influence and reward allocation within the subnet. + The weight distribution is a key factor in the network's consensus algorithm and the ranking of neurons, influencing their influence and reward allocation within the subnet. """ # TODO look into seeing if we can speed this up with storage query w_map_encoded = await self.substrate.query_map( @@ -829,18 +990,16 @@ async def bonds( ) -> list[tuple[int, list[tuple[int, int]]]]: """ Retrieves the bond distribution set by neurons within a specific subnet of the Bittensor network. - Bonds represent the investments or commitments made by neurons in one another, indicating a level - of trust and perceived value. This bonding mechanism is integral to the network's market-based approach - to measuring and rewarding machine intelligence. + Bonds represent the investments or commitments made by neurons in one another, indicating a level of trust and perceived value. This bonding mechanism is integral to the network's market-based approach to measuring and rewarding machine intelligence. - :param netuid: The network UID of the subnet to query. - :param block_hash: The hash of the blockchain block number for the query. + Args: + netuid (int): The network UID of the subnet to query. + block_hash (Optional[str]): The hash of the blockchain block number for the query. - :return: list of tuples mapping each neuron's UID to its bonds with other neurons. + Returns: + List of tuples mapping each neuron's UID to its bonds with other neurons. - Understanding bond distributions is crucial for analyzing the trust dynamics and market behavior - within the subnet. 
It reflects how neurons recognize and invest in each other's intelligence and - contributions, supporting diverse and niche systems within the Bittensor ecosystem. + Understanding bond distributions is crucial for analyzing the trust dynamics and market behavior within the subnet. It reflects how neurons recognize and invest in each other's intelligence and contributions, supporting diverse and niche systems within the Bittensor ecosystem. """ b_map_encoded = await self.substrate.query_map( module="SubtensorModule", @@ -861,11 +1020,13 @@ async def does_hotkey_exist( """ Returns true if the hotkey is known by the chain and there are accounts. - :param hotkey_ss58: The SS58 address of the hotkey. - :param block_hash: The hash of the block number to check the hotkey against. - :param reuse_block: Whether to reuse the last-used blockchain hash. + Args: + hotkey_ss58 (str): The SS58 address of the hotkey. + block_hash (Optional[str]): The hash of the block number to check the hotkey against. + reuse_block (bool): Whether to reuse the last-used blockchain hash. - :return: `True` if the hotkey is known by the chain and there are accounts, `False` otherwise. + Returns: + `True` if the hotkey is known by the chain and there are accounts, `False` otherwise. """ _result = await self.substrate.query( module="SubtensorModule", @@ -885,6 +1046,17 @@ async def does_hotkey_exist( async def get_hotkey_owner( self, hotkey_ss58: str, block_hash: str ) -> Optional[str]: + """ + Retrieves the owner of the given hotkey at a specific block hash. + This function queries the blockchain for the owner of the provided hotkey. If the hotkey does not exist at the specified block hash, it returns None. + + Args: + hotkey_ss58 (str): The SS58 address of the hotkey. + block_hash (str): The hash of the block at which to check the hotkey ownership. + + Returns: + Optional[str]: The SS58 address of the owner if the hotkey exists, or None if it doesn't. 
+ """ hk_owner_query = await self.substrate.query( module="SubtensorModule", storage_function="Owner", @@ -901,20 +1073,22 @@ async def get_hotkey_owner( async def sign_and_send_extrinsic( self, - call: GenericCall, - wallet: Wallet, + call: "GenericCall", + wallet: "Wallet", wait_for_inclusion: bool = True, wait_for_finalization: bool = False, ) -> tuple[bool, str]: """ Helper method to sign and submit an extrinsic call to chain. - :param call: a prepared Call object - :param wallet: the wallet whose coldkey will be used to sign the extrinsic - :param wait_for_inclusion: whether to wait until the extrinsic call is included on the chain - :param wait_for_finalization: whether to wait until the extrinsic call is finalized on the chain + Args: + call (scalecodec.types.GenericCall): a prepared Call object + wallet (bittensor_wallet.Wallet): the wallet whose coldkey will be used to sign the extrinsic + wait_for_inclusion (bool): whether to wait until the extrinsic call is included on the chain + wait_for_finalization (bool): whether to wait until the extrinsic call is finalized on the chain - :return: (success, error message) + Returns: + (success, error message) """ extrinsic = await self.substrate.create_signed_extrinsic( call=call, keypair=wallet.coldkey @@ -938,16 +1112,16 @@ async def sign_and_send_extrinsic( except SubstrateRequestException as e: return False, format_error_message(e, substrate=self.substrate) - async def get_children(self, hotkey, netuid) -> tuple[bool, list, str]: + async def get_children(self, hotkey: str, netuid: int) -> tuple[bool, list, str]: """ - This method retrieves the children of a given hotkey and netuid. It queries the SubtensorModule's ChildKeys - storage function to get the children and formats them before returning as a tuple. + This method retrieves the children of a given hotkey and netuid. It queries the SubtensorModule's ChildKeys storage function to get the children and formats them before returning as a tuple. 
- :param hotkey: The hotkey value. - :param netuid: The netuid value. + Args: + hotkey (str): The hotkey value. + netuid (int): The netuid value. - :return: A tuple containing a boolean indicating success or failure, a list of formatted children, and an error - message (if applicable) + Returns: + A tuple containing a boolean indicating success or failure, a list of formatted children, and an error message (if applicable) """ try: children = await self.substrate.query( @@ -972,16 +1146,16 @@ async def get_subnet_hyperparameters( self, netuid: int, block_hash: Optional[str] = None ) -> Optional[Union[list, SubnetHyperparameters]]: """ - Retrieves the hyperparameters for a specific subnet within the Bittensor network. These hyperparameters - define the operational settings and rules governing the subnet's behavior. + Retrieves the hyperparameters for a specific subnet within the Bittensor network. These hyperparameters define the operational settings and rules governing the subnet's behavior. - :param netuid: The network UID of the subnet to query. - :param block_hash: The hash of the blockchain block number for the query. + Args: + netuid (int): The network UID of the subnet to query. + block_hash (Optional[str]): The hash of the blockchain block number for the query. - :return: The subnet's hyperparameters, or `None` if not available. + Returns: + The subnet's hyperparameters, or `None` if not available. - Understanding the hyperparameters is crucial for comprehending how subnets are configured and - managed, and how they interact with the network's consensus and incentive mechanisms. + Understanding the hyperparameters is crucial for comprehending how subnets are configured and managed, and how they interact with the network's consensus and incentive mechanisms. 
""" hex_bytes_result = await self.query_runtime_api( runtime_api="SubnetInfoRuntimeApi", @@ -1007,17 +1181,17 @@ async def get_vote_data( reuse_block: bool = False, ) -> Optional["ProposalVoteData"]: """ - Retrieves the voting data for a specific proposal on the Bittensor blockchain. This data includes - information about how senate members have voted on the proposal. + Retrieves the voting data for a specific proposal on the Bittensor blockchain. This data includes information about how senate members have voted on the proposal. - :param proposal_hash: The hash of the proposal for which voting data is requested. - :param block_hash: The hash of the blockchain block number to query the voting data. - :param reuse_block: Whether to reuse the last-used blockchain block hash. + Args: + proposal_hash (str): The hash of the proposal for which voting data is requested. + block_hash (Optional[str]): The hash of the blockchain block number to query the voting data. + reuse_block (bool): Whether to reuse the last-used blockchain block hash. - :return: An object containing the proposal's voting data, or `None` if not found. + Returns: + An object containing the proposal's voting data, or `None` if not found. - This function is important for tracking and understanding the decision-making processes within - the Bittensor network, particularly how proposals are received and acted upon by the governing body. + This function is important for tracking and understanding the decision-making processes within the Bittensor network, particularly how proposals are received and acted upon by the governing body. """ vote_data = await self.substrate.query( module="Triumvirate", @@ -1035,14 +1209,13 @@ async def get_delegate_identities( self, block_hash: Optional[str] = None ) -> dict[str, DelegatesDetails]: """ - Fetches delegates identities from the chain and GitHub. Preference is given to chain data, and missing info - is filled-in by the info from GitHub. 
At some point, we want to totally move away from fetching this info - from GitHub, but chain data is still limited in that regard. + Fetches delegates identities from the chain and GitHub. Preference is given to chain data, and missing info is filled-in by the info from GitHub. At some point, we want to totally move away from fetching this info from GitHub, but chain data is still limited in that regard. Args: - block_hash: the hash of the blockchain block for the query + block_hash (str): the hash of the blockchain block for the query - Returns: {ss58: DelegatesDetails, ...} + Returns: + Dict {ss58: DelegatesDetails, ...} """ timeout = aiohttp.ClientTimeout(10.0) @@ -1105,17 +1278,52 @@ async def is_hotkey_registered(self, netuid: int, hotkey_ss58: str) -> bool: else: return False + async def get_uid_for_hotkey_on_subnet( + self, hotkey_ss58: str, netuid: int, block_hash: Optional[str] = None + ): + """ + Retrieves the unique identifier (UID) for a neuron's hotkey on a specific subnet. + + Args: + hotkey_ss58 (str): The ``SS58`` address of the neuron's hotkey. + netuid (int): The unique identifier of the subnet. + block_hash (Optional[str]): The blockchain block_hash representation of the block id. + + Returns: + Optional[int]: The UID of the neuron if it is registered on the subnet, ``None`` otherwise. + + The UID is a critical identifier within the network, linking the neuron's hotkey to its operational and governance activities on a particular subnet. + """ + return self.substrate.query( + module="SubtensorModule", + storage_function="Uids", + params=[netuid, hotkey_ss58], + block_hash=block_hash + ) + # extrinsics async def transfer( self, - wallet: Wallet, + wallet: "Wallet", destination: str, amount: float, transfer_all: bool, prompt: bool, - ): - """Transfer token of amount to destination.""" + ) -> bool: + """ + Transfer token of amount to destination. + + Args: + wallet (bittensor_wallet.Wallet): Source wallet for the transfer. 
+ destination (str): Destination address for the transfer. + amount (float): Amount of tokens to transfer. + transfer_all (bool): Flag to transfer all tokens. + prompt (bool): Flag to prompt user for confirmation before transferring. + + Returns: + `True` if the transferring was successful, otherwise `False`. + """ return await transfer_extrinsic( self, wallet, @@ -1125,17 +1333,37 @@ async def transfer( prompt=prompt, ) - async def register(self, wallet: Wallet, prompt: bool): - """Register neuron by recycling some TAO.""" + async def register( + self, + wallet: "Wallet", + netuid: int, + block_hash: Optional[str] = None, + wait_for_inclusion: bool = True, + wait_for_finalization: bool = True, + ) -> bool: + """ + Register neuron by recycling some TAO. + + Args: + wallet (bittensor_wallet.Wallet): Bittensor wallet instance. + netuid (int): Subnet uniq id. + block_hash (Optional[str]): The hash of the blockchain block for the query. + wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``. + wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``. + + Returns: + `True` if registration was successful, otherwise `False`. 
+ """ logging.info( f"Registering on netuid 0 on network: {self.network}" ) # Check current recycle amount logging.info("Fetching recycle amount & balance.") + block_hash = block_hash if block_hash else await self.get_block_hash() recycle_call, balance_ = await asyncio.gather( - self.get_hyperparameter(param_name="Burn", netuid=0, reuse_block=True), - self.get_balance(wallet.coldkeypub.ss58_address, reuse_block=True), + self.get_hyperparameter(param_name="Burn", netuid=netuid, reuse_block=True), + self.get_balance(wallet.coldkeypub.ss58_address, block_hash=block_hash), ) current_recycle = Balance.from_rao(int(recycle_call)) try: @@ -1150,25 +1378,16 @@ async def register(self, wallet: Wallet, prompt: bool): # Check balance is sufficient if balance < current_recycle: logging.error( - f"Insufficient balance {balance} to register neuron. Current recycle is {current_recycle} TAO" + f"Insufficient balance {balance} to register neuron. Current recycle is {current_recycle} TAO." ) return False - if prompt: - if not Confirm.ask( - f"Your balance is: [bold green]{balance}[/bold green]\n" - f"The cost to register by recycle is [bold red]{current_recycle}[/bold red]\n" - f"Do you want to continue?", - default=False, - ): - return False - return await root_register_extrinsic( - self, - wallet, - wait_for_inclusion=True, - wait_for_finalization=True, - prompt=prompt, + subtensor=self, + wallet=wallet, + netuid=netuid, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, ) async def pow_register( @@ -1199,13 +1418,80 @@ async def pow_register( ) async def set_weights( + self, + wallet: "Wallet", + netuid: int, + uids: Union[NDArray[np.int64], "torch.LongTensor", list], + weights: Union[NDArray[np.float32], "torch.FloatTensor", list], + version_key: int = version_as_int, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, + max_retries: int = 5, + ): + """ + Sets the inter-neuronal weights for the specified neuron. 
This process involves specifying the influence or trust a neuron places on other neurons in the network, which is a fundamental aspect of Bittensor's decentralized learning architecture. + + Args: + wallet (bittensor_wallet.Wallet): The wallet associated with the neuron setting the weights. + netuid (int): The unique identifier of the subnet. + uids (Union[NDArray[np.int64], torch.LongTensor, list]): The list of neuron UIDs that the weights are being set for. + weights (Union[NDArray[np.float32], torch.FloatTensor, list]): The corresponding weights to be set for each UID. + version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version.``. + wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``. + wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``. + max_retries (int): The number of maximum attempts to set weights. Default is ``5``. + + Returns: + tuple[bool, str]: ``True`` if the setting of weights is successful, False otherwise. And `msg`, a string value describing the success or potential error. + + This function is crucial in shaping the network's collective intelligence, where each neuron's learning and contribution are influenced by the weights it sets towards others【81†source】. + """ + uid = self.get_uid_for_hotkey_on_subnet(wallet.hotkey.ss58_address, netuid) + retries = 0 + success = False + message = "No attempt made. Perhaps it is too soon to set weights!" + while ( + self.blocks_since_last_update(netuid, uid) > self.weights_rate_limit(netuid) # type: ignore + and retries < max_retries + ): + try: + logging.info( + f"Setting weights for subnet #{netuid}. Attempt {retries + 1} of {max_retries}." 
+ ) + success, message = await set_weights_extrinsic( + subtensor=self, + wallet=wallet, + netuid=netuid, + uids=uids, + weights=weights, + version_key=version_key, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + except Exception as e: + logging.error(f"Error setting weights: {e}") + finally: + retries += 1 + + return success, message + + async def root_set_weights( self, wallet: "Wallet", netuids: list[int], weights: list[float], - prompt: bool, - ): - """Set weights for root network.""" + ) -> bool: + """ + Set weights for root network. + + Args: + wallet (bittensor_wallet.Wallet): bittensor wallet instance. + netuids (list[int]): The list of subnet uids. + weights (list[float]): The list of weights to be set. + + Returns: + `True` if the setting of weights is successful, `False` otherwise. + """ netuids_ = np.array(netuids, dtype=np.int64) weights_ = np.array(weights, dtype=np.float32) logging.info(f"Setting weights in network: {self.network}") @@ -1216,7 +1502,75 @@ async def set_weights( netuids=netuids_, weights=weights_, version_key=0, - prompt=prompt, wait_for_finalization=True, wait_for_inclusion=True, ) + + async def commit_weights( + self, + wallet: "Wallet", + netuid: int, + salt: list[int], + uids: Union[NDArray[np.int64], list], + weights: Union[NDArray[np.int64], list], + version_key: int = version_as_int, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, + max_retries: int = 5, + ) -> tuple[bool, str]: + """ + Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet. + This action serves as a commitment or snapshot of the neuron's current weight distribution. + + Args: + wallet (bittensor_wallet.Wallet): The wallet associated with the neuron committing the weights. + netuid (int): The unique identifier of the subnet. + salt (list[int]): list of randomly generated integers as salt to generated weighted hash. 
+ uids (np.ndarray): NumPy array of neuron UIDs for which weights are being committed. + weights (np.ndarray): NumPy array of weight values corresponding to each UID. + version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version.``. + wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``. + wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``. + max_retries (int): The number of maximum attempts to commit weights. Default is ``5``. + + Returns: + tuple[bool, str]: ``True`` if the weight commitment is successful, False otherwise. And `msg`, a string value describing the success or potential error. + + This function allows neurons to create a tamper-proof record of their weight distribution at a specific point in time, enhancing transparency and accountability within the Bittensor network. + """ + retries = 0 + success = False + message = "No attempt made. Perhaps it is too soon to commit weights!" 
+ + logging.info( + f"Committing weights with params: netuid={netuid}, uids={uids}, weights={weights}, version_key={version_key}" + ) + + # Generate the hash of the weights + commit_hash = generate_weight_hash( + address=wallet.hotkey.ss58_address, + netuid=netuid, + uids=list(uids), + values=list(weights), + salt=salt, + version_key=version_key, + ) + + while retries < max_retries: + try: + success, message = await commit_weights_extrinsic( + subtensor=self, + wallet=wallet, + netuid=netuid, + commit_hash=commit_hash, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + if success: + break + except Exception as e: + logging.error(f"Error committing weights: {e}") + finally: + retries += 1 + + return success, message diff --git a/bittensor/core/extrinsics/async_root.py b/bittensor/core/extrinsics/async_root.py index 9e73f98a30..dd44a55f2a 100644 --- a/bittensor/core/extrinsics/async_root.py +++ b/bittensor/core/extrinsics/async_root.py @@ -6,8 +6,6 @@ from bittensor_wallet import Wallet from bittensor_wallet.errors import KeyFileError from numpy.typing import NDArray -from rich.prompt import Confirm -from rich.table import Table, Column from substrateinterface.exceptions import SubstrateRequestException from bittensor.utils import u16_normalized_float, format_error_message @@ -22,6 +20,19 @@ async def get_limits(subtensor: "AsyncSubtensor") -> tuple[int, float]: + """ + Retrieves the minimum allowed weights and maximum weight limit for the given subnet. + + These values are fetched asynchronously using `asyncio.gather` to run both requests concurrently. + + Args: + subtensor (AsyncSubtensor): The AsyncSubtensor object used to interface with the network's substrate node. + + Returns: + tuple[int, float]: A tuple containing: + - `min_allowed_weights` (int): The minimum allowed weights. + - `max_weight_limit` (float): The maximum weight limit, normalized to a float value. + """ # Get weight restrictions. 
maw, mwl = await asyncio.gather( subtensor.get_hyperparameter("MinAllowedWeights", netuid=0), @@ -35,19 +46,21 @@ async def get_limits(subtensor: "AsyncSubtensor") -> tuple[int, float]: async def root_register_extrinsic( subtensor: "AsyncSubtensor", wallet: Wallet, + netuid: int, wait_for_inclusion: bool = True, wait_for_finalization: bool = True, - prompt: bool = False, ) -> bool: """Registers the wallet to root network. - :param subtensor: The AsyncSubtensor object - :param wallet: Bittensor wallet object. - :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout. - :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout. - :param prompt: If `True`, the call waits for confirmation from the user before proceeding. + Arguments: + subtensor (bittensor.core.async_subtensor.AsyncSubtensor): The AsyncSubtensor object + wallet (bittensor_wallet.Wallet): Bittensor wallet object. + netuid (int): Subnet uid. + wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout. + wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout. - :return: `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is `True`. + Returns: + `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is `True`. """ try: @@ -60,7 +73,7 @@ async def root_register_extrinsic( f"Checking if hotkey ({wallet.hotkey_str}) is registered on root." 
) is_registered = await subtensor.is_hotkey_registered( - netuid=0, hotkey_ss58=wallet.hotkey.ss58_address + netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address ) if is_registered: logging.error( @@ -82,7 +95,7 @@ async def root_register_extrinsic( ) if not success: - logging.error(f":cross_mark: Failed: {err_msg}") + logging.error(f":cross_mark: Failed error: {err_msg}") time.sleep(0.5) return False @@ -91,11 +104,11 @@ async def root_register_extrinsic( uid = await subtensor.substrate.query( module="SubtensorModule", storage_function="Uids", - params=[0, wallet.hotkey.ss58_address], + params=[netuid, wallet.hotkey.ss58_address], ) if uid is not None: logging.info( - f":white_heavy_check_mark: Registered with UID {uid}" + f":white_heavy_check_mark: Registered with UID {uid}." ) return True else: @@ -106,28 +119,26 @@ async def root_register_extrinsic( async def set_root_weights_extrinsic( subtensor: "AsyncSubtensor", - wallet: Wallet, + wallet: "Wallet", netuids: Union[NDArray[np.int64], list[int]], weights: Union[NDArray[np.float32], list[float]], version_key: int = 0, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, ) -> bool: """Sets the given weights and values on chain for wallet hotkey account. - :param subtensor: The AsyncSubtensor object - :param wallet: Bittensor wallet object. - :param netuids: The `netuid` of the subnet to set weights for. - :param weights: Weights to set. These must be `float` s and must correspond to the passed `netuid` s. - :param version_key: The version key of the validator. - :param wait_for_inclusion: If set, waits for the extrinsic to enter a block before returning `True`, or returns - `False` if the extrinsic fails to enter the block within the timeout. - :param wait_for_finalization: If set, waits for the extrinsic to be finalized on the chain before returning `True`, - or returns `False` if the extrinsic fails to be finalized within the timeout. 
- :param prompt: If `True`, the call waits for confirmation from the user before proceeding. - :return: `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, - the response is `True`. + Arguments: + subtensor (bittensor.core.async_subtensor.AsyncSubtensor): The AsyncSubtensor object + wallet (bittensor_wallet.Wallet): Bittensor wallet object. + netuids (Union[NDArray[np.int64], list[int]]): The `netuid` of the subnet to set weights for. + weights (Union[NDArray[np.float32], list[float]]): Weights to set. These must be `float` s and must correspond to the passed `netuid` s. + version_key (int): The version key of the validator. + wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning `True`, or returns `False` if the extrinsic fails to enter the block within the timeout. + wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning `True`, or returns `False` if the extrinsic fails to be finalized within the timeout. + + Returns: + `True` if extrinsic was finalized or included in the block. If we did not wait for finalization/inclusion, the response is `True`. """ async def _do_set_weights(): @@ -168,13 +179,13 @@ async def _do_set_weights(): ) if my_uid is None: - logging.error("Your hotkey is not registered to the root network") + logging.error("Your hotkey is not registered to the root network.") return False try: wallet.unlock_coldkey() except KeyFileError: - logging.error("Error decrypting coldkey (possibly incorrect password)") + logging.error("Error decrypting coldkey (possibly incorrect password).") return False # First convert types. @@ -203,25 +214,6 @@ async def _do_set_weights(): f"Raw weights -> Normalized weights: {weights} -> {formatted_weights}" ) - # Ask before moving on. 
- if prompt: - table = Table( - Column("[dark_orange]Netuid", justify="center", style="bold green"), - Column( - "[dark_orange]Weight", justify="center", style="bold light_goldenrod2" - ), - expand=False, - show_edge=False, - ) - print("Netuid | Weight") - - for netuid, weight in zip(netuids, formatted_weights): - table.add_row(str(netuid), f"{weight:.8f}") - print(f"{netuid} | {weight}") - - if not Confirm.ask("\nDo you want to set these root weights?"): - return False - try: logging.info(":satellite: Setting root weights...") weight_uids, weight_vals = convert_weights_and_uids_for_emit(netuids, weights) @@ -236,10 +228,10 @@ async def _do_set_weights(): return True else: fmt_err = format_error_message(error_message, subtensor.substrate) - logging.error(f":cross_mark: Failed: {fmt_err}") + logging.error(f":cross_mark: Failed error: {fmt_err}") return False except SubstrateRequestException as e: fmt_err = format_error_message(e, subtensor.substrate) - logging.error(f":cross_mark: Failed: error:{fmt_err}") + logging.error(f":cross_mark: Failed error: {fmt_err}") return False diff --git a/bittensor/core/extrinsics/async_weights.py b/bittensor/core/extrinsics/async_weights.py new file mode 100644 index 0000000000..82f2dc6dc3 --- /dev/null +++ b/bittensor/core/extrinsics/async_weights.py @@ -0,0 +1,257 @@ +"""This module provides functionality for setting weights on the Bittensor network.""" + +from typing import Union, TYPE_CHECKING, Optional + +import numpy as np +from numpy.typing import NDArray + +import bittensor.utils.weight_utils as weight_utils +from bittensor.core.settings import version_as_int +from bittensor.utils import format_error_message +from bittensor.utils.btlogging import logging +from bittensor.utils.registration import torch, use_torch + +if TYPE_CHECKING: + from bittensor_wallet import Wallet + from bittensor.core.async_subtensor import AsyncSubtensor + + +async def _do_set_weights( + subtensor: "AsyncSubtensor", + wallet: "Wallet", + uids: 
list[int], + vals: list[int], + netuid: int, + version_key: int = version_as_int, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, +) -> tuple[bool, Optional[str]]: # (success, error_message) + """ + Internal method to send a transaction to the Bittensor blockchain, setting weights + for specified neurons. This method constructs and submits the transaction, handling + retries and blockchain communication. + + Args: + subtensor (subtensor.core.async_subtensor.AsyncSubtensor): Async Subtensor instance. + wallet (bittensor.wallet): The wallet associated with the neuron setting the weights. + uids (List[int]): List of neuron UIDs for which weights are being set. + vals (List[int]): List of weight values corresponding to each UID. + netuid (int): Unique identifier for the network. + version_key (int, optional): Version key for compatibility with the network. + wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. + wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. + + Returns: + Tuple[bool, Optional[str]]: A tuple containing a success flag and an optional error message. + + This method is vital for the dynamic weighting mechanism in Bittensor, where neurons adjust their + trust in other neurons based on observed performance and contributions. 
+ """ + + call = await subtensor.substrate.compose_call( + call_module="SubtensorModule", + call_function="set_weights", + call_params={ + "dests": uids, + "weights": vals, + "netuid": netuid, + "version_key": version_key, + }, + ) + # Period dictates how long the extrinsic will stay as part of waiting pool + extrinsic = await subtensor.substrate.create_signed_extrinsic( + call=call, + keypair=wallet.hotkey, + era={"period": 5}, + ) + response = await subtensor.substrate.submit_extrinsic( + extrinsic, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + # We only wait here if we expect finalization. + if not wait_for_finalization and not wait_for_inclusion: + return True, "Not waiting for finalization or inclusion." + + await response.process_events() + if await response.is_success: + return True, "Successfully set weights." + else: + return False, format_error_message( + response.error_message, substrate=subtensor.substrate + ) + + +async def set_weights_extrinsic( + subtensor: "AsyncSubtensor", + wallet: "Wallet", + netuid: int, + uids: Union[NDArray[np.int64], "torch.LongTensor", list], + weights: Union[NDArray[np.float32], "torch.FloatTensor", list], + version_key: int = 0, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, +) -> tuple[bool, str]: + """Sets the given weights and values on chain for wallet hotkey account. + + Args: + subtensor (bittensor.subtensor): Bittensor subtensor object. + wallet (bittensor.wallet): Bittensor wallet object. + netuid (int): The ``netuid`` of the subnet to set weights for. + uids (Union[NDArray[np.int64], torch.LongTensor, list]): The ``uint64`` uids of destination neurons. + weights (Union[NDArray[np.float32], torch.FloatTensor, list]): The weights to set. These must be ``float`` s and correspond to the passed ``uid`` s. + version_key (int): The version key of the validator. 
+ wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. + wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. + prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. + + Returns: + success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``. + """ + # First convert types. + if use_torch(): + if isinstance(uids, list): + uids = torch.tensor(uids, dtype=torch.int64) + if isinstance(weights, list): + weights = torch.tensor(weights, dtype=torch.float32) + else: + if isinstance(uids, list): + uids = np.array(uids, dtype=np.int64) + if isinstance(weights, list): + weights = np.array(weights, dtype=np.float32) + + # Reformat and normalize. + weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( + uids, weights + ) + + logging.info( + ":satellite: Setting weights on {subtensor.network} ..." + ) + try: + success, error_message = await _do_set_weights( + subtensor=subtensor, + wallet=wallet, + netuid=netuid, + uids=weight_uids, + vals=weight_vals, + version_key=version_key, + wait_for_finalization=wait_for_finalization, + wait_for_inclusion=wait_for_inclusion, + ) + + if not wait_for_finalization and not wait_for_inclusion: + return True, "Not waiting for finalization or inclusion." + + if success is True: + message = "Successfully set weights and Finalized." + logging.success(f":white_heavy_check_mark: {message}") + return True, message + else: + logging.error(f"Failed set weights. Error: {error_message}") + return False, error_message + + except Exception as error: + logging.error(f":cross_mark: Failed set weights. 
Error: {error}") + return False, str(error) + + +async def _do_commit_weights( + subtensor: "AsyncSubtensor", + wallet: "Wallet", + netuid: int, + commit_hash: str, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, +) -> tuple[bool, Optional[str]]: + """ + Internal method to send a transaction to the Bittensor blockchain, committing the hash of a neuron's weights. + This method constructs and submits the transaction, handling retries and blockchain communication. + + Args: + subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for blockchain interaction. + wallet (bittensor_wallet.Wallet): The wallet associated with the neuron committing the weights. + netuid (int): The unique identifier of the subnet. + commit_hash (str): The hash of the neuron's weights to be committed. + wait_for_inclusion (bool): Waits for the transaction to be included in a block. + wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. + + Returns: + tuple[bool, Optional[str]]: A tuple containing a success flag and an optional error message. + + This method ensures that the weight commitment is securely recorded on the Bittensor blockchain, providing a verifiable record of the neuron's weight distribution at a specific point in time. 
+ """ + call = await subtensor.substrate.compose_call( + call_module="SubtensorModule", + call_function="commit_weights", + call_params={ + "netuid": netuid, + "commit_hash": commit_hash, + }, + ) + extrinsic = await subtensor.substrate.create_signed_extrinsic( + call=call, + keypair=wallet.hotkey, + ) + response = await subtensor.substrate.submit_extrinsic( + substrate=subtensor.substrate, + extrinsic=extrinsic, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + + if not wait_for_finalization and not wait_for_inclusion: + return True, None + + await response.process_events() + if await response.is_success: + return True, None + else: + return False, format_error_message( + response.error_message, substrate=subtensor.substrate + ) + + +async def commit_weights_extrinsic( + subtensor: "AsyncSubtensor", + wallet: "Wallet", + netuid: int, + commit_hash: str, + wait_for_inclusion: bool = False, + wait_for_finalization: bool = False, +) -> tuple[bool, str]: + """ + Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet. + This function is a wrapper around the `do_commit_weights` method, handling user prompts and error messages. + + Args: + subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for blockchain interaction. + wallet (bittensor_wallet.Wallet): The wallet associated with the neuron committing the weights. + netuid (int): The unique identifier of the subnet. + commit_hash (str): The hash of the neuron's weights to be committed. + wait_for_inclusion (bool): Waits for the transaction to be included in a block. + wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. + + Returns: + tuple[bool, str]: ``True`` if the weight commitment is successful, False otherwise. And `msg`, a string + value describing the success or potential error. 
+ + This function provides a user-friendly interface for committing weights to the Bittensor blockchain, ensuring proper error handling and user interaction when required. + """ + + success, error_message = await _do_commit_weights( + subtensor=subtensor, + wallet=wallet, + netuid=netuid, + commit_hash=commit_hash, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + + if success: + success_message = "Successfully committed weights." + logging.info(success_message) + return True, success_message + else: + logging.error(f"Failed to commit weights: {error_message}") + return False, error_message diff --git a/bittensor/core/extrinsics/prometheus.py b/bittensor/core/extrinsics/prometheus.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index 904b699926..ceab305b42 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -47,7 +47,7 @@ def do_set_weights( version_key: int = version_as_int, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, -) -> tuple[bool, Optional[dict]]: # (success, error_message) +) -> tuple[bool, Optional[str]]: # (success, error_message) """ Internal method to send a transaction to the Bittensor blockchain, setting weights for specified neurons. This method constructs and submits the transaction, handling retries and blockchain communication. @@ -99,7 +99,9 @@ def make_substrate_call_with_retry(): if response.is_success: return True, "Successfully set weights." else: - return False, response.error_message + return False, format_error_message( + response.error_message, substrate=self.substrate + ) return make_substrate_call_with_retry() @@ -179,9 +181,6 @@ def set_weights_extrinsic( logging.success(f"Finalized! Set weights: {str(success)}") return True, "Successfully set weights and Finalized." 
else: - error_message = format_error_message( - error_message, substrate=subtensor.substrate - ) logging.error(error_message) return False, error_message diff --git a/bittensor/core/settings.py b/bittensor/core/settings.py index 8eee9676ad..98d47104ae 100644 --- a/bittensor/core/settings.py +++ b/bittensor/core/settings.py @@ -54,6 +54,13 @@ NETWORKS[3]: LOCAL_ENTRYPOINT, } +NETWORK_MAP = { + NETWORKS[0]: FINNEY_ENTRYPOINT, + NETWORKS[1]: FINNEY_TEST_ENTRYPOINT, + NETWORKS[2]: ARCHIVE_ENTRYPOINT, + NETWORKS[3]: LOCAL_ENTRYPOINT, +} + # Currency Symbols Bittensor TAO_SYMBOL: str = chr(0x03C4) RAO_SYMBOL: str = chr(0x03C1) diff --git a/tests/unit_tests/extrinsics/test_set_weights.py b/tests/unit_tests/extrinsics/test_set_weights.py index 9c32fc9bdf..fbeee34dc0 100644 --- a/tests/unit_tests/extrinsics/test_set_weights.py +++ b/tests/unit_tests/extrinsics/test_set_weights.py @@ -61,7 +61,7 @@ def mock_wallet(): True, True, False, - "Subtensor returned `UnknownError(UnknownType)` error. This means: `Unknown Description`.", + "Mock error message", ), ([1, 2], [0.5, 0.5], 0, True, True, True, False, False, "Prompt refused."), ], @@ -226,7 +226,7 @@ def test_do_set_weights_is_not_success(mock_subtensor, mocker): mock_subtensor.substrate.submit_extrinsic.return_value.process_events.assert_called_once() assert result == ( False, - mock_subtensor.substrate.submit_extrinsic.return_value.error_message, + "Subtensor returned `UnknownError(UnknownType)` error. 
This means: `Unknown Description`.", ) From fa9f5814d9dee92ff13a000eb60110774b95aeef Mon Sep 17 00:00:00 2001 From: Roman Date: Mon, 4 Nov 2024 20:18:00 -0800 Subject: [PATCH 48/58] last check --- bittensor/core/async_subtensor.py | 4 +-- bittensor/core/extrinsics/async_weights.py | 3 +- .../extrinsics/test_registration.py | 2 -- tests/unit_tests/extrinsics/test_root.py | 29 ++---------------- tests/unit_tests/extrinsics/test_serving.py | 30 +++++++------------ 5 files changed, 15 insertions(+), 53 deletions(-) diff --git a/bittensor/core/async_subtensor.py b/bittensor/core/async_subtensor.py index a9db8cfd38..48eb8a7c43 100644 --- a/bittensor/core/async_subtensor.py +++ b/bittensor/core/async_subtensor.py @@ -245,7 +245,7 @@ async def get_total_subnets( module="SubtensorModule", storage_function="TotalNetworks", params=[], - block_hash=block_hash + block_hash=block_hash, ) return result @@ -1298,7 +1298,7 @@ async def get_uid_for_hotkey_on_subnet( module="SubtensorModule", storage_function="Uids", params=[netuid, hotkey_ss58], - block_hash=block_hash + block_hash=block_hash, ) # extrinsics diff --git a/bittensor/core/extrinsics/async_weights.py b/bittensor/core/extrinsics/async_weights.py index 82f2dc6dc3..926ce94c2c 100644 --- a/bittensor/core/extrinsics/async_weights.py +++ b/bittensor/core/extrinsics/async_weights.py @@ -103,7 +103,6 @@ async def set_weights_extrinsic( version_key (int): The version key of the validator. wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. 
Returns: success (bool): Flag is ``true`` if extrinsic was finalized or included in the block. If we did not wait for finalization / inclusion, the response is ``true``. @@ -222,7 +221,7 @@ async def commit_weights_extrinsic( ) -> tuple[bool, str]: """ Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet. - This function is a wrapper around the `do_commit_weights` method, handling user prompts and error messages. + This function is a wrapper around the `do_commit_weights` method. Args: subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for blockchain interaction. diff --git a/tests/unit_tests/extrinsics/test_registration.py b/tests/unit_tests/extrinsics/test_registration.py index ccc68719f3..18d14fac10 100644 --- a/tests/unit_tests/extrinsics/test_registration.py +++ b/tests/unit_tests/extrinsics/test_registration.py @@ -66,7 +66,6 @@ def mock_new_wallet(mocker): [ (False, True, True, False, "subnet-does-not-exist"), (True, False, True, True, "neuron-already-registered"), - (True, True, True, True, "user-declines-prompt"), (True, True, False, False, "cuda-unavailable"), ], ) @@ -189,7 +188,6 @@ def test_register_extrinsic_with_pow( (True, True, True, True, True, "happy-path-wallet-registered"), # Error paths (False, True, False, None, False, "subnet-non-existence"), - (True, True, True, None, False, "prompt-declined"), (True, True, False, False, False, "error-path-recycling-failed"), (True, True, True, False, False, "error-path-not-registered"), ], diff --git a/tests/unit_tests/extrinsics/test_root.py b/tests/unit_tests/extrinsics/test_root.py index fb8f861476..96d90fe09a 100644 --- a/tests/unit_tests/extrinsics/test_root.py +++ b/tests/unit_tests/extrinsics/test_root.py @@ -84,15 +84,14 @@ def test_root_register_extrinsic( @pytest.mark.parametrize( - "wait_for_inclusion, wait_for_finalization, netuids, weights, user_response, expected_success", + "wait_for_inclusion, wait_for_finalization, netuids, 
weights, expected_success", [ - (True, False, [1, 2], [0.5, 0.5], True, True), # Success - weights set + (True, False, [1, 2], [0.5, 0.5], True), # Success - weights set ( False, False, [1, 2], [0.5, 0.5], - None, True, ), # Success - weights set no wait ( @@ -101,7 +100,6 @@ def test_root_register_extrinsic( [1, 2], [2000, 20], True, - True, ), # Success - large value to be normalized ( True, @@ -109,7 +107,6 @@ def test_root_register_extrinsic( [1, 2], [2000, 0], True, - True, ), # Success - single large value ( True, @@ -117,15 +114,6 @@ def test_root_register_extrinsic( [1, 2], [0.5, 0.5], False, - False, - ), # Failure - prompt declined - ( - True, - False, - [1, 2], - [0.5, 0.5], - None, - False, ), # Failure - setting weights failed ( True, @@ -133,7 +121,6 @@ def test_root_register_extrinsic( [], [], False, - False, ), # Exception catched - ValueError 'min() arg is an empty sequence' ], ids=[ @@ -141,7 +128,6 @@ def test_root_register_extrinsic( "success-not-wait", "success-large-value", "success-single-value", - "failure-user-declines", "failure-setting-weights", "failure-value-error-exception", ], @@ -153,7 +139,6 @@ def test_set_root_weights_extrinsic( wait_for_finalization, netuids, weights, - user_response, expected_success, mocker, ): @@ -207,14 +192,6 @@ def test_set_root_weights_extrinsic( True, True, ), # Success - single large value - ( - True, - False, - [1, 2], - [0.5, 0.5], - False, - False, - ), # Failure - prompt declined ( True, False, @@ -237,7 +214,6 @@ def test_set_root_weights_extrinsic( "success-not-wait", "success-large-value", "success-single-value", - "failure-user-declines", "failure-setting-weights", "failure-value-error-exception", ], @@ -261,7 +237,6 @@ def test_set_root_weights_extrinsic_torch( wait_for_finalization, netuids, weights, - user_response, expected_success, mocker, ) diff --git a/tests/unit_tests/extrinsics/test_serving.py b/tests/unit_tests/extrinsics/test_serving.py index 04df0de71e..46eef17888 100644 --- 
a/tests/unit_tests/extrinsics/test_serving.py +++ b/tests/unit_tests/extrinsics/test_serving.py @@ -50,7 +50,7 @@ def mock_axon(mock_wallet, mocker): @pytest.mark.parametrize( - "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,prompt,expected,test_id,", + "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,expected,test_id,", [ ( "192.168.1.1", @@ -61,7 +61,6 @@ def mock_axon(mock_wallet, mocker): 0, False, True, - False, True, "happy-path-no-wait", ), @@ -74,7 +73,6 @@ def mock_axon(mock_wallet, mocker): 1, True, False, - False, True, "happy-path-wait-for-inclusion", ), @@ -88,14 +86,13 @@ def mock_axon(mock_wallet, mocker): False, True, True, - True, - "happy-path-wait-for-finalization-and-prompt", + "happy-path-wait-for-finalization", ), ], ids=[ "happy-path-no-wait", "happy-path-wait-for-inclusion", - "happy-path-wait-for-finalization-and-prompt", + "happy-path-wait-for-finalization", ], ) def test_serve_extrinsic_happy_path( @@ -109,7 +106,6 @@ def test_serve_extrinsic_happy_path( placeholder2, wait_for_inclusion, wait_for_finalization, - prompt, expected, test_id, mocker, @@ -136,7 +132,7 @@ def test_serve_extrinsic_happy_path( # Various edge cases @pytest.mark.parametrize( - "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,prompt,expected,test_id,", + "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,expected,test_id,", [ ( "192.168.1.4", @@ -147,7 +143,6 @@ def test_serve_extrinsic_happy_path( 3, True, True, - False, True, "edge_case_max_values", ), @@ -165,7 +160,6 @@ def test_serve_extrinsic_edge_cases( placeholder2, wait_for_inclusion, wait_for_finalization, - prompt, expected, test_id, mocker, @@ -192,7 +186,7 @@ def test_serve_extrinsic_edge_cases( # Various error cases @pytest.mark.parametrize( - 
"ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,prompt,expected_error_message,test_id,", + "ip,port,protocol,netuid,placeholder1,placeholder2,wait_for_inclusion,wait_for_finalization,expected_error_message,test_id,", [ ( "192.168.1.5", @@ -204,7 +198,6 @@ def test_serve_extrinsic_edge_cases( True, True, False, - False, "error-case-failed-serve", ), ], @@ -221,7 +214,6 @@ def test_serve_extrinsic_error_cases( placeholder2, wait_for_inclusion, wait_for_finalization, - prompt, expected_error_message, test_id, mocker, @@ -247,20 +239,19 @@ def test_serve_extrinsic_error_cases( @pytest.mark.parametrize( - "netuid, wait_for_inclusion, wait_for_finalization, prompt, external_ip, external_ip_success, serve_success, expected_result, test_id", + "netuid, wait_for_inclusion, wait_for_finalization, external_ip, external_ip_success, serve_success, expected_result, test_id", [ # Happy path test - (1, False, True, False, "192.168.1.1", True, True, True, "happy-ext-ip"), - (1, False, True, True, None, True, True, True, "happy-net-external-ip"), + (1, False, True, "192.168.1.1", True, True, True, "happy-ext-ip"), + (1, False, True, None, True, True, True, "happy-net-external-ip"), # Edge cases - (1, True, True, False, "192.168.1.1", True, True, True, "edge-case-wait"), + (1, True, True, "192.168.1.1", True, True, True, "edge-case-wait"), # Error cases - (1, False, True, False, None, False, True, False, "error-fetching-external-ip"), + (1, False, True, None, False, True, False, "error-fetching-external-ip"), ( 1, False, True, - False, "192.168.1.1", True, False, @@ -282,7 +273,6 @@ def test_serve_axon_extrinsic( netuid, wait_for_inclusion, wait_for_finalization, - prompt, external_ip, external_ip_success, serve_success, From 81a9cdd9552fa43dc87fcb9c4868cbade33c27c9 Mon Sep 17 00:00:00 2001 From: Roman Date: Mon, 4 Nov 2024 21:22:12 -0800 Subject: [PATCH 49/58] fix `tests/e2e_tests/test_commit_weights.py` --- 
tests/e2e_tests/test_commit_weights.py | 38 ++++++++++++++++---------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py index 962a061a9a..6abeb42fe0 100644 --- a/tests/e2e_tests/test_commit_weights.py +++ b/tests/e2e_tests/test_commit_weights.py @@ -21,7 +21,7 @@ @pytest.mark.asyncio async def test_commit_and_reveal_weights(local_chain): """ - Tests the commit/reveal weights mechanism + Tests the commit/reveal weights mechanism with subprocess disabled (CR1.0) Steps: 1. Register a subnet through Alice @@ -60,9 +60,11 @@ async def test_commit_and_reveal_weights(local_chain): netuid, ), "Unable to enable commit reveal on the subnet" - subtensor = Subtensor(network="ws://localhost:9945") + subtensor = Subtensor( + network="ws://localhost:9945" + ) assert subtensor.get_subnet_hyperparameters( - netuid=netuid + netuid=netuid, ).commit_reveal_weights_enabled, "Failed to enable commit/reveal" # Lower the commit_reveal interval @@ -70,17 +72,16 @@ async def test_commit_and_reveal_weights(local_chain): local_chain, alice_wallet, call_function="sudo_set_commit_reveal_weights_interval", - call_params={"netuid": netuid, "interval": "370"}, + call_params={"netuid": netuid, "interval": "1"}, return_error_message=True, ) - subtensor = Subtensor(network="ws://localhost:9945") assert ( subtensor.get_subnet_hyperparameters( netuid=netuid ).commit_reveal_weights_interval - == 370 - ), "Failed to set commit/reveal interval" + == 1 + ), "Failed to set commit/reveal periods" assert ( subtensor.weights_rate_limit(netuid=netuid) > 0 @@ -93,7 +94,7 @@ async def test_commit_and_reveal_weights(local_chain): call_params={"netuid": netuid, "weights_set_rate_limit": "0"}, return_error_message=True, ) - subtensor = Subtensor(network="ws://localhost:9945") + assert ( subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 ), "Failed to set weights_rate_limit" @@ -118,6 +119,8 @@ async 
def test_commit_and_reveal_weights(local_chain): wait_for_finalization=True, ) + assert success is True + weight_commits = subtensor.query_module( module="SubtensorModule", name="WeightCommits", @@ -125,18 +128,20 @@ async def test_commit_and_reveal_weights(local_chain): ) # Assert that the committed weights are set correctly assert weight_commits.value is not None, "Weight commit not found in storage" - commit_hash, commit_block = weight_commits.value + commit_hash, commit_block, reveal_block, expire_block = weight_commits.value[0] assert commit_block > 0, f"Invalid block number: {commit_block}" # Query the WeightCommitRevealInterval storage map - weight_commit_reveal_interval = subtensor.query_module( - module="SubtensorModule", name="WeightCommitRevealInterval", params=[netuid] + reveal_periods = subtensor.query_module( + module="SubtensorModule", name="RevealPeriodEpochs", params=[netuid] ) - interval = weight_commit_reveal_interval.value - assert interval > 0, "Invalid WeightCommitRevealInterval" + periods = reveal_periods.value + assert periods > 0, "Invalid RevealPeriodEpochs" # Wait until the reveal block range - await wait_interval(interval, subtensor) + await wait_interval( + subtensor.get_subnet_hyperparameters(netuid=netuid).tempo, subtensor + ) # Reveal weights success, message = subtensor.reveal_weights( @@ -148,6 +153,9 @@ async def test_commit_and_reveal_weights(local_chain): wait_for_inclusion=True, wait_for_finalization=True, ) + + assert success is True + time.sleep(10) # Query the Weights storage map @@ -163,4 +171,4 @@ async def test_commit_and_reveal_weights(local_chain): assert ( weight_vals[0] == revealed_weights.value[0][1] ), f"Incorrect revealed weights. 
Expected: {weights[0]}, Actual: {revealed_weights.value[0][1]}" - logging.info("✅ Passed test_commit_and_reveal_weights") + logging.info("✅ Passed test_commit_and_reveal_weights") \ No newline at end of file From 20c8ba2251f8724eb400cc56d0445f2c97e11cf1 Mon Sep 17 00:00:00 2001 From: Roman Date: Mon, 4 Nov 2024 21:23:00 -0800 Subject: [PATCH 50/58] ruff --- tests/e2e_tests/test_commit_weights.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py index 6abeb42fe0..5c03a3788b 100644 --- a/tests/e2e_tests/test_commit_weights.py +++ b/tests/e2e_tests/test_commit_weights.py @@ -60,9 +60,7 @@ async def test_commit_and_reveal_weights(local_chain): netuid, ), "Unable to enable commit reveal on the subnet" - subtensor = Subtensor( - network="ws://localhost:9945" - ) + subtensor = Subtensor(network="ws://localhost:9945") assert subtensor.get_subnet_hyperparameters( netuid=netuid, ).commit_reveal_weights_enabled, "Failed to enable commit/reveal" @@ -171,4 +169,4 @@ async def test_commit_and_reveal_weights(local_chain): assert ( weight_vals[0] == revealed_weights.value[0][1] ), f"Incorrect revealed weights. Expected: {weights[0]}, Actual: {revealed_weights.value[0][1]}" - logging.info("✅ Passed test_commit_and_reveal_weights") \ No newline at end of file + logging.info("✅ Passed test_commit_and_reveal_weights") From 636ec2601e6c9f34bb7a1a2ceef1e4ade9361b4c Mon Sep 17 00:00:00 2001 From: opendansor Date: Mon, 4 Nov 2024 22:12:11 -0800 Subject: [PATCH 51/58] Refactor commit and reveal logic, remove prompts Refactored the commit and reveal weight functions to simplify code, eliminate retry loops, and remove user prompts. The subprocess handling has been improved for better subprocess control, and utility functions have been moved to a new module. 
--- bittensor/core/extrinsics/commit_weights.py | 60 ++---- bittensor/core/extrinsics/set_weights.py | 193 +++++++---------- bittensor/core/extrinsics/transfer.py | 4 +- bittensor/core/subtensor.py | 196 +++++++----------- bittensor/utils/subprocess/commit_reveal.py | 102 ++++----- .../utils.py} | 107 ++++++---- tests/e2e_tests/conftest.py | 8 +- tests/e2e_tests/test_reveal_weights.py | 1 - 8 files changed, 302 insertions(+), 369 deletions(-) rename bittensor/utils/{subprocess_utils.py => subprocess/utils.py} (76%) diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index b78132d7fd..2698577df1 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -65,10 +65,10 @@ def do_commit_weights( """ @retry(delay=1, tries=3, backoff=2, max_delay=4) - def make_substrate_call_with_retry(extrinsic): + def make_substrate_call_with_retry(extrinsic_): response = submit_extrinsic( substrate=self.substrate, - extrinsic=extrinsic, + extrinsic=extrinsic_, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, ) @@ -104,7 +104,6 @@ def commit_weights_extrinsic( commit_hash: str, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, ) -> tuple[bool, str]: """ Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet. @@ -117,7 +116,6 @@ def commit_weights_extrinsic( commit_hash (str): The hash of the neuron's weights to be committed. wait_for_inclusion (bool): Waits for the transaction to be included in a block. wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. - prompt (bool): If ``True``, prompts for user confirmation before proceeding. Returns: tuple[bool, str]: ``True`` if the weight commitment is successful, False otherwise. 
And `msg`, a string @@ -125,8 +123,6 @@ def commit_weights_extrinsic( This function provides a user-friendly interface for committing weights to the Bittensor blockchain, ensuring proper error handling and user interaction when required. """ - if prompt and not Confirm.ask(f"Would you like to commit weights?"): - return False, "User cancelled the operation." success, error_message = do_commit_weights( self=subtensor, @@ -139,10 +135,10 @@ def commit_weights_extrinsic( if success: success_message = "Successfully committed weights." - logging.info(success_message) + logging.success(success_message) return True, success_message else: - error_message = format_error_message(error_message) + error_message = format_error_message(error_message, substrate=subtensor.substrate) logging.error(f"Failed to commit weights: {error_message}") return False, error_message @@ -176,19 +172,20 @@ def commit_weights_process( wait until the appropriate time to reveal the weights. """ - def send_command(command): + def send_command(command_): client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.connect(("127.0.0.1", 9949)) - client.send(command.encode()) + client.send(command_.encode()) client.close() curr_block = block if block is not None else subtensor.get_current_block() blocks_until_next_epoch = subtensor.blocks_until_next_epoch(netuid=netuid) - subnet_tempo_blocks = subtensor.get_subnet_hyperparameters(netuid=netuid).tempo + subnet_hyperparams = subtensor.get_subnet_hyperparameters(netuid=netuid) + if subnet_hyperparams is None: + raise ValueError(f"Subnet hyperparameters for netuid {netuid} are None.") + subnet_tempo_blocks = subnet_hyperparams.tempo epoch_start_block = curr_block + blocks_until_next_epoch - cr_periods = subtensor.get_subnet_hyperparameters( - netuid=netuid - ).commit_reveal_weights_interval + cr_periods = subnet_hyperparams.commit_reveal_weights_interval reveal_block = epoch_start_block + ((cr_periods - 1) * subnet_tempo_blocks) + 1 expire_block = 
reveal_block + subnet_tempo_blocks @@ -231,10 +228,10 @@ def do_reveal_weights( """ @retry(delay=1, tries=3, backoff=2, max_delay=4) - def make_substrate_call_with_retry(extrinsic): + def make_substrate_call_with_retry(extrinsic_): response = submit_extrinsic( substrate=self.substrate, - extrinsic=extrinsic, + extrinsic=extrinsic_, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, ) @@ -276,7 +273,6 @@ def reveal_weights_extrinsic( version_key: int, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, ) -> tuple[bool, str]: """ Reveals the weights for a specific subnet on the Bittensor blockchain using the provided wallet. @@ -292,7 +288,6 @@ def reveal_weights_extrinsic( version_key (int): Version key for compatibility with the network. wait_for_inclusion (bool): Waits for the transaction to be included in a block. wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. - prompt (bool): If ``True``, prompts for user confirmation before proceeding. Returns: tuple[bool, str]: ``True`` if the weight revelation is successful, False otherwise. And `msg`, a string @@ -300,10 +295,6 @@ def reveal_weights_extrinsic( This function provides a user-friendly interface for revealing weights on the Bittensor blockchain, ensuring proper error handling and user interaction when required. """ - - if prompt and not Confirm.ask(f"Would you like to reveal weights?"): - return False, "User cancelled the operation." - success, error_message = do_reveal_weights( self=subtensor, wallet=wallet, @@ -318,10 +309,10 @@ def reveal_weights_extrinsic( if success: success_message = "Successfully revealed weights." 
- logging.info(success_message) + logging.success(success_message) return True, success_message else: - error_message = format_error_message(error_message) + error_message = format_error_message(error_message, substrate=subtensor.substrate) logging.error(f"Failed to reveal weights: {error_message}") return False, error_message @@ -351,10 +342,10 @@ def reveal_weights_process( version_key (int): Version key for compatibility with the network. Defaults to `settings.version_as_int`. """ - def send_command(command): + def send_command(command_): client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.connect(("127.0.0.1", 9949)) - client.send(command.encode()) + client.send(command_.encode()) client.close() try: @@ -408,10 +399,10 @@ def do_batch_reveal_weights( """ @retry(delay=1, tries=3, backoff=2, max_delay=4) - def make_substrate_call_with_retry(extrinsic): + def make_substrate_call_with_retry(extrinsic_): response = submit_extrinsic( substrate=self.substrate, - extrinsic=extrinsic, + extrinsic=extrinsic_, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, ) @@ -453,7 +444,6 @@ def batch_reveal_weights_extrinsic( version_keys: list[int], wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, ) -> tuple[bool, str]: """ Reveals the weights for a specific subnet on the Bittensor blockchain using the provided wallet. @@ -470,7 +460,6 @@ def batch_reveal_weights_extrinsic( version_keys (list[int]): List of version keys for compatibility with the network for each batch. wait_for_inclusion (bool, optional): Waits for the transaction to be included in a block. Defaults to False. wait_for_finalization (bool, optional): Waits for the transaction to be finalized on the blockchain. Defaults to False. - prompt (bool, optional): If ``True``, prompts for user confirmation before proceeding. Defaults to False. 
Returns: tuple[bool, str]: ``True`` if the weight revelation is successful, ``False`` otherwise. And `msg`, a string @@ -480,9 +469,6 @@ def batch_reveal_weights_extrinsic( ensuring proper error handling and user interaction when required. """ - if prompt and not Confirm.ask(f"Would you like to batch reveal weights?"): - return False, "User cancelled the operation." - success, error_message = do_batch_reveal_weights( self=subtensor, wallet=wallet, @@ -497,10 +483,10 @@ def batch_reveal_weights_extrinsic( if success: success_message = "Successfully batch revealed weights." - logging.info(success_message) + logging.success(success_message) return True, success_message else: - error_message = format_error_message(error_message) + error_message = format_error_message(error_message, substrate=subtensor.substrate) logging.error(f"Failed batch reveal weights extrinsic: {error_message}") return False, error_message @@ -528,10 +514,10 @@ def batch_reveal_weights_process( This function facilitates the batch reveal process, ensuring that the hashed weights are properly recorded and sent. 
""" - def send_command(command): + def send_command(command_): client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client.connect(("127.0.0.1", 9949)) - client.send(command.encode()) + client.send(command_.encode()) client.close() try: diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index 9edd9b3165..b82a5fe6ba 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -21,10 +21,9 @@ import numpy as np from numpy.typing import NDArray from retry import retry -from rich.prompt import Confirm from bittensor.core.extrinsics.utils import submit_extrinsic -from bittensor.core.settings import bt_console, version_as_int +from bittensor.core.settings import version_as_int from bittensor.utils import format_error_message, weight_utils from bittensor.utils.btlogging import logging from bittensor.utils.networking import ensure_connected @@ -70,10 +69,10 @@ def do_set_weights( """ @retry(delay=1, tries=3, backoff=2, max_delay=4) - def make_substrate_call_with_retry(extrinsic): + def make_substrate_call_with_retry(extrinsic_): response = submit_extrinsic( substrate=self.substrate, - extrinsic=extrinsic, + extrinsic=extrinsic_, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, ) @@ -116,7 +115,6 @@ def set_weights_extrinsic( version_key: int = 0, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, ) -> tuple[bool, str]: """Sets the given weights and values on chain for wallet hotkey account. @@ -129,79 +127,58 @@ def set_weights_extrinsic( version_key (int): The version key of the validator. wait_for_inclusion (bool): If set, waits for the extrinsic to enter a block before returning ``true``, or returns ``false`` if the extrinsic fails to enter the block within the timeout. 
wait_for_finalization (bool): If set, waits for the extrinsic to be finalized on the chain before returning ``true``, or returns ``false`` if the extrinsic fails to be finalized within the timeout. - prompt (bool): If ``true``, the call waits for confirmation from the user before proceeding. Returns: tuple[bool, str]: A tuple containing a success flag and an optional response message. """ - - if subtensor.get_subnet_hyperparameters( - netuid=netuid - ).commit_reveal_weights_enabled: + get_subnet_hyperparameters = subtensor.get_subnet_hyperparameters(netuid=netuid) + if get_subnet_hyperparameters and get_subnet_hyperparameters.commit_reveal_weights_enabled: # if cr is enabled, commit instead of setting the weights. salt = [random.randint(0, 350) for _ in range(8)] - # Ask before moving on. - if prompt: - if not Confirm.ask( - f"Do you want to commit weights:\n[bold white] weights: {weights}\n" - f"uids: {uids}[/bold white ]?" - ): - return False, "Prompt refused." - - with bt_console.status( - f":satellite: Committing weights on [white]{subtensor.network}[/white] ..." - ): - try: - # First convert types. - if use_torch(): - if isinstance(uids, list): - uids = torch.tensor(uids, dtype=torch.int64) - if isinstance(weights, list): - weights = torch.tensor(weights, dtype=torch.float32) - else: - if isinstance(uids, list): - uids = np.array(uids, dtype=np.int64) - if isinstance(weights, list): - weights = np.array(weights, dtype=np.float32) - - # Reformat and normalize. - weight_uids, weight_vals = ( - weight_utils.convert_weights_and_uids_for_emit(uids, weights) - ) - - success, message = subtensor.commit_weights( - wallet=wallet, - netuid=netuid, - salt=salt, - uids=weight_uids, - weights=weight_vals, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - max_retries=1, - ) - if not wait_for_finalization and not wait_for_inclusion: - return True, "Not waiting for finalization or inclusion." 
- - if success is True: - bt_console.print( - ":white_heavy_check_mark: [green]Finalized[/green]" - ) - logging.success( - msg=str(success), - prefix="Committed weights", - suffix="Finalized: ", - ) - return True, "Successfully committed weights and Finalized." - else: - logging.error(message) - return False, message - - except Exception as e: - bt_console.print(f":cross_mark: [red]Failed[/red]: error:{e}") - logging.debug(str(e)) - return False, str(e) + logging.info( + f":satellite: Committing weights on {subtensor.network}..." + ) + try: + # First convert types. + if use_torch(): + if isinstance(uids, list): + uids = torch.tensor(uids, dtype=torch.int64) + if isinstance(weights, list): + weights = torch.tensor(weights, dtype=torch.float32) + else: + if isinstance(uids, list): + uids = np.array(uids, dtype=np.int64) + if isinstance(weights, list): + weights = np.array(weights, dtype=np.float32) + + # Reformat and normalize. + weight_uids, weight_vals = ( + weight_utils.convert_weights_and_uids_for_emit(uids, weights) + ) + + success, message = subtensor.commit_weights( + wallet=wallet, + netuid=netuid, + salt=salt, + uids=weight_uids, + weights=weight_vals, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + if not wait_for_finalization and not wait_for_inclusion: + return True, "Not waiting for finalization or inclusion." + + if success is True: + logging.success(f"Finalized! Committed weights: {str(success)}") + return True, "Successfully committed weights and Finalized." + else: + logging.error(message) + return False, message + + except Exception as e: + logging.error(f":cross_mark: Failed. Error: {e}") + return False, str(e) else: # First convert types. if use_torch(): @@ -220,48 +197,32 @@ def set_weights_extrinsic( uids, weights ) - # Ask before moving on. 
- if prompt: - if not Confirm.ask( - f"Do you want to set weights:\n[bold white] weights: {[float(v / 65535) for v in weight_vals]}\n" - f"uids: {weight_uids}[/bold white ]?" - ): - return False, "Prompt refused." - - with bt_console.status( - f":satellite: Setting weights on [white]{subtensor.network}[/white] ..." - ): - try: - success, error_message = do_set_weights( - self=subtensor, - wallet=wallet, - netuid=netuid, - uids=weight_uids, - vals=weight_vals, - version_key=version_key, - wait_for_finalization=wait_for_finalization, - wait_for_inclusion=wait_for_inclusion, - ) - - if not wait_for_finalization and not wait_for_inclusion: - return True, "Not waiting for finalization or inclusion." - - if success is True: - bt_console.print( - ":white_heavy_check_mark: [green]Finalized[/green]" - ) - logging.success( - msg=str(success), - prefix="Set weights", - suffix="Finalized: ", - ) - return True, "Successfully set weights and Finalized." - else: - error_message = format_error_message(error_message) - logging.error(error_message) - return False, error_message - - except Exception as e: - bt_console.print(f":cross_mark: [red]Failed[/red]: error:{e}") - logging.debug(str(e)) - return False, str(e) + logging.info( + f":satellite: Setting weights on {subtensor.network}..." + ) + try: + success, error_message = do_set_weights( + self=subtensor, + wallet=wallet, + netuid=netuid, + uids=weight_uids, + vals=weight_vals, + version_key=version_key, + wait_for_finalization=wait_for_finalization, + wait_for_inclusion=wait_for_inclusion, + ) + + if not wait_for_finalization and not wait_for_inclusion: + return True, "Not waiting for finalization or inclusion." + + if success is True: + logging.success(f"Finalized! Set weights: {str(success)}") + return True, "Successfully set weights and Finalized." 
+ else: + error_message = format_error_message(error_message) + logging.error(error_message) + return False, error_message + + except Exception as e: + logging.error(f":cross_mark: Failed error: {e}") + return False, str(e) diff --git a/bittensor/core/extrinsics/transfer.py b/bittensor/core/extrinsics/transfer.py index c82a7db690..f3ece25859 100644 --- a/bittensor/core/extrinsics/transfer.py +++ b/bittensor/core/extrinsics/transfer.py @@ -63,10 +63,10 @@ def do_transfer( """ @retry(delay=1, tries=3, backoff=2, max_delay=4) - def make_substrate_call_with_retry(extrinsic): + def make_substrate_call_with_retry(extrinsic_): response = submit_extrinsic( substrate=self.substrate, - extrinsic=extrinsic, + extrinsic=extrinsic_, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, ) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 2d39e079c0..a8285121c3 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -85,8 +85,8 @@ U64_MAX, u16_normalized_float, networking, - subprocess_utils, ) +from bittensor.utils.subprocess import utils from bittensor.utils.balance import Balance from bittensor.utils.btlogging import logging from bittensor.utils.registration import legacy_torch_api_compat @@ -97,8 +97,6 @@ KEY_NONCE: dict[str, int] = {} -COMMIT_REVEAL_PROCESS = "commit_reveal.py" - class ParamWithTypes(TypedDict): name: str # Name of the parameter. @@ -881,7 +879,6 @@ def set_weights( version_key: int = settings.version_as_int, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, max_retries: int = 1, ) -> tuple[bool, str]: """ @@ -895,7 +892,6 @@ def set_weights( version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version.``. wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``. 
wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``. - prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``. max_retries (int): The number of maximum attempts to set weights. Default is ``5``. Returns: @@ -925,7 +921,6 @@ def set_weights( version_key=version_key, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, - prompt=prompt, ) except Exception as e: logging.error(f"Error setting weights: {e}") @@ -1846,8 +1841,6 @@ def commit_weights( version_key: int = settings.version_as_int, wait_for_inclusion: bool = True, wait_for_finalization: bool = False, - prompt: bool = False, - max_retries: int = 1, ) -> tuple[bool, str]: """ Commits a hash of the neuron's weights to the Bittensor blockchain using the provided wallet. @@ -1862,8 +1855,6 @@ def commit_weights( version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version.``. wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``. wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``. - prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``. - max_retries (int): The number of maximum attempts to commit weights. Default is ``5``. Returns: tuple[bool, str]: ``True`` if the weight commitment is successful, False otherwise. And `msg`, a string @@ -1872,19 +1863,17 @@ def commit_weights( This function allows neurons to create a tamper-proof record of their weight distribution at a specific point in time, enhancing transparency and accountability within the Bittensor network. """ - retries = 0 - success = False - message = "No attempt made. Perhaps it is too soon to commit weights!" + message = "No attempt made. Perhaps it is too soon to commit weights!" 
+ success = False logging.info( f"Committing weights with params: netuid={netuid}, uids={uids}, weights={weights}, version_key={version_key}" ) # start subprocess if permitted and not yet running if self.subprocess_initialization and not subprocess_utils.is_process_running( - COMMIT_REVEAL_PROCESS + subprocess_utils.COMMIT_REVEAL_PROCESS ): - logging.info("Starting commit_reveal subprocess from commit.") subprocess_utils.start_commit_reveal_subprocess( network=self.chain_endpoint, sleep_interval=self.subprocess_sleep_interval, @@ -1902,41 +1891,35 @@ def commit_weights( version_key=version_key, ) - logging.info(f"Commit Hash: {commit_hash}") + logging.info( + f"Committing weights with params: hash={commit_hash}, netuid={netuid}, uids={uids}, weights={weights}, version_key={version_key}" + ) - while retries < max_retries and not success: - try: - if ( - self.subprocess_initialization - and subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS) - ): - curr_block = self.get_current_block() - commit_weights_process( - self, - wallet=wallet, - netuid=netuid, - commit_hash=commit_hash, - uids=list(uids), - weights=list(weights), - salt=salt, - version_key=version_key, - block=curr_block, - ) - success, message = commit_weights_extrinsic( - subtensor=self, + try: + if ( + self.subprocess_initialization + and subprocess_utils.is_process_running(subprocess_utils.COMMIT_REVEAL_PROCESS) + ): + commit_weights_process( + self, wallet=wallet, netuid=netuid, commit_hash=commit_hash, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, + uids=list(uids), + weights=list(weights), + salt=salt, + version_key=version_key, ) - if success: - break - except Exception as e: - logging.error(f"Error committing weights: {e}") - finally: - retries += 1 + success, message = commit_weights_extrinsic( + subtensor=self, + wallet=wallet, + netuid=netuid, + commit_hash=commit_hash, + wait_for_inclusion=wait_for_inclusion, + 
wait_for_finalization=wait_for_finalization, + ) + except Exception as e: + logging.error(f"Error committing weights: {e}") return success, message @@ -1951,8 +1934,6 @@ def reveal_weights( version_key: int = settings.version_as_int, wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, - max_retries: int = 5, ) -> tuple[bool, str]: """ Reveals the weights for a specific subnet on the Bittensor blockchain using the provided wallet. @@ -1967,8 +1948,6 @@ def reveal_weights( version_key (int): Version key for compatibility with the network. Default is ``int representation of Bittensor version``. wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``. wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``. - prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``. - max_retries (int): The number of maximum attempts to reveal weights. Default is ``5``. Returns: tuple[bool, str]: ``True`` if the weight revelation is successful, False otherwise. And `msg`, a string @@ -1978,40 +1957,33 @@ def reveal_weights( and accountability within the Bittensor network. """ - retries = 0 - success = False message = "No attempt made. Perhaps it is too soon to reveal weights!" 
- - while retries < max_retries: - try: - success, message = reveal_weights_extrinsic( - subtensor=self, - wallet=wallet, - netuid=netuid, - uids=list(uids), - weights=list(weights), - salt=list(salt), - version_key=version_key, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - if success: - # remove from local db if called directly - if subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS): - reveal_weights_process( - wallet=wallet, - netuid=netuid, - uids=list(uids), - weights=list(weights), - salt=list(salt), - version_key=version_key, - ) - break - except Exception as e: - logging.error(f"Error revealing weights: {e}") - finally: - retries += 1 + success = False + try: + success, message = reveal_weights_extrinsic( + subtensor=self, + wallet=wallet, + netuid=netuid, + uids=list(uids), + weights=list(weights), + salt=list(salt), + version_key=version_key, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + if success: + # remove from local db if called directly + if subprocess_utils.is_process_running(subprocess_utils.COMMIT_REVEAL_PROCESS): + reveal_weights_process( + wallet=wallet, + netuid=netuid, + uids=list(uids), + weights=list(weights), + salt=list(salt), + version_key=version_key, + ) + except Exception as e: + logging.error(f"Error revealing weights: {e}") return success, message @@ -2046,8 +2018,6 @@ def batch_reveal_weights( version_keys: list[int], wait_for_inclusion: bool = False, wait_for_finalization: bool = False, - prompt: bool = False, - max_retries: int = 5, ) -> tuple[bool, str]: """ Reveals the weights for a specific subnet on the Bittensor blockchain using the provided wallet. @@ -2062,8 +2032,6 @@ def batch_reveal_weights( version_keys (list[int]): List of version keys for compatibility with the network. Default is ``int representation of Bittensor version``. 
wait_for_inclusion (bool): Waits for the transaction to be included in a block. Default is ``False``. wait_for_finalization (bool): Waits for the transaction to be finalized on the blockchain. Default is ``False``. - prompt (bool): If ``True``, prompts for user confirmation before proceeding. Default is ``False``. - max_retries (int): The number of maximum attempts to reveal weights. Default is ``5``. Returns: tuple[bool, str]: ``True`` if the batch weight revelation is successful, False otherwise. And `msg`, a string @@ -2072,40 +2040,34 @@ def batch_reveal_weights( This function allows neurons to reveal their previously committed weight distribution, ensuring transparency and accountability within the Bittensor network. """ - retries = 0 success = False message = "No attempt made. Perhaps it is too soon to reveal weights!" - while retries < max_retries: - try: - success, message = batch_reveal_weights_extrinsic( - subtensor=self, - wallet=wallet, - netuid=netuid, - uids=uids, - weights=weights, - salt=salt, - version_keys=version_keys, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - prompt=prompt, - ) - if success: - # remove from local db if called directly - if subprocess_utils.is_process_running(COMMIT_REVEAL_PROCESS): - batch_reveal_weights_process( - wallet=wallet, - netuid=netuid, - uids=uids, - weights=weights, - salt=salt, - version_keys=version_keys, - ) - return success, message - except Exception as e: - logging.error(f"Error revealing weights: {e}") - finally: - retries += 1 + try: + success, message = batch_reveal_weights_extrinsic( + subtensor=self, + wallet=wallet, + netuid=netuid, + uids=uids, + weights=weights, + salt=salt, + version_keys=version_keys, + wait_for_inclusion=wait_for_inclusion, + wait_for_finalization=wait_for_finalization, + ) + if success: + # remove from local db if called directly + if subprocess_utils.is_process_running(subprocess_utils.COMMIT_REVEAL_PROCESS): + 
batch_reveal_weights_process( + wallet=wallet, + netuid=netuid, + uids=uids, + weights=weights, + salt=salt, + version_keys=version_keys, + ) + except Exception as e: + logging.error(f"Error revealing weights: {e}") return success, message diff --git a/bittensor/utils/subprocess/commit_reveal.py b/bittensor/utils/subprocess/commit_reveal.py index 829361a3b8..562b983115 100644 --- a/bittensor/utils/subprocess/commit_reveal.py +++ b/bittensor/utils/subprocess/commit_reveal.py @@ -10,13 +10,15 @@ from bittensor.core.subtensor import Subtensor from bittensor_wallet import Wallet -from bittensor.utils import subprocess_utils as utils +from bittensor.utils.subprocess import utils as utils from typing import List, Any, Dict, Optional # Path to the SQLite database -DB_PATH = os.path.expanduser("~/.bittensor/bittensor.db") +DB_PATH = os.path.join(os.path.expanduser("~"), ".bittensor", "bittensor.db") # Global variable to control the server loop running = True +PORT = 9949 +HOST = "127.0.0.1" class Commit: @@ -140,9 +142,9 @@ def __str__(self) -> str: ) -def table_exists(table_name: str) -> bool: +def _table_exists(table_name: str) -> bool: """ - Checks if a table exists in the database. + Checks if the database exists and if a table exists in the database. Args: table_name (str): The name of the table to check. @@ -150,6 +152,10 @@ def table_exists(table_name: str) -> bool: Returns: bool: True if the table exists, False otherwise. """ + if not os.path.exists(DB_PATH): + print(f"Database at path '{DB_PATH}' does not exist.") + return False + try: columns, rows = utils.read_table(table_name) print(f"Table '{table_name}' exists with columns: {columns}") @@ -182,7 +188,7 @@ def is_table_empty(table_name: str) -> bool: return False -def initialize_db(): +def _initialize_db(): """ Initializes the database by creating the 'commits' table if it does not exist. 
""" @@ -202,14 +208,14 @@ def initialize_db(): ("version_key", "INTEGER"), ("revealed", "BOOLEAN DEFAULT FALSE"), ] - if not table_exists("commits"): + if not _table_exists("commits"): print("Creating table 'commits'...") - utils.create_table("commits", columns, []) + assert utils.create_table("commits", columns, []) else: print("Table 'commits' already exists.") -def reveal(subtensor: Subtensor, commit: Commit): +def _reveal(subtensor: Subtensor, commit: Commit): """ Reveals the weights for a commit to the subtensor network. @@ -234,13 +240,13 @@ def reveal(subtensor: Subtensor, commit: Commit): ) del wallet if success: - revealed_commit(commit.commit_hash) + _revealed_commit(commit.commit_hash) print(f"Reveal success for commit {commit}") else: print(f"Reveal failure for commit: {message}") -def reveal_batch(subtensor: Subtensor, commits: List[Commit]): +def _reveal_batch(subtensor: Subtensor, commits: List[Commit]): """ Reveals the weights for a batch of commits to the subtensor network. @@ -277,13 +283,13 @@ def reveal_batch(subtensor: Subtensor, commits: List[Commit]): if success: for commit in commits: - revealed_commit(commit.commit_hash) + _revealed_commit(commit.commit_hash) print(f"Reveal success for batch commit: {commit}") else: print(f"Reveal failure for batch commits: {message}") -def sync_commit_data(matching_commit, commit_block, reveal_block, expire_block): +def _sync_commit_data(matching_commit, commit_block, reveal_block, expire_block): """ Sync the commit data with the given block details. @@ -316,7 +322,7 @@ def sync_commit_data(matching_commit, commit_block, reveal_block, expire_block): print(f"Error updating commit data: {e}") -def chain_hash_sync(subtensor: Subtensor, current_block: int): +def _chain_hash_sync(subtensor: Subtensor, current_block: int): """ Perform a verification to check if the local reveal list is consistent with the chain. 
@@ -326,7 +332,7 @@ def chain_hash_sync(subtensor: Subtensor, current_block: int): """ try: # Retrieve all commits from the local database - local_commits = get_all_commits() + local_commits = _get_all_commits() # Filter commits to only those that are not revealed local_commits = [commit for commit in local_commits if not commit.revealed] chain_commits = [] @@ -376,7 +382,7 @@ def chain_hash_sync(subtensor: Subtensor, current_block: int): or reveal_block != matching_commit.reveal_block or expire_block != matching_commit.expire_block ): - sync_commit_data( + _sync_commit_data( matching_commit, commit_block, reveal_block, @@ -392,7 +398,7 @@ def chain_hash_sync(subtensor: Subtensor, current_block: int): print(f"Error during chain_hash_sync: {e}") -def delete_old_commits(current_block: int, offset: int): +def _delete_old_commits(current_block: int, offset: int): """ Deletes rows in the database where the current block is greater than the expire_block. Prints each commit before deleting it. @@ -402,7 +408,7 @@ def delete_old_commits(current_block: int, offset: int): current_block (int): The current block number. """ try: - commits = get_all_commits() + commits = _get_all_commits() if not commits: print("No commits found in the database.") return @@ -420,7 +426,7 @@ def delete_old_commits(current_block: int, offset: int): print(f"Error deleting expired commits: {e}") -def revealed_commit(commit_hash: str): +def _revealed_commit(commit_hash: str): """ Handles the revealed_hash command by updating the revealed status on the corresponding commit from the database using the commit hash. @@ -446,7 +452,7 @@ def revealed_commit(commit_hash: str): print(f"Error updating from table 'commits': {e}") -def revealed_commit_batch(commit_hashes: List[str]): +def _revealed_commit_batch(commit_hashes: List[str]): """ Handles the revealed_batch_hash command by removing the corresponding commits from the database using the commit hashes. 
@@ -474,7 +480,7 @@ def revealed_commit_batch(commit_hashes: List[str]): print(f"Error updating from table 'commits': {e}") -def committed(commit: Commit): +def _committed(commit: Commit): """ Commits a new commit object to the database. @@ -491,7 +497,7 @@ def committed(commit: Commit): print(f"Committed data: {commit_data}") -def get_all_commits() -> List[Commit]: +def _get_all_commits() -> List[Commit]: """ Retrieves all commits from the database. @@ -502,7 +508,7 @@ def get_all_commits() -> List[Commit]: return [Commit.from_dict(dict(zip(columns, commit))) for commit in rows] -def check_reveal(current_block: int) -> bool: +def _check_reveal(current_block: int) -> bool: """ Checks if there are any commits to reveal. @@ -513,7 +519,7 @@ def check_reveal(current_block: int) -> bool: bool: True if a commit was revealed, False otherwise. """ try: - commits = get_all_commits() + commits = _get_all_commits() commits = [commit for commit in commits if not commit.revealed] except Exception as e: @@ -531,7 +537,7 @@ def check_reveal(current_block: int) -> bool: return False -def reveal_commits(subtensor: Subtensor, current_block: int): +def _reveal_commits(subtensor: Subtensor, current_block: int): """ Performs reveal on commits that are ready to be revealed. @@ -540,7 +546,7 @@ def reveal_commits(subtensor: Subtensor, current_block: int): subtensor (Subtensor): The subtensor network object. 
""" try: - local_commits = get_all_commits() + local_commits = _get_all_commits() local_commits = [commit for commit in local_commits if not commit.revealed] local_reveals = [ commit @@ -601,10 +607,10 @@ def reveal_commits(subtensor: Subtensor, current_block: int): if len(ready_to_reveal) > 1: chain_reveals.extend(ready_to_reveal) - reveal_batch(subtensor, ready_to_reveal) + _reveal_batch(subtensor, ready_to_reveal) elif len(ready_to_reveal) == 1: chain_reveals.extend(ready_to_reveal) - reveal(subtensor, ready_to_reveal[0]) + _reveal(subtensor, ready_to_reveal[0]) except Exception as e: print(f"Error querying expected hashes for {combination}: {e}") @@ -626,7 +632,7 @@ def reveal_commits(subtensor: Subtensor, current_block: int): f"revealing commit {commit.commit_hash} as a newer hash was submitted" ) commit.revealed = True - revealed_commit(commit.commit_hash) + _revealed_commit(commit.commit_hash) except Exception as e: print(f"Error reading table 'commits': {e}") @@ -649,7 +655,7 @@ def handle_client_connection(client_socket: socket.socket): json_start_index = request.index("[") json_payload = request[json_start_index:] args = json.loads(json_payload) - revealed_commit_batch(args) + _revealed_commit_batch(args) except json.JSONDecodeError as e: print(f"Error decoding JSON for {command}: {e}") except Exception as e: @@ -658,11 +664,11 @@ def handle_client_connection(client_socket: socket.socket): args = shlex.split(request) command = args[0] commands = { - "revealed_hash": lambda: revealed_commit(args[1]), - "revealed_hash_batch": lambda: revealed_commit_batch( + "revealed_hash": lambda: _revealed_commit(args[1]), + "revealed_hash_batch": lambda: _revealed_commit_batch( json.loads(args[1]) ), - "committed": lambda: committed( + "committed": lambda: _committed( Commit( wallet_hotkey_name=args[3], wallet_hotkey_ss58=args[4], @@ -679,7 +685,7 @@ def handle_client_connection(client_socket: socket.socket): version_key=int(args[13]), ) ), - "terminate": lambda: 
terminate_process(None, None), + "terminate": lambda: _terminate_process(None, None), } if command in commands: try: @@ -696,15 +702,15 @@ def handle_client_connection(client_socket: socket.socket): client_socket.close() -def start_socket_server(): +def _start_socket_server(): """ Starts the socket server to listen for incoming connections. """ server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - server.bind(("127.0.0.1", 9949)) + server.bind((HOST, PORT)) server.listen(5) server.settimeout(2) # Set timeout for any incoming requests to 2 seconds - print("Listening on port 9949...") + print(f"Listening on port {PORT}...") with ThreadPoolExecutor(max_workers=10) as executor: # limit of workers amount while running: @@ -719,7 +725,7 @@ def start_socket_server(): break -def terminate_process(signal_number: Optional[int], frame: Optional[Any]): +def _terminate_process(signal_number: Optional[int], frame: Optional[Any]): """ Terminates the process gracefully. @@ -733,19 +739,19 @@ def terminate_process(signal_number: Optional[int], frame: Optional[Any]): sys.exit(0) -def main(args: argparse.Namespace): +def main(parsed_args: argparse.Namespace): """ The main function to run the Bittensor commit-reveal subprocess script. Args: - args (argparse.Namespace): The command-line arguments. + parsed_args (argparse.Namespace): The command-line arguments. 
""" - initialize_db() + _initialize_db() print( - f"initializing subtensor with network: {args.network} and sleep time: {args.sleep_interval} seconds" + f"initializing subtensor with network: {parsed_args.network} and sleep time: {parsed_args.sleep_interval} seconds" ) - subtensor = Subtensor(network=args.network, subprocess_initialization=False) - server_thread = threading.Thread(target=start_socket_server) + subtensor = Subtensor(network=parsed_args.network, subprocess_initialization=False) + server_thread = threading.Thread(target=_start_socket_server) server_thread.start() counter = 0 # Initialize counter @@ -753,15 +759,15 @@ def main(args: argparse.Namespace): while running: counter += 1 curr_block = subtensor.get_current_block() - if check_reveal(curr_block): + if _check_reveal(curr_block): print(f"Revealing commit on block {curr_block}") - reveal_commits(subtensor=subtensor, current_block=curr_block) + _reveal_commits(subtensor=subtensor, current_block=curr_block) if counter % 100 == 0: - chain_hash_sync(subtensor=subtensor, current_block=curr_block) - delete_old_commits(current_block=curr_block, offset=1000) + _chain_hash_sync(subtensor=subtensor, current_block=curr_block) + _delete_old_commits(current_block=curr_block, offset=1000) - time.sleep(args.sleep_interval) + time.sleep(parsed_args.sleep_interval) if __name__ == "__main__": diff --git a/bittensor/utils/subprocess_utils.py b/bittensor/utils/subprocess/utils.py similarity index 76% rename from bittensor/utils/subprocess_utils.py rename to bittensor/utils/subprocess/utils.py index 18331bd221..29386ed898 100644 --- a/bittensor/utils/subprocess_utils.py +++ b/bittensor/utils/subprocess/utils.py @@ -1,16 +1,18 @@ import os import re +import socket import sqlite3 import subprocess import time from datetime import datetime from typing import Optional - +from bittensor.utils.btlogging import logging import psutil -LOG_DIR = os.path.expanduser("~/.bittensor/logs") -PROCESS_NAME = "commit_reveal.py" - 
+LOG_DIR = os.path.join(os.path.expanduser("~"), ".bittensor", "logs") +COMMIT_REVEAL_PROCESS = "commit_reveal.py" +PORT = 9949 +HOST = "127.0.0.1" # Ensure the log directory exists os.makedirs(LOG_DIR, exist_ok=True) @@ -21,9 +23,9 @@ def get_cr_log_files() -> tuple[str, str]: Returns: tuple[str, str]: Paths to the stdout log file and stderr log file. """ - pid = get_process(PROCESS_NAME) + pid = get_process(COMMIT_REVEAL_PROCESS) if pid is None: - raise RuntimeError(f"Process '{PROCESS_NAME}' is not running.") + raise RuntimeError(f"Process '{COMMIT_REVEAL_PROCESS}' is not running.") # Define a regex pattern to match log files with timestamps log_pattern = re.compile(r"commit_reveal_(stdout|stderr)_(\d{8}_\d{6})\.log") @@ -100,15 +102,15 @@ def is_commit_reveal_subprocess_ready() -> bool: try: stdout_log, stderr_log = get_cr_log_files() except RuntimeError as e: - print(str(e)) + logging.error(str(e)) return False - def check_message_in_log(file_path: str, message: str) -> bool: + def check_message_in_log(file_path: str, message_: str) -> bool: """Check if a specific message is present in the log file.""" if os.path.exists(file_path): with open(file_path, "r") as file: for line in file: - if message in line: + if message_ in line: return True return False @@ -134,20 +136,20 @@ def is_table_empty(table_name: str) -> bool: ) table_exists = cursor.fetchone() if not table_exists: - print(f"Table '{table_name}' does not exist.") + logging.debug(f"Table '{table_name}' does not exist.") return True # Check if table is empty cursor.execute(f"SELECT COUNT(*) FROM {table_name}") count = cursor.fetchone()[0] if count == 0: - print(f"Table '{table_name}' is empty.") + logging.debug(f"Table '{table_name}' is empty.") return True else: - print(f"Table '{table_name}' is not empty.") + logging.debug(f"Table '{table_name}' is not empty.") return False except Exception as e: - print(f"Error checking if table '{table_name}' is empty: {e}") + logging.error(f"Error checking if table 
'{table_name}' is empty: {e}") return False @@ -160,11 +162,21 @@ def start_if_existing_commits( stop_commit_reveal_subprocess() start_commit_reveal_subprocess(network, sleep_interval) else: - print( + logging.info( "Existing commits table is empty. Skipping starting commit reveal subprocess until a commit is there." ) +def _is_port_available(port: int = PORT, host: str = HOST) -> bool: + """Checks if the specified port is available.""" + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + try: + s.bind((host, port)) + return True + except socket.error: + return False + + def start_commit_reveal_subprocess( network: Optional[str] = None, sleep_interval: Optional[float] = None ): @@ -180,19 +192,20 @@ def start_commit_reveal_subprocess( ) project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) - if not is_process_running(PROCESS_NAME): + if not is_process_running(COMMIT_REVEAL_PROCESS): current_time = datetime.now().strftime("%Y%m%d_%H%M%S") # Correctly construct the paths for STDOUT and STDERR log files stdout_log = os.path.join(LOG_DIR, f"commit_reveal_stdout_{current_time}.log") stderr_log = os.path.join(LOG_DIR, f"commit_reveal_stderr_{current_time}.log") - os.makedirs(LOG_DIR, exist_ok=True) + if not _is_port_available(): + logging.error(f":cross_mark: Error: Port {PORT} is busy.") + return - stdout_file = open(stdout_log, "w") - stderr_file = open(stderr_log, "w") + os.makedirs(LOG_DIR, exist_ok=True) - print(f"Starting subprocess '{PROCESS_NAME}'...") + logging.info(f"Starting subprocess '{COMMIT_REVEAL_PROCESS}'...") env = os.environ.copy() env["PYTHONPATH"] = project_root + ":" + env.get("PYTHONPATH", "") @@ -211,36 +224,36 @@ def start_commit_reveal_subprocess( preexec_fn=os.setsid, env=env, ) - print(f"Subprocess '{PROCESS_NAME}' started with PID {process.pid}.") + logging.info(f"Subprocess '{COMMIT_REVEAL_PROCESS}' started with PID {process.pid}.") attempt_count = 0 while not is_commit_reveal_subprocess_ready() and 
attempt_count < 5: time.sleep(5) - print( + logging.debug( f"Waiting for commit_reveal subprocess to be ready. Attempt {attempt_count + 1}..." ) attempt_count += 1 if attempt_count >= 5: - print("Max attempts reached. Subprocess may not be ready.") + logging.warning("Max start attempts reached. Subprocess may not be ready.") except Exception as e: - print(f"Failed to start subprocess '{PROCESS_NAME}': {e}") + logging.error(f"Failed to start subprocess '{COMMIT_REVEAL_PROCESS}': {e}") else: - print(f"Subprocess '{PROCESS_NAME}' is already running.") + logging.error(f"Subprocess '{COMMIT_REVEAL_PROCESS}' is already running.") def stop_commit_reveal_subprocess(): """ Stop the commit reveal subprocess if it is running. """ - pid = get_process(PROCESS_NAME) + pid = get_process(COMMIT_REVEAL_PROCESS) if pid is not None: - print(f"Stopping subprocess '{PROCESS_NAME}' with PID {pid}...") + logging.debug(f"Stopping subprocess '{COMMIT_REVEAL_PROCESS}' with PID {pid}...") os.kill(pid, 15) # SIGTERM - print(f"Subprocess '{PROCESS_NAME}' stopped.") + logging.debug(f"Subprocess '{COMMIT_REVEAL_PROCESS}' stopped.") else: - print(f"Subprocess '{PROCESS_NAME}' is not running.") + logging.debug(f"Subprocess '{COMMIT_REVEAL_PROCESS}' is not running.") class DB: @@ -250,7 +263,7 @@ class DB: def __init__( self, - db_path: str = os.path.expanduser("~/.bittensor/bittensor.db"), + db_path: str = os.path.join(os.path.expanduser("~"), ".bittensor", "bittensor.db"), row_factory=None, ): if not os.path.exists(os.path.dirname(db_path)): @@ -270,7 +283,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.conn.close() -def create_table(title: str, columns: list[tuple[str, str]], rows: list[list]): +def create_table(title: str, columns: list[tuple[str, str]], rows: list[list]) -> bool: """ Creates and populates the rows of a table in the SQLite database. @@ -278,6 +291,9 @@ def create_table(title: str, columns: list[tuple[str, str]], rows: list[list]): title (str): title of the table. 
columns (list[tuple[str, str]]): List of tuples where each tuple contains column name and column type. rows (list[list]): List of lists where each sublist contains elements representing a row. + + Returns: + bool: True if the table creation is successful, False otherwise. """ blob_cols = [] for idx, (_, col_type) in enumerate(columns): @@ -287,19 +303,22 @@ def create_table(title: str, columns: list[tuple[str, str]], rows: list[list]): for row in rows: for idx in blob_cols: row[idx] = row[idx].to_bytes(row[idx].bit_length() + 7, byteorder="big") - with DB() as (conn, cursor): - drop_query = f"DROP TABLE IF EXISTS {title}" - cursor.execute(drop_query) - conn.commit() - columns_ = ", ".join([" ".join(x) for x in columns]) - creation_query = f"CREATE TABLE IF NOT EXISTS {title} ({columns_})" - conn.commit() - cursor.execute(creation_query) - conn.commit() - query = f"INSERT INTO {title} ({', '.join([x[0] for x in columns])}) VALUES ({', '.join(['?'] * len(columns))})" - cursor.executemany(query, rows) - conn.commit() - return + try: + with DB() as (conn, cursor): + drop_query = f"DROP TABLE IF EXISTS {title}" + cursor.execute(drop_query) + conn.commit() + columns_ = ", ".join([" ".join(x) for x in columns]) + creation_query = f"CREATE TABLE IF NOT EXISTS {title} ({columns_})" + cursor.execute(creation_query) + conn.commit() + query = f"INSERT INTO {title} ({', '.join([x[0] for x in columns])}) VALUES ({', '.join(['?'] * len(columns))})" + cursor.executemany(query, rows) + conn.commit() + return True + except Exception as e: + logging.error(f"Error creating table '{title}': {e}") + return False def read_table(table_name: str, order_by: str = "") -> tuple[list, list]: @@ -323,7 +342,7 @@ def read_table(table_name: str, order_by: str = "") -> tuple[list, list]: column_names.append(info[1]) column_types.append(info[2]) except IndexError: - print(f"Error retrieving column info: {info}") + logging.error(f"Error retrieving column info: {info}") cursor.execute(f"SELECT * FROM 
{table_name} {order_by}") rows = cursor.fetchall() diff --git a/tests/e2e_tests/conftest.py b/tests/e2e_tests/conftest.py index acfafb29c6..835755fa0d 100644 --- a/tests/e2e_tests/conftest.py +++ b/tests/e2e_tests/conftest.py @@ -8,7 +8,7 @@ import pytest from substrateinterface import SubstrateInterface -from bittensor.utils import subprocess_utils +from bittensor.utils.subprocess import utils from bittensor import logging from tests.e2e_tests.utils.e2e_test_utils import ( clone_or_update_templates, @@ -65,12 +65,12 @@ def local_chain(request): pattern = re.compile(r"Imported #1") timestamp = int(time.time()) - def wait_for_node_start(process, pattern): - for line in process.stdout: + def wait_for_node_start(process_, pattern_): + for line in process_.stdout: print(line.strip()) if int(time.time()) - timestamp > 20 * 60: pytest.fail("Subtensor not started in time") - if pattern.search(line): + if pattern_.search(line): print("Node started!") break diff --git a/tests/e2e_tests/test_reveal_weights.py b/tests/e2e_tests/test_reveal_weights.py index e20222f133..b7f24710fc 100644 --- a/tests/e2e_tests/test_reveal_weights.py +++ b/tests/e2e_tests/test_reveal_weights.py @@ -5,7 +5,6 @@ import bittensor.utils.subprocess.commit_reveal as commit_reveal_subprocess import bittensor from bittensor import logging -from bittensor.utils import subprocess_utils from bittensor.utils.weight_utils import convert_weights_and_uids_for_emit from tests.e2e_tests.utils.chain_interactions import ( add_stake, From 9705134dc4b494d3078783e9d9e9584de50bba79 Mon Sep 17 00:00:00 2001 From: opendansor Date: Tue, 5 Nov 2024 00:17:42 -0800 Subject: [PATCH 52/58] Refactor commit and reveal logic, remove prompts Refactored the commit and reveal weight functions to simplify code, eliminate retry loops, and remove user prompts. The subprocess handling has been improved for better subprocess control, and utility functions have been moved to a new module. 
--- bittensor/core/extrinsics/commit_weights.py | 16 +++++----- bittensor/core/subtensor.py | 4 +-- .../__init__.py | 0 .../commit_reveal.py | 8 ++--- .../utils.py | 29 ++++++++++--------- tests/e2e_tests/conftest.py | 4 +-- tests/e2e_tests/test_commit_weights.py | 7 +++-- tests/e2e_tests/test_reveal_weights.py | 16 +++++----- .../unit_tests/extrinsics/test_set_weights.py | 26 ++--------------- tests/unit_tests/test_subtensor.py | 13 +-------- 10 files changed, 47 insertions(+), 76 deletions(-) rename bittensor/utils/{subprocess => background_subprocess}/__init__.py (100%) rename bittensor/utils/{subprocess => background_subprocess}/commit_reveal.py (98%) rename bittensor/utils/{subprocess => background_subprocess}/utils.py (91%) diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index 00ff3bd71c..a525bdb0ad 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -157,7 +157,7 @@ def commit_weights_process( block: Optional[int] = None, ): """ - Lets the subprocess know what a commit was submitted to the chain. + Lets the background_subprocess know what a commit was submitted to the chain. Args: subtensor (bittensor.core.subtensor.Subtensor): The subtensor instance used for blockchain interaction. @@ -170,7 +170,7 @@ def commit_weights_process( version_key (int): Version key for network compatibility (default is settings.version_as_int). block (Optional[int]): Specific block number to use (default is None). - The function calculates the necessary blocks until the next epoch and the reveal block, then the subprocess will + The function calculates the necessary blocks until the next epoch and the reveal block, then the background_subprocess will wait until the appropriate time to reveal the weights. 
""" @@ -330,10 +330,10 @@ def reveal_weights_process( version_key: int = settings.version_as_int, ): """ - Coordinates the process of revealing weights with the background subprocess. + Coordinates the process of revealing weights with the background background_subprocess. This method generates a hash of the weights using the provided wallet and network - parameters, and sends a command to a local subprocess that this commit was revealed. + parameters, and sends a command to a local background_subprocess that this commit was revealed. In case of any exception during hash generation, it sends a command with detailed information including wallet details and weight parameters. @@ -353,7 +353,7 @@ def send_command(command_): client.close() try: - # Generate the hash of the weights - so we can remove from local reveal subprocess + # Generate the hash of the weights - so we can remove from local reveal background_subprocess commit_hash = generate_weight_hash( address=wallet.hotkey.ss58_address, netuid=netuid, @@ -365,7 +365,7 @@ def send_command(command_): command = f'revealed_hash "{commit_hash}"' send_command(command) except Exception as e: - logging.error(f"Not able to generate hash to reveal weights on subprocess: {e}") + logging.error(f"Not able to generate hash to reveal weights on background_subprocess: {e}") # Chain call for `batch_reveal_weights_extrinsic` @@ -435,7 +435,7 @@ def make_substrate_call_with_retry(extrinsic_): call=call, keypair=wallet.hotkey, ) - return make_substrate_call_with_retry(extrinsic=extrinsic) + return make_substrate_call_with_retry(extrinsic) def batch_reveal_weights_extrinsic( @@ -543,4 +543,4 @@ def send_command(command_): command = f"revealed_hash_batch {json.dumps(commit_hashes)}" send_command(command) except Exception as e: - logging.error(f"Failed batch reveal weights subprocess: {e}") + logging.error(f"Failed batch reveal weights background_subprocess: {e}") diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py 
index 2c43b099ee..245babd0be 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -82,7 +82,7 @@ u16_normalized_float, networking, ) -from bittensor.utils.subprocess import utils +from bittensor.utils.background_subprocess import utils as subprocess_utils from bittensor.utils.balance import Balance from bittensor.utils.btlogging import logging from bittensor.utils.registration import legacy_torch_api_compat @@ -1833,7 +1833,7 @@ def commit_weights( f"Committing weights with params: netuid={netuid}, uids={uids}, weights={weights}, version_key={version_key}" ) - # start subprocess if permitted and not yet running + # start background_subprocess if permitted and not yet running if self.subprocess_initialization and not subprocess_utils.is_process_running( subprocess_utils.COMMIT_REVEAL_PROCESS ): diff --git a/bittensor/utils/subprocess/__init__.py b/bittensor/utils/background_subprocess/__init__.py similarity index 100% rename from bittensor/utils/subprocess/__init__.py rename to bittensor/utils/background_subprocess/__init__.py diff --git a/bittensor/utils/subprocess/commit_reveal.py b/bittensor/utils/background_subprocess/commit_reveal.py similarity index 98% rename from bittensor/utils/subprocess/commit_reveal.py rename to bittensor/utils/background_subprocess/commit_reveal.py index 562b983115..24416f6b8f 100644 --- a/bittensor/utils/subprocess/commit_reveal.py +++ b/bittensor/utils/background_subprocess/commit_reveal.py @@ -10,7 +10,7 @@ from bittensor.core.subtensor import Subtensor from bittensor_wallet import Wallet -from bittensor.utils.subprocess import utils as utils +from bittensor.utils.background_subprocess import utils as utils from typing import List, Any, Dict, Optional # Path to the SQLite database @@ -741,7 +741,7 @@ def _terminate_process(signal_number: Optional[int], frame: Optional[Any]): def main(parsed_args: argparse.Namespace): """ - The main function to run the Bittensor commit-reveal subprocess script. 
+ The main function to run the Bittensor commit-reveal background_subprocess script. Args: parsed_args (argparse.Namespace): The command-line arguments. @@ -755,7 +755,7 @@ def main(parsed_args: argparse.Namespace): server_thread.start() counter = 0 # Initialize counter - print("commit_reveal subprocess is ready") + print("commit_reveal background_subprocess is ready") while running: counter += 1 curr_block = subtensor.get_current_block() @@ -772,7 +772,7 @@ def main(parsed_args: argparse.Namespace): if __name__ == "__main__": parser = argparse.ArgumentParser( - description="Run the Bittensor commit-reveal subprocess script." + description="Run the Bittensor commit-reveal background_subprocess script." ) parser.add_argument( "--network", diff --git a/bittensor/utils/subprocess/utils.py b/bittensor/utils/background_subprocess/utils.py similarity index 91% rename from bittensor/utils/subprocess/utils.py rename to bittensor/utils/background_subprocess/utils.py index 29386ed898..9dd1db046b 100644 --- a/bittensor/utils/subprocess/utils.py +++ b/bittensor/utils/background_subprocess/utils.py @@ -94,7 +94,7 @@ def get_process(process_name: str) -> Optional[int]: def is_commit_reveal_subprocess_ready() -> bool: """ - Check the logs for the message 'commit_reveal subprocess is ready' and return True if it's found. + Check the logs for the message 'commit_reveal background_subprocess is ready' and return True if it's found. Returns: bool: True if the message is found in the logs, False otherwise. @@ -114,7 +114,7 @@ def check_message_in_log(file_path: str, message_: str) -> bool: return True return False - message = "commit_reveal subprocess is ready" + message = "commit_reveal background_subprocess is ready" return check_message_in_log(stdout_log, message) @@ -163,7 +163,7 @@ def start_if_existing_commits( start_commit_reveal_subprocess(network, sleep_interval) else: logging.info( - "Existing commits table is empty. 
Skipping starting commit reveal subprocess until a commit is there." + "Existing commits table is empty. Skipping starting commit reveal background_subprocess until a commit is there." ) @@ -181,14 +181,14 @@ def start_commit_reveal_subprocess( network: Optional[str] = None, sleep_interval: Optional[float] = None ): """ - Start the commit reveal subprocess if not already running. + Start the commit reveal background_subprocess if not already running. Args: network (Optional[str]): Network name if any, optional. sleep_interval (Optional[float]): Sleep interval if any, optional. """ script_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), "subprocess", "commit_reveal.py") + os.path.join(os.path.dirname(__file__), "commit_reveal.py") ) project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) @@ -205,7 +205,10 @@ def start_commit_reveal_subprocess( os.makedirs(LOG_DIR, exist_ok=True) - logging.info(f"Starting subprocess '{COMMIT_REVEAL_PROCESS}'...") + stdout_file = open(stdout_log, "w") + stderr_file = open(stderr_log, "w") + + logging.info(f"Starting background_subprocess '{COMMIT_REVEAL_PROCESS}'...") env = os.environ.copy() env["PYTHONPATH"] = project_root + ":" + env.get("PYTHONPATH", "") @@ -216,11 +219,11 @@ def start_commit_reveal_subprocess( args.extend(["--sleep-interval", str(sleep_interval)]) try: - # Create a new subprocess + # Create a new background_subprocess process = subprocess.Popen( args=args, - stdout=open(stdout_log, "a"), # Redirect subprocess stdout to log file - stderr=open(stderr_log, "a"), # Redirect subprocess stderr to log file + stdout=stdout_file, + stderr=stderr_file, preexec_fn=os.setsid, env=env, ) @@ -230,26 +233,26 @@ def start_commit_reveal_subprocess( while not is_commit_reveal_subprocess_ready() and attempt_count < 5: time.sleep(5) logging.debug( - f"Waiting for commit_reveal subprocess to be ready. Attempt {attempt_count + 1}..." + f"Waiting for commit_reveal background_subprocess to be ready. 
Attempt {attempt_count + 1}..." ) attempt_count += 1 if attempt_count >= 5: logging.warning("Max start attempts reached. Subprocess may not be ready.") except Exception as e: - logging.error(f"Failed to start subprocess '{COMMIT_REVEAL_PROCESS}': {e}") + logging.error(f"Failed to start background_subprocess '{COMMIT_REVEAL_PROCESS}': {e}") else: logging.error(f"Subprocess '{COMMIT_REVEAL_PROCESS}' is already running.") def stop_commit_reveal_subprocess(): """ - Stop the commit reveal subprocess if it is running. + Stop the commit reveal background_subprocess if it is running. """ pid = get_process(COMMIT_REVEAL_PROCESS) if pid is not None: - logging.debug(f"Stopping subprocess '{COMMIT_REVEAL_PROCESS}' with PID {pid}...") + logging.debug(f"Stopping background_subprocess '{COMMIT_REVEAL_PROCESS}' with PID {pid}...") os.kill(pid, 15) # SIGTERM logging.debug(f"Subprocess '{COMMIT_REVEAL_PROCESS}' stopped.") else: diff --git a/tests/e2e_tests/conftest.py b/tests/e2e_tests/conftest.py index e542e1c852..0bf1194269 100644 --- a/tests/e2e_tests/conftest.py +++ b/tests/e2e_tests/conftest.py @@ -10,7 +10,7 @@ from substrateinterface import SubstrateInterface from bittensor.utils.btlogging import logging -from bittensor.utils.subprocess import utils +from bittensor.utils.background_subprocess import utils from tests.e2e_tests.utils.e2e_test_utils import ( clone_or_update_templates, install_templates, @@ -97,5 +97,5 @@ def wait_for_node_start(process_, pattern_): logging.info("Uninstalling neuron templates") uninstall_templates(templates_dir) - # kill subprocess if its running + # kill background_subprocess if its running utils.stop_commit_reveal_subprocess() diff --git a/tests/e2e_tests/test_commit_weights.py b/tests/e2e_tests/test_commit_weights.py index b9cf049ba5..e197f71658 100644 --- a/tests/e2e_tests/test_commit_weights.py +++ b/tests/e2e_tests/test_commit_weights.py @@ -21,7 +21,7 @@ @pytest.mark.asyncio async def test_commit_and_reveal_weights(local_chain): """ - 
Tests the commit/reveal weights mechanism with subprocess disabled (CR1.0) + Tests the commit/reveal weights mechanism with background_subprocess disabled (CR1.0) Steps: 1. Register a subnet through Alice @@ -76,7 +76,6 @@ async def test_commit_and_reveal_weights(local_chain): return_error_message=True, ) - subtensor = Subtensor(network="ws://localhost:9945") assert ( subtensor.get_subnet_hyperparameters( netuid=netuid @@ -95,7 +94,7 @@ async def test_commit_and_reveal_weights(local_chain): call_params={"netuid": netuid, "weights_set_rate_limit": "0"}, return_error_message=True, ) - subtensor = Subtensor(network="ws://localhost:9945") + assert ( subtensor.get_subnet_hyperparameters(netuid=netuid).weights_rate_limit == 0 ), "Failed to set weights_rate_limit" @@ -154,6 +153,8 @@ async def test_commit_and_reveal_weights(local_chain): ) time.sleep(10) + assert success + # Query the Weights storage map revealed_weights = subtensor.query_module( module="SubtensorModule", diff --git a/tests/e2e_tests/test_reveal_weights.py b/tests/e2e_tests/test_reveal_weights.py index b7f24710fc..e8e8c29ca2 100644 --- a/tests/e2e_tests/test_reveal_weights.py +++ b/tests/e2e_tests/test_reveal_weights.py @@ -2,7 +2,7 @@ import numpy as np import pytest -import bittensor.utils.subprocess.commit_reveal as commit_reveal_subprocess +import bittensor.utils.background_subprocess.commit_reveal as commit_reveal_subprocess import bittensor from bittensor import logging from bittensor.utils.weight_utils import convert_weights_and_uids_for_emit @@ -20,7 +20,7 @@ @pytest.mark.asyncio async def test_commit_and_reveal_weights(local_chain): """ - Tests the commit/reveal weights mechanism with a subprocess doing the reveal function + Tests the commit/reveal weights mechanism with a background_subprocess doing the reveal function Steps: 1. Register a subnet through Alice @@ -28,7 +28,7 @@ async def test_commit_and_reveal_weights(local_chain): 3. Enable commit-reveal mechanism on the subnet 4. 
Lower the commit_reveal interval and rate limit 5. Commit weights and verify - 6. Wait interval & see if subprocess did the reveal weights and verify + 6. Wait interval & see if background_subprocess did the reveal weights and verify Raises: AssertionError: If any of the checks or verifications fail """ @@ -166,7 +166,7 @@ async def test_commit_and_reveal_weights(local_chain): @pytest.mark.asyncio async def test_set_and_reveal_weights(local_chain): """ - Tests the commit/reveal weights mechanism with a subprocess doing the reveal function + Tests the commit/reveal weights mechanism with a background_subprocess doing the reveal function Steps: 1. Register a subnet through Alice @@ -174,7 +174,7 @@ async def test_set_and_reveal_weights(local_chain): 3. Enable commit-reveal mechanism on the subnet 4. Lower the commit_reveal interval and rate limit 5. Commit weights and verify - 6. Wait interval & see if subprocess did the reveal weights and verify + 6. Wait interval & see if background_subprocess did the reveal weights and verify Raises: AssertionError: If any of the checks or verifications fail """ @@ -310,7 +310,7 @@ async def test_set_and_reveal_weights(local_chain): @pytest.mark.asyncio async def test_set_and_reveal_batch_weights(local_chain): """ - Tests the commit/reveal batch weights mechanism with a subprocess doing the reveal function + Tests the commit/reveal batch weights mechanism with a background_subprocess doing the reveal function Steps: 1. Register a subnet through Alice @@ -318,7 +318,7 @@ async def test_set_and_reveal_batch_weights(local_chain): 3. Enable commit-reveal mechanism on the subnet 4. Lower the commit_reveal interval and rate limit 5. Commit weights and verify - 6. Wait interval & see if subprocess did the reveal weights and verify + 6. 
Wait interval & see if background_subprocess did the reveal weights and verify Raises: AssertionError: If any of the checks or verifications fail """ @@ -475,7 +475,7 @@ async def test_set_and_reveal_batch_weights_over_limit(local_chain): 3. Enable commit-reveal mechanism on the subnet 4. Lower the commit_reveal interval and rate limit 5. Commit weights and verify - 6. Wait interval & see if subprocess did the reveal weights and verify + 6. Wait interval & see if background_subprocess did the reveal weights and verify Raises: AssertionError: If any of the checks or verifications fail """ diff --git a/tests/unit_tests/extrinsics/test_set_weights.py b/tests/unit_tests/extrinsics/test_set_weights.py index ef80bc2b00..78249de279 100644 --- a/tests/unit_tests/extrinsics/test_set_weights.py +++ b/tests/unit_tests/extrinsics/test_set_weights.py @@ -28,7 +28,7 @@ def mock_wallet(): @pytest.mark.parametrize( - "uids, weights, version_key, wait_for_inclusion, wait_for_finalization, prompt, user_accepts, expected_success, expected_message", + "uids, weights, version_key, wait_for_inclusion, wait_for_finalization, expected_success, expected_message", [ ( [1, 2], @@ -37,8 +37,6 @@ def mock_wallet(): True, False, True, - True, - True, "Successfully set weights and Finalized.", ), ( @@ -47,8 +45,6 @@ def mock_wallet(): 0, False, False, - False, - True, True, "Not waiting for finalization or inclusion.", ), @@ -58,18 +54,14 @@ def mock_wallet(): 0, True, False, - True, - True, False, "Mock error message", ), - ([1, 2], [0.5, 0.5], 0, True, True, True, False, False, "Prompt refused."), ], ids=[ "happy-flow", "not-waiting-finalization-inclusion", "error-flow", - "prompt-refused", ], ) def test_set_weights_extrinsic( @@ -80,8 +72,6 @@ def test_set_weights_extrinsic( version_key, wait_for_inclusion, wait_for_finalization, - prompt, - user_accepts, expected_success, expected_message, ): @@ -95,7 +85,7 @@ def test_set_weights_extrinsic( ), patch( 
"bittensor.utils.weight_utils.convert_weights_and_uids_for_emit", return_value=(uids_tensor, weights_tensor), - ), patch("rich.prompt.Confirm.ask", return_value=user_accepts), patch( + ), patch( "bittensor.core.extrinsics.set_weights.do_set_weights", return_value=(expected_success, "Mock error message"), ) as mock_do_set_weights: @@ -108,22 +98,10 @@ def test_set_weights_extrinsic( version_key=version_key, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, - prompt=prompt, ) assert result == expected_success, f"Test {expected_message} failed." assert message == expected_message, f"Test {expected_message} failed." - if user_accepts is not False: - mock_do_set_weights.assert_called_once_with( - self=mock_subtensor, - wallet=mock_wallet, - netuid=123, - uids=uids_tensor, - vals=weights_tensor, - version_key=version_key, - wait_for_finalization=wait_for_finalization, - wait_for_inclusion=wait_for_inclusion, - ) def test_do_set_weights_is_success(mock_subtensor, mocker): diff --git a/tests/unit_tests/test_subtensor.py b/tests/unit_tests/test_subtensor.py index db572b9b7d..324f311872 100644 --- a/tests/unit_tests/test_subtensor.py +++ b/tests/unit_tests/test_subtensor.py @@ -1133,7 +1133,6 @@ def test_set_weights(subtensor, mocker): fake_weights = [0.4, 0.6] fake_wait_for_inclusion = False fake_wait_for_finalization = False - fake_prompt = False fake_max_retries = 5 expected_result = (True, None) @@ -1160,7 +1159,6 @@ def test_set_weights(subtensor, mocker): version_key=settings.version_as_int, wait_for_inclusion=fake_wait_for_inclusion, wait_for_finalization=fake_wait_for_finalization, - prompt=fake_prompt, max_retries=fake_max_retries, ) @@ -1181,7 +1179,6 @@ def test_set_weights(subtensor, mocker): version_key=settings.version_as_int, wait_for_inclusion=fake_wait_for_inclusion, wait_for_finalization=fake_wait_for_finalization, - prompt=fake_prompt, ) assert result == expected_result @@ -1744,8 +1741,6 @@ def 
test_commit_weights(subtensor, mocker): weights = [0.4, 0.6] wait_for_inclusion = False wait_for_finalization = False - prompt = False - max_retries = 5 expected_result = (True, None) mocked_generate_weight_hash = mocker.patch.object( @@ -1766,8 +1761,6 @@ def test_commit_weights(subtensor, mocker): version_key=settings.version_as_int, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, - prompt=prompt, - max_retries=max_retries, ) weight_uids, weight_vals = convert_weights_and_uids_for_emit( @@ -1791,7 +1784,6 @@ def test_commit_weights(subtensor, mocker): commit_hash=mocked_generate_weight_hash.return_value, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization, - prompt=prompt, ) assert result == expected_result @@ -1818,7 +1810,6 @@ def test_reveal_weights(subtensor, mocker): salt=salt, wait_for_inclusion=False, wait_for_finalization=False, - prompt=False, ) # Assertions @@ -1833,7 +1824,6 @@ def test_reveal_weights(subtensor, mocker): salt=salt, wait_for_inclusion=False, wait_for_finalization=False, - prompt=False, ) @@ -1861,12 +1851,11 @@ def test_reveal_weights_false(subtensor, mocker): salt=salt, wait_for_inclusion=False, wait_for_finalization=False, - prompt=False, ) # Assertion assert result == expected_result - assert mocked_extrinsic.call_count == 5 + assert mocked_extrinsic.call_count == 1 def test_connect_without_substrate(mocker): From 77087df04e2ee4555a0e011c2c77f5ab10e6d888 Mon Sep 17 00:00:00 2001 From: opendansor Date: Tue, 5 Nov 2024 00:19:36 -0800 Subject: [PATCH 53/58] Lint --- bittensor/core/async_subtensor.py | 4 +-- bittensor/core/extrinsics/commit_weights.py | 8 ++++-- bittensor/core/extrinsics/set_weights.py | 25 +++++++++++-------- bittensor/core/subtensor.py | 13 ++++++---- .../utils/background_subprocess/utils.py | 23 ++++++++++++----- 5 files changed, 47 insertions(+), 26 deletions(-) diff --git a/bittensor/core/async_subtensor.py b/bittensor/core/async_subtensor.py index 
508ae02439..86c5b300c2 100644 --- a/bittensor/core/async_subtensor.py +++ b/bittensor/core/async_subtensor.py @@ -245,7 +245,7 @@ async def get_total_subnets( module="SubtensorModule", storage_function="TotalNetworks", params=[], - block_hash=block_hash + block_hash=block_hash, ) return result @@ -1298,7 +1298,7 @@ async def get_uid_for_hotkey_on_subnet( module="SubtensorModule", storage_function="Uids", params=[netuid, hotkey_ss58], - block_hash=block_hash + block_hash=block_hash, ) # extrinsics diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index a525bdb0ad..36ac0722bd 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -365,7 +365,9 @@ def send_command(command_): command = f'revealed_hash "{commit_hash}"' send_command(command) except Exception as e: - logging.error(f"Not able to generate hash to reveal weights on background_subprocess: {e}") + logging.error( + f"Not able to generate hash to reveal weights on background_subprocess: {e}" + ) # Chain call for `batch_reveal_weights_extrinsic` @@ -490,7 +492,9 @@ def batch_reveal_weights_extrinsic( logging.success(success_message) return True, success_message else: - error_message = format_error_message(error_message, substrate=subtensor.substrate) + error_message = format_error_message( + error_message, substrate=subtensor.substrate + ) logging.error(f"Failed batch reveal weights extrinsic: {error_message}") return False, error_message diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index d8572d43e7..63e0ca38a8 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -135,13 +135,14 @@ def set_weights_extrinsic( tuple[bool, str]: A tuple containing a success flag and an optional response message. 
""" get_subnet_hyperparameters = subtensor.get_subnet_hyperparameters(netuid=netuid) - if get_subnet_hyperparameters and get_subnet_hyperparameters.commit_reveal_weights_enabled: + if ( + get_subnet_hyperparameters + and get_subnet_hyperparameters.commit_reveal_weights_enabled + ): # if cr is enabled, commit instead of setting the weights. salt = [random.randint(0, 350) for _ in range(8)] - logging.info( - f":satellite: Committing weights on {subtensor.network}..." - ) + logging.info(f":satellite: Committing weights on {subtensor.network}...") try: # First convert types. if use_torch(): @@ -156,8 +157,8 @@ def set_weights_extrinsic( weights = np.array(weights, dtype=np.float32) # Reformat and normalize. - weight_uids, weight_vals = ( - weight_utils.convert_weights_and_uids_for_emit(uids, weights) + weight_uids, weight_vals = weight_utils.convert_weights_and_uids_for_emit( + uids, weights ) success, message = subtensor.commit_weights( @@ -173,7 +174,9 @@ def set_weights_extrinsic( return True, "Not waiting for finalization or inclusion." if success is True: - logging.success(f"Finalized! Committed weights: {str(success)}") + logging.success( + f"Finalized! Committed weights: {str(success)}" + ) return True, "Successfully committed weights and Finalized." else: logging.error(message) @@ -200,9 +203,7 @@ def set_weights_extrinsic( uids, weights ) - logging.info( - f":satellite: Setting weights on {subtensor.network}..." - ) + logging.info(f":satellite: Setting weights on {subtensor.network}...") try: success, error_message = do_set_weights( self=subtensor, @@ -219,7 +220,9 @@ def set_weights_extrinsic( return True, "Not waiting for finalization or inclusion." if success is True: - logging.success(f"Finalized! Set weights: {str(success)}") + logging.success( + f"Finalized! Set weights: {str(success)}" + ) return True, "Successfully set weights and Finalized." 
else: logging.error(error_message) diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index 245babd0be..77fdd12c35 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -1859,9 +1859,8 @@ def commit_weights( ) try: - if ( - self.subprocess_initialization - and subprocess_utils.is_process_running(subprocess_utils.COMMIT_REVEAL_PROCESS) + if self.subprocess_initialization and subprocess_utils.is_process_running( + subprocess_utils.COMMIT_REVEAL_PROCESS ): commit_weights_process( self, @@ -1936,7 +1935,9 @@ def reveal_weights( ) if success: # remove from local db if called directly - if subprocess_utils.is_process_running(subprocess_utils.COMMIT_REVEAL_PROCESS): + if subprocess_utils.is_process_running( + subprocess_utils.COMMIT_REVEAL_PROCESS + ): reveal_weights_process( wallet=wallet, netuid=netuid, @@ -2020,7 +2021,9 @@ def batch_reveal_weights( ) if success: # remove from local db if called directly - if subprocess_utils.is_process_running(subprocess_utils.COMMIT_REVEAL_PROCESS): + if subprocess_utils.is_process_running( + subprocess_utils.COMMIT_REVEAL_PROCESS + ): batch_reveal_weights_process( wallet=wallet, netuid=netuid, diff --git a/bittensor/utils/background_subprocess/utils.py b/bittensor/utils/background_subprocess/utils.py index 9dd1db046b..d76dd98a06 100644 --- a/bittensor/utils/background_subprocess/utils.py +++ b/bittensor/utils/background_subprocess/utils.py @@ -132,7 +132,8 @@ def is_table_empty(table_name: str) -> bool: with DB() as (conn, cursor): # Check if table exists cursor.execute( - "SELECT name FROM sqlite_master WHERE type='table' AND name=?", (table_name,) + "SELECT name FROM sqlite_master WHERE type='table' AND name=?", + (table_name,), ) table_exists = cursor.fetchone() if not table_exists: @@ -227,7 +228,9 @@ def start_commit_reveal_subprocess( preexec_fn=os.setsid, env=env, ) - logging.info(f"Subprocess '{COMMIT_REVEAL_PROCESS}' started with PID {process.pid}.") + logging.info( + f"Subprocess 
'{COMMIT_REVEAL_PROCESS}' started with PID {process.pid}." + ) attempt_count = 0 while not is_commit_reveal_subprocess_ready() and attempt_count < 5: @@ -238,9 +241,13 @@ def start_commit_reveal_subprocess( attempt_count += 1 if attempt_count >= 5: - logging.warning("Max start attempts reached. Subprocess may not be ready.") + logging.warning( + "Max start attempts reached. Subprocess may not be ready." + ) except Exception as e: - logging.error(f"Failed to start background_subprocess '{COMMIT_REVEAL_PROCESS}': {e}") + logging.error( + f"Failed to start background_subprocess '{COMMIT_REVEAL_PROCESS}': {e}" + ) else: logging.error(f"Subprocess '{COMMIT_REVEAL_PROCESS}' is already running.") @@ -252,7 +259,9 @@ def stop_commit_reveal_subprocess(): pid = get_process(COMMIT_REVEAL_PROCESS) if pid is not None: - logging.debug(f"Stopping background_subprocess '{COMMIT_REVEAL_PROCESS}' with PID {pid}...") + logging.debug( + f"Stopping background_subprocess '{COMMIT_REVEAL_PROCESS}' with PID {pid}..." 
+ ) os.kill(pid, 15) # SIGTERM logging.debug(f"Subprocess '{COMMIT_REVEAL_PROCESS}' stopped.") else: @@ -266,7 +275,9 @@ class DB: def __init__( self, - db_path: str = os.path.join(os.path.expanduser("~"), ".bittensor", "bittensor.db"), + db_path: str = os.path.join( + os.path.expanduser("~"), ".bittensor", "bittensor.db" + ), row_factory=None, ): if not os.path.exists(os.path.dirname(db_path)): From 666ec7e166556a6dd668a9ca4a5225f819d09fa1 Mon Sep 17 00:00:00 2001 From: opendansor Date: Tue, 5 Nov 2024 00:32:45 -0800 Subject: [PATCH 54/58] Remove rich prompt --- bittensor/core/extrinsics/commit_weights.py | 1 - 1 file changed, 1 deletion(-) diff --git a/bittensor/core/extrinsics/commit_weights.py b/bittensor/core/extrinsics/commit_weights.py index 36ac0722bd..f13a9962f9 100644 --- a/bittensor/core/extrinsics/commit_weights.py +++ b/bittensor/core/extrinsics/commit_weights.py @@ -21,7 +21,6 @@ from typing import Optional, TYPE_CHECKING import socket from retry import retry -from rich.prompt import Confirm from bittensor.core import settings from bittensor.core.extrinsics.utils import submit_extrinsic From 43a01a7b0a320468b5c6ef74198631179ab34882 Mon Sep 17 00:00:00 2001 From: Benjamin Himes <37844818+thewhaleking@users.noreply.github.com> Date: Tue, 5 Nov 2024 16:29:54 +0200 Subject: [PATCH 55/58] Handle SSL Error on Connection (#2384) --- bittensor/core/async_subtensor.py | 14 ++++++++++---- bittensor/core/subtensor.py | 3 ++- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/bittensor/core/async_subtensor.py b/bittensor/core/async_subtensor.py index 508ae02439..40c77ae30e 100644 --- a/bittensor/core/async_subtensor.py +++ b/bittensor/core/async_subtensor.py @@ -1,10 +1,10 @@ import asyncio +import ssl from typing import Optional, Any, Union, TypedDict, Iterable import aiohttp import numpy as np import scalecodec -import typer from bittensor_wallet import Wallet from bittensor_wallet.utils import SS58_FORMAT from numpy.typing import NDArray @@ -134,7 
+134,13 @@ async def __aenter__(self): logging.error( f"Error: Timeout occurred connecting to substrate. Verify your chain and network settings: {self}" ) - raise typer.Exit(code=1) + raise ConnectionError + except (ConnectionRefusedError, ssl.SSLError) as error: + logging.error( + f"Error: Connection refused when connecting to substrate. " + f"Verify your chain and network settings: {self}. Error: {error}" + ) + raise ConnectionError async def __aexit__(self, exc_type, exc_val, exc_tb): await self.substrate.close() @@ -245,7 +251,7 @@ async def get_total_subnets( module="SubtensorModule", storage_function="TotalNetworks", params=[], - block_hash=block_hash + block_hash=block_hash, ) return result @@ -1298,7 +1304,7 @@ async def get_uid_for_hotkey_on_subnet( module="SubtensorModule", storage_function="Uids", params=[netuid, hotkey_ss58], - block_hash=block_hash + block_hash=block_hash, ) # extrinsics diff --git a/bittensor/core/subtensor.py b/bittensor/core/subtensor.py index fcbb4147d7..61a3c39389 100644 --- a/bittensor/core/subtensor.py +++ b/bittensor/core/subtensor.py @@ -23,6 +23,7 @@ import argparse import copy import socket +import ssl from typing import Union, Optional, TypedDict, Any import numpy as np @@ -231,7 +232,7 @@ def _get_substrate(self): except (AttributeError, TypeError, socket.error, OSError) as e: logging.warning(f"Error setting timeout: {e}") - except ConnectionRefusedError as error: + except (ConnectionRefusedError, ssl.SSLError) as error: logging.error( f"Could not connect to {self.network} network with {self.chain_endpoint} chain endpoint.", ) From e50b59b878c40fc1e666bcec44a74082136ae6d5 Mon Sep 17 00:00:00 2001 From: Roman Date: Tue, 5 Nov 2024 11:34:32 -0800 Subject: [PATCH 56/58] review comments fixed --- bittensor/core/extrinsics/registration.py | 10 +++++++++- bittensor/core/extrinsics/set_weights.py | 2 ++ bittensor/core/extrinsics/transfer.py | 5 +++++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git 
a/bittensor/core/extrinsics/registration.py b/bittensor/core/extrinsics/registration.py index 15df860d9b..de38869a80 100644 --- a/bittensor/core/extrinsics/registration.py +++ b/bittensor/core/extrinsics/registration.py @@ -155,6 +155,10 @@ def register_extrinsic( ) return True + logging.debug( + f"Registration hotkey: {wallet.hotkey.ss58_address}, Public coldkey: {wallet.coldkey.ss58_address} in the network: {subtensor.network}." + ) + if not torch: log_no_torch_error() return False @@ -380,6 +384,10 @@ def burned_register_extrinsic( return True logging.info(":satellite: Recycling TAO for Registration...") + + recycle_amount = subtensor.recycle(netuid=netuid) + logging.info(f"Recycling {recycle_amount} to register on subnet:{netuid}") + success, err_msg = _do_burned_register( self=subtensor, netuid=netuid, @@ -389,7 +397,7 @@ def burned_register_extrinsic( ) if not success: - logging.error(f":cross_mark: Failed: {err_msg}") + logging.error(f":cross_mark: Failed error: {err_msg}") time.sleep(0.5) return False # Successful registration, final check for neuron and pubkey diff --git a/bittensor/core/extrinsics/set_weights.py b/bittensor/core/extrinsics/set_weights.py index 6495c9e765..0475b4222a 100644 --- a/bittensor/core/extrinsics/set_weights.py +++ b/bittensor/core/extrinsics/set_weights.py @@ -151,6 +151,8 @@ def set_weights_extrinsic( logging.info( f":satellite: Setting weights on {subtensor.network} ..." 
) + logging.debug(f"Weights: {[float(v / 65535) for v in weight_vals]}") + try: success, error_message = do_set_weights( self=subtensor, diff --git a/bittensor/core/extrinsics/transfer.py b/bittensor/core/extrinsics/transfer.py index d2fba617d2..b2e0606064 100644 --- a/bittensor/core/extrinsics/transfer.py +++ b/bittensor/core/extrinsics/transfer.py @@ -159,6 +159,11 @@ def transfer_extrinsic( return False logging.info(":satellite: Transferring...") + logging.info(f"\tAmount: {transfer_balance}") + logging.info(f"\tfrom: {wallet.name}:{wallet.coldkey.ss58_address}") + logging.info(f"\tTo: {dest}") + logging.info(f"\tFor fee: {fee}") + success, block_hash, error_message = do_transfer( self=subtensor, wallet=wallet, From 7d230945918198fcbaf11a03b553e6336af75134 Mon Sep 17 00:00:00 2001 From: Roman Date: Tue, 5 Nov 2024 12:13:48 -0800 Subject: [PATCH 57/58] merge staging --- requirements/prod.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/prod.txt b/requirements/prod.txt index c63eb16bc5..c82efb8dfd 100644 --- a/requirements/prod.txt +++ b/requirements/prod.txt @@ -19,10 +19,10 @@ pyyaml retry requests rich +psutil pydantic>=2.3, <3 python-Levenshtein scalecodec==1.2.11 substrate-interface~=1.7.9 uvicorn bittensor-wallet>=2.0.2 -psutil From 26dce8243877b5dcb080c8a65206f297aee115a1 Mon Sep 17 00:00:00 2001 From: ibraheem-opentensor Date: Tue, 5 Nov 2024 12:29:28 -0800 Subject: [PATCH 58/58] Bumps changelog and version --- CHANGELOG.md | 33 +++++++++++++++++++++++++++++++++ bittensor/core/settings.py | 6 ++---- 2 files changed, 35 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b04ebaa0f0..e019df21fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,38 @@ # Changelog +## 8.3.0rc3 /2024-11-05 + +## What's Changed +* Further improvements in Commit-reveal V2, new subprocess, and related utilities by @opendansor in https://github.com/opentensor/bittensor/pull/2355 +* remove unused prometheus extrinsic by 
@roman-opentensor in https://github.com/opentensor/bittensor/pull/2378 +* Replace rich.console to btlogging.loggin by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2377 +* SDK (AsyncSubtensor) Part 1 by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2374 +* SDK (AsyncSubtensor) Part 2 by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2380 +* Handle SSL Error on Connection by @thewhaleking in https://github.com/opentensor/bittensor/pull/2384 +* Avoid using `prompt` in SDK by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2382 +* Backmerge/8.2.0 by @ibraheem-opentensor in https://github.com/opentensor/bittensor/pull/2389 + +**Full Changelog**: https://github.com/opentensor/bittensor/compare/v8.3.0rc2...v8.3.0rc3 + +## 8.3.0rc2 /2024-11-04 + +## What's Changed +* Tweaks in Commit-reveal V2, new subprocess, and related utilities by @opendansor in https://github.com/opentensor/bittensor/pull/2355 + +**Full Changelog**: https://github.com/opentensor/bittensor/compare/v8.3.0rc1...v8.3.0rc2 + +## 8.3.0rc1 /2024-11-01 + +## What's Changed +* Expands the type registry to include all the available options by @thewhaleking in https://github.com/opentensor/bittensor/pull/2353 +* add `Subtensor.register`, `Subtensor.difficulty` and related staff with tests by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2352 +* added to Subtensor: `burned_register`, `get_subnet_burn_cost`, `recycle` and related extrinsics by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2359 +* Poem "Risen from the Past". Act 3. 
by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2363 +* default port from 9946 to 9944 by @roman-opentensor in https://github.com/opentensor/bittensor/pull/2376 +* Commit-reveal V2, new subprocess, and related utilities by @opendansor in https://github.com/opentensor/bittensor/pull/2355 + +**Full Changelog**: https://github.com/opentensor/bittensor/compare/v8.2.0...v8.3.0rc1 + ## 8.2.0 /2024-10-10 ## What's Changed diff --git a/bittensor/core/settings.py b/bittensor/core/settings.py index 98d47104ae..ec372ba061 100644 --- a/bittensor/core/settings.py +++ b/bittensor/core/settings.py @@ -15,7 +15,7 @@ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. -__version__ = "8.2.0" +__version__ = "8.3.0rc3" import os import re @@ -306,9 +306,7 @@ # Parsing version without any literals. -__version__ = re.match(r"^\d+\.\d+\.\d+", __version__).group(0) - -version_split = __version__.split(".") +version_split = re.match(r"^\d+\.\d+\.\d+", __version__).group(0).split(".") _version_info = tuple(int(part) for part in version_split) _version_int_base = 1000 assert max(_version_info) < _version_int_base