From ddd45c19d480b8d020160a7dd358a713ea321d79 Mon Sep 17 00:00:00 2001 From: Brendan Playford <34052452+teslashibe@users.noreply.github.com> Date: Sat, 26 Feb 2022 19:06:19 -0800 Subject: [PATCH] V1.02 (#37) * Consolidate genesis.json into testnet folder and update Readme with issue #2 https://github.com/masa-finance/masa-node-v1.0/issues/2 * Call geth init only if geth.ipc does not exist * Create control file. * Bug/v1.01 hotfix (#36) * Update .gitignore to enable geth gile upload * Add geth back to src repo to fix make all bug * Update src Readme with geth installation instructions only * Update Readme for bug submissions and docker exec command to include /bin/sh * Improvement to consolidate genesis file into one location under network/testnet * Update genesis.json path * Bug fix: geth make - test fail * Fix geth make tests * Add node folder * Update version and test fixes * Force cleanup * Force udpdate to src v22.1.0 * Make test - passed with Go 1.16.14. Update Readme.md Co-authored-by: BA-MikeJ Co-authored-by: Piotr Halicki <49653152+PeterHalicki@users.noreply.github.com> --- .gitignore | 8 - README.md | 11 +- docker-compose.yml | 10 +- genesis.json | 53 -- node/README.md | 129 ++++ src/README.md | 132 ++++ src/accounts/abi/bind/bind_test.go | 5 + src/accounts/keystore/account_cache.go | 301 ++++++++ src/accounts/keystore/account_cache_test.go | 406 ++++++++++ src/accounts/keystore/file_cache.go | 102 +++ src/accounts/keystore/key.go | 238 ++++++ src/accounts/keystore/keystore.go | 519 +++++++++++++ src/accounts/keystore/keystore_test.go | 474 ++++++++++++ src/accounts/keystore/passphrase.go | 368 +++++++++ src/accounts/keystore/passphrase_test.go | 60 ++ src/accounts/keystore/plain.go | 61 ++ src/accounts/keystore/plain_test.go | 266 +++++++ src/accounts/keystore/presale.go | 150 ++++ src/accounts/keystore/testdata/dupes/1 | 1 + src/accounts/keystore/testdata/dupes/2 | 1 + src/accounts/keystore/testdata/dupes/foo | 1 + .../keystore/testdata/keystore/.hiddenfile | 1 + .../keystore/testdata/keystore/README | 21 + ...--7ef5a6135f1fd6a02593eedc869c6d41d934aef8 | 1 + src/accounts/keystore/testdata/keystore/aaa | 1 + src/accounts/keystore/testdata/keystore/empty | 0 .../fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e | 1 + .../keystore/testdata/keystore/garbage | Bin 0 -> 300 bytes .../keystore/testdata/keystore/no-address | 1 + src/accounts/keystore/testdata/keystore/zero | 1 + src/accounts/keystore/testdata/keystore/zzz | 1 + .../cb61d5a9c4896fb9658090b597ef0e7be6f7b67e | 1 + .../keystore/testdata/v1_test_vector.json | 28 + .../keystore/testdata/v3_test_vector.json | 97 +++ .../keystore/testdata/very-light-scrypt.json | 1 + src/accounts/keystore/wallet.go | 150 ++++ src/accounts/keystore/watch.go | 108 +++ src/accounts/keystore/watch_fallback.go | 28 + src/build/run-peeps.sh | 2 +- src/cmd/geth/accountcmd.go | 355 +++++++++ src/cmd/geth/accountcmd_plugin.go | 323 ++++++++ src/cmd/geth/accountcmd_plugin_test.go | 299 ++++++++ src/cmd/geth/accountcmd_test.go | 374 ++++++++++ src/cmd/geth/chaincmd.go | 559 ++++++++++++++ src/cmd/geth/config.go | 358 +++++++++ src/cmd/geth/config_test.go | 697 ++++++++++++++++++ src/cmd/geth/consolecmd.go | 353 +++++++++ src/cmd/geth/consolecmd_test.go | 322 ++++++++ src/cmd/geth/dao_test.go | 155 ++++ src/cmd/geth/dbcmd.go | 341 +++++++++ src/cmd/geth/genesis_test.go | 189 +++++ src/cmd/geth/les_test.go | 224 ++++++ src/cmd/geth/main.go | 549 ++++++++++++++ src/cmd/geth/misccmd.go | 169 +++++ src/cmd/geth/run_test.go | 126 ++++ src/cmd/geth/snapshot.go | 439 
+++++++++++ src/cmd/geth/testdata/blockchain.blocks | Bin 0 -> 23287 bytes src/cmd/geth/testdata/clique.json | 25 + src/cmd/geth/testdata/empty.js | 1 + src/cmd/geth/testdata/geth/nodekey | 1 + src/cmd/geth/testdata/geth/static-nodes.json | 3 + src/cmd/geth/testdata/guswallet.json | 6 + src/cmd/geth/testdata/key.prv | 1 + src/cmd/geth/testdata/password.txt | 1 + src/cmd/geth/testdata/passwords.txt | 3 + src/cmd/geth/testdata/vcheck/data.json | 61 ++ .../vulnerabilities.json.minisig.1 | 4 + .../vulnerabilities.json.minisig.2 | 4 + .../vulnerabilities.json.minisig.3 | 4 + src/cmd/geth/testdata/vcheck/minisign.pub | 2 + src/cmd/geth/testdata/vcheck/minisign.sec | 2 + .../vcheck/signify-sigs/data.json.sig | 2 + src/cmd/geth/testdata/vcheck/signifykey.pub | 2 + src/cmd/geth/testdata/vcheck/signifykey.sec | 2 + .../sigs/vulnerabilities.json.minisig.1 | 4 + .../sigs/vulnerabilities.json.minisig.2 | 4 + .../sigs/vulnerabilities.json.minisig.3 | 4 + .../geth/testdata/vcheck/vulnerabilities.json | 70 ++ src/cmd/geth/testdata/wrong-passwords.txt | 3 + src/cmd/geth/usage.go | 384 ++++++++++ src/cmd/geth/version_check.go | 169 +++++ src/cmd/geth/version_check_test.go | 130 ++++ src/core/state/journal.go | 10 + src/core/state/statedb.go | 43 +- src/eth/backend.go | 11 + src/eth/protocols/eth/handshake.go | 2 +- src/go.mod | 8 +- src/go.sum | 9 + src/p2p/server.go | 7 + ...0176eaf52ed014ec5c91cf4afa070dd3fd469077-1 | 1 + src/tests/fuzzers/keystore/keystore-fuzzer.go | 37 + src/ui/README.md | 28 - src/ui/docker-compose.yml | 6 - 93 files changed, 10514 insertions(+), 111 deletions(-) delete mode 100644 genesis.json create mode 100644 node/README.md create mode 100644 src/README.md create mode 100644 src/accounts/keystore/account_cache.go create mode 100644 src/accounts/keystore/account_cache_test.go create mode 100644 src/accounts/keystore/file_cache.go create mode 100644 src/accounts/keystore/key.go create mode 100644 src/accounts/keystore/keystore.go create mode 100644 src/accounts/keystore/keystore_test.go create mode 100644 src/accounts/keystore/passphrase.go create mode 100644 src/accounts/keystore/passphrase_test.go create mode 100644 src/accounts/keystore/plain.go create mode 100644 src/accounts/keystore/plain_test.go create mode 100644 src/accounts/keystore/presale.go create mode 100644 src/accounts/keystore/testdata/dupes/1 create mode 100644 src/accounts/keystore/testdata/dupes/2 create mode 100644 src/accounts/keystore/testdata/dupes/foo create mode 100644 src/accounts/keystore/testdata/keystore/.hiddenfile create mode 100644 src/accounts/keystore/testdata/keystore/README create mode 100644 src/accounts/keystore/testdata/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8 create mode 100644 src/accounts/keystore/testdata/keystore/aaa create mode 100644 src/accounts/keystore/testdata/keystore/empty create mode 100644 src/accounts/keystore/testdata/keystore/foo/fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e create mode 100644 src/accounts/keystore/testdata/keystore/garbage create mode 100644 src/accounts/keystore/testdata/keystore/no-address create mode 100644 src/accounts/keystore/testdata/keystore/zero create mode 100644 src/accounts/keystore/testdata/keystore/zzz create mode 100644 src/accounts/keystore/testdata/v1/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e create mode 100644 src/accounts/keystore/testdata/v1_test_vector.json create mode 100644 src/accounts/keystore/testdata/v3_test_vector.json create mode 100644 
src/accounts/keystore/testdata/very-light-scrypt.json create mode 100644 src/accounts/keystore/wallet.go create mode 100644 src/accounts/keystore/watch.go create mode 100644 src/accounts/keystore/watch_fallback.go create mode 100644 src/cmd/geth/accountcmd.go create mode 100644 src/cmd/geth/accountcmd_plugin.go create mode 100644 src/cmd/geth/accountcmd_plugin_test.go create mode 100644 src/cmd/geth/accountcmd_test.go create mode 100644 src/cmd/geth/chaincmd.go create mode 100644 src/cmd/geth/config.go create mode 100644 src/cmd/geth/config_test.go create mode 100644 src/cmd/geth/consolecmd.go create mode 100644 src/cmd/geth/consolecmd_test.go create mode 100644 src/cmd/geth/dao_test.go create mode 100644 src/cmd/geth/dbcmd.go create mode 100644 src/cmd/geth/genesis_test.go create mode 100644 src/cmd/geth/les_test.go create mode 100644 src/cmd/geth/main.go create mode 100644 src/cmd/geth/misccmd.go create mode 100644 src/cmd/geth/run_test.go create mode 100644 src/cmd/geth/snapshot.go create mode 100644 src/cmd/geth/testdata/blockchain.blocks create mode 100644 src/cmd/geth/testdata/clique.json create mode 100644 src/cmd/geth/testdata/empty.js create mode 100644 src/cmd/geth/testdata/geth/nodekey create mode 100644 src/cmd/geth/testdata/geth/static-nodes.json create mode 100644 src/cmd/geth/testdata/guswallet.json create mode 100644 src/cmd/geth/testdata/key.prv create mode 100644 src/cmd/geth/testdata/password.txt create mode 100644 src/cmd/geth/testdata/passwords.txt create mode 100644 src/cmd/geth/testdata/vcheck/data.json create mode 100644 src/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.1 create mode 100644 src/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.2 create mode 100644 src/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.3 create mode 100644 src/cmd/geth/testdata/vcheck/minisign.pub create mode 100644 src/cmd/geth/testdata/vcheck/minisign.sec create mode 100644 src/cmd/geth/testdata/vcheck/signify-sigs/data.json.sig create mode 100644 src/cmd/geth/testdata/vcheck/signifykey.pub create mode 100644 src/cmd/geth/testdata/vcheck/signifykey.sec create mode 100644 src/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.1 create mode 100644 src/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.2 create mode 100644 src/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.3 create mode 100644 src/cmd/geth/testdata/vcheck/vulnerabilities.json create mode 100644 src/cmd/geth/testdata/wrong-passwords.txt create mode 100644 src/cmd/geth/usage.go create mode 100644 src/cmd/geth/version_check.go create mode 100644 src/cmd/geth/version_check_test.go create mode 100644 src/tests/fuzzers/keystore/corpus/0176eaf52ed014ec5c91cf4afa070dd3fd469077-1 create mode 100644 src/tests/fuzzers/keystore/keystore-fuzzer.go delete mode 100644 src/ui/README.md delete mode 100644 src/ui/docker-compose.yml diff --git a/.gitignore b/.gitignore index 0f9b38f8..d5fd0772 100644 --- a/.gitignore +++ b/.gitignore @@ -11,14 +11,6 @@ # Output of the go coverage tool, specifically when used with LiteIDE *.out -# Dependency directories (remove the comment below to include it) -# vendor/ -**/geth -**/keystore -**geth.ipc -**/build/bin -# geth node build files - # OS generated files # ###################### *.DS_Store diff --git a/README.md b/README.md index 95d32a3c..3bf862ff 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,11 @@ # Masa Testnet Node V1.01 ## Release Date February 22nd, 2022 +## Requirements +Tested with: Go Version 1.16.14 
+Download [here](https://go.dev/dl/) ## Roadmap & Todo's -The Masa Node UI is in alpha and will get incremental releases, please report all bugs you find to bugs@masa.finance or submit an issue [here](https://github.com/masa-finance/masa-node-v1.0/issues) +The Masa Node UI is in alpha and will get incremental releases, please report all bugs you find as an issue [here](https://github.com/masa-finance/masa-node-v1.0/issues) # Run With Docker This guide will get you up and running using docker. If you want to us the geth binary please navigate to the bottom section of the page [here](#run-with-geth). ## Get Docker @@ -45,7 +48,7 @@ masa-node-v1/ 1. __Note__: to attach geth to your node Javascript console (use the same container id or name from `docker ps` ```sh - docker exec -it masa-node-v10_masa-node_1 + docker exec -it masa-node-v10_masa-node_1 /bin/sh geth attach /qdata/dd/geth.ipc Welcome to the Geth JavaScript console! @@ -149,12 +152,12 @@ Fatal: Consensus not specified. Exiting!! ``` ## Initialize the node Navigate to the `node` directory and initialize the first node. -The repo directory includes the `genesis.json` file that is used to connect to the Masa protocol at the following path `../genesis.json` +The repo directory includes the `genesis.json` file that is used to connect to the Masa protocol at the following path `../network/testnet/genesis.json` Run the following command ``` cd node -geth --datadir data init ../genesis.json +geth --datadir data init ../network/testnet/genesis.json ``` You will get the following output; ``` diff --git a/docker-compose.yml b/docker-compose.yml index 18934f8c..31a28c64 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -21,14 +21,16 @@ x-masa-testnet-node-v10-def: - -c - | DDIR=/qdata/dd - rm -rf $${DDIR} - mkdir -p $${DDIR}/keystore - mkdir -p $${DDIR}/geth GENESIS_FILE="/network/genesis.json" CONSENSUS_RPC_API="istanbul" NETWORK_ID=$$(cat $${GENESIS_FILE} | grep chainId | awk -F " " '{print $$2}' | awk -F "," '{print $$1}') GETH_ARGS_istanbul="--emitcheckpoints --istanbul.blockperiod 1 --mine --miner.threads 1 --syncmode full" - geth --datadir $${DDIR} init $${GENESIS_FILE} + if [ ! 
-f $${DDIR}/control_file ]; then + mkdir -p $${DDIR}/keystore + mkdir -p $${DDIR}/geth + geth --datadir $${DDIR} init $${GENESIS_FILE} + echo "Created $$(date)" > $${DDIR}/control_file + fi geth \ --identity node$${NODE_ID}-${MASA_CONSENSUS:-istanbul} \ --datadir $${DDIR} \ diff --git a/genesis.json b/genesis.json deleted file mode 100644 index 75a8b6ad..00000000 --- a/genesis.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "alloc": { - "0xed9d02e382b34818e88b88a309c7fe71e65f419d": { - "balance": "4000000000000000000000000000" - }, - "0xca843569e3427144cead5e4d5999a3d0ccf92b8e": { - "balance": "4000000000000000000000000000" - }, - "0x0fbdc686b912d7722dc86510934589e0aaf3b55a": { - "balance": "4000000000000000000000000000" - }, - "0x9186eb3d20cbd1f5f992a950d808c4495153abd5": { - "balance": "4000000000000000000000000000" - }, - "0x0638e1574728b6d862dd5d3a3e0942c3be47d996": { - "balance": "4000000000000000000000000000" - } - }, - "coinbase": "0x0000000000000000000000000000000000000000", - "config": { - "homesteadBlock": 0, - "byzantiumBlock": 0, - "constantinopleBlock":0, - "petersburgBlock": 0, - "istanbulBlock": 0, - "chainId": 190250, - "eip150Block": 0, - "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "eip155Block": 0, - "eip158Block": 0, - "qip714Block": 50, - "isQuorum": true, - "privacyPrecompileBlock": 0, - "maxCodeSizeConfig" : [ - { - "block" : 0, - "size" : 64 - } - ], - "istanbul": { - "epoch": 30000, - "policy": 0, - "ceil2Nby3Block": 0 - } - }, - "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000f8d9f89394d8dba507e85f116b1f7e231ca8525fc9008a6966946571d97f340c8495b661a823f2c2145ca47d63c294e36cbeb565b061217930767886474e3cde903ac594f512a992f3fb749857d758ffda1330e590fa915e94b912de287f9b047b4228436e94b5b78e3ee16171948157d4437104e3b8df4451a85f7b2438ef6699ff94b131288f355bc27090e542ae0be213c20350b767b8410000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0", - "gasLimit": "0xE0000000", - "difficulty": "0x1", - "mixHash": "0x63746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365", - "nonce": "0x0", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "0x00" -} \ No newline at end of file diff --git a/node/README.md b/node/README.md new file mode 100644 index 00000000..f318cbf5 --- /dev/null +++ b/node/README.md @@ -0,0 +1,129 @@ +# Masa Testnet Node V1.01 +## Release Date +February 22nd, 2022 +## Roadmap & Todo's +The Masa Node UI is in alpha and will get incremental releases, please report all bugs you find as an issue [here](https://github.com/masa-finance/masa-node-v1.0/issues) +# Run With Geth +To run from source follow these steps +## Clone the repository and build the source: +``` +git clone https://github.com/masa-finance/masa-node-v1.0 +cd masa-node-v1.0/src +make all +``` +**`make all` must be run from within the src folder** +## Run the tests: +``` +make test +``` +## Add PATH +### Method 1 +Binaries are placed in `$REPO_ROOT/build/bin`. You must add the `bin` folder to `PATH` to make `geth` and `bootnode` easily invokable from the command line. For example, if `Users/yourname/masa-node-v1.0` is the location you have cloned the masa-node repository to on your computer. In your terminal run +``` +sudo nano /etc/paths +or +export PATH=$PATH:$REPO_ROOT/build/bin +``` +Remember to source your $PATH or restart the terminal. 
Run `echo $PATH` from the command line to check that the `PATH` has been added correctly. + +For example; +``` +echo $PATH + +gives the following response + +/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/quorum/build/bin:/Users/yourname/masa-node/build/bin:/usr/local/go/bin:/usr/local/share/dotnet:~/.dotnet/tools:/Library/Frameworks/Mono.framework/Versions/Current/Commands +``` +### Method 2 +The second way to make geth and bootnode easily invokable is to copy the binaries located in `$REPO_ROOT/build/bin` to a folder already in your `PATH` file such as `/usr/local/bin`. +### Method 3 + +You can also supplement `PATH` by adding add `PATH=$PATH:$REPO_ROOT/build/bin` to your `~/.bashrc`, `~/.bash_aliases`, or `~/bash_profile` file. +## Testing PATH +When you run geth from the command line from an arbitrary folder you will get the following output on the terminal. +``` +geth + +returns + +INFO [12-08|05:37:18.131] Starting Geth on Ethereum mainnet... +INFO [12-08|05:37:18.131] Bumping default cache on mainnet provided=1024 updated=4096 +INFO [12-08|05:37:18.131] Running with private transaction manager disabled - quorum private transactions will not be supported +INFO [12-08|05:37:18.132] Maximum peer count ETH=50 LES=0 total=50 +INFO [12-08|05:37:18.160] Set global gas cap cap=25000000 +INFO [12-08|05:37:18.160] Running with private transaction manager disabled - quorum private transactions will not be supported +INFO [12-08|05:37:18.160] Allocated trie memory caches clean=1023.00MiB dirty=1024.00MiB +INFO [12-08|05:37:18.160] Allocated cache and file handles database=/Users/brendanplayford/Library/Ethereum/geth/chaindata cache=2.00GiB ... +... +INFO [12-08|05:37:18.751] Started P2P networking self=enode://162cfffb34b0c3e76abeb9f31541737fcd3b622e35fa3b0080a14dfb9d2a53168ac3abf10122b79d3b8d7d55516982e0f903d179916ccb51abe5cd00de1bdb07@127.0.0.1:30303 +INFO [12-08|05:37:18.752] IPC endpoint opened url=/Users/brendanplayford/Library/Ethereum/geth.ipc isMultitenant=false +INFO [12-08|05:37:18.752] Security Plugin is not enabled +Fatal: Consensus not specified. Exiting!! +``` +## Initialize the node +Navigate to the `node` directory and initialize the first node. 
+The repo directory includes the `genesis.json` file that is used to connect to the Masa protocol at the following path `../network/testnet/genesis.json`
+
+Run the following command:
+```
+cd node
+geth --datadir data init ../network/testnet/genesis.json
+```
+You will get the following output:
+```
+INFO [12-09|18:22:24.031] Running with private transaction manager disabled - quorum private transactions will not be supported
+INFO [12-09|18:22:24.035] Maximum peer count ETH=50 LES=0 total=50
+INFO [12-09|18:22:24.063] Set global gas cap cap=25000000
+INFO [12-09|18:22:24.064] Allocated cache and file handles database=/Users/brendanplayford/masa/masa-node-v1.0/node/data/geth/chaindata cache=16.00MiB handles=16
+INFO [12-09|18:22:24.135] Writing custom genesis block
+INFO [12-09|18:22:24.140] Persisted trie from memory database nodes=7 size=1.02KiB time="280.583µs" gcnodes=0 gcsize=0.00B gctime=0s livenodes=1 livesize=0.00B
+INFO [12-09|18:22:24.141] Successfully wrote genesis state database=chaindata hash="69b521…fb4c77"
+INFO [12-09|18:22:24.141] Allocated cache and file handles database=/Users/brendanplayford/masa/masa-node-v1.0/node/data/geth/lightchaindata cache=16.00MiB handles=16
+INFO [12-09|18:22:24.204] Writing custom genesis block
+INFO [12-09|18:22:24.205] Persisted trie from memory database nodes=7 size=1.02KiB time="162.437µs" gcnodes=0 gcsize=0.00B gctime=0s livenodes=1 livesize=0.00B
+INFO [12-09|18:22:24.205] Successfully wrote genesis state database=lightchaindata hash="69b521…fb4c77"
+```
+
+## Set your node identity
+Set your own identity for your node on the Masa protocol so it can be easily identified in a list of peers.
+
+For example, we name our node 'MasaMoonNode': setting the flag `--identity MasaMoonNode` sets up an identity for your node so it can be identified as MasaMoonNode in a list of peers.
+**Update your flag `--identity MasaMoonNode` to be unique**
+## Start the node
+In the `node` directory, start the node by running the following command:
+```
+PRIVATE_CONFIG=ignore geth --identity MasaMoonNode --datadir data --bootnodes enode://91a3c3d5e76b0acf05d9abddee959f1bcbc7c91537d2629288a9edd7a3df90acaa46ffba0e0e5d49a20598e0960ac458d76eb8fa92a1d64938c0a3a3d60f8be4@54.158.188.182:21000 --emitcheckpoints --istanbul.blockperiod 1 --mine --miner.threads 1 --syncmode full --verbosity 5 --networkid 190250 --rpc --rpccorsdomain "*" --rpcvhosts "*" --rpcaddr 127.0.0.1 --rpcport 8545 --rpcapi admin,db,eth,debug,miner,net,shh,txpool,personal,web3,quorum,istanbul --port 30300
+```
+### Additional Bootnodes
+Masa operates several bootnodes; one is already included in the command above by default. If you are having issues connecting to the bootnode, please use an alternative from the list below.
+
+We are also looking for community-run bootnodes to add to our list. Please reach out to us on Discord or submit a PR to this repo if you want to add a bootnode to the community list.
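If the node starts but struggles to find peers, it can help to first confirm that your local RPC endpoint is answering before switching bootnodes. The snippet below is an illustrative sketch only: it assumes the HTTP-RPC settings from the start command above (`--rpcaddr 127.0.0.1 --rpcport 8545`) and the upstream go-ethereum `ethclient` package, and is not part of this repository.

```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Dial the HTTP-RPC endpoint opened by the start command above.
	client, err := ethclient.Dial("http://127.0.0.1:8545")
	if err != nil {
		log.Fatalf("node not reachable: %v", err)
	}
	defer client.Close()

	ctx := context.Background()

	// The chain ID should match the Masa testnet genesis (190250).
	chainID, err := client.ChainID(ctx)
	if err != nil {
		log.Fatalf("chainId query failed: %v", err)
	}

	// The latest block number should keep rising while the node syncs.
	header, err := client.HeaderByNumber(ctx, nil)
	if err != nil {
		log.Fatalf("header query failed: %v", err)
	}
	fmt.Printf("chainId=%v latest block=%v\n", chainID, header.Number)
}
```

If the chain ID or block height looks wrong, re-check the `--networkid` flag and the genesis file used during `geth init` before trying an alternative bootnode.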
+#### Masa Bootnodes +``` +enode://ac6b1096ca56b9f6d004b779ae3728bf83f8e22453404cc3cef16a3d9b96608bc67c4b30db88e0a5a6c6390213f7acbe1153ff6d23ce57380104288ae19373ef@54.146.254.245:21000 + +enode://91a3c3d5e76b0acf05d9abddee959f1bcbc7c91537d2629288a9edd7a3df90acaa46ffba0e0e5d49a20598e0960ac458d76eb8fa92a1d64938c0a3a3d60f8be4@54.158.188.182:21000 + +enode://d87c03855093a39dced2af54d39b827e4e841fd0ca98673b2e94681d9d52d2f1b6a6d42754da86fa8f53d8105896fda44f3012be0ceb6342e114b0f01456924c@34.225.220.240:21000 + +enode://fcb5a1a8d65eb167cd3030ca9ae35aa8e290b9add3eb46481d0fbd1eb10065aeea40059f48314c88816aab2af9303e193becc511b1035c9fd8dbe97d21f913b9@52.1.125.71:21000 +``` +#### Community Bootnodes +Submit a PR to add a bootnode to the community list [here](https://github.com/masa-finance/masa-node-v1.0/pulls). +## Node Syncing +It can take some time for your node to fully sync to the Masa Testnet 2.0 - please be patient while your node catches up with the most recent blocks. +## Node UI +You must be running Docker to run the Node UI with geth +### Specification +- React.js & Typescript +- Docker for deployment +## Run The Masa Node UI +Follow these instructions to run the Node UI with geth +``` +cd masa-node-v1.0 +cd src +cd ui +docker-compose up ui +``` +Navigate to you local host to interact with the Masa Node +`http://localhost:3000` \ No newline at end of file diff --git a/src/README.md b/src/README.md new file mode 100644 index 00000000..a3d54308 --- /dev/null +++ b/src/README.md @@ -0,0 +1,132 @@ +# Masa Testnet Node V1.01 +## Release Date +February 22nd, 2022 +## Requirements +Tested with: Go Version 1.16.14 +Download [here](https://go.dev/dl/) +## Roadmap & Todo's +The Masa Node UI is in alpha and will get incremental releases, please report all bugs you find as an issue [here](https://github.com/masa-finance/masa-node-v1.0/issues) +# Run With Geth +To run from source follow these steps +## Clone the repository and build the source: +``` +git clone https://github.com/masa-finance/masa-node-v1.0 +cd masa-node-v1.0/src +make all +``` +**`make all` must be run from within the src folder** +## Run the tests: +``` +make test +``` +## Add PATH +### Method 1 +Binaries are placed in `$REPO_ROOT/build/bin`. You must add the `bin` folder to `PATH` to make `geth` and `bootnode` easily invokable from the command line. For example, if `Users/yourname/masa-node-v1.0` is the location you have cloned the masa-node repository to on your computer. In your terminal run +``` +sudo nano /etc/paths +or +export PATH=$PATH:$REPO_ROOT/build/bin +``` +Remember to source your $PATH or restart the terminal. Run `echo $PATH` from the command line to check that the `PATH` has been added correctly. + +For example; +``` +echo $PATH + +gives the following response + +/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/quorum/build/bin:/Users/yourname/masa-node/build/bin:/usr/local/go/bin:/usr/local/share/dotnet:~/.dotnet/tools:/Library/Frameworks/Mono.framework/Versions/Current/Commands +``` +### Method 2 +The second way to make geth and bootnode easily invokable is to copy the binaries located in `$REPO_ROOT/build/bin` to a folder already in your `PATH` file such as `/usr/local/bin`. +### Method 3 + +You can also supplement `PATH` by adding add `PATH=$PATH:$REPO_ROOT/build/bin` to your `~/.bashrc`, `~/.bash_aliases`, or `~/bash_profile` file. +## Testing PATH +When you run geth from the command line from an arbitrary folder you will get the following output on the terminal. 
+``` +geth + +returns + +INFO [12-08|05:37:18.131] Starting Geth on Ethereum mainnet... +INFO [12-08|05:37:18.131] Bumping default cache on mainnet provided=1024 updated=4096 +INFO [12-08|05:37:18.131] Running with private transaction manager disabled - quorum private transactions will not be supported +INFO [12-08|05:37:18.132] Maximum peer count ETH=50 LES=0 total=50 +INFO [12-08|05:37:18.160] Set global gas cap cap=25000000 +INFO [12-08|05:37:18.160] Running with private transaction manager disabled - quorum private transactions will not be supported +INFO [12-08|05:37:18.160] Allocated trie memory caches clean=1023.00MiB dirty=1024.00MiB +INFO [12-08|05:37:18.160] Allocated cache and file handles database=/Users/brendanplayford/Library/Ethereum/geth/chaindata cache=2.00GiB ... +... +INFO [12-08|05:37:18.751] Started P2P networking self=enode://162cfffb34b0c3e76abeb9f31541737fcd3b622e35fa3b0080a14dfb9d2a53168ac3abf10122b79d3b8d7d55516982e0f903d179916ccb51abe5cd00de1bdb07@127.0.0.1:30303 +INFO [12-08|05:37:18.752] IPC endpoint opened url=/Users/brendanplayford/Library/Ethereum/geth.ipc isMultitenant=false +INFO [12-08|05:37:18.752] Security Plugin is not enabled +Fatal: Consensus not specified. Exiting!! +``` +## Initialize the node +Navigate to the `node` directory and initialize the first node. +The repo directory includes the `genesis.json` file that is used to connect to the Masa protocol at the following path `../network/testnet/genesis.json` + +Run the following command +``` +cd node +geth --datadir data init ../network/testnet/genesis.json +``` +You will get the following output; +``` +INFO [12-09|18:22:24.031] Running with private transaction manager disabled - quorum private transactions will not be supported +INFO [12-09|18:22:24.035] Maximum peer count ETH=50 LES=0 total=50 +INFO [12-09|18:22:24.063] Set global gas cap cap=25000000 +INFO [12-09|18:22:24.064] Allocated cache and file handles database=/Users/brendanplayford/masa/masa-node-v1.0/node/data/geth/chaindata cache=16.00MiB handles=16 +INFO [12-09|18:22:24.135] Writing custom genesis block +INFO [12-09|18:22:24.140] Persisted trie from memory database nodes=7 size=1.02KiB time="280.583µs" gcnodes=0 gcsize=0.00B gctime=0s livenodes=1 livesize=0.00B +INFO [12-09|18:22:24.141] Successfully wrote genesis state database=chaindata hash="69b521…fb4c77" +INFO [12-09|18:22:24.141] Allocated cache and file handles database=/Users/brendanplayford/masa/masa-node-v1.0/node/data/geth/lightchaindata cache=16.00MiB handles=16 +INFO [12-09|18:22:24.204] Writing custom genesis block +INFO [12-09|18:22:24.205] Persisted trie from memory database nodes=7 size=1.02KiB time="162.437µs" gcnodes=0 gcsize=0.00B gctime=0s livenodes=1 livesize=0.00B +INFO [12-09|18:22:24.205] Successfully wrote genesis state database=lightchaindata hash="69b521…fb4c77" +``` + +## Set your node identity +Set your own identity of your node on the Masa protocol to be easily identified in a list of peers. + +For example; we name our node 'MasaMoonNode' by setting the flag `--identity MasaMoonNode` will set up an identity for your node so it can be identified as MasaMoonNode in a list of peers. 
+**Update your flag `--identity MasaMoonNode` to be unique**
+## Start the node
+In the `node` directory, start the node by running the following command:
+```
+PRIVATE_CONFIG=ignore geth --identity MasaMoonNode --datadir data --bootnodes enode://91a3c3d5e76b0acf05d9abddee959f1bcbc7c91537d2629288a9edd7a3df90acaa46ffba0e0e5d49a20598e0960ac458d76eb8fa92a1d64938c0a3a3d60f8be4@54.158.188.182:21000 --emitcheckpoints --istanbul.blockperiod 1 --mine --miner.threads 1 --syncmode full --verbosity 5 --networkid 190250 --rpc --rpccorsdomain "*" --rpcvhosts "*" --rpcaddr 127.0.0.1 --rpcport 8545 --rpcapi admin,db,eth,debug,miner,net,shh,txpool,personal,web3,quorum,istanbul --port 30300
+```
+### Additional Bootnodes
+Masa operates several bootnodes; one is already included in the command above by default. If you are having issues connecting to the bootnode, please use an alternative from the list below.
+
+We are also looking for community-run bootnodes to add to our list. Please reach out to us on Discord or submit a PR to this repo if you want to add a bootnode to the community list.
+#### Masa Bootnodes
+```
+enode://ac6b1096ca56b9f6d004b779ae3728bf83f8e22453404cc3cef16a3d9b96608bc67c4b30db88e0a5a6c6390213f7acbe1153ff6d23ce57380104288ae19373ef@54.146.254.245:21000
+
+enode://91a3c3d5e76b0acf05d9abddee959f1bcbc7c91537d2629288a9edd7a3df90acaa46ffba0e0e5d49a20598e0960ac458d76eb8fa92a1d64938c0a3a3d60f8be4@54.158.188.182:21000
+
+enode://d87c03855093a39dced2af54d39b827e4e841fd0ca98673b2e94681d9d52d2f1b6a6d42754da86fa8f53d8105896fda44f3012be0ceb6342e114b0f01456924c@34.225.220.240:21000
+
+enode://fcb5a1a8d65eb167cd3030ca9ae35aa8e290b9add3eb46481d0fbd1eb10065aeea40059f48314c88816aab2af9303e193becc511b1035c9fd8dbe97d21f913b9@52.1.125.71:21000
+```
+#### Community Bootnodes
+Submit a PR to add a bootnode to the community list [here](https://github.com/masa-finance/masa-node-v1.0/pulls).
+## Node Syncing
+It can take some time for your node to fully sync to the Masa Testnet 2.0 - please be patient while your node catches up with the most recent blocks.
+## Node UI
+You must be running Docker to run the Node UI with geth.
+### Specification
+- React.js & TypeScript
+- Docker for deployment
+## Run The Masa Node UI
+Follow these instructions to run the Node UI with geth:
+```
+cd masa-node-v1.0
+cd src
+cd ui
+docker-compose up ui
+```
+Navigate to your localhost to interact with the Masa Node
+`http://localhost:3000`
\ No newline at end of file
diff --git a/src/accounts/abi/bind/bind_test.go b/src/accounts/abi/bind/bind_test.go
index 0cbd23ed..b21d178d 100644
--- a/src/accounts/abi/bind/bind_test.go
+++ b/src/accounts/abi/bind/bind_test.go
@@ -1858,6 +1858,11 @@ func TestGolangBindings(t *testing.T) {
t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out)
}
+ tidier := exec.Command(gocmd, "mod", "tidy")
+ tidier.Dir = pkg
+ if out, err := tidier.CombinedOutput(); err != nil {
+ t.Fatalf("failed to tidy Go module file: %v\n%s", err, out)
+ }
// Test the entire package and report any failures
cmd := exec.Command(gocmd, "test", "-v", "-count", "1")
cmd.Dir = pkg
diff --git a/src/accounts/keystore/account_cache.go b/src/accounts/keystore/account_cache.go
new file mode 100644
index 00000000..a3ec6e9c
--- /dev/null
+++ b/src/accounts/keystore/account_cache.go
@@ -0,0 +1,301 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package keystore + +import ( + "bufio" + "encoding/json" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + mapset "github.com/deckarep/golang-set" + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +// Minimum amount of time between cache reloads. This limit applies if the platform does +// not support change notifications. It also applies if the keystore directory does not +// exist yet, the code will attempt to create a watcher at most this often. +const minReloadInterval = 2 * time.Second + +type accountsByURL []accounts.Account + +func (s accountsByURL) Len() int { return len(s) } +func (s accountsByURL) Less(i, j int) bool { return s[i].URL.Cmp(s[j].URL) < 0 } +func (s accountsByURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// AmbiguousAddrError is returned when attempting to unlock +// an address for which more than one file exists. +type AmbiguousAddrError struct { + Addr common.Address + Matches []accounts.Account +} + +func (err *AmbiguousAddrError) Error() string { + files := "" + for i, a := range err.Matches { + files += a.URL.Path + if i < len(err.Matches)-1 { + files += ", " + } + } + return fmt.Sprintf("multiple keys match address (%s)", files) +} + +// accountCache is a live index of all accounts in the keystore. +type accountCache struct { + keydir string + watcher *watcher + mu sync.Mutex + all accountsByURL + byAddr map[common.Address][]accounts.Account + throttle *time.Timer + notify chan struct{} + fileC fileCache +} + +func newAccountCache(keydir string) (*accountCache, chan struct{}) { + ac := &accountCache{ + keydir: keydir, + byAddr: make(map[common.Address][]accounts.Account), + notify: make(chan struct{}, 1), + fileC: fileCache{all: mapset.NewThreadUnsafeSet()}, + } + ac.watcher = newWatcher(ac) + return ac, ac.notify +} + +func (ac *accountCache) accounts() []accounts.Account { + ac.maybeReload() + ac.mu.Lock() + defer ac.mu.Unlock() + cpy := make([]accounts.Account, len(ac.all)) + copy(cpy, ac.all) + return cpy +} + +func (ac *accountCache) hasAddress(addr common.Address) bool { + ac.maybeReload() + ac.mu.Lock() + defer ac.mu.Unlock() + return len(ac.byAddr[addr]) > 0 +} + +func (ac *accountCache) add(newAccount accounts.Account) { + ac.mu.Lock() + defer ac.mu.Unlock() + + i := sort.Search(len(ac.all), func(i int) bool { return ac.all[i].URL.Cmp(newAccount.URL) >= 0 }) + if i < len(ac.all) && ac.all[i] == newAccount { + return + } + // newAccount is not in the cache. + ac.all = append(ac.all, accounts.Account{}) + copy(ac.all[i+1:], ac.all[i:]) + ac.all[i] = newAccount + ac.byAddr[newAccount.Address] = append(ac.byAddr[newAccount.Address], newAccount) +} + +// note: removed needs to be unique here (i.e. both File and Address must be set). 
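+// delete removes the account from the URL-sorted slice and from the per-address index, dropping the address entry entirely once no other key file maps to it.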
+func (ac *accountCache) delete(removed accounts.Account) { + ac.mu.Lock() + defer ac.mu.Unlock() + + ac.all = removeAccount(ac.all, removed) + if ba := removeAccount(ac.byAddr[removed.Address], removed); len(ba) == 0 { + delete(ac.byAddr, removed.Address) + } else { + ac.byAddr[removed.Address] = ba + } +} + +// deleteByFile removes an account referenced by the given path. +func (ac *accountCache) deleteByFile(path string) { + ac.mu.Lock() + defer ac.mu.Unlock() + i := sort.Search(len(ac.all), func(i int) bool { return ac.all[i].URL.Path >= path }) + + if i < len(ac.all) && ac.all[i].URL.Path == path { + removed := ac.all[i] + ac.all = append(ac.all[:i], ac.all[i+1:]...) + if ba := removeAccount(ac.byAddr[removed.Address], removed); len(ba) == 0 { + delete(ac.byAddr, removed.Address) + } else { + ac.byAddr[removed.Address] = ba + } + } +} + +func removeAccount(slice []accounts.Account, elem accounts.Account) []accounts.Account { + for i := range slice { + if slice[i] == elem { + return append(slice[:i], slice[i+1:]...) + } + } + return slice +} + +// find returns the cached account for address if there is a unique match. +// The exact matching rules are explained by the documentation of accounts.Account. +// Callers must hold ac.mu. +func (ac *accountCache) find(a accounts.Account) (accounts.Account, error) { + // Limit search to address candidates if possible. + matches := ac.all + if (a.Address != common.Address{}) { + matches = ac.byAddr[a.Address] + } + if a.URL.Path != "" { + // If only the basename is specified, complete the path. + if !strings.ContainsRune(a.URL.Path, filepath.Separator) { + a.URL.Path = filepath.Join(ac.keydir, a.URL.Path) + } + for i := range matches { + if matches[i].URL == a.URL { + return matches[i], nil + } + } + if (a.Address == common.Address{}) { + return accounts.Account{}, ErrNoMatch + } + } + switch len(matches) { + case 1: + return matches[0], nil + case 0: + return accounts.Account{}, ErrNoMatch + default: + err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]accounts.Account, len(matches))} + copy(err.Matches, matches) + sort.Sort(accountsByURL(err.Matches)) + return accounts.Account{}, err + } +} + +func (ac *accountCache) maybeReload() { + ac.mu.Lock() + + if ac.watcher.running { + ac.mu.Unlock() + return // A watcher is running and will keep the cache up-to-date. + } + if ac.throttle == nil { + ac.throttle = time.NewTimer(0) + } else { + select { + case <-ac.throttle.C: + default: + ac.mu.Unlock() + return // The cache was reloaded recently. + } + } + // No watcher running, start it. 
+ ac.watcher.start() + ac.throttle.Reset(minReloadInterval) + ac.mu.Unlock() + ac.scanAccounts() +} + +func (ac *accountCache) close() { + ac.mu.Lock() + ac.watcher.close() + if ac.throttle != nil { + ac.throttle.Stop() + } + if ac.notify != nil { + close(ac.notify) + ac.notify = nil + } + ac.mu.Unlock() +} + +// scanAccounts checks if any changes have occurred on the filesystem, and +// updates the account cache accordingly +func (ac *accountCache) scanAccounts() error { + // Scan the entire folder metadata for file changes + creates, deletes, updates, err := ac.fileC.scan(ac.keydir) + if err != nil { + log.Debug("Failed to reload keystore contents", "err", err) + return err + } + if creates.Cardinality() == 0 && deletes.Cardinality() == 0 && updates.Cardinality() == 0 { + return nil + } + // Create a helper method to scan the contents of the key files + var ( + buf = new(bufio.Reader) + key struct { + Address string `json:"address"` + } + ) + readAccount := func(path string) *accounts.Account { + fd, err := os.Open(path) + if err != nil { + log.Trace("Failed to open keystore file", "path", path, "err", err) + return nil + } + defer fd.Close() + buf.Reset(fd) + // Parse the address. + key.Address = "" + err = json.NewDecoder(buf).Decode(&key) + addr := common.HexToAddress(key.Address) + switch { + case err != nil: + log.Debug("Failed to decode keystore key", "path", path, "err", err) + case addr == common.Address{}: + log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address") + default: + return &accounts.Account{ + Address: addr, + URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}, + } + } + return nil + } + // Process all the file diffs + start := time.Now() + + for _, p := range creates.ToSlice() { + if a := readAccount(p.(string)); a != nil { + ac.add(*a) + } + } + for _, p := range deletes.ToSlice() { + ac.deleteByFile(p.(string)) + } + for _, p := range updates.ToSlice() { + path := p.(string) + ac.deleteByFile(path) + if a := readAccount(path); a != nil { + ac.add(*a) + } + } + end := time.Now() + + select { + case ac.notify <- struct{}{}: + default: + } + log.Trace("Handled keystore changes", "time", end.Sub(start)) + return nil +} diff --git a/src/accounts/keystore/account_cache_test.go b/src/accounts/keystore/account_cache_test.go new file mode 100644 index 00000000..79e76ee3 --- /dev/null +++ b/src/accounts/keystore/account_cache_test.go @@ -0,0 +1,406 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package keystore + +import ( + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "reflect" + "sort" + "testing" + "time" + + "github.com/cespare/cp" + "github.com/davecgh/go-spew/spew" + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/common" +) + +var ( + cachetestDir, _ = filepath.Abs(filepath.Join("testdata", "keystore")) + cachetestAccounts = []accounts.Account{ + { + Address: common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"), + URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(cachetestDir, "UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8")}, + }, + { + Address: common.HexToAddress("f466859ead1932d743d622cb74fc058882e8648a"), + URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(cachetestDir, "aaa")}, + }, + { + Address: common.HexToAddress("289d485d9771714cce91d3393d764e1311907acc"), + URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(cachetestDir, "zzz")}, + }, + } +) + +func TestWatchNewFile(t *testing.T) { + t.Parallel() + + dir, ks := tmpKeyStore(t, false) + defer os.RemoveAll(dir) + + // Ensure the watcher is started before adding any files. + ks.Accounts() + time.Sleep(1000 * time.Millisecond) + + // Move in the files. + wantAccounts := make([]accounts.Account, len(cachetestAccounts)) + for i := range cachetestAccounts { + wantAccounts[i] = accounts.Account{ + Address: cachetestAccounts[i].Address, + URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, filepath.Base(cachetestAccounts[i].URL.Path))}, + } + if err := cp.CopyFile(wantAccounts[i].URL.Path, cachetestAccounts[i].URL.Path); err != nil { + t.Fatal(err) + } + } + + // ks should see the accounts. + var list []accounts.Account + for d := 200 * time.Millisecond; d < 5*time.Second; d *= 2 { + list = ks.Accounts() + if reflect.DeepEqual(list, wantAccounts) { + // ks should have also received change notifications + select { + case <-ks.changes: + default: + t.Fatalf("wasn't notified of new accounts") + } + return + } + time.Sleep(d) + } + t.Errorf("got %s, want %s", spew.Sdump(list), spew.Sdump(wantAccounts)) +} + +func TestWatchNoDir(t *testing.T) { + t.Parallel() + + // Create ks but not the directory that it watches. + rand.Seed(time.Now().UnixNano()) + dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int())) + ks := NewKeyStore(dir, LightScryptN, LightScryptP) + + list := ks.Accounts() + if len(list) > 0 { + t.Error("initial account list not empty:", list) + } + time.Sleep(100 * time.Millisecond) + + // Create the directory and copy a key file into it. + os.MkdirAll(dir, 0700) + defer os.RemoveAll(dir) + file := filepath.Join(dir, "aaa") + if err := cp.CopyFile(file, cachetestAccounts[0].URL.Path); err != nil { + t.Fatal(err) + } + + // ks should see the account. 
+ wantAccounts := []accounts.Account{cachetestAccounts[0]} + wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file} + for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 { + list = ks.Accounts() + if reflect.DeepEqual(list, wantAccounts) { + // ks should have also received change notifications + select { + case <-ks.changes: + default: + t.Fatalf("wasn't notified of new accounts") + } + return + } + time.Sleep(d) + } + t.Errorf("\ngot %v\nwant %v", list, wantAccounts) +} + +func TestCacheInitialReload(t *testing.T) { + cache, _ := newAccountCache(cachetestDir) + accounts := cache.accounts() + if !reflect.DeepEqual(accounts, cachetestAccounts) { + t.Fatalf("got initial accounts: %swant %s", spew.Sdump(accounts), spew.Sdump(cachetestAccounts)) + } +} + +func TestCacheAddDeleteOrder(t *testing.T) { + cache, _ := newAccountCache("testdata/no-such-dir") + cache.watcher.running = true // prevent unexpected reloads + + accs := []accounts.Account{ + { + Address: common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), + URL: accounts.URL{Scheme: KeyStoreScheme, Path: "-309830980"}, + }, + { + Address: common.HexToAddress("2cac1adea150210703ba75ed097ddfe24e14f213"), + URL: accounts.URL{Scheme: KeyStoreScheme, Path: "ggg"}, + }, + { + Address: common.HexToAddress("8bda78331c916a08481428e4b07c96d3e916d165"), + URL: accounts.URL{Scheme: KeyStoreScheme, Path: "zzzzzz-the-very-last-one.keyXXX"}, + }, + { + Address: common.HexToAddress("d49ff4eeb0b2686ed89c0fc0f2b6ea533ddbbd5e"), + URL: accounts.URL{Scheme: KeyStoreScheme, Path: "SOMETHING.key"}, + }, + { + Address: common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"), + URL: accounts.URL{Scheme: KeyStoreScheme, Path: "UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8"}, + }, + { + Address: common.HexToAddress("f466859ead1932d743d622cb74fc058882e8648a"), + URL: accounts.URL{Scheme: KeyStoreScheme, Path: "aaa"}, + }, + { + Address: common.HexToAddress("289d485d9771714cce91d3393d764e1311907acc"), + URL: accounts.URL{Scheme: KeyStoreScheme, Path: "zzz"}, + }, + } + for _, a := range accs { + cache.add(a) + } + // Add some of them twice to check that they don't get reinserted. + cache.add(accs[0]) + cache.add(accs[2]) + + // Check that the account list is sorted by filename. + wantAccounts := make([]accounts.Account, len(accs)) + copy(wantAccounts, accs) + sort.Sort(accountsByURL(wantAccounts)) + list := cache.accounts() + if !reflect.DeepEqual(list, wantAccounts) { + t.Fatalf("got accounts: %s\nwant %s", spew.Sdump(accs), spew.Sdump(wantAccounts)) + } + for _, a := range accs { + if !cache.hasAddress(a.Address) { + t.Errorf("expected hasAccount(%x) to return true", a.Address) + } + } + if cache.hasAddress(common.HexToAddress("fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e")) { + t.Errorf("expected hasAccount(%x) to return false", common.HexToAddress("fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e")) + } + + // Delete a few keys from the cache. + for i := 0; i < len(accs); i += 2 { + cache.delete(wantAccounts[i]) + } + cache.delete(accounts.Account{Address: common.HexToAddress("fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e"), URL: accounts.URL{Scheme: KeyStoreScheme, Path: "something"}}) + + // Check content again after deletion. 
+ wantAccountsAfterDelete := []accounts.Account{ + wantAccounts[1], + wantAccounts[3], + wantAccounts[5], + } + list = cache.accounts() + if !reflect.DeepEqual(list, wantAccountsAfterDelete) { + t.Fatalf("got accounts after delete: %s\nwant %s", spew.Sdump(list), spew.Sdump(wantAccountsAfterDelete)) + } + for _, a := range wantAccountsAfterDelete { + if !cache.hasAddress(a.Address) { + t.Errorf("expected hasAccount(%x) to return true", a.Address) + } + } + if cache.hasAddress(wantAccounts[0].Address) { + t.Errorf("expected hasAccount(%x) to return false", wantAccounts[0].Address) + } +} + +func TestCacheFind(t *testing.T) { + dir := filepath.Join("testdata", "dir") + cache, _ := newAccountCache(dir) + cache.watcher.running = true // prevent unexpected reloads + + accs := []accounts.Account{ + { + Address: common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), + URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, "a.key")}, + }, + { + Address: common.HexToAddress("2cac1adea150210703ba75ed097ddfe24e14f213"), + URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, "b.key")}, + }, + { + Address: common.HexToAddress("d49ff4eeb0b2686ed89c0fc0f2b6ea533ddbbd5e"), + URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, "c.key")}, + }, + { + Address: common.HexToAddress("d49ff4eeb0b2686ed89c0fc0f2b6ea533ddbbd5e"), + URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, "c2.key")}, + }, + } + for _, a := range accs { + cache.add(a) + } + + nomatchAccount := accounts.Account{ + Address: common.HexToAddress("f466859ead1932d743d622cb74fc058882e8648a"), + URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, "something")}, + } + tests := []struct { + Query accounts.Account + WantResult accounts.Account + WantError error + }{ + // by address + {Query: accounts.Account{Address: accs[0].Address}, WantResult: accs[0]}, + // by file + {Query: accounts.Account{URL: accs[0].URL}, WantResult: accs[0]}, + // by basename + {Query: accounts.Account{URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Base(accs[0].URL.Path)}}, WantResult: accs[0]}, + // by file and address + {Query: accs[0], WantResult: accs[0]}, + // ambiguous address, tie resolved by file + {Query: accs[2], WantResult: accs[2]}, + // ambiguous address error + { + Query: accounts.Account{Address: accs[2].Address}, + WantError: &AmbiguousAddrError{ + Addr: accs[2].Address, + Matches: []accounts.Account{accs[2], accs[3]}, + }, + }, + // no match error + {Query: nomatchAccount, WantError: ErrNoMatch}, + {Query: accounts.Account{URL: nomatchAccount.URL}, WantError: ErrNoMatch}, + {Query: accounts.Account{URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Base(nomatchAccount.URL.Path)}}, WantError: ErrNoMatch}, + {Query: accounts.Account{Address: nomatchAccount.Address}, WantError: ErrNoMatch}, + } + for i, test := range tests { + a, err := cache.find(test.Query) + if !reflect.DeepEqual(err, test.WantError) { + t.Errorf("test %d: error mismatch for query %v\ngot %q\nwant %q", i, test.Query, err, test.WantError) + continue + } + if a != test.WantResult { + t.Errorf("test %d: result mismatch for query %v\ngot %v\nwant %v", i, test.Query, a, test.WantResult) + continue + } + } +} + +func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error { + var list []accounts.Account + for d := 200 * time.Millisecond; d < 16*time.Second; d *= 2 { + list = ks.Accounts() + if reflect.DeepEqual(list, wantAccounts) { + // ks should have also received change notifications 
+ select { + case <-ks.changes: + default: + return fmt.Errorf("wasn't notified of new accounts") + } + return nil + } + time.Sleep(d) + } + return fmt.Errorf("\ngot %v\nwant %v", list, wantAccounts) +} + +// TestUpdatedKeyfileContents tests that updating the contents of a keystore file +// is noticed by the watcher, and the account cache is updated accordingly +func TestUpdatedKeyfileContents(t *testing.T) { + t.Parallel() + + // Create a temporary kesytore to test with + rand.Seed(time.Now().UnixNano()) + dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int())) + ks := NewKeyStore(dir, LightScryptN, LightScryptP) + + list := ks.Accounts() + if len(list) > 0 { + t.Error("initial account list not empty:", list) + } + time.Sleep(100 * time.Millisecond) + + // Create the directory and copy a key file into it. + os.MkdirAll(dir, 0700) + defer os.RemoveAll(dir) + file := filepath.Join(dir, "aaa") + + // Place one of our testfiles in there + if err := cp.CopyFile(file, cachetestAccounts[0].URL.Path); err != nil { + t.Fatal(err) + } + + // ks should see the account. + wantAccounts := []accounts.Account{cachetestAccounts[0]} + wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file} + if err := waitForAccounts(wantAccounts, ks); err != nil { + t.Error(err) + return + } + + // needed so that modTime of `file` is different to its current value after forceCopyFile + time.Sleep(1000 * time.Millisecond) + + // Now replace file contents + if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil { + t.Fatal(err) + return + } + wantAccounts = []accounts.Account{cachetestAccounts[1]} + wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file} + if err := waitForAccounts(wantAccounts, ks); err != nil { + t.Errorf("First replacement failed") + t.Error(err) + return + } + + // needed so that modTime of `file` is different to its current value after forceCopyFile + time.Sleep(1000 * time.Millisecond) + + // Now replace file contents again + if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil { + t.Fatal(err) + return + } + wantAccounts = []accounts.Account{cachetestAccounts[2]} + wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file} + if err := waitForAccounts(wantAccounts, ks); err != nil { + t.Errorf("Second replacement failed") + t.Error(err) + return + } + + // needed so that modTime of `file` is different to its current value after ioutil.WriteFile + time.Sleep(1000 * time.Millisecond) + + // Now replace file contents with crap + if err := ioutil.WriteFile(file, []byte("foo"), 0644); err != nil { + t.Fatal(err) + return + } + if err := waitForAccounts([]accounts.Account{}, ks); err != nil { + t.Errorf("Emptying account file failed") + t.Error(err) + return + } +} + +// forceCopyFile is like cp.CopyFile, but doesn't complain if the destination exists. +func forceCopyFile(dst, src string) error { + data, err := ioutil.ReadFile(src) + if err != nil { + return err + } + return ioutil.WriteFile(dst, data, 0644) +} diff --git a/src/accounts/keystore/file_cache.go b/src/accounts/keystore/file_cache.go new file mode 100644 index 00000000..8b309321 --- /dev/null +++ b/src/accounts/keystore/file_cache.go @@ -0,0 +1,102 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package keystore + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + "time" + + mapset "github.com/deckarep/golang-set" + "github.com/ethereum/go-ethereum/log" +) + +// fileCache is a cache of files seen during scan of keystore. +type fileCache struct { + all mapset.Set // Set of all files from the keystore folder + lastMod time.Time // Last time instance when a file was modified + mu sync.Mutex +} + +// scan performs a new scan on the given directory, compares against the already +// cached filenames, and returns file sets: creates, deletes, updates. +func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, error) { + t0 := time.Now() + + // List all the failes from the keystore folder + files, err := ioutil.ReadDir(keyDir) + if err != nil { + return nil, nil, nil, err + } + t1 := time.Now() + + fc.mu.Lock() + defer fc.mu.Unlock() + + // Iterate all the files and gather their metadata + all := mapset.NewThreadUnsafeSet() + mods := mapset.NewThreadUnsafeSet() + + var newLastMod time.Time + for _, fi := range files { + path := filepath.Join(keyDir, fi.Name()) + // Skip any non-key files from the folder + if nonKeyFile(fi) { + log.Trace("Ignoring file on account scan", "path", path) + continue + } + // Gather the set of all and fresly modified files + all.Add(path) + + modified := fi.ModTime() + if modified.After(fc.lastMod) { + mods.Add(path) + } + if modified.After(newLastMod) { + newLastMod = modified + } + } + t2 := time.Now() + + // Update the tracked files and return the three sets + deletes := fc.all.Difference(all) // Deletes = previous - current + creates := all.Difference(fc.all) // Creates = current - previous + updates := mods.Difference(creates) // Updates = modified - creates + + fc.all, fc.lastMod = all, newLastMod + t3 := time.Now() + + // Report on the scanning stats and return + log.Debug("FS scan times", "list", t1.Sub(t0), "set", t2.Sub(t1), "diff", t3.Sub(t2)) + return creates, deletes, updates, nil +} + +// nonKeyFile ignores editor backups, hidden files and folders/symlinks. +func nonKeyFile(fi os.FileInfo) bool { + // Skip editor backups and UNIX-style hidden files. + if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") { + return true + } + // Skip misc special files, directories (yes, symlinks too). + if fi.IsDir() || fi.Mode()&os.ModeType != 0 { + return true + } + return false +} diff --git a/src/accounts/keystore/key.go b/src/accounts/keystore/key.go new file mode 100644 index 00000000..2b815ce0 --- /dev/null +++ b/src/accounts/keystore/key.go @@ -0,0 +1,238 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package keystore + +import ( + "bytes" + "crypto/ecdsa" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/google/uuid" +) + +const ( + version = 3 +) + +type Key struct { + Id uuid.UUID // Version 4 "random" for unique id not derived from key data + // to simplify lookups we also store the address + Address common.Address + // we only store privkey as pubkey/address can be derived from it + // privkey in this struct is always in plaintext + PrivateKey *ecdsa.PrivateKey +} + +type keyStore interface { + // Loads and decrypts the key from disk. + GetKey(addr common.Address, filename string, auth string) (*Key, error) + // Writes and encrypts the key. + StoreKey(filename string, k *Key, auth string) error + // Joins filename with the key directory unless it is already absolute. + JoinPath(filename string) string +} + +type plainKeyJSON struct { + Address string `json:"address"` + PrivateKey string `json:"privatekey"` + Id string `json:"id"` + Version int `json:"version"` +} + +type encryptedKeyJSONV3 struct { + Address string `json:"address"` + Crypto CryptoJSON `json:"crypto"` + Id string `json:"id"` + Version int `json:"version"` +} + +type encryptedKeyJSONV1 struct { + Address string `json:"address"` + Crypto CryptoJSON `json:"crypto"` + Id string `json:"id"` + Version string `json:"version"` +} + +type CryptoJSON struct { + Cipher string `json:"cipher"` + CipherText string `json:"ciphertext"` + CipherParams cipherparamsJSON `json:"cipherparams"` + KDF string `json:"kdf"` + KDFParams map[string]interface{} `json:"kdfparams"` + MAC string `json:"mac"` +} + +type cipherparamsJSON struct { + IV string `json:"iv"` +} + +func (k *Key) MarshalJSON() (j []byte, err error) { + jStruct := plainKeyJSON{ + hex.EncodeToString(k.Address[:]), + hex.EncodeToString(crypto.FromECDSA(k.PrivateKey)), + k.Id.String(), + version, + } + j, err = json.Marshal(jStruct) + return j, err +} + +func (k *Key) UnmarshalJSON(j []byte) (err error) { + keyJSON := new(plainKeyJSON) + err = json.Unmarshal(j, &keyJSON) + if err != nil { + return err + } + + u := new(uuid.UUID) + *u, err = uuid.Parse(keyJSON.Id) + if err != nil { + return err + } + k.Id = *u + addr, err := hex.DecodeString(keyJSON.Address) + if err != nil { + return err + } + privkey, err := crypto.HexToECDSA(keyJSON.PrivateKey) + if err != nil { + return err + } + + k.Address = common.BytesToAddress(addr) + k.PrivateKey = privkey + + return nil +} + +func newKeyFromECDSA(privateKeyECDSA *ecdsa.PrivateKey) *Key { + id, err := uuid.NewRandom() + if err != nil { + panic(fmt.Sprintf("Could not create random uuid: %v", err)) + } + key := &Key{ + Id: id, + Address: 
crypto.PubkeyToAddress(privateKeyECDSA.PublicKey), + PrivateKey: privateKeyECDSA, + } + return key +} + +// NewKeyForDirectICAP generates a key whose address fits into < 155 bits so it can fit +// into the Direct ICAP spec. for simplicity and easier compatibility with other libs, we +// retry until the first byte is 0. +func NewKeyForDirectICAP(rand io.Reader) *Key { + randBytes := make([]byte, 64) + _, err := rand.Read(randBytes) + if err != nil { + panic("key generation: could not read from random source: " + err.Error()) + } + reader := bytes.NewReader(randBytes) + privateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), reader) + if err != nil { + panic("key generation: ecdsa.GenerateKey failed: " + err.Error()) + } + key := newKeyFromECDSA(privateKeyECDSA) + if !strings.HasPrefix(key.Address.Hex(), "0x00") { + return NewKeyForDirectICAP(rand) + } + return key +} + +func newKey(rand io.Reader) (*Key, error) { + privateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), rand) + if err != nil { + return nil, err + } + return newKeyFromECDSA(privateKeyECDSA), nil +} + +func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Account, error) { + key, err := newKey(rand) + if err != nil { + return nil, accounts.Account{}, err + } + a := accounts.Account{ + Address: key.Address, + URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))}, + } + if err := ks.StoreKey(a.URL.Path, key, auth); err != nil { + zeroKey(key.PrivateKey) + return nil, a, err + } + return key, a, err +} + +func writeTemporaryKeyFile(file string, content []byte) (string, error) { + // Create the keystore directory with appropriate permissions + // in case it is not present yet. + const dirPerm = 0700 + if err := os.MkdirAll(filepath.Dir(file), dirPerm); err != nil { + return "", err + } + // Atomic write: create a temporary hidden file first + // then move it into place. TempFile assigns mode 0600. + f, err := ioutil.TempFile(filepath.Dir(file), "."+filepath.Base(file)+".tmp") + if err != nil { + return "", err + } + if _, err := f.Write(content); err != nil { + f.Close() + os.Remove(f.Name()) + return "", err + } + f.Close() + return f.Name(), nil +} + +func writeKeyFile(file string, content []byte) error { + name, err := writeTemporaryKeyFile(file, content) + if err != nil { + return err + } + return os.Rename(name, file) +} + +// keyFileName implements the naming convention for keyfiles: +// UTC---
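// For a hypothetical address 0x0011223344556677889900112233445566778899 created
// at 2021-01-02 15:04:05.123456789 UTC, the generated name would look like
//
//	UTC--2021-01-02T15-04-05.123456789Z--0011223344556677889900112233445566778899
//
// i.e. "UTC--" + toISO8601(creation time) + "--" + hex-encoded address, with
// dashes instead of colons so the name stays portable across filesystems.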
+func keyFileName(keyAddr common.Address) string { + ts := time.Now().UTC() + return fmt.Sprintf("UTC--%s--%s", toISO8601(ts), hex.EncodeToString(keyAddr[:])) +} + +func toISO8601(t time.Time) string { + var tz string + name, offset := t.Zone() + if name == "UTC" { + tz = "Z" + } else { + tz = fmt.Sprintf("%03d00", offset/3600) + } + return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s", + t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz) +} diff --git a/src/accounts/keystore/keystore.go b/src/accounts/keystore/keystore.go new file mode 100644 index 00000000..2b0ccf1c --- /dev/null +++ b/src/accounts/keystore/keystore.go @@ -0,0 +1,519 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package keystore implements encrypted storage of secp256k1 private keys. +// +// Keys are stored as encrypted JSON files according to the Web3 Secret Storage specification. +// See https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition for more information. +package keystore + +import ( + "crypto/ecdsa" + crand "crypto/rand" + "errors" + "math/big" + "os" + "path/filepath" + "reflect" + "runtime" + "sync" + "time" + + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" +) + +var ( + ErrLocked = accounts.NewAuthNeededError("password or unlock") + ErrNoMatch = errors.New("no key for given address or file") + ErrDecrypt = errors.New("could not decrypt key with given password") + + // ErrAccountAlreadyExists is returned if an account attempted to import is + // already present in the keystore. + ErrAccountAlreadyExists = errors.New("account already exists") +) + +// KeyStoreType is the reflect type of a keystore backend. +var KeyStoreType = reflect.TypeOf(&KeyStore{}) + +// KeyStoreScheme is the protocol scheme prefixing account and wallet URLs. +const KeyStoreScheme = "keystore" + +// Maximum time between wallet refreshes (if filesystem notifications don't work). +const walletRefreshCycle = 3 * time.Second + +// KeyStore manages a key storage directory on disk. 
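//
// A minimal usage sketch (path, passphrase and hash value are illustrative
// only; error handling is elided):
//
//	ks := NewKeyStore("/tmp/keystore", StandardScryptN, StandardScryptP)
//	acc, _ := ks.NewAccount("correct horse battery staple")
//	_ = ks.Unlock(acc, "correct horse battery staple")
//	sig, _ := ks.SignHash(acc, make([]byte, 32))
//	_ = sig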
+type KeyStore struct { + storage keyStore // Storage backend, might be cleartext or encrypted + cache *accountCache // In-memory account cache over the filesystem storage + changes chan struct{} // Channel receiving change notifications from the cache + unlocked map[common.Address]*unlocked // Currently unlocked account (decrypted private keys) + + wallets []accounts.Wallet // Wallet wrappers around the individual key files + updateFeed event.Feed // Event feed to notify wallet additions/removals + updateScope event.SubscriptionScope // Subscription scope tracking current live listeners + updating bool // Whether the event notification loop is running + + mu sync.RWMutex + importMu sync.Mutex // Import Mutex locks the import to prevent two insertions from racing +} + +type unlocked struct { + *Key + abort chan struct{} +} + +// NewKeyStore creates a keystore for the given directory. +func NewKeyStore(keydir string, scryptN, scryptP int) *KeyStore { + keydir, _ = filepath.Abs(keydir) + ks := &KeyStore{storage: &keyStorePassphrase{keydir, scryptN, scryptP, false}} + ks.init(keydir) + return ks +} + +// NewPlaintextKeyStore creates a keystore for the given directory. +// Deprecated: Use NewKeyStore. +func NewPlaintextKeyStore(keydir string) *KeyStore { + keydir, _ = filepath.Abs(keydir) + ks := &KeyStore{storage: &keyStorePlain{keydir}} + ks.init(keydir) + return ks +} + +func (ks *KeyStore) init(keydir string) { + // Lock the mutex since the account cache might call back with events + ks.mu.Lock() + defer ks.mu.Unlock() + + // Initialize the set of unlocked keys and the account cache + ks.unlocked = make(map[common.Address]*unlocked) + ks.cache, ks.changes = newAccountCache(keydir) + + // TODO: In order for this finalizer to work, there must be no references + // to ks. addressCache doesn't keep a reference but unlocked keys do, + // so the finalizer will not trigger until all timed unlocks have expired. + runtime.SetFinalizer(ks, func(m *KeyStore) { + m.cache.close() + }) + // Create the initial list of wallets from the cache + accs := ks.cache.accounts() + ks.wallets = make([]accounts.Wallet, len(accs)) + for i := 0; i < len(accs); i++ { + ks.wallets[i] = &keystoreWallet{account: accs[i], keystore: ks} + } +} + +// Wallets implements accounts.Backend, returning all single-key wallets from the +// keystore directory. +func (ks *KeyStore) Wallets() []accounts.Wallet { + // Make sure the list of wallets is in sync with the account cache + ks.refreshWallets() + + ks.mu.RLock() + defer ks.mu.RUnlock() + + cpy := make([]accounts.Wallet, len(ks.wallets)) + copy(cpy, ks.wallets) + return cpy +} + +// refreshWallets retrieves the current account list and based on that does any +// necessary wallet refreshes. 
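//
// Both the cached account list and ks.wallets are kept sorted by URL, so the
// loop below is a single-pass sorted merge: wallets that sort before the next
// account are dropped (WalletDropped), accounts without a matching wallet get
// a fresh wrapper (WalletArrived), and exact matches are carried over as-is.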
+func (ks *KeyStore) refreshWallets() { + // Retrieve the current list of accounts + ks.mu.Lock() + accs := ks.cache.accounts() + + // Transform the current list of wallets into the new one + var ( + wallets = make([]accounts.Wallet, 0, len(accs)) + events []accounts.WalletEvent + ) + + for _, account := range accs { + // Drop wallets while they were in front of the next account + for len(ks.wallets) > 0 && ks.wallets[0].URL().Cmp(account.URL) < 0 { + events = append(events, accounts.WalletEvent{Wallet: ks.wallets[0], Kind: accounts.WalletDropped}) + ks.wallets = ks.wallets[1:] + } + // If there are no more wallets or the account is before the next, wrap new wallet + if len(ks.wallets) == 0 || ks.wallets[0].URL().Cmp(account.URL) > 0 { + wallet := &keystoreWallet{account: account, keystore: ks} + + events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived}) + wallets = append(wallets, wallet) + continue + } + // If the account is the same as the first wallet, keep it + if ks.wallets[0].Accounts()[0] == account { + wallets = append(wallets, ks.wallets[0]) + ks.wallets = ks.wallets[1:] + continue + } + } + // Drop any leftover wallets and set the new batch + for _, wallet := range ks.wallets { + events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletDropped}) + } + ks.wallets = wallets + ks.mu.Unlock() + + // Fire all wallet events and return + for _, event := range events { + ks.updateFeed.Send(event) + } +} + +// Subscribe implements accounts.Backend, creating an async subscription to +// receive notifications on the addition or removal of keystore wallets. +func (ks *KeyStore) Subscribe(sink chan<- accounts.WalletEvent) event.Subscription { + // We need the mutex to reliably start/stop the update loop + ks.mu.Lock() + defer ks.mu.Unlock() + + // Subscribe the caller and track the subscriber count + sub := ks.updateScope.Track(ks.updateFeed.Subscribe(sink)) + + // Subscribers require an active notification loop, start it + if !ks.updating { + ks.updating = true + go ks.updater() + } + return sub +} + +// updater is responsible for maintaining an up-to-date list of wallets stored in +// the keystore, and for firing wallet addition/removal events. It listens for +// account change events from the underlying account cache, and also periodically +// forces a manual refresh (only triggers for systems where the filesystem notifier +// is not running). +func (ks *KeyStore) updater() { + for { + // Wait for an account update or a refresh timeout + select { + case <-ks.changes: + case <-time.After(walletRefreshCycle): + } + // Run the wallet refresher + ks.refreshWallets() + + // If all our subscribers left, stop the updater + ks.mu.Lock() + if ks.updateScope.Count() == 0 { + ks.updating = false + ks.mu.Unlock() + return + } + ks.mu.Unlock() + } +} + +// HasAddress reports whether a key with the given address is present. +func (ks *KeyStore) HasAddress(addr common.Address) bool { + return ks.cache.hasAddress(addr) +} + +// Accounts returns all key files present in the directory. +func (ks *KeyStore) Accounts() []accounts.Account { + return ks.cache.accounts() +} + +// Delete deletes the key matched by account if the passphrase is correct. +// If the account contains no filename, the address must match a unique key. +func (ks *KeyStore) Delete(a accounts.Account, passphrase string) error { + // Decrypting the key isn't really necessary, but we do + // it anyway to check the password and zero out the key + // immediately afterwards. 
+ a, key, err := ks.getDecryptedKey(a, passphrase) + if key != nil { + zeroKey(key.PrivateKey) + } + if err != nil { + return err + } + // The order is crucial here. The key is dropped from the + // cache after the file is gone so that a reload happening in + // between won't insert it into the cache again. + err = os.Remove(a.URL.Path) + if err == nil { + ks.cache.delete(a) + ks.refreshWallets() + } + return err +} + +// SignHash calculates a ECDSA signature for the given hash. The produced +// signature is in the [R || S || V] format where V is 0 or 1. +func (ks *KeyStore) SignHash(a accounts.Account, hash []byte) ([]byte, error) { + // Look up the key to sign with and abort if it cannot be found + ks.mu.RLock() + defer ks.mu.RUnlock() + + unlockedKey, found := ks.unlocked[a.Address] + if !found { + return nil, ErrLocked + } + // Sign the hash using plain ECDSA operations + return crypto.Sign(hash, unlockedKey.PrivateKey) +} + +// SignTx signs the given transaction with the requested account. +func (ks *KeyStore) SignTx(a accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) { + // Look up the key to sign with and abort if it cannot be found + ks.mu.RLock() + defer ks.mu.RUnlock() + + unlockedKey, found := ks.unlocked[a.Address] + if !found { + return nil, ErrLocked + } + + // start quorum specific + if tx.IsPrivate() { + log.Info("Private transaction signing with QuorumPrivateTxSigner") + return types.SignTx(tx, types.QuorumPrivateTxSigner{}, unlockedKey.PrivateKey) + } // End quorum specific + + // Depending on the presence of the chain ID, sign with 2718 or homestead + signer := types.LatestSignerForChainID(chainID) + return types.SignTx(tx, signer, unlockedKey.PrivateKey) +} + +// SignHashWithPassphrase signs hash if the private key matching the given address +// can be decrypted with the given passphrase. The produced signature is in the +// [R || S || V] format where V is 0 or 1. +func (ks *KeyStore) SignHashWithPassphrase(a accounts.Account, passphrase string, hash []byte) (signature []byte, err error) { + _, key, err := ks.getDecryptedKey(a, passphrase) + if err != nil { + return nil, err + } + defer zeroKey(key.PrivateKey) + return crypto.Sign(hash, key.PrivateKey) +} + +// SignTxWithPassphrase signs the transaction if the private key matching the +// given address can be decrypted with the given passphrase. +func (ks *KeyStore) SignTxWithPassphrase(a accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) { + _, key, err := ks.getDecryptedKey(a, passphrase) + if err != nil { + return nil, err + } + defer zeroKey(key.PrivateKey) + + if tx.IsPrivate() { + return types.SignTx(tx, types.QuorumPrivateTxSigner{}, key.PrivateKey) + } + // Depending on the presence of the chain ID, sign with or without replay protection. + signer := types.LatestSignerForChainID(chainID) + return types.SignTx(tx, signer, key.PrivateKey) +} + +// Unlock unlocks the given account indefinitely. +func (ks *KeyStore) Unlock(a accounts.Account, passphrase string) error { + return ks.TimedUnlock(a, passphrase, 0) +} + +// Lock removes the private key with the given address from memory. +func (ks *KeyStore) Lock(addr common.Address) error { + ks.mu.Lock() + if unl, found := ks.unlocked[addr]; found { + ks.mu.Unlock() + ks.expire(addr, unl, time.Duration(0)*time.Nanosecond) + } else { + ks.mu.Unlock() + } + return nil +} + +// TimedUnlock unlocks the given account with the passphrase. 
The account +// stays unlocked for the duration of timeout. A timeout of 0 unlocks the account +// until the program exits. The account must match a unique key file. +// +// If the account address is already unlocked for a duration, TimedUnlock extends or +// shortens the active unlock timeout. If the address was previously unlocked +// indefinitely the timeout is not altered. +func (ks *KeyStore) TimedUnlock(a accounts.Account, passphrase string, timeout time.Duration) error { + a, key, err := ks.getDecryptedKey(a, passphrase) + if err != nil { + return err + } + + ks.mu.Lock() + defer ks.mu.Unlock() + u, found := ks.unlocked[a.Address] + if found { + if u.abort == nil { + // The address was unlocked indefinitely, so unlocking + // it with a timeout would be confusing. + zeroKey(key.PrivateKey) + return nil + } + // Terminate the expire goroutine and replace it below. + close(u.abort) + } + if timeout > 0 { + u = &unlocked{Key: key, abort: make(chan struct{})} + go ks.expire(a.Address, u, timeout) + } else { + u = &unlocked{Key: key} + } + ks.unlocked[a.Address] = u + return nil +} + +// Find resolves the given account into a unique entry in the keystore. +func (ks *KeyStore) Find(a accounts.Account) (accounts.Account, error) { + ks.cache.maybeReload() + ks.cache.mu.Lock() + a, err := ks.cache.find(a) + ks.cache.mu.Unlock() + return a, err +} + +func (ks *KeyStore) getDecryptedKey(a accounts.Account, auth string) (accounts.Account, *Key, error) { + a, err := ks.Find(a) + if err != nil { + return a, nil, err + } + key, err := ks.storage.GetKey(a.Address, a.URL.Path, auth) + return a, key, err +} + +func (ks *KeyStore) expire(addr common.Address, u *unlocked, timeout time.Duration) { + t := time.NewTimer(timeout) + defer t.Stop() + select { + case <-u.abort: + // just quit + case <-t.C: + ks.mu.Lock() + // only drop if it's still the same key instance that dropLater + // was launched with. we can check that using pointer equality + // because the map stores a new pointer every time the key is + // unlocked. + if ks.unlocked[addr] == u { + zeroKey(u.PrivateKey) + delete(ks.unlocked, addr) + } + ks.mu.Unlock() + } +} + +// NewAccount generates a new key and stores it into the key directory, +// encrypting it with the passphrase. +func (ks *KeyStore) NewAccount(passphrase string) (accounts.Account, error) { + _, account, err := storeNewKey(ks.storage, crand.Reader, passphrase) + if err != nil { + return accounts.Account{}, err + } + // Add the account to the cache immediately rather + // than waiting for file system notifications to pick it up. + ks.cache.add(account) + ks.refreshWallets() + return account, nil +} + +// Export exports as a JSON key, encrypted with newPassphrase. +func (ks *KeyStore) Export(a accounts.Account, passphrase, newPassphrase string) (keyJSON []byte, err error) { + _, key, err := ks.getDecryptedKey(a, passphrase) + if err != nil { + return nil, err + } + var N, P int + if store, ok := ks.storage.(*keyStorePassphrase); ok { + N, P = store.scryptN, store.scryptP + } else { + N, P = StandardScryptN, StandardScryptP + } + return EncryptKey(key, newPassphrase, N, P) +} + +// Import stores the given encrypted JSON key into the key directory. 
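//
// The blob is decrypted with passphrase and re-encrypted under newPassphrase
// before it is written to disk. A rough sketch (values illustrative):
//
//	acc, err := ks.Import(keyJSON, "old passphrase", "new passphrase")
//	if err == ErrAccountAlreadyExists {
//		// acc.Address is still populated for the duplicate key
//	}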
+func (ks *KeyStore) Import(keyJSON []byte, passphrase, newPassphrase string) (accounts.Account, error) { + key, err := DecryptKey(keyJSON, passphrase) + if key != nil && key.PrivateKey != nil { + defer zeroKey(key.PrivateKey) + } + if err != nil { + return accounts.Account{}, err + } + ks.importMu.Lock() + defer ks.importMu.Unlock() + + if ks.cache.hasAddress(key.Address) { + return accounts.Account{ + Address: key.Address, + }, ErrAccountAlreadyExists + } + return ks.importKey(key, newPassphrase) +} + +// ImportECDSA stores the given key into the key directory, encrypting it with the passphrase. +func (ks *KeyStore) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (accounts.Account, error) { + ks.importMu.Lock() + defer ks.importMu.Unlock() + + key := newKeyFromECDSA(priv) + if ks.cache.hasAddress(key.Address) { + return accounts.Account{ + Address: key.Address, + }, ErrAccountAlreadyExists + } + return ks.importKey(key, passphrase) +} + +func (ks *KeyStore) importKey(key *Key, passphrase string) (accounts.Account, error) { + a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.storage.JoinPath(keyFileName(key.Address))}} + if err := ks.storage.StoreKey(a.URL.Path, key, passphrase); err != nil { + return accounts.Account{}, err + } + ks.cache.add(a) + ks.refreshWallets() + return a, nil +} + +// Update changes the passphrase of an existing account. +func (ks *KeyStore) Update(a accounts.Account, passphrase, newPassphrase string) error { + a, key, err := ks.getDecryptedKey(a, passphrase) + if err != nil { + return err + } + return ks.storage.StoreKey(a.URL.Path, key, newPassphrase) +} + +// ImportPreSaleKey decrypts the given Ethereum presale wallet and stores +// a key file in the key directory. The key file is encrypted with the same passphrase. +func (ks *KeyStore) ImportPreSaleKey(keyJSON []byte, passphrase string) (accounts.Account, error) { + a, _, err := importPreSaleKey(ks.storage, keyJSON, passphrase) + if err != nil { + return a, err + } + ks.cache.add(a) + ks.refreshWallets() + return a, nil +} + +// zeroKey zeroes a private key in memory. +func zeroKey(k *ecdsa.PrivateKey) { + b := k.D.Bits() + for i := range b { + b[i] = 0 + } +} diff --git a/src/accounts/keystore/keystore_test.go b/src/accounts/keystore/keystore_test.go new file mode 100644 index 00000000..cb5de11c --- /dev/null +++ b/src/accounts/keystore/keystore_test.go @@ -0,0 +1,474 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package keystore + +import ( + "io/ioutil" + "math/rand" + "os" + "runtime" + "sort" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/event" +) + +var testSigData = make([]byte, 32) + +func TestKeyStore(t *testing.T) { + dir, ks := tmpKeyStore(t, true) + defer os.RemoveAll(dir) + + a, err := ks.NewAccount("foo") + if err != nil { + t.Fatal(err) + } + if !strings.HasPrefix(a.URL.Path, dir) { + t.Errorf("account file %s doesn't have dir prefix", a.URL) + } + stat, err := os.Stat(a.URL.Path) + if err != nil { + t.Fatalf("account file %s doesn't exist (%v)", a.URL, err) + } + if runtime.GOOS != "windows" && stat.Mode() != 0600 { + t.Fatalf("account file has wrong mode: got %o, want %o", stat.Mode(), 0600) + } + if !ks.HasAddress(a.Address) { + t.Errorf("HasAccount(%x) should've returned true", a.Address) + } + if err := ks.Update(a, "foo", "bar"); err != nil { + t.Errorf("Update error: %v", err) + } + if err := ks.Delete(a, "bar"); err != nil { + t.Errorf("Delete error: %v", err) + } + if common.FileExist(a.URL.Path) { + t.Errorf("account file %s should be gone after Delete", a.URL) + } + if ks.HasAddress(a.Address) { + t.Errorf("HasAccount(%x) should've returned true after Delete", a.Address) + } +} + +func TestSign(t *testing.T) { + dir, ks := tmpKeyStore(t, true) + defer os.RemoveAll(dir) + + pass := "" // not used but required by API + a1, err := ks.NewAccount(pass) + if err != nil { + t.Fatal(err) + } + if err := ks.Unlock(a1, ""); err != nil { + t.Fatal(err) + } + if _, err := ks.SignHash(accounts.Account{Address: a1.Address}, testSigData); err != nil { + t.Fatal(err) + } +} + +func TestSignWithPassphrase(t *testing.T) { + dir, ks := tmpKeyStore(t, true) + defer os.RemoveAll(dir) + + pass := "passwd" + acc, err := ks.NewAccount(pass) + if err != nil { + t.Fatal(err) + } + + if _, unlocked := ks.unlocked[acc.Address]; unlocked { + t.Fatal("expected account to be locked") + } + + _, err = ks.SignHashWithPassphrase(acc, pass, testSigData) + if err != nil { + t.Fatal(err) + } + + if _, unlocked := ks.unlocked[acc.Address]; unlocked { + t.Fatal("expected account to be locked") + } + + if _, err = ks.SignHashWithPassphrase(acc, "invalid passwd", testSigData); err == nil { + t.Fatal("expected SignHashWithPassphrase to fail with invalid password") + } +} + +func TestTimedUnlock(t *testing.T) { + dir, ks := tmpKeyStore(t, true) + defer os.RemoveAll(dir) + + pass := "foo" + a1, err := ks.NewAccount(pass) + if err != nil { + t.Fatal(err) + } + + // Signing without passphrase fails because account is locked + _, err = ks.SignHash(accounts.Account{Address: a1.Address}, testSigData) + if err != ErrLocked { + t.Fatal("Signing should've failed with ErrLocked before unlocking, got ", err) + } + + // Signing with passphrase works + if err = ks.TimedUnlock(a1, pass, 100*time.Millisecond); err != nil { + t.Fatal(err) + } + + // Signing without passphrase works because account is temp unlocked + _, err = ks.SignHash(accounts.Account{Address: a1.Address}, testSigData) + if err != nil { + t.Fatal("Signing shouldn't return an error after unlocking, got ", err) + } + + // Signing fails again after automatic locking + time.Sleep(250 * time.Millisecond) + _, err = ks.SignHash(accounts.Account{Address: a1.Address}, testSigData) + if err != ErrLocked { + t.Fatal("Signing should've failed with ErrLocked timeout expired, got ", err) + } +} + 
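// Sketch of the explicit-Lock path, for illustration: Lock should drop the
// unlocked key immediately, so signing fails with ErrLocked again without
// waiting for any timeout. (Test name and passphrase are arbitrary.)
func TestLockAfterUnlockSketch(t *testing.T) {
	dir, ks := tmpKeyStore(t, true)
	defer os.RemoveAll(dir)

	a1, err := ks.NewAccount("foo")
	if err != nil {
		t.Fatal(err)
	}
	if err := ks.Unlock(a1, "foo"); err != nil {
		t.Fatal(err)
	}
	if _, err := ks.SignHash(accounts.Account{Address: a1.Address}, testSigData); err != nil {
		t.Fatal("Signing after Unlock shouldn't return an error, got ", err)
	}
	if err := ks.Lock(a1.Address); err != nil {
		t.Fatal(err)
	}
	if _, err := ks.SignHash(accounts.Account{Address: a1.Address}, testSigData); err != ErrLocked {
		t.Fatal("Signing should've failed with ErrLocked after Lock, got ", err)
	}
}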
+func TestOverrideUnlock(t *testing.T) { + dir, ks := tmpKeyStore(t, false) + defer os.RemoveAll(dir) + + pass := "foo" + a1, err := ks.NewAccount(pass) + if err != nil { + t.Fatal(err) + } + + // Unlock indefinitely. + if err = ks.TimedUnlock(a1, pass, 5*time.Minute); err != nil { + t.Fatal(err) + } + + // Signing without passphrase works because account is temp unlocked + _, err = ks.SignHash(accounts.Account{Address: a1.Address}, testSigData) + if err != nil { + t.Fatal("Signing shouldn't return an error after unlocking, got ", err) + } + + // reset unlock to a shorter period, invalidates the previous unlock + if err = ks.TimedUnlock(a1, pass, 100*time.Millisecond); err != nil { + t.Fatal(err) + } + + // Signing without passphrase still works because account is temp unlocked + _, err = ks.SignHash(accounts.Account{Address: a1.Address}, testSigData) + if err != nil { + t.Fatal("Signing shouldn't return an error after unlocking, got ", err) + } + + // Signing fails again after automatic locking + time.Sleep(250 * time.Millisecond) + _, err = ks.SignHash(accounts.Account{Address: a1.Address}, testSigData) + if err != ErrLocked { + t.Fatal("Signing should've failed with ErrLocked timeout expired, got ", err) + } +} + +// This test should fail under -race if signing races the expiration goroutine. +func TestSignRace(t *testing.T) { + dir, ks := tmpKeyStore(t, false) + defer os.RemoveAll(dir) + + // Create a test account. + a1, err := ks.NewAccount("") + if err != nil { + t.Fatal("could not create the test account", err) + } + + if err := ks.TimedUnlock(a1, "", 15*time.Millisecond); err != nil { + t.Fatal("could not unlock the test account", err) + } + end := time.Now().Add(500 * time.Millisecond) + for time.Now().Before(end) { + if _, err := ks.SignHash(accounts.Account{Address: a1.Address}, testSigData); err == ErrLocked { + return + } else if err != nil { + t.Errorf("Sign error: %v", err) + return + } + time.Sleep(1 * time.Millisecond) + } + t.Errorf("Account did not lock within the timeout") +} + +// Tests that the wallet notifier loop starts and stops correctly based on the +// addition and removal of wallet event subscriptions. 
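// For orientation, a typical subscriber looks roughly like the sketch below
// (buffer size and the handling inside the switch are illustrative):
//
//	ch := make(chan accounts.WalletEvent, 16)
//	sub := ks.Subscribe(ch)
//	defer sub.Unsubscribe()
//	for ev := range ch {
//		switch ev.Kind {
//		case accounts.WalletArrived:
//			// a new key file appeared in the keystore directory
//		case accounts.WalletDropped:
//			// an existing key file was removed
//		}
//	}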
+func TestWalletNotifierLifecycle(t *testing.T) { + // Create a temporary kesytore to test with + dir, ks := tmpKeyStore(t, false) + defer os.RemoveAll(dir) + + // Ensure that the notification updater is not running yet + time.Sleep(250 * time.Millisecond) + ks.mu.RLock() + updating := ks.updating + ks.mu.RUnlock() + + if updating { + t.Errorf("wallet notifier running without subscribers") + } + // Subscribe to the wallet feed and ensure the updater boots up + updates := make(chan accounts.WalletEvent) + + subs := make([]event.Subscription, 2) + for i := 0; i < len(subs); i++ { + // Create a new subscription + subs[i] = ks.Subscribe(updates) + + // Ensure the notifier comes online + time.Sleep(250 * time.Millisecond) + ks.mu.RLock() + updating = ks.updating + ks.mu.RUnlock() + + if !updating { + t.Errorf("sub %d: wallet notifier not running after subscription", i) + } + } + // Unsubscribe and ensure the updater terminates eventually + for i := 0; i < len(subs); i++ { + // Close an existing subscription + subs[i].Unsubscribe() + + // Ensure the notifier shuts down at and only at the last close + for k := 0; k < int(walletRefreshCycle/(250*time.Millisecond))+2; k++ { + ks.mu.RLock() + updating = ks.updating + ks.mu.RUnlock() + + if i < len(subs)-1 && !updating { + t.Fatalf("sub %d: event notifier stopped prematurely", i) + } + if i == len(subs)-1 && !updating { + return + } + time.Sleep(250 * time.Millisecond) + } + } + t.Errorf("wallet notifier didn't terminate after unsubscribe") +} + +type walletEvent struct { + accounts.WalletEvent + a accounts.Account +} + +// Tests that wallet notifications and correctly fired when accounts are added +// or deleted from the keystore. +func TestWalletNotifications(t *testing.T) { + dir, ks := tmpKeyStore(t, false) + defer os.RemoveAll(dir) + + // Subscribe to the wallet feed and collect events. + var ( + events []walletEvent + updates = make(chan accounts.WalletEvent) + sub = ks.Subscribe(updates) + ) + defer sub.Unsubscribe() + go func() { + for { + select { + case ev := <-updates: + events = append(events, walletEvent{ev, ev.Wallet.Accounts()[0]}) + case <-sub.Err(): + close(updates) + return + } + } + }() + + // Randomly add and remove accounts. + var ( + live = make(map[common.Address]accounts.Account) + wantEvents []walletEvent + ) + for i := 0; i < 1024; i++ { + if create := len(live) == 0 || rand.Int()%4 > 0; create { + // Add a new account and ensure wallet notifications arrives + account, err := ks.NewAccount("") + if err != nil { + t.Fatalf("failed to create test account: %v", err) + } + live[account.Address] = account + wantEvents = append(wantEvents, walletEvent{accounts.WalletEvent{Kind: accounts.WalletArrived}, account}) + } else { + // Delete a random account. + var account accounts.Account + for _, a := range live { + account = a + break + } + if err := ks.Delete(account, ""); err != nil { + t.Fatalf("failed to delete test account: %v", err) + } + delete(live, account.Address) + wantEvents = append(wantEvents, walletEvent{accounts.WalletEvent{Kind: accounts.WalletDropped}, account}) + } + } + + // Shut down the event collector and check events. + sub.Unsubscribe() + for ev := range updates { + events = append(events, walletEvent{ev, ev.Wallet.Accounts()[0]}) + } + checkAccounts(t, live, ks.Wallets()) + checkEvents(t, wantEvents, events) +} + +// TestImportExport tests the import functionality of a keystore. 
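// Importing the same ECDSA key a second time is expected to fail with
// ErrAccountAlreadyExists, regardless of which passphrase is supplied.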
+func TestImportECDSA(t *testing.T) { + dir, ks := tmpKeyStore(t, true) + defer os.RemoveAll(dir) + key, err := crypto.GenerateKey() + if err != nil { + t.Fatalf("failed to generate key: %v", key) + } + if _, err = ks.ImportECDSA(key, "old"); err != nil { + t.Errorf("importing failed: %v", err) + } + if _, err = ks.ImportECDSA(key, "old"); err == nil { + t.Errorf("importing same key twice succeeded") + } + if _, err = ks.ImportECDSA(key, "new"); err == nil { + t.Errorf("importing same key twice succeeded") + } +} + +// TestImportECDSA tests the import and export functionality of a keystore. +func TestImportExport(t *testing.T) { + dir, ks := tmpKeyStore(t, true) + defer os.RemoveAll(dir) + acc, err := ks.NewAccount("old") + if err != nil { + t.Fatalf("failed to create account: %v", acc) + } + json, err := ks.Export(acc, "old", "new") + if err != nil { + t.Fatalf("failed to export account: %v", acc) + } + dir2, ks2 := tmpKeyStore(t, true) + defer os.RemoveAll(dir2) + if _, err = ks2.Import(json, "old", "old"); err == nil { + t.Errorf("importing with invalid password succeeded") + } + acc2, err := ks2.Import(json, "new", "new") + if err != nil { + t.Errorf("importing failed: %v", err) + } + if acc.Address != acc2.Address { + t.Error("imported account does not match exported account") + } + if _, err = ks2.Import(json, "new", "new"); err == nil { + t.Errorf("importing a key twice succeeded") + } + +} + +// TestImportRace tests the keystore on races. +// This test should fail under -race if importing races. +func TestImportRace(t *testing.T) { + dir, ks := tmpKeyStore(t, true) + defer os.RemoveAll(dir) + acc, err := ks.NewAccount("old") + if err != nil { + t.Fatalf("failed to create account: %v", acc) + } + json, err := ks.Export(acc, "old", "new") + if err != nil { + t.Fatalf("failed to export account: %v", acc) + } + dir2, ks2 := tmpKeyStore(t, true) + defer os.RemoveAll(dir2) + var atom uint32 + var wg sync.WaitGroup + wg.Add(2) + for i := 0; i < 2; i++ { + go func() { + defer wg.Done() + if _, err := ks2.Import(json, "new", "new"); err != nil { + atomic.AddUint32(&atom, 1) + } + + }() + } + wg.Wait() + if atom != 1 { + t.Errorf("Import is racy") + } +} + +// checkAccounts checks that all known live accounts are present in the wallet list. +func checkAccounts(t *testing.T, live map[common.Address]accounts.Account, wallets []accounts.Wallet) { + if len(live) != len(wallets) { + t.Errorf("wallet list doesn't match required accounts: have %d, want %d", len(wallets), len(live)) + return + } + liveList := make([]accounts.Account, 0, len(live)) + for _, account := range live { + liveList = append(liveList, account) + } + sort.Sort(accountsByURL(liveList)) + for j, wallet := range wallets { + if accs := wallet.Accounts(); len(accs) != 1 { + t.Errorf("wallet %d: contains invalid number of accounts: have %d, want 1", j, len(accs)) + } else if accs[0] != liveList[j] { + t.Errorf("wallet %d: account mismatch: have %v, want %v", j, accs[0], liveList[j]) + } + } +} + +// checkEvents checks that all events in 'want' are present in 'have'. Events may be present multiple times. 
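// Matching consumes events from the front of 'have' only while they equal the
// current expected event, so each expected (Kind, account) pair must show up
// at least once, in order, possibly repeated.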
+func checkEvents(t *testing.T, want []walletEvent, have []walletEvent) { + for _, wantEv := range want { + nmatch := 0 + for ; len(have) > 0; nmatch++ { + if have[0].Kind != wantEv.Kind || have[0].a != wantEv.a { + break + } + have = have[1:] + } + if nmatch == 0 { + t.Fatalf("can't find event with Kind=%v for %x", wantEv.Kind, wantEv.a.Address) + } + } +} + +func tmpKeyStore(t *testing.T, encrypted bool) (string, *KeyStore) { + d, err := ioutil.TempDir("", "eth-keystore-test") + if err != nil { + t.Fatal(err) + } + newKs := NewPlaintextKeyStore + if encrypted { + newKs = func(kd string) *KeyStore { return NewKeyStore(kd, veryLightScryptN, veryLightScryptP) } + } + return d, newKs(d) +} diff --git a/src/accounts/keystore/passphrase.go b/src/accounts/keystore/passphrase.go new file mode 100644 index 00000000..3b3e6318 --- /dev/null +++ b/src/accounts/keystore/passphrase.go @@ -0,0 +1,368 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +/* + +This key store behaves as KeyStorePlain with the difference that +the private key is encrypted and on disk uses another JSON encoding. + +The crypto is documented at https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition + +*/ + +package keystore + +import ( + "bytes" + "crypto/aes" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/crypto" + "github.com/google/uuid" + "golang.org/x/crypto/pbkdf2" + "golang.org/x/crypto/scrypt" +) + +const ( + keyHeaderKDF = "scrypt" + + // StandardScryptN is the N parameter of Scrypt encryption algorithm, using 256MB + // memory and taking approximately 1s CPU time on a modern processor. + StandardScryptN = 1 << 18 + + // StandardScryptP is the P parameter of Scrypt encryption algorithm, using 256MB + // memory and taking approximately 1s CPU time on a modern processor. + StandardScryptP = 1 + + // LightScryptN is the N parameter of Scrypt encryption algorithm, using 4MB + // memory and taking approximately 100ms CPU time on a modern processor. + LightScryptN = 1 << 12 + + // LightScryptP is the P parameter of Scrypt encryption algorithm, using 4MB + // memory and taking approximately 100ms CPU time on a modern processor. + LightScryptP = 6 + + scryptR = 8 + scryptDKLen = 32 +) + +type keyStorePassphrase struct { + keysDirPath string + scryptN int + scryptP int + // skipKeyFileVerification disables the security-feature which does + // reads and decrypts any newly created keyfiles. This should be 'false' in all + // cases except tests -- setting this to 'true' is not recommended. 
+ skipKeyFileVerification bool +} + +func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) (*Key, error) { + // Load the key from the keystore and decrypt its contents + keyjson, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + key, err := DecryptKey(keyjson, auth) + if err != nil { + return nil, err + } + // Make sure we're really operating on the requested key (no swap attacks) + if key.Address != addr { + return nil, fmt.Errorf("key content mismatch: have account %x, want %x", key.Address, addr) + } + return key, nil +} + +// StoreKey generates a key, encrypts with 'auth' and stores in the given directory +func StoreKey(dir, auth string, scryptN, scryptP int) (accounts.Account, error) { + _, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP, false}, rand.Reader, auth) + return a, err +} + +func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) error { + keyjson, err := EncryptKey(key, auth, ks.scryptN, ks.scryptP) + if err != nil { + return err + } + // Write into temporary file + tmpName, err := writeTemporaryKeyFile(filename, keyjson) + if err != nil { + return err + } + if !ks.skipKeyFileVerification { + // Verify that we can decrypt the file with the given password. + _, err = ks.GetKey(key.Address, tmpName, auth) + if err != nil { + msg := "An error was encountered when saving and verifying the keystore file. \n" + + "This indicates that the keystore is corrupted. \n" + + "The corrupted file is stored at \n%v\n" + + "Please file a ticket at:\n\n" + + "https://github.com/ethereum/go-ethereum/issues." + + "The error was : %s" + //lint:ignore ST1005 This is a message for the user + return fmt.Errorf(msg, tmpName, err) + } + } + return os.Rename(tmpName, filename) +} + +func (ks keyStorePassphrase) JoinPath(filename string) string { + if filepath.IsAbs(filename) { + return filename + } + return filepath.Join(ks.keysDirPath, filename) +} + +// Encryptdata encrypts the data given as 'data' with the password 'auth'. +func EncryptDataV3(data, auth []byte, scryptN, scryptP int) (CryptoJSON, error) { + + salt := make([]byte, 32) + if _, err := io.ReadFull(rand.Reader, salt); err != nil { + panic("reading from crypto/rand failed: " + err.Error()) + } + derivedKey, err := scrypt.Key(auth, salt, scryptN, scryptR, scryptP, scryptDKLen) + if err != nil { + return CryptoJSON{}, err + } + encryptKey := derivedKey[:16] + + iv := make([]byte, aes.BlockSize) // 16 + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + panic("reading from crypto/rand failed: " + err.Error()) + } + cipherText, err := aesCTRXOR(encryptKey, data, iv) + if err != nil { + return CryptoJSON{}, err + } + mac := crypto.Keccak256(derivedKey[16:32], cipherText) + + scryptParamsJSON := make(map[string]interface{}, 5) + scryptParamsJSON["n"] = scryptN + scryptParamsJSON["r"] = scryptR + scryptParamsJSON["p"] = scryptP + scryptParamsJSON["dklen"] = scryptDKLen + scryptParamsJSON["salt"] = hex.EncodeToString(salt) + cipherParamsJSON := cipherparamsJSON{ + IV: hex.EncodeToString(iv), + } + + cryptoStruct := CryptoJSON{ + Cipher: "aes-128-ctr", + CipherText: hex.EncodeToString(cipherText), + CipherParams: cipherParamsJSON, + KDF: keyHeaderKDF, + KDFParams: scryptParamsJSON, + MAC: hex.EncodeToString(mac), + } + return cryptoStruct, nil +} + +// EncryptKey encrypts a key using the specified scrypt parameters into a json +// blob that can be decrypted later on. 
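//
// As a rough cost model, scrypt needs about 128*N*r bytes of memory, so
// StandardScryptN (2^18) with scryptR (8) works out to roughly 256 MiB, while
// LightScryptN (2^12) needs about 4 MiB, matching the constants documented
// above. A simple round trip (passphrase illustrative):
//
//	blob, _ := EncryptKey(key, "passphrase", LightScryptN, LightScryptP)
//	key2, _ := DecryptKey(blob, "passphrase")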
+func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) { + keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32) + cryptoStruct, err := EncryptDataV3(keyBytes, []byte(auth), scryptN, scryptP) + if err != nil { + return nil, err + } + encryptedKeyJSONV3 := encryptedKeyJSONV3{ + hex.EncodeToString(key.Address[:]), + cryptoStruct, + key.Id.String(), + version, + } + return json.Marshal(encryptedKeyJSONV3) +} + +// DecryptKey decrypts a key from a json blob, returning the private key itself. +func DecryptKey(keyjson []byte, auth string) (*Key, error) { + // Parse the json into a simple map to fetch the key version + m := make(map[string]interface{}) + if err := json.Unmarshal(keyjson, &m); err != nil { + return nil, err + } + // Depending on the version try to parse one way or another + var ( + keyBytes, keyId []byte + err error + ) + if version, ok := m["version"].(string); ok && version == "1" { + k := new(encryptedKeyJSONV1) + if err := json.Unmarshal(keyjson, k); err != nil { + return nil, err + } + keyBytes, keyId, err = decryptKeyV1(k, auth) + } else { + k := new(encryptedKeyJSONV3) + if err := json.Unmarshal(keyjson, k); err != nil { + return nil, err + } + keyBytes, keyId, err = decryptKeyV3(k, auth) + } + // Handle any decryption errors and return the key + if err != nil { + return nil, err + } + key := crypto.ToECDSAUnsafe(keyBytes) + id, err := uuid.FromBytes(keyId) + if err != nil { + return nil, err + } + return &Key{ + Id: id, + Address: crypto.PubkeyToAddress(key.PublicKey), + PrivateKey: key, + }, nil +} + +func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) { + if cryptoJson.Cipher != "aes-128-ctr" { + return nil, fmt.Errorf("cipher not supported: %v", cryptoJson.Cipher) + } + mac, err := hex.DecodeString(cryptoJson.MAC) + if err != nil { + return nil, err + } + + iv, err := hex.DecodeString(cryptoJson.CipherParams.IV) + if err != nil { + return nil, err + } + + cipherText, err := hex.DecodeString(cryptoJson.CipherText) + if err != nil { + return nil, err + } + + derivedKey, err := getKDFKey(cryptoJson, auth) + if err != nil { + return nil, err + } + + calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText) + if !bytes.Equal(calculatedMAC, mac) { + return nil, ErrDecrypt + } + + plainText, err := aesCTRXOR(derivedKey[:16], cipherText, iv) + if err != nil { + return nil, err + } + return plainText, err +} + +func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) { + if keyProtected.Version != version { + return nil, nil, fmt.Errorf("version not supported: %v", keyProtected.Version) + } + keyUUID, err := uuid.Parse(keyProtected.Id) + if err != nil { + return nil, nil, err + } + keyId = keyUUID[:] + plainText, err := DecryptDataV3(keyProtected.Crypto, auth) + if err != nil { + return nil, nil, err + } + return plainText, keyId, err +} + +func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byte, keyId []byte, err error) { + keyUUID, err := uuid.Parse(keyProtected.Id) + if err != nil { + return nil, nil, err + } + keyId = keyUUID[:] + mac, err := hex.DecodeString(keyProtected.Crypto.MAC) + if err != nil { + return nil, nil, err + } + + iv, err := hex.DecodeString(keyProtected.Crypto.CipherParams.IV) + if err != nil { + return nil, nil, err + } + + cipherText, err := hex.DecodeString(keyProtected.Crypto.CipherText) + if err != nil { + return nil, nil, err + } + + derivedKey, err := getKDFKey(keyProtected.Crypto, auth) + if err != nil { + return nil, nil, 
err + } + + calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText) + if !bytes.Equal(calculatedMAC, mac) { + return nil, nil, ErrDecrypt + } + + plainText, err := aesCBCDecrypt(crypto.Keccak256(derivedKey[:16])[:16], cipherText, iv) + if err != nil { + return nil, nil, err + } + return plainText, keyId, err +} + +func getKDFKey(cryptoJSON CryptoJSON, auth string) ([]byte, error) { + authArray := []byte(auth) + salt, err := hex.DecodeString(cryptoJSON.KDFParams["salt"].(string)) + if err != nil { + return nil, err + } + dkLen := ensureInt(cryptoJSON.KDFParams["dklen"]) + + if cryptoJSON.KDF == keyHeaderKDF { + n := ensureInt(cryptoJSON.KDFParams["n"]) + r := ensureInt(cryptoJSON.KDFParams["r"]) + p := ensureInt(cryptoJSON.KDFParams["p"]) + return scrypt.Key(authArray, salt, n, r, p, dkLen) + + } else if cryptoJSON.KDF == "pbkdf2" { + c := ensureInt(cryptoJSON.KDFParams["c"]) + prf := cryptoJSON.KDFParams["prf"].(string) + if prf != "hmac-sha256" { + return nil, fmt.Errorf("unsupported PBKDF2 PRF: %s", prf) + } + key := pbkdf2.Key(authArray, salt, c, dkLen, sha256.New) + return key, nil + } + + return nil, fmt.Errorf("unsupported KDF: %s", cryptoJSON.KDF) +} + +// TODO: can we do without this when unmarshalling dynamic JSON? +// why do integers in KDF params end up as float64 and not int after +// unmarshal? +func ensureInt(x interface{}) int { + res, ok := x.(int) + if !ok { + res = int(x.(float64)) + } + return res +} diff --git a/src/accounts/keystore/passphrase_test.go b/src/accounts/keystore/passphrase_test.go new file mode 100644 index 00000000..630682ce --- /dev/null +++ b/src/accounts/keystore/passphrase_test.go @@ -0,0 +1,60 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package keystore + +import ( + "io/ioutil" + "testing" + + "github.com/ethereum/go-ethereum/common" +) + +const ( + veryLightScryptN = 2 + veryLightScryptP = 1 +) + +// Tests that a json key file can be decrypted and encrypted in multiple rounds. 
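// The veryLightScryptN/P parameters above keep each EncryptKey call cheap
// enough for unit tests; they are far too weak for protecting real keys.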
+func TestKeyEncryptDecrypt(t *testing.T) { + keyjson, err := ioutil.ReadFile("testdata/very-light-scrypt.json") + if err != nil { + t.Fatal(err) + } + password := "" + address := common.HexToAddress("45dea0fb0bba44f4fcf290bba71fd57d7117cbb8") + + // Do a few rounds of decryption and encryption + for i := 0; i < 3; i++ { + // Try a bad password first + if _, err := DecryptKey(keyjson, password+"bad"); err == nil { + t.Errorf("test %d: json key decrypted with bad password", i) + } + // Decrypt with the correct password + key, err := DecryptKey(keyjson, password) + if err != nil { + t.Fatalf("test %d: json key failed to decrypt: %v", i, err) + } + if key.Address != address { + t.Errorf("test %d: key address mismatch: have %x, want %x", i, key.Address, address) + } + // Recrypt with a new password and start over + password += "new data appended" + if keyjson, err = EncryptKey(key, password, veryLightScryptN, veryLightScryptP); err != nil { + t.Errorf("test %d: failed to recrypt key %v", i, err) + } + } +} diff --git a/src/accounts/keystore/plain.go b/src/accounts/keystore/plain.go new file mode 100644 index 00000000..f62a133c --- /dev/null +++ b/src/accounts/keystore/plain.go @@ -0,0 +1,61 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package keystore + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/ethereum/go-ethereum/common" +) + +type keyStorePlain struct { + keysDirPath string +} + +func (ks keyStorePlain) GetKey(addr common.Address, filename, auth string) (*Key, error) { + fd, err := os.Open(filename) + if err != nil { + return nil, err + } + defer fd.Close() + key := new(Key) + if err := json.NewDecoder(fd).Decode(key); err != nil { + return nil, err + } + if key.Address != addr { + return nil, fmt.Errorf("key content mismatch: have address %x, want %x", key.Address, addr) + } + return key, nil +} + +func (ks keyStorePlain) StoreKey(filename string, key *Key, auth string) error { + content, err := json.Marshal(key) + if err != nil { + return err + } + return writeKeyFile(filename, content) +} + +func (ks keyStorePlain) JoinPath(filename string) string { + if filepath.IsAbs(filename) { + return filename + } + return filepath.Join(ks.keysDirPath, filename) +} diff --git a/src/accounts/keystore/plain_test.go b/src/accounts/keystore/plain_test.go new file mode 100644 index 00000000..b8319258 --- /dev/null +++ b/src/accounts/keystore/plain_test.go @@ -0,0 +1,266 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package keystore + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +func tmpKeyStoreIface(t *testing.T, encrypted bool) (dir string, ks keyStore) { + d, err := ioutil.TempDir("", "geth-keystore-test") + if err != nil { + t.Fatal(err) + } + if encrypted { + ks = &keyStorePassphrase{d, veryLightScryptN, veryLightScryptP, true} + } else { + ks = &keyStorePlain{d} + } + return d, ks +} + +func TestKeyStorePlain(t *testing.T) { + dir, ks := tmpKeyStoreIface(t, false) + defer os.RemoveAll(dir) + + pass := "" // not used but required by API + k1, account, err := storeNewKey(ks, rand.Reader, pass) + if err != nil { + t.Fatal(err) + } + k2, err := ks.GetKey(k1.Address, account.URL.Path, pass) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(k1.Address, k2.Address) { + t.Fatal(err) + } + if !reflect.DeepEqual(k1.PrivateKey, k2.PrivateKey) { + t.Fatal(err) + } +} + +func TestKeyStorePassphrase(t *testing.T) { + dir, ks := tmpKeyStoreIface(t, true) + defer os.RemoveAll(dir) + + pass := "foo" + k1, account, err := storeNewKey(ks, rand.Reader, pass) + if err != nil { + t.Fatal(err) + } + k2, err := ks.GetKey(k1.Address, account.URL.Path, pass) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(k1.Address, k2.Address) { + t.Fatal(err) + } + if !reflect.DeepEqual(k1.PrivateKey, k2.PrivateKey) { + t.Fatal(err) + } +} + +func TestKeyStorePassphraseDecryptionFail(t *testing.T) { + dir, ks := tmpKeyStoreIface(t, true) + defer os.RemoveAll(dir) + + pass := "foo" + k1, account, err := storeNewKey(ks, rand.Reader, pass) + if err != nil { + t.Fatal(err) + } + if _, err = ks.GetKey(k1.Address, account.URL.Path, "bar"); err != ErrDecrypt { + t.Fatalf("wrong error for invalid password\ngot %q\nwant %q", err, ErrDecrypt) + } +} + +func TestImportPreSaleKey(t *testing.T) { + dir, ks := tmpKeyStoreIface(t, true) + defer os.RemoveAll(dir) + + // file content of a presale key file generated with: + // python pyethsaletool.py genwallet + // with password "foo" + fileContent := "{\"encseed\": \"26d87f5f2bf9835f9a47eefae571bc09f9107bb13d54ff12a4ec095d01f83897494cf34f7bed2ed34126ecba9db7b62de56c9d7cd136520a0427bfb11b8954ba7ac39b90d4650d3448e31185affcd74226a68f1e94b1108e6e0a4a91cdd83eba\", \"ethaddr\": \"d4584b5f6229b7be90727b0fc8c6b91bb427821f\", \"email\": \"gustav.simonsson@gmail.com\", \"btcaddr\": \"1EVknXyFC68kKNLkh6YnKzW41svSRoaAcx\"}" + pass := "foo" + account, _, err := importPreSaleKey(ks, []byte(fileContent), pass) + if err != nil { + t.Fatal(err) + } + if account.Address != common.HexToAddress("d4584b5f6229b7be90727b0fc8c6b91bb427821f") { + t.Errorf("imported account has wrong address %x", account.Address) + } + if !strings.HasPrefix(account.URL.Path, dir) { + t.Errorf("imported account file not in keystore directory: %q", account.URL) + } +} + +// Test and utils for the key store tests in the Ethereum JSON tests; +// testdataKeyStoreTests/basic_tests.json +type KeyStoreTestV3 struct { + Json encryptedKeyJSONV3 + Password 
string + Priv string +} + +type KeyStoreTestV1 struct { + Json encryptedKeyJSONV1 + Password string + Priv string +} + +func TestV3_PBKDF2_1(t *testing.T) { + t.Parallel() + tests := loadKeyStoreTestV3("testdata/v3_test_vector.json", t) + testDecryptV3(tests["wikipage_test_vector_pbkdf2"], t) +} + +var testsSubmodule = filepath.Join("..", "..", "tests", "testdata", "KeyStoreTests") + +func skipIfSubmoduleMissing(t *testing.T) { + if !common.FileExist(testsSubmodule) { + t.Skipf("can't find JSON tests from submodule at %s", testsSubmodule) + } +} + +func TestV3_PBKDF2_2(t *testing.T) { + skipIfSubmoduleMissing(t) + t.Parallel() + tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t) + testDecryptV3(tests["test1"], t) +} + +func TestV3_PBKDF2_3(t *testing.T) { + skipIfSubmoduleMissing(t) + t.Parallel() + tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t) + testDecryptV3(tests["python_generated_test_with_odd_iv"], t) +} + +func TestV3_PBKDF2_4(t *testing.T) { + skipIfSubmoduleMissing(t) + t.Parallel() + tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t) + testDecryptV3(tests["evilnonce"], t) +} + +func TestV3_Scrypt_1(t *testing.T) { + t.Parallel() + tests := loadKeyStoreTestV3("testdata/v3_test_vector.json", t) + testDecryptV3(tests["wikipage_test_vector_scrypt"], t) +} + +func TestV3_Scrypt_2(t *testing.T) { + skipIfSubmoduleMissing(t) + t.Parallel() + tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t) + testDecryptV3(tests["test2"], t) +} + +func TestV1_1(t *testing.T) { + t.Parallel() + tests := loadKeyStoreTestV1("testdata/v1_test_vector.json", t) + testDecryptV1(tests["test1"], t) +} + +func TestV1_2(t *testing.T) { + t.Parallel() + ks := &keyStorePassphrase{"testdata/v1", LightScryptN, LightScryptP, true} + addr := common.HexToAddress("cb61d5a9c4896fb9658090b597ef0e7be6f7b67e") + file := "testdata/v1/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e" + k, err := ks.GetKey(addr, file, "g") + if err != nil { + t.Fatal(err) + } + privHex := hex.EncodeToString(crypto.FromECDSA(k.PrivateKey)) + expectedHex := "d1b1178d3529626a1a93e073f65028370d14c7eb0936eb42abef05db6f37ad7d" + if privHex != expectedHex { + t.Fatal(fmt.Errorf("Unexpected privkey: %v, expected %v", privHex, expectedHex)) + } +} + +func testDecryptV3(test KeyStoreTestV3, t *testing.T) { + privBytes, _, err := decryptKeyV3(&test.Json, test.Password) + if err != nil { + t.Fatal(err) + } + privHex := hex.EncodeToString(privBytes) + if test.Priv != privHex { + t.Fatal(fmt.Errorf("Decrypted bytes not equal to test, expected %v have %v", test.Priv, privHex)) + } +} + +func testDecryptV1(test KeyStoreTestV1, t *testing.T) { + privBytes, _, err := decryptKeyV1(&test.Json, test.Password) + if err != nil { + t.Fatal(err) + } + privHex := hex.EncodeToString(privBytes) + if test.Priv != privHex { + t.Fatal(fmt.Errorf("Decrypted bytes not equal to test, expected %v have %v", test.Priv, privHex)) + } +} + +func loadKeyStoreTestV3(file string, t *testing.T) map[string]KeyStoreTestV3 { + tests := make(map[string]KeyStoreTestV3) + err := common.LoadJSON(file, &tests) + if err != nil { + t.Fatal(err) + } + return tests +} + +func loadKeyStoreTestV1(file string, t *testing.T) map[string]KeyStoreTestV1 { + tests := make(map[string]KeyStoreTestV1) + err := common.LoadJSON(file, &tests) + if err != nil { + t.Fatal(err) + } + return tests +} + +func TestKeyForDirectICAP(t *testing.T) { + 
t.Parallel() + key := NewKeyForDirectICAP(rand.Reader) + if !strings.HasPrefix(key.Address.Hex(), "0x00") { + t.Errorf("Expected first address byte to be zero, have: %s", key.Address.Hex()) + } +} + +func TestV3_31_Byte_Key(t *testing.T) { + t.Parallel() + tests := loadKeyStoreTestV3("testdata/v3_test_vector.json", t) + testDecryptV3(tests["31_byte_key"], t) +} + +func TestV3_30_Byte_Key(t *testing.T) { + t.Parallel() + tests := loadKeyStoreTestV3("testdata/v3_test_vector.json", t) + testDecryptV3(tests["30_byte_key"], t) +} diff --git a/src/accounts/keystore/presale.go b/src/accounts/keystore/presale.go new file mode 100644 index 00000000..0664dc2c --- /dev/null +++ b/src/accounts/keystore/presale.go @@ -0,0 +1,150 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package keystore + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/crypto" + "github.com/google/uuid" + "golang.org/x/crypto/pbkdf2" +) + +// creates a Key and stores that in the given KeyStore by decrypting a presale key JSON +func importPreSaleKey(keyStore keyStore, keyJSON []byte, password string) (accounts.Account, *Key, error) { + key, err := decryptPreSaleKey(keyJSON, password) + if err != nil { + return accounts.Account{}, nil, err + } + key.Id, err = uuid.NewRandom() + if err != nil { + return accounts.Account{}, nil, err + } + a := accounts.Account{ + Address: key.Address, + URL: accounts.URL{ + Scheme: KeyStoreScheme, + Path: keyStore.JoinPath(keyFileName(key.Address)), + }, + } + err = keyStore.StoreKey(a.URL.Path, key, password) + return a, key, err +} + +func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error) { + preSaleKeyStruct := struct { + EncSeed string + EthAddr string + Email string + BtcAddr string + }{} + err = json.Unmarshal(fileContent, &preSaleKeyStruct) + if err != nil { + return nil, err + } + encSeedBytes, err := hex.DecodeString(preSaleKeyStruct.EncSeed) + if err != nil { + return nil, errors.New("invalid hex in encSeed") + } + if len(encSeedBytes) < 16 { + return nil, errors.New("invalid encSeed, too short") + } + iv := encSeedBytes[:16] + cipherText := encSeedBytes[16:] + /* + See https://github.com/ethereum/pyethsaletool + + pyethsaletool generates the encryption key from password by + 2000 rounds of PBKDF2 with HMAC-SHA-256 using password as salt (:(). 
+ 16 byte key length within PBKDF2 and resulting key is used as AES key + */ + passBytes := []byte(password) + derivedKey := pbkdf2.Key(passBytes, passBytes, 2000, 16, sha256.New) + plainText, err := aesCBCDecrypt(derivedKey, cipherText, iv) + if err != nil { + return nil, err + } + ethPriv := crypto.Keccak256(plainText) + ecKey := crypto.ToECDSAUnsafe(ethPriv) + + key = &Key{ + Id: uuid.UUID{}, + Address: crypto.PubkeyToAddress(ecKey.PublicKey), + PrivateKey: ecKey, + } + derivedAddr := hex.EncodeToString(key.Address.Bytes()) // needed because .Hex() gives leading "0x" + expectedAddr := preSaleKeyStruct.EthAddr + if derivedAddr != expectedAddr { + err = fmt.Errorf("decrypted addr '%s' not equal to expected addr '%s'", derivedAddr, expectedAddr) + } + return key, err +} + +func aesCTRXOR(key, inText, iv []byte) ([]byte, error) { + // AES-128 is selected due to size of encryptKey. + aesBlock, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + stream := cipher.NewCTR(aesBlock, iv) + outText := make([]byte, len(inText)) + stream.XORKeyStream(outText, inText) + return outText, err +} + +func aesCBCDecrypt(key, cipherText, iv []byte) ([]byte, error) { + aesBlock, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + decrypter := cipher.NewCBCDecrypter(aesBlock, iv) + paddedPlaintext := make([]byte, len(cipherText)) + decrypter.CryptBlocks(paddedPlaintext, cipherText) + plaintext := pkcs7Unpad(paddedPlaintext) + if plaintext == nil { + return nil, ErrDecrypt + } + return plaintext, err +} + +// From https://leanpub.com/gocrypto/read#leanpub-auto-block-cipher-modes +func pkcs7Unpad(in []byte) []byte { + if len(in) == 0 { + return nil + } + + padding := in[len(in)-1] + if int(padding) > len(in) || padding > aes.BlockSize { + return nil + } else if padding == 0 { + return nil + } + + for i := len(in) - 1; i > len(in)-int(padding)-1; i-- { + if in[i] != padding { + return nil + } + } + return in[:len(in)-int(padding)] +} diff --git a/src/accounts/keystore/testdata/dupes/1 b/src/accounts/keystore/testdata/dupes/1 new file mode 100644 index 00000000..a3868ec6 --- /dev/null +++ b/src/accounts/keystore/testdata/dupes/1 @@ -0,0 +1 @@ +{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3} \ No newline at end of file diff --git a/src/accounts/keystore/testdata/dupes/2 b/src/accounts/keystore/testdata/dupes/2 new file mode 100644 index 00000000..a3868ec6 --- /dev/null +++ b/src/accounts/keystore/testdata/dupes/2 @@ -0,0 +1 @@ +{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3} \ No newline at end of file diff --git a/src/accounts/keystore/testdata/dupes/foo b/src/accounts/keystore/testdata/dupes/foo new file mode 100644 
index 00000000..c57060ae --- /dev/null +++ b/src/accounts/keystore/testdata/dupes/foo @@ -0,0 +1 @@ +{"address":"7ef5a6135f1fd6a02593eedc869c6d41d934aef8","crypto":{"cipher":"aes-128-ctr","ciphertext":"1d0839166e7a15b9c1333fc865d69858b22df26815ccf601b28219b6192974e1","cipherparams":{"iv":"8df6caa7ff1b00c4e871f002cb7921ed"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"e5e6ef3f4ea695f496b643ebd3f75c0aa58ef4070e90c80c5d3fb0241bf1595c"},"mac":"6d16dfde774845e4585357f24bce530528bc69f4f84e1e22880d34fa45c273e5"},"id":"950077c7-71e3-4c44-a4a1-143919141ed4","version":3} \ No newline at end of file diff --git a/src/accounts/keystore/testdata/keystore/.hiddenfile b/src/accounts/keystore/testdata/keystore/.hiddenfile new file mode 100644 index 00000000..d91faccd --- /dev/null +++ b/src/accounts/keystore/testdata/keystore/.hiddenfile @@ -0,0 +1 @@ +{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3} diff --git a/src/accounts/keystore/testdata/keystore/README b/src/accounts/keystore/testdata/keystore/README new file mode 100644 index 00000000..6af9ac3f --- /dev/null +++ b/src/accounts/keystore/testdata/keystore/README @@ -0,0 +1,21 @@ +This directory contains accounts for testing. +The password that unlocks them is "foobar". + +The "good" key files which are supposed to be loadable are: + +- File: UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8 + Address: 0x7ef5a6135f1fd6a02593eedc869c6d41d934aef8 +- File: aaa + Address: 0xf466859ead1932d743d622cb74fc058882e8648a +- File: zzz + Address: 0x289d485d9771714cce91d3393d764e1311907acc + +The other files (including this README) are broken in various ways +and should not be picked up by package accounts: + +- File: no-address (missing address field, otherwise same as "aaa") +- File: garbage (file with random data) +- File: empty (file with no content) +- File: swapfile~ (should be skipped) +- File: .hiddenfile (should be skipped) +- File: foo/... 
(should be skipped because it is a directory) diff --git a/src/accounts/keystore/testdata/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8 b/src/accounts/keystore/testdata/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8 new file mode 100644 index 00000000..c57060ae --- /dev/null +++ b/src/accounts/keystore/testdata/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8 @@ -0,0 +1 @@ +{"address":"7ef5a6135f1fd6a02593eedc869c6d41d934aef8","crypto":{"cipher":"aes-128-ctr","ciphertext":"1d0839166e7a15b9c1333fc865d69858b22df26815ccf601b28219b6192974e1","cipherparams":{"iv":"8df6caa7ff1b00c4e871f002cb7921ed"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"e5e6ef3f4ea695f496b643ebd3f75c0aa58ef4070e90c80c5d3fb0241bf1595c"},"mac":"6d16dfde774845e4585357f24bce530528bc69f4f84e1e22880d34fa45c273e5"},"id":"950077c7-71e3-4c44-a4a1-143919141ed4","version":3} \ No newline at end of file diff --git a/src/accounts/keystore/testdata/keystore/aaa b/src/accounts/keystore/testdata/keystore/aaa new file mode 100644 index 00000000..a3868ec6 --- /dev/null +++ b/src/accounts/keystore/testdata/keystore/aaa @@ -0,0 +1 @@ +{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3} \ No newline at end of file diff --git a/src/accounts/keystore/testdata/keystore/empty b/src/accounts/keystore/testdata/keystore/empty new file mode 100644 index 00000000..e69de29b diff --git a/src/accounts/keystore/testdata/keystore/foo/fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e b/src/accounts/keystore/testdata/keystore/foo/fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e new file mode 100644 index 00000000..309841e5 --- /dev/null +++ b/src/accounts/keystore/testdata/keystore/foo/fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e @@ -0,0 +1 @@ +{"address":"fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e","crypto":{"cipher":"aes-128-ctr","ciphertext":"8124d5134aa4a927c79fd852989e4b5419397566f04b0936a1eb1d168c7c68a5","cipherparams":{"iv":"e2febe17176414dd2cda28287947eb2f"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":4096,"p":6,"r":8,"salt":"44b415ede89f3bdd6830390a21b78965f571b347a589d1d943029f016c5e8bd5"},"mac":"5e149ff25bfd9dd45746a84bb2bcd2f015f2cbca2b6d25c5de8c29617f71fe5b"},"id":"d6ac5452-2b2c-4d3c-ad80-4bf0327d971c","version":3} \ No newline at end of file diff --git a/src/accounts/keystore/testdata/keystore/garbage b/src/accounts/keystore/testdata/keystore/garbage new file mode 100644 index 0000000000000000000000000000000000000000..ff45091e714078dd7d3b4ea95964452e33a895f7 GIT binary patch literal 300 zcmV+{0n`3r1xkOa0KiH=0-y31ays31&4D+~b{#6-MH z)8?iosg+26q81!5ujp29iM}4_d}^;*-$8$htAbEpk(KDl*$;NvD$v8GZL@TRuT#)+ zq*|PXNljY5_xwCfoMayTjJ(vY;=t!uVJT5-Fn0O7W{#e;Ho?+NsQQi=!GV>j#9U#& zAbp7L1M-8N-V+7}EDxG9CNuhKbj?($B?=E1a1Xi%v;bYvR+C$EjApbg!W^>zB$Cd( z+NKd!El}@p)NJLnQ}B=D#e5uCh87_~lKd2z=idP7$. 
+ +package keystore + +import ( + "math/big" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" +) + +// keystoreWallet implements the accounts.Wallet interface for the original +// keystore. +type keystoreWallet struct { + account accounts.Account // Single account contained in this wallet + keystore *KeyStore // Keystore where the account originates from +} + +// URL implements accounts.Wallet, returning the URL of the account within. +func (w *keystoreWallet) URL() accounts.URL { + return w.account.URL +} + +// Status implements accounts.Wallet, returning whether the account held by the +// keystore wallet is unlocked or not. +func (w *keystoreWallet) Status() (string, error) { + w.keystore.mu.RLock() + defer w.keystore.mu.RUnlock() + + if _, ok := w.keystore.unlocked[w.account.Address]; ok { + return "Unlocked", nil + } + return "Locked", nil +} + +// Open implements accounts.Wallet, but is a noop for plain wallets since there +// is no connection or decryption step necessary to access the list of accounts. +func (w *keystoreWallet) Open(passphrase string) error { return nil } + +// Close implements accounts.Wallet, but is a noop for plain wallets since there +// is no meaningful open operation. +func (w *keystoreWallet) Close() error { return nil } + +// Accounts implements accounts.Wallet, returning an account list consisting of +// a single account that the plain keystore wallet contains. +func (w *keystoreWallet) Accounts() []accounts.Account { + return []accounts.Account{w.account} +} + +// Contains implements accounts.Wallet, returning whether a particular account is +// or is not wrapped by this wallet instance. +func (w *keystoreWallet) Contains(account accounts.Account) bool { + return account.Address == w.account.Address && (account.URL == (accounts.URL{}) || account.URL == w.account.URL) +} + +// Derive implements accounts.Wallet, but is a noop for plain wallets since there +// is no notion of hierarchical account derivation for plain keystore accounts. +func (w *keystoreWallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Account, error) { + return accounts.Account{}, accounts.ErrNotSupported +} + +// SelfDerive implements accounts.Wallet, but is a noop for plain wallets since +// there is no notion of hierarchical account derivation for plain keystore accounts. +func (w *keystoreWallet) SelfDerive(bases []accounts.DerivationPath, chain ethereum.ChainStateReader) { +} + +// signHash attempts to sign the given hash with +// the given account. If the wallet does not wrap this particular account, an +// error is returned to avoid account leakage (even though in theory we may be +// able to sign via our shared keystore backend). +func (w *keystoreWallet) signHash(account accounts.Account, hash []byte) ([]byte, error) { + // Make sure the requested account is contained within + if !w.Contains(account) { + return nil, accounts.ErrUnknownAccount + } + // Account seems valid, request the keystore to sign + return w.keystore.SignHash(account, hash) +} + +// SignData signs keccak256(data). The mimetype parameter describes the type of data being signed. +func (w *keystoreWallet) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) { + return w.signHash(account, crypto.Keccak256(data)) +} + +// SignDataWithPassphrase signs keccak256(data). The mimetype parameter describes the type of data being signed. 
+func (w *keystoreWallet) SignDataWithPassphrase(account accounts.Account, passphrase, mimeType string, data []byte) ([]byte, error) { + // Make sure the requested account is contained within + if !w.Contains(account) { + return nil, accounts.ErrUnknownAccount + } + // Account seems valid, request the keystore to sign + return w.keystore.SignHashWithPassphrase(account, passphrase, crypto.Keccak256(data)) +} + +// SignText implements accounts.Wallet, attempting to sign the hash of +// the given text with the given account. +func (w *keystoreWallet) SignText(account accounts.Account, text []byte) ([]byte, error) { + return w.signHash(account, accounts.TextHash(text)) +} + +// SignTextWithPassphrase implements accounts.Wallet, attempting to sign the +// hash of the given text with the given account using passphrase as extra authentication. +func (w *keystoreWallet) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) { + // Make sure the requested account is contained within + if !w.Contains(account) { + return nil, accounts.ErrUnknownAccount + } + // Account seems valid, request the keystore to sign + return w.keystore.SignHashWithPassphrase(account, passphrase, accounts.TextHash(text)) +} + +// SignTx implements accounts.Wallet, attempting to sign the given transaction +// with the given account. If the wallet does not wrap this particular account, +// an error is returned to avoid account leakage (even though in theory we may +// be able to sign via our shared keystore backend). +func (w *keystoreWallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) { + // Make sure the requested account is contained within + if !w.Contains(account) { + return nil, accounts.ErrUnknownAccount + } + // Account seems valid, request the keystore to sign + return w.keystore.SignTx(account, tx, chainID) +} + +// SignTxWithPassphrase implements accounts.Wallet, attempting to sign the given +// transaction with the given account using passphrase as extra authentication. +func (w *keystoreWallet) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) { + // Make sure the requested account is contained within + if !w.Contains(account) { + return nil, accounts.ErrUnknownAccount + } + // Account seems valid, request the keystore to sign + return w.keystore.SignTxWithPassphrase(account, passphrase, tx, chainID) +} diff --git a/src/accounts/keystore/watch.go b/src/accounts/keystore/watch.go new file mode 100644 index 00000000..d6ef5332 --- /dev/null +++ b/src/accounts/keystore/watch.go @@ -0,0 +1,108 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +// +build darwin,!ios,cgo freebsd linux,!arm64 netbsd solaris + +package keystore + +import ( + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/rjeczalik/notify" +) + +type watcher struct { + ac *accountCache + starting bool + running bool + ev chan notify.EventInfo + quit chan struct{} +} + +func newWatcher(ac *accountCache) *watcher { + return &watcher{ + ac: ac, + ev: make(chan notify.EventInfo, 10), + quit: make(chan struct{}), + } +} + +// starts the watcher loop in the background. +// Start a watcher in the background if that's not already in progress. +// The caller must hold w.ac.mu. +func (w *watcher) start() { + if w.starting || w.running { + return + } + w.starting = true + go w.loop() +} + +func (w *watcher) close() { + close(w.quit) +} + +func (w *watcher) loop() { + defer func() { + w.ac.mu.Lock() + w.running = false + w.starting = false + w.ac.mu.Unlock() + }() + logger := log.New("path", w.ac.keydir) + + if err := notify.Watch(w.ac.keydir, w.ev, notify.All); err != nil { + logger.Trace("Failed to watch keystore folder", "err", err) + return + } + defer notify.Stop(w.ev) + logger.Trace("Started watching keystore folder") + defer logger.Trace("Stopped watching keystore folder") + + w.ac.mu.Lock() + w.running = true + w.ac.mu.Unlock() + + // Wait for file system events and reload. + // When an event occurs, the reload call is delayed a bit so that + // multiple events arriving quickly only cause a single reload. + var ( + debounceDuration = 500 * time.Millisecond + rescanTriggered = false + debounce = time.NewTimer(0) + ) + // Ignore initial trigger + if !debounce.Stop() { + <-debounce.C + } + defer debounce.Stop() + for { + select { + case <-w.quit: + return + case <-w.ev: + // Trigger the scan (with delay), if not already triggered + if !rescanTriggered { + debounce.Reset(debounceDuration) + rescanTriggered = true + } + case <-debounce.C: + w.ac.scanAccounts() + rescanTriggered = false + } + } +} diff --git a/src/accounts/keystore/watch_fallback.go b/src/accounts/keystore/watch_fallback.go new file mode 100644 index 00000000..de0e87f8 --- /dev/null +++ b/src/accounts/keystore/watch_fallback.go @@ -0,0 +1,28 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// +build darwin,!cgo ios linux,arm64 windows !darwin,!freebsd,!linux,!netbsd,!solaris + +// This is the fallback implementation of directory watching. +// It is used on unsupported platforms. 
+ +package keystore + +type watcher struct{ running bool } + +func newWatcher(*accountCache) *watcher { return new(watcher) } +func (*watcher) start() {} +func (*watcher) close() {} diff --git a/src/build/run-peeps.sh b/src/build/run-peeps.sh index 527f64bc..fcdbe1ce 100755 --- a/src/build/run-peeps.sh +++ b/src/build/run-peeps.sh @@ -9,5 +9,5 @@ else cd PEEPS fi -./gradlew --no-daemon --parallel endToEndTest +./gradlew --no-daemon endToEndTest diff --git a/src/cmd/geth/accountcmd.go b/src/cmd/geth/accountcmd.go new file mode 100644 index 00000000..6473a827 --- /dev/null +++ b/src/cmd/geth/accountcmd.go @@ -0,0 +1,355 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "fmt" + "io/ioutil" + + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "gopkg.in/urfave/cli.v1" +) + +var ( + walletCommand = cli.Command{ + Name: "wallet", + Usage: "Manage Ethereum presale wallets", + ArgsUsage: "", + Category: "ACCOUNT COMMANDS", + Description: ` + geth wallet import /path/to/my/presale.wallet + +will prompt for your password and imports your ether presale account. +It can be used non-interactively with the --password option taking a +passwordfile as argument containing the wallet password in plaintext.`, + Subcommands: []cli.Command{ + { + + Name: "import", + Usage: "Import Ethereum presale wallet", + ArgsUsage: "", + Action: utils.MigrateFlags(importWallet), + Category: "ACCOUNT COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.KeyStoreDirFlag, + utils.PasswordFileFlag, + utils.LightKDFFlag, + }, + Description: ` + geth wallet [options] /path/to/my/presale.wallet + +will prompt for your password and imports your ether presale account. +It can be used non-interactively with the --password option taking a +passwordfile as argument containing the wallet password in plaintext.`, + }, + }, + } + + accountCommand = cli.Command{ + Name: "account", + Usage: "Manage accounts", + Category: "ACCOUNT COMMANDS", + Description: ` + +Manage accounts, list all existing accounts, import a private key into a new +account, create a new account or update an existing account. + +It supports interactive mode, when you are prompted for password as well as +non-interactive mode where passwords are supplied via a given password file. +Non-interactive mode is only meant for scripted use on test networks or known +safe environments. + +Make sure you remember the password you gave when creating a new account (with +either new or import). Without it you are not able to unlock your account. + +Note that exporting your key in unencrypted format is NOT supported. + +Keys are stored under /keystore. 
+It is safe to transfer the entire directory or the individual keys therein +between ethereum nodes by simply copying. + +Make sure you backup your keys regularly.`, + Subcommands: []cli.Command{ + { + Name: "list", + Usage: "Print summary of existing accounts", + Action: utils.MigrateFlags(accountList), + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.KeyStoreDirFlag, + }, + Description: ` +Print a short summary of all accounts`, + }, + { + Name: "new", + Usage: "Create a new account", + Action: utils.MigrateFlags(accountCreate), + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.KeyStoreDirFlag, + utils.PasswordFileFlag, + utils.LightKDFFlag, + }, + Description: ` + geth account new + +Creates a new account and prints the address. + +The account is saved in encrypted format, you are prompted for a password. + +You must remember this password to unlock your account in the future. + +For non-interactive use the password can be specified with the --password flag: + +Note, this is meant to be used for testing only, it is a bad idea to save your +password to file or expose in any other way. +`, + }, + { + Name: "update", + Usage: "Update an existing account", + Action: utils.MigrateFlags(accountUpdate), + ArgsUsage: "
", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.KeyStoreDirFlag, + utils.LightKDFFlag, + }, + Description: ` + geth account update
+ +Update an existing account. + +The account is saved in the newest version in encrypted format, you are prompted +for a password to unlock the account and another to save the updated file. + +This same command can therefore be used to migrate an account of a deprecated +format to the newest format or change the password for an account. + +For non-interactive use the password can be specified with the --password flag: + + geth account update [options] <address>
+ +Since only one password can be given, only format update can be performed, +changing your password is only possible interactively. +`, + }, + { + Name: "import", + Usage: "Import a private key into a new account", + Action: utils.MigrateFlags(accountImport), + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.KeyStoreDirFlag, + utils.PasswordFileFlag, + utils.LightKDFFlag, + }, + ArgsUsage: "", + Description: ` + geth account import + +Imports an unencrypted private key from and creates a new account. +Prints the address. + +The keyfile is assumed to contain an unencrypted private key in hexadecimal format. + +The account is saved in encrypted format, you are prompted for a password. + +You must remember this password to unlock your account in the future. + +For non-interactive use the password can be specified with the -password flag: + + geth account import [options] + +Note: +As you can directly copy your encrypted accounts to another ethereum instance, +this import mechanism is not needed when you transfer an account between +nodes. +`, + }, + quorumAccountPluginCommands, + }, + } +) + +func accountList(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + var index int + for _, wallet := range stack.AccountManager().Wallets() { + for _, account := range wallet.Accounts() { + fmt.Printf("Account #%d: {%x} %s\n", index, account.Address, &account.URL) + index++ + } + } + return nil +} + +// tries unlocking the specified account a few times. +func unlockAccount(ks *keystore.KeyStore, address string, i int, passwords []string) (accounts.Account, string) { + account, err := utils.MakeAddress(ks, address) + if err != nil { + utils.Fatalf("Could not list accounts: %v", err) + } + for trials := 0; trials < 3; trials++ { + prompt := fmt.Sprintf("Unlocking account %s | Attempt %d/%d", address, trials+1, 3) + password := utils.GetPassPhraseWithList(prompt, false, i, passwords) + err = ks.Unlock(account, password) + if err == nil { + log.Info("Unlocked account", "address", account.Address.Hex()) + return account, password + } + if err, ok := err.(*keystore.AmbiguousAddrError); ok { + log.Info("Unlocked account", "address", account.Address.Hex()) + return ambiguousAddrRecovery(ks, err, password), password + } + if err != keystore.ErrDecrypt { + // No need to prompt again if the error is not decryption-related. + break + } + } + // All trials expended to unlock account, bail out + utils.Fatalf("Failed to unlock account %s (%v)", address, err) + + return accounts.Account{}, "" +} + +func ambiguousAddrRecovery(ks *keystore.KeyStore, err *keystore.AmbiguousAddrError, auth string) accounts.Account { + fmt.Printf("Multiple key files exist for address %x:\n", err.Addr) + for _, a := range err.Matches { + fmt.Println(" ", a.URL) + } + fmt.Println("Testing your password against all of them...") + var match *accounts.Account + for _, a := range err.Matches { + if err := ks.Unlock(a, auth); err == nil { + match = &a + break + } + } + if match == nil { + utils.Fatalf("None of the listed files could be unlocked.") + } + fmt.Printf("Your password unlocked %s\n", match.URL) + fmt.Println("In order to avoid this warning, you need to remove the following duplicate key files:") + for _, a := range err.Matches { + if a != *match { + fmt.Println(" ", a.URL) + } + } + return *match +} + +// accountCreate creates a new account into the keystore defined by the CLI flags. +func accountCreate(ctx *cli.Context) error { + cfg := gethConfig{Node: defaultNodeConfig()} + // Load config file. 
+ if file := ctx.GlobalString(configFileFlag.Name); file != "" { + if err := loadConfig(file, &cfg); err != nil { + utils.Fatalf("%v", err) + } + } + utils.SetNodeConfig(ctx, &cfg.Node) + scryptN, scryptP, keydir, err := cfg.Node.AccountConfig() + + if err != nil { + utils.Fatalf("Failed to read configuration: %v", err) + } + + password := utils.GetPassPhraseWithList("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx)) + + account, err := keystore.StoreKey(keydir, password, scryptN, scryptP) + + if err != nil { + utils.Fatalf("Failed to create account: %v", err) + } + fmt.Printf("\nYour new key was generated\n\n") + fmt.Printf("Public address of the key: %s\n", account.Address.Hex()) + fmt.Printf("Path of the secret key file: %s\n\n", account.URL.Path) + fmt.Printf("- You can share your public address with anyone. Others need it to interact with you.\n") + fmt.Printf("- You must NEVER share the secret key with anyone! The key controls access to your funds!\n") + fmt.Printf("- You must BACKUP your key file! Without the key, it's impossible to access account funds!\n") + fmt.Printf("- You must REMEMBER your password! Without the password, it's impossible to decrypt the key!\n\n") + return nil +} + +// accountUpdate transitions an account from a previous format to the current +// one, also providing the possibility to change the pass-phrase. +func accountUpdate(ctx *cli.Context) error { + if len(ctx.Args()) == 0 { + utils.Fatalf("No accounts specified to update") + } + stack, _ := makeConfigNode(ctx) + ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) + + for _, addr := range ctx.Args() { + account, oldPassword := unlockAccount(ks, addr, 0, nil) + newPassword := utils.GetPassPhraseWithList("Please give a new password. Do not forget this password.", true, 0, nil) + if err := ks.Update(account, oldPassword, newPassword); err != nil { + utils.Fatalf("Could not update the account: %v", err) + } + } + return nil +} + +func importWallet(ctx *cli.Context) error { + keyfile := ctx.Args().First() + if len(keyfile) == 0 { + utils.Fatalf("keyfile must be given as argument") + } + keyJSON, err := ioutil.ReadFile(keyfile) + if err != nil { + utils.Fatalf("Could not read wallet file: %v", err) + } + + stack, _ := makeConfigNode(ctx) + passphrase := utils.GetPassPhraseWithList("", false, 0, utils.MakePasswordList(ctx)) + + ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) + acct, err := ks.ImportPreSaleKey(keyJSON, passphrase) + if err != nil { + utils.Fatalf("%v", err) + } + fmt.Printf("Address: {%x}\n", acct.Address) + return nil +} + +func accountImport(ctx *cli.Context) error { + keyfile := ctx.Args().First() + if len(keyfile) == 0 { + utils.Fatalf("keyfile must be given as argument") + } + key, err := crypto.LoadECDSA(keyfile) + if err != nil { + utils.Fatalf("Failed to load the private key: %v", err) + } + stack, _ := makeConfigNode(ctx) + passphrase := utils.GetPassPhraseWithList("Your new account is locked with a password. Please give a password. 
Do not forget this password.", true, 0, utils.MakePasswordList(ctx)) + + ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) + acct, err := ks.ImportECDSA(key, passphrase) + if err != nil { + utils.Fatalf("Could not create the account: %v", err) + } + fmt.Printf("Address: {%x}\n", acct.Address) + return nil +} diff --git a/src/cmd/geth/accountcmd_plugin.go b/src/cmd/geth/accountcmd_plugin.go new file mode 100644 index 00000000..2ad51b69 --- /dev/null +++ b/src/cmd/geth/accountcmd_plugin.go @@ -0,0 +1,323 @@ +package main + +import ( + "encoding/hex" + "encoding/json" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/accounts/pluggable" + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/plugin" + "gopkg.in/urfave/cli.v1" +) + +var ( + quorumAccountPluginCommands = cli.Command{ + Name: "plugin", + Usage: "Manage 'account' plugin accounts", + Description: ` + geth account plugin + + Quorum supports alternate account management methods through the use of 'account' plugins. + + See docs.goquorum.com for more info. + `, + Subcommands: []cli.Command{ + { + Name: "list", + Usage: "Print summary of existing 'account' plugin accounts", + Action: utils.MigrateFlags(listPluginAccountsCLIAction), + Flags: []cli.Flag{ + utils.PluginSettingsFlag, // flag is used implicitly by makeConfigNode() + utils.PluginLocalVerifyFlag, + utils.PluginPublicKeyFlag, + utils.PluginSkipVerifyFlag, + }, + Description: ` + geth account plugin list +Print a short summary of all accounts for the given plugin settings`, + }, + { + Name: "new", + Usage: "Create a new account using an 'account' plugin", + Action: utils.MigrateFlags(createPluginAccountCLIAction), + Flags: []cli.Flag{ + utils.PluginSettingsFlag, + utils.PluginLocalVerifyFlag, + utils.PluginPublicKeyFlag, + utils.PluginSkipVerifyFlag, + utils.AccountPluginNewAccountConfigFlag, + }, + Description: fmt.Sprintf(` + geth account plugin new + +Creates a new account using an 'account' plugin and prints the address. + +--%v and --%v flags are required. + +Each 'account' plugin will have different requirements for the value of --%v. +For more info see the documentation for the particular 'account' plugin being used. +`, utils.PluginSettingsFlag.Name, utils.AccountPluginNewAccountConfigFlag.Name, utils.AccountPluginNewAccountConfigFlag.Name), + }, + { + Name: "import", + Usage: "Import a private key into a new account using an 'account' plugin", + Action: utils.MigrateFlags(importPluginAccountCLIAction), + Flags: []cli.Flag{ + utils.PluginSettingsFlag, + utils.PluginLocalVerifyFlag, + utils.PluginPublicKeyFlag, + utils.PluginSkipVerifyFlag, + utils.AccountPluginNewAccountConfigFlag, + }, + ArgsUsage: "", + Description: ` + geth account plugin import + +Imports an unencrypted private key from and creates a new account using an 'account' plugin. +Prints the address. + +The keyfile must contain an unencrypted private key in hexadecimal format. + +--%v and --%v flags are required. + +Note: +Before using this import mechanism to transfer accounts that are already 'account' plugin-managed between nodes, consult +the documentation for the particular 'account' plugin being used as it may support alternate methods for transferring. 
+`, + }, + }, + } + + // supportedPlugins is the list of supported plugins for the account subcommand + supportedPlugins = []plugin.PluginInterfaceName{plugin.AccountPluginInterfaceName} + + invalidPluginFlagsErr = fmt.Errorf("--%v and --%v flags must be set", utils.PluginSettingsFlag.Name, utils.AccountPluginNewAccountConfigFlag.Name) + + // makeConfigNodeDelegate is a wrapper for the makeConfigNode function. + // It can be replaced with a stub for testing. + makeConfigNodeDelegate configNodeMaker = standardConfigNodeMaker{} +) + +func listPluginAccountsCLIAction(ctx *cli.Context) error { + accts, err := listPluginAccounts(ctx) + if err != nil { + utils.Fatalf("%v", err) + } + + var index int + for _, acct := range accts { + fmt.Printf("Account #%d: {%x} %s\n", index, acct.Address, &acct.URL) + index++ + } + + return nil +} + +func listPluginAccounts(ctx *cli.Context) ([]accounts.Account, error) { + if !ctx.IsSet(utils.PluginSettingsFlag.Name) { + return []accounts.Account{}, fmt.Errorf("--%v required", utils.PluginSettingsFlag.Name) + } + + p, err := setupAccountPluginForCLI(ctx) + if err != nil { + return []accounts.Account{}, err + } + defer func() { + if err := p.teardown(); err != nil { + log.Error("error tearing down account plugin", "err", err) + } + }() + + return p.accounts(), nil +} + +func createPluginAccountCLIAction(ctx *cli.Context) error { + account, err := createPluginAccount(ctx) + if err != nil { + utils.Fatalf("unable to create plugin-backed account: %v", err) + } + writePluginAccountToStdOut(account) + return nil +} + +func createPluginAccount(ctx *cli.Context) (accounts.Account, error) { + if !ctx.IsSet(utils.PluginSettingsFlag.Name) || !ctx.IsSet(utils.AccountPluginNewAccountConfigFlag.Name) { + return accounts.Account{}, invalidPluginFlagsErr + } + + newAcctCfg, err := getNewAccountConfigFromCLI(ctx) + if err != nil { + return accounts.Account{}, err + } + + p, err := setupAccountPluginForCLI(ctx) + if err != nil { + return accounts.Account{}, err + } + defer func() { + if err := p.teardown(); err != nil { + log.Error("error tearing down account plugin", "err", err) + } + }() + + return p.NewAccount(newAcctCfg) +} + +func importPluginAccountCLIAction(ctx *cli.Context) error { + account, err := importPluginAccount(ctx) + if err != nil { + utils.Fatalf("unable to import key and create plugin-backed account: %v", err) + } + writePluginAccountToStdOut(account) + return nil +} + +func importPluginAccount(ctx *cli.Context) (accounts.Account, error) { + keyfile := ctx.Args().First() + if len(keyfile) == 0 { + return accounts.Account{}, errors.New("keyfile must be given as argument") + } + key, err := crypto.LoadECDSA(keyfile) + if err != nil { + return accounts.Account{}, fmt.Errorf("Failed to load the private key: %v", err) + } + keyBytes := crypto.FromECDSA(key) + keyHex := hex.EncodeToString(keyBytes) + + if !ctx.IsSet(utils.PluginSettingsFlag.Name) || !ctx.IsSet(utils.AccountPluginNewAccountConfigFlag.Name) { + return accounts.Account{}, invalidPluginFlagsErr + } + + newAcctCfg, err := getNewAccountConfigFromCLI(ctx) + if err != nil { + return accounts.Account{}, err + } + + p, err := setupAccountPluginForCLI(ctx) + if err != nil { + return accounts.Account{}, err + } + defer func() { + if err := p.teardown(); err != nil { + log.Error("error tearing down account plugin", "err", err) + } + }() + + return p.ImportRawKey(keyHex, newAcctCfg) +} + +func getNewAccountConfigFromCLI(ctx *cli.Context) (map[string]interface{}, error) { + data := 
ctx.String(utils.AccountPluginNewAccountConfigFlag.Name) + conf, err := plugin.ReadMultiFormatConfig(data) + if err != nil { + return nil, fmt.Errorf("invalid account creation config provided: %v", err) + } + // plugin backend expects config to be json map + confMap := new(map[string]interface{}) + if err := json.Unmarshal(conf, confMap); err != nil { + return nil, fmt.Errorf("invalid account creation config provided: %v", err) + } + return *confMap, nil +} + +type accountPlugin struct { + pluggable.AccountCreator + am *accounts.Manager + pm *plugin.PluginManager +} + +func (c *accountPlugin) teardown() error { + return c.pm.Stop() +} + +func (c *accountPlugin) accounts() []accounts.Account { + b := c.am.Backends(pluggable.BackendType) + if b == nil { + return []accounts.Account{} + } + + var accts []accounts.Account + for _, wallet := range b[0].Wallets() { + accts = append(accts, wallet.Accounts()...) + } + return accts +} + +// startPluginManagerForAccountCLI is a helper func for use with the account plugin CLI. +// It creates and starts a new PluginManager with the provided CLI flags. +// The caller should call teardown on the returned accountPlugin to stop the plugin after use. +// The returned accountPlugin provides several methods necessary for the account plugin CLI, abstracting the underlying plugin/account types. +// +// This func should not be used for anything other than the account CLI. +// The account plugin, if present, is registered with the existing pluggable.Backend in the stack's AccountManager. +// This allows the AccountManager to use the account plugin even though the PluginManager is not registered with the stack. +// Instead of registering a plugin manager with the stack this is manually creating a plugin manager. +// This means that the plugin manager can be started without having to start the whole stack (P2P client, IPC interface, ...). +// The purpose of this is to help prevent issues/conflicts if an existing node is already running on this host. 
+// +func setupAccountPluginForCLI(ctx *cli.Context) (*accountPlugin, error) { + stack, cfg := makeConfigNodeDelegate.makeConfigNode(ctx) + + if cfg.Node.Plugins == nil { + return nil, errors.New("no plugin config provided") + } + if err := cfg.Node.Plugins.CheckSettingsAreSupported(supportedPlugins); err != nil { + return nil, err + } + if err := cfg.Node.ResolvePluginBaseDir(); err != nil { + return nil, fmt.Errorf("unable to resolve plugin base dir due to %s", err) + } + + pm, err := plugin.NewPluginManager( + cfg.Node.UserIdent, + cfg.Node.Plugins, + ctx.Bool(utils.PluginSkipVerifyFlag.Name), + ctx.Bool(utils.PluginLocalVerifyFlag.Name), + ctx.String(utils.PluginPublicKeyFlag.Name), + ) + if err != nil { + return nil, fmt.Errorf("unable to create plugin manager: %v", err) + } + if err := pm.Start(); err != nil { + return nil, fmt.Errorf("unable to start plugin manager: %v", err) + } + + b := stack.AccountManager().Backends(pluggable.BackendType)[0].(*pluggable.Backend) + if err := pm.AddAccountPluginToBackend(b); err != nil { + return nil, fmt.Errorf("unable to load pluggable account backend: %v", err) + } + + return &accountPlugin{ + AccountCreator: b, + am: stack.AccountManager(), + pm: pm, + }, nil +} + +func writePluginAccountToStdOut(account accounts.Account) { + fmt.Printf("\nYour new plugin-backed account was generated\n\n") + fmt.Printf("Public address of the account: %s\n", account.Address.Hex()) + fmt.Printf("Account URL: %s\n\n", account.URL.Path) + fmt.Printf("- You can share your public address with anyone. Others need it to interact with you.\n") + fmt.Printf("- You must NEVER share the secret key with anyone! The key controls access to your funds!\n") + fmt.Printf("- Consider BACKING UP your account! The specifics of backing up will depend on the plugin backend being used.\n") + fmt.Printf("- The plugin backend may require you to REMEMBER part/all of the new account config to retrieve the key in the future!\n See the plugin specific documentation for more info.\n\n") + fmt.Printf("- See the documentation for the plugin being used for more info.\n\n") +} + +type configNodeMaker interface { + makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) +} + +// standardConfigNodeMaker is a wrapper around the makeConfigNode function to enable mocking in testing +type standardConfigNodeMaker struct{} + +func (f standardConfigNodeMaker) makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { + return makeConfigNode(ctx) +} diff --git a/src/cmd/geth/accountcmd_plugin_test.go b/src/cmd/geth/accountcmd_plugin_test.go new file mode 100644 index 00000000..6967be36 --- /dev/null +++ b/src/cmd/geth/accountcmd_plugin_test.go @@ -0,0 +1,299 @@ +package main + +import ( + "flag" + "io/ioutil" + "os" + "testing" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/plugin" + "github.com/stretchr/testify/require" + "gopkg.in/urfave/cli.v1" +) + +// newAccountPluginCLIContext creates a cli.Context setup with the core account plugin CLI flags. +// args sets the values of the flags. 
+func newAccountPluginCLIContext(args []string) *cli.Context { + fs := &flag.FlagSet{} + fs.String(utils.PluginSettingsFlag.Name, "", "") + fs.String(utils.AccountPluginNewAccountConfigFlag.Name, "", "") + _ = fs.Parse(args) + + return cli.NewContext(nil, fs, nil) +} + +type mockConfigNodeMaker struct { + do func(ctx *cli.Context) (*node.Node, gethConfig) +} + +func (m *mockConfigNodeMaker) makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { + return m.do(ctx) +} + +func TestListPluginAccounts_ErrIfCLIFlagNotSet(t *testing.T) { + var args []string + ctx := newAccountPluginCLIContext(args) + + _, err := listPluginAccounts(ctx) + require.EqualError(t, err, "--plugins required") +} + +func TestListPluginAccounts_ErrIfUnsupportedPluginInConfig(t *testing.T) { + var unsupportedPlugin plugin.PluginInterfaceName = "somename" + pluginSettings := plugin.Settings{ + Providers: map[plugin.PluginInterfaceName]plugin.PluginDefinition{ + unsupportedPlugin: {}, + }, + } + + args := []string{ + "--plugins", "/path/to/config.json", + } + ctx := newAccountPluginCLIContext(args) + + makeConfigNodeDelegate = &mockConfigNodeMaker{ + do: func(ctx *cli.Context) (*node.Node, gethConfig) { + return nil, gethConfig{ + Node: node.Config{ + Plugins: &pluginSettings, + }, + } + }, + } + + _, err := listPluginAccounts(ctx) + require.EqualError(t, err, "unsupported plugins configured: [somename]") +} + +func TestCreatePluginAccount_ErrIfCLIFlagsNotSet(t *testing.T) { + tests := []struct { + name string + args []string + }{ + { + name: "no plugin flags", + args: []string{}, + }, + { + name: "only plugin settings flag", + args: []string{"--plugins", "/path/to/config.json"}, + }, + { + name: "only new plugin account config settings flag", + args: []string{"--plugins.account.config", "/path/to/new-acct-config.json"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := newAccountPluginCLIContext(tt.args) + + _, err := createPluginAccount(ctx) + require.EqualError(t, err, "--plugins and --plugins.account.config flags must be set") + }) + } +} + +func TestCreatePluginAccount_ErrIfInvalidNewAccountConfig(t *testing.T) { + tests := []struct { + name string + flagValue string + wantErrMsg string + }{ + { + name: "json: invalid json", + flagValue: "{invalidjson: abc}", + wantErrMsg: "invalid account creation config provided: invalid character 'i' looking for beginning of object key string", + }, + { + name: "file: does not exist", + flagValue: "file://doesnotexist", + wantErrMsg: "invalid account creation config provided: open doesnotexist: no such file or directory", + }, + { + name: "env: not set", + flagValue: "env://notset", + wantErrMsg: "invalid account creation config provided: env variable notset not found", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + args := []string{ + "--plugins", "/path/to/config.json", + "--plugins.account.config", tt.flagValue, + } + ctx := newAccountPluginCLIContext(args) + _, err := createPluginAccount(ctx) + require.EqualError(t, err, tt.wantErrMsg) + }) + } +} + +func TestCreatePluginAccount_ErrIfUnsupportedPluginInConfig(t *testing.T) { + var unsupportedPlugin plugin.PluginInterfaceName = "somename" + pluginSettings := plugin.Settings{ + Providers: map[plugin.PluginInterfaceName]plugin.PluginDefinition{ + unsupportedPlugin: {}, + }, + } + + args := []string{ + "--plugins", "/path/to/config.json", + "--plugins.account.config", "{}", + } + ctx := newAccountPluginCLIContext(args) + + makeConfigNodeDelegate = 
&mockConfigNodeMaker{ + do: func(ctx *cli.Context) (*node.Node, gethConfig) { + return nil, gethConfig{ + Node: node.Config{ + Plugins: &pluginSettings, + }, + } + }, + } + + _, err := createPluginAccount(ctx) + require.EqualError(t, err, "unsupported plugins configured: [somename]") +} + +func TestImportPluginAccount_ErrIfNoArg(t *testing.T) { + var args []string + ctx := newAccountPluginCLIContext(args) + + _, err := importPluginAccount(ctx) + require.EqualError(t, err, "keyfile must be given as argument") +} + +func TestImportPluginAccount_ErrIfInvalidRawkey(t *testing.T) { + args := []string{"/incorrect/path/to/file.key"} + ctx := newAccountPluginCLIContext(args) + + _, err := importPluginAccount(ctx) + require.EqualError(t, err, "Failed to load the private key: open /incorrect/path/to/file.key: no such file or directory") +} + +func TestImportPluginAccount_ErrIfCLIFlagsNotSet(t *testing.T) { + tmpfile, err := ioutil.TempFile("", "rawkey") + require.NoError(t, err) + t.Log("creating tmp file", "path", tmpfile.Name()) + defer os.Remove(tmpfile.Name()) + _, err = tmpfile.Write([]byte("1fe8f1ad4053326db20529257ac9401f2e6c769ef1d736b8c2f5aba5f787c72b")) + require.NoError(t, err) + err = tmpfile.Close() + require.NoError(t, err) + + tests := []struct { + name string + args []string + }{ + { + name: "no plugin flags", + args: []string{tmpfile.Name()}, + }, + { + name: "only plugin settings flag", + args: []string{"--plugins", "/path/to/config.json", tmpfile.Name()}, + }, + { + name: "only new plugin account config settings flag", + args: []string{"--plugins.account.config", "/path/to/new-acct-config.json", tmpfile.Name()}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := newAccountPluginCLIContext(tt.args) + + _, err := importPluginAccount(ctx) + require.EqualError(t, err, "--plugins and --plugins.account.config flags must be set") + }) + } +} + +func TestImportPluginAccount_ErrIfInvalidNewAccountConfig(t *testing.T) { + tmpfile, err := ioutil.TempFile("", "rawkey") + require.NoError(t, err) + t.Log("creating tmp file", "path", tmpfile.Name()) + defer os.Remove(tmpfile.Name()) + _, err = tmpfile.Write([]byte("1fe8f1ad4053326db20529257ac9401f2e6c769ef1d736b8c2f5aba5f787c72b")) + require.NoError(t, err) + err = tmpfile.Close() + require.NoError(t, err) + + tests := []struct { + name string + flagValue string + wantErrMsg string + }{ + { + name: "json: invalid json", + flagValue: "{invalidjson: abc}", + wantErrMsg: "invalid account creation config provided: invalid character 'i' looking for beginning of object key string", + }, + { + name: "file: does not exist", + flagValue: "file://doesnotexist", + wantErrMsg: "invalid account creation config provided: open doesnotexist: no such file or directory", + }, + { + name: "env: not set", + flagValue: "env://notset", + wantErrMsg: "invalid account creation config provided: env variable notset not found", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + args := []string{ + "--plugins", "/path/to/config.json", + "--plugins.account.config", tt.flagValue, + tmpfile.Name(), + } + ctx := newAccountPluginCLIContext(args) + _, err := importPluginAccount(ctx) + require.EqualError(t, err, tt.wantErrMsg) + }) + } +} + +func TestImportPluginAccount_ErrIfUnsupportedPluginInConfig(t *testing.T) { + tmpfile, err := ioutil.TempFile("", "rawkey") + require.NoError(t, err) + t.Log("creating tmp file", "path", tmpfile.Name()) + defer os.Remove(tmpfile.Name()) + _, err = 
tmpfile.Write([]byte("1fe8f1ad4053326db20529257ac9401f2e6c769ef1d736b8c2f5aba5f787c72b")) + require.NoError(t, err) + err = tmpfile.Close() + require.NoError(t, err) + + var unsupportedPlugin plugin.PluginInterfaceName = "somename" + pluginSettings := plugin.Settings{ + Providers: map[plugin.PluginInterfaceName]plugin.PluginDefinition{ + unsupportedPlugin: {}, + }, + } + + args := []string{ + "--plugins", "/path/to/config.json", + "--plugins.account.config", "{}", + tmpfile.Name(), + } + ctx := newAccountPluginCLIContext(args) + + makeConfigNodeDelegate = &mockConfigNodeMaker{ + do: func(ctx *cli.Context) (*node.Node, gethConfig) { + return nil, gethConfig{ + Node: node.Config{ + Plugins: &pluginSettings, + }, + } + }, + } + + _, err = importPluginAccount(ctx) + require.EqualError(t, err, "unsupported plugins configured: [somename]") +} diff --git a/src/cmd/geth/accountcmd_test.go b/src/cmd/geth/accountcmd_test.go new file mode 100644 index 00000000..8a137ba5 --- /dev/null +++ b/src/cmd/geth/accountcmd_test.go @@ -0,0 +1,374 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "io/ioutil" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/cespare/cp" +) + +// These tests are 'smoke tests' for the account related +// subcommands and flags. +// +// For most tests, the test files from package accounts +// are copied into a temporary keystore directory. + +func tmpDatadirWithKeystore(t *testing.T) string { + datadir := tmpdir(t) + keystore := filepath.Join(datadir, "keystore") + source := filepath.Join("..", "..", "accounts", "keystore", "testdata", "keystore") + if err := cp.CopyAll(keystore, source); err != nil { + t.Fatal(err) + } + // add the necessary files for geth to start with the raft consensus + geth := filepath.Join(datadir, "geth") + sourceNodeKey := filepath.Join("testdata", "geth") + if err := cp.CopyAll(geth, sourceNodeKey); err != nil { + t.Fatal(err) + } + return datadir +} + +func runMinimalGethWithRaftConsensus(t *testing.T, args ...string) *testgeth { + argsWithRaft := append([]string{"--raft"}, args...) + return runMinimalGeth(t, argsWithRaft...) 
+} + +func TestAccountListEmpty(t *testing.T) { + geth := runGeth(t, "account", "list") + geth.ExpectExit() +} + +func TestAccountList(t *testing.T) { + datadir := tmpDatadirWithKeystore(t) + geth := runGeth(t, "account", "list", "--datadir", datadir) + defer geth.ExpectExit() + if runtime.GOOS == "windows" { + geth.Expect(` +Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}\keystore\UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8 +Account #1: {f466859ead1932d743d622cb74fc058882e8648a} keystore://{{.Datadir}}\keystore\aaa +Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}\keystore\zzz +`) + } else { + geth.Expect(` +Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8 +Account #1: {f466859ead1932d743d622cb74fc058882e8648a} keystore://{{.Datadir}}/keystore/aaa +Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}/keystore/zzz +`) + } +} + +func TestAccountNew(t *testing.T) { + geth := runGeth(t, "account", "new", "--lightkdf") + defer geth.ExpectExit() + geth.Expect(` +Your new account is locked with a password. Please give a password. Do not forget this password. +!! Unsupported terminal, password will be echoed. +Password: {{.InputLine "foobar"}} +Repeat password: {{.InputLine "foobar"}} + +Your new key was generated +`) + geth.ExpectRegexp(` +Public address of the key: 0x[0-9a-fA-F]{40} +Path of the secret key file: .*UTC--.+--[0-9a-f]{40} + +- You can share your public address with anyone. Others need it to interact with you. +- You must NEVER share the secret key with anyone! The key controls access to your funds! +- You must BACKUP your key file! Without the key, it's impossible to access account funds! +- You must REMEMBER your password! Without the password, it's impossible to decrypt the key! +`) +} + +func TestAccountImport(t *testing.T) { + tests := []struct{ name, key, output string }{ + { + name: "correct account", + key: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", + output: "Address: {fcad0b19bb29d4674531d6f115237e16afce377c}\n", + }, + { + name: "invalid character", + key: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef1", + output: "Fatal: Failed to load the private key: invalid character '1' at end of key file\n", + }, + } + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + importAccountWithExpect(t, test.key, test.output) + }) + } +} + +func importAccountWithExpect(t *testing.T, key string, expected string) { + dir := tmpdir(t) + keyfile := filepath.Join(dir, "key.prv") + if err := ioutil.WriteFile(keyfile, []byte(key), 0600); err != nil { + t.Error(err) + } + passwordFile := filepath.Join(dir, "password.txt") + if err := ioutil.WriteFile(passwordFile, []byte("foobar"), 0600); err != nil { + t.Error(err) + } + geth := runGeth(t, "account", "import", keyfile, "-password", passwordFile) + defer geth.ExpectExit() + geth.Expect(expected) +} + +func TestAccountNewBadRepeat(t *testing.T) { + geth := runGeth(t, "account", "new", "--lightkdf") + defer geth.ExpectExit() + geth.Expect(` +Your new account is locked with a password. Please give a password. Do not forget this password. +!! Unsupported terminal, password will be echoed. 
+Password: {{.InputLine "something"}} +Repeat password: {{.InputLine "something else"}} +Fatal: Passwords do not match +`) +} + +func TestAccountUpdate(t *testing.T) { + datadir := tmpDatadirWithKeystore(t) + geth := runGeth(t, "account", "update", + "--datadir", datadir, "--lightkdf", + "f466859ead1932d743d622cb74fc058882e8648a") + defer geth.ExpectExit() + geth.Expect(` +Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3 +!! Unsupported terminal, password will be echoed. +Password: {{.InputLine "foobar"}} +Please give a new password. Do not forget this password. +Password: {{.InputLine "foobar2"}} +Repeat password: {{.InputLine "foobar2"}} +`) +} + +func TestWalletImport(t *testing.T) { + geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json") + defer geth.ExpectExit() + geth.Expect(` +!! Unsupported terminal, password will be echoed. +Password: {{.InputLine "foo"}} +Address: {d4584b5f6229b7be90727b0fc8c6b91bb427821f} +`) + + files, err := ioutil.ReadDir(filepath.Join(geth.Datadir, "keystore")) + if len(files) != 1 { + t.Errorf("expected one key file in keystore directory, found %d files (error: %v)", len(files), err) + } +} + +func TestWalletImportBadPassword(t *testing.T) { + geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json") + defer geth.ExpectExit() + geth.Expect(` +!! Unsupported terminal, password will be echoed. +Password: {{.InputLine "wrong"}} +Fatal: could not decrypt key with given password +`) +} + +func TestUnlockFlag(t *testing.T) { + defer SetResetPrivateConfig("ignore")() + geth := runMinimalGethWithRaftConsensus(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "js", "testdata/empty.js") + geth.Expect(` +Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3 +!! Unsupported terminal, password will be echoed. +Password: {{.InputLine "foobar"}} +`) + geth.ExpectExit() + + wantMessages := []string{ + "Unlocked account", + "=0xf466859eAD1932D743d622CB74FC058882E8648A", + } + for _, m := range wantMessages { + if !strings.Contains(geth.StderrText(), m) { + t.Errorf("stderr text does not contain %q", m) + } + } +} + +func TestGethDoesntStartWithoutConfiguredConsensus(t *testing.T) { + defer SetResetPrivateConfig("ignore")() + + datadir := tmpDatadirWithKeystore(t) + geth := runGeth(t, + "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0") + + expectedText := "Consensus not specified. Exiting!!" + + // changed to expect regexp because fatalf writes the message to stdout/stderr + geth.ExpectRegexp(expectedText) +} + +func TestGethStartsWhenConsensusAndPrivateConfigAreConfigured(t *testing.T) { + defer SetResetPrivateConfig("ignore")() + + datadir := tmpDatadirWithKeystore(t) + geth := runGeth(t, + "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0", "--raft") + + geth.ExpectExit() +} + +func TestUnlockFlagWrongPassword(t *testing.T) { + defer SetResetPrivateConfig("ignore")() + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "js", "testdata/empty.js") + defer geth.ExpectExit() + geth.Expect(` +Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3 +!! Unsupported terminal, password will be echoed. 
+Password: {{.InputLine "wrong1"}} +Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 2/3 +Password: {{.InputLine "wrong2"}} +Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 3/3 +Password: {{.InputLine "wrong3"}} +Fatal: Failed to unlock account f466859ead1932d743d622cb74fc058882e8648a (could not decrypt key with given password) +`) +} + +// https://github.com/ethereum/go-ethereum/issues/1785 +func TestUnlockFlagMultiIndex(t *testing.T) { + defer SetResetPrivateConfig("ignore")() + geth := runMinimalGethWithRaftConsensus(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--unlock", "0,2", "js", "testdata/empty.js") + geth.Expect(` +Unlocking account 0 | Attempt 1/3 +!! Unsupported terminal, password will be echoed. +Password: {{.InputLine "foobar"}} +Unlocking account 2 | Attempt 1/3 +Password: {{.InputLine "foobar"}} +`) + geth.ExpectExit() + + wantMessages := []string{ + "Unlocked account", + "=0x7EF5A6135f1FD6a02593eEdC869c6D41D934aef8", + "=0x289d485D9771714CCe91D3393D764E1311907ACc", + } + for _, m := range wantMessages { + if !strings.Contains(geth.StderrText(), m) { + t.Errorf("stderr text does not contain %q", m) + } + } +} + +func TestUnlockFlagPasswordFile(t *testing.T) { + defer SetResetPrivateConfig("ignore")() + geth := runMinimalGethWithRaftConsensus(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password", "testdata/passwords.txt", "--unlock", "0,2", "js", "testdata/empty.js") + geth.ExpectExit() + + wantMessages := []string{ + "Unlocked account", + "=0x7EF5A6135f1FD6a02593eEdC869c6D41D934aef8", + "=0x289d485D9771714CCe91D3393D764E1311907ACc", + } + for _, m := range wantMessages { + if !strings.Contains(geth.StderrText(), m) { + t.Errorf("stderr text does not contain %q", m) + } + } +} + +func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) { + defer SetResetPrivateConfig("ignore")() + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password", + "testdata/wrong-passwords.txt", "--unlock", "0,2") + defer geth.ExpectExit() + geth.Expect(` +Fatal: Failed to unlock account 0 (could not decrypt key with given password) +`) +} + +func TestUnlockFlagAmbiguous(t *testing.T) { + defer SetResetPrivateConfig("ignore")() + datadir := tmpDatadirWithKeystore(t) + store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes") + geth := runMinimalGethWithRaftConsensus(t, "--datadir", datadir, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore", + store, "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", + "js", "testdata/empty.js") + defer geth.ExpectExit() + + // Helper for the expect template, returns absolute keystore path. + geth.SetTemplateFunc("keypath", func(file string) string { + abs, _ := filepath.Abs(filepath.Join(store, file)) + return abs + }) + geth.Expect(` +Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3 +!! Unsupported terminal, password will be echoed. +Password: {{.InputLine "foobar"}} +Multiple key files exist for address f466859ead1932d743d622cb74fc058882e8648a: + keystore://{{keypath "1"}} + keystore://{{keypath "2"}} +Testing your password against all of them... 
+Your password unlocked keystore://{{keypath "1"}} +In order to avoid this warning, you need to remove the following duplicate key files: + keystore://{{keypath "2"}} +`) + geth.ExpectExit() + + wantMessages := []string{ + "Unlocked account", + "=0xf466859eAD1932D743d622CB74FC058882E8648A", + } + for _, m := range wantMessages { + if !strings.Contains(geth.StderrText(), m) { + t.Errorf("stderr text does not contain %q", m) + } + } +} + +func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) { + defer SetResetPrivateConfig("ignore")() + store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes") + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore", + store, "--unlock", "f466859ead1932d743d622cb74fc058882e8648a") + + defer geth.ExpectExit() + + // Helper for the expect template, returns absolute keystore path. + geth.SetTemplateFunc("keypath", func(file string) string { + abs, _ := filepath.Abs(filepath.Join(store, file)) + return abs + }) + geth.Expect(` +Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3 +!! Unsupported terminal, password will be echoed. +Password: {{.InputLine "wrong"}} +Multiple key files exist for address f466859ead1932d743d622cb74fc058882e8648a: + keystore://{{keypath "1"}} + keystore://{{keypath "2"}} +Testing your password against all of them... +Fatal: None of the listed files could be unlocked. +`) + geth.ExpectExit() +} diff --git a/src/cmd/geth/chaincmd.go b/src/cmd/geth/chaincmd.go new file mode 100644 index 00000000..d430aa01 --- /dev/null +++ b/src/cmd/geth/chaincmd.go @@ -0,0 +1,559 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "encoding/json" + "fmt" + "io" + "os" + "runtime" + "strconv" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/mps" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/private" + "github.com/ethereum/go-ethereum/private/engine/notinuse" + "github.com/ethereum/go-ethereum/trie" + "gopkg.in/urfave/cli.v1" +) + +var ( + initCommand = cli.Command{ + Action: utils.MigrateFlags(initGenesis), + Name: "init", + Usage: "Bootstrap and initialize a new genesis block", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + }, + Category: "BLOCKCHAIN COMMANDS", + Description: ` +The init command initializes a new genesis block and definition for the network. 
+This is a destructive action and changes the network in which you will be +participating. + +It expects the genesis file as argument.`, + } + dumpGenesisCommand = cli.Command{ + Action: utils.MigrateFlags(dumpGenesis), + Name: "dumpgenesis", + Usage: "Dumps genesis block JSON configuration to stdout", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + }, + Category: "BLOCKCHAIN COMMANDS", + Description: ` +The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`, + } + importCommand = cli.Command{ + Action: utils.MigrateFlags(importChain), + Name: "import", + Usage: "Import a blockchain file", + ArgsUsage: " ( ... ) ", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.CacheFlag, + utils.SyncModeFlag, + utils.GCModeFlag, + utils.SnapshotFlag, + utils.CacheDatabaseFlag, + utils.CacheGCFlag, + utils.MetricsEnabledFlag, + utils.MetricsEnabledExpensiveFlag, + utils.MetricsHTTPFlag, + utils.MetricsPortFlag, + utils.MetricsEnableInfluxDBFlag, + utils.MetricsInfluxDBEndpointFlag, + utils.MetricsInfluxDBDatabaseFlag, + utils.MetricsInfluxDBUsernameFlag, + utils.MetricsInfluxDBPasswordFlag, + utils.MetricsInfluxDBTagsFlag, + utils.TxLookupLimitFlag, + }, + Category: "BLOCKCHAIN COMMANDS", + Description: ` +The import command imports blocks from an RLP-encoded form. The form can be one file +with several RLP-encoded blocks, or several files can be used. + +If only one file is used, import error will result in failure. If several files are used, +processing will proceed even if an individual RLP-file import failure occurs.`, + } + mpsdbUpgradeCommand = cli.Command{ + Action: utils.MigrateFlags(mpsdbUpgrade), + Name: "mpsdbupgrade", + Usage: "Upgrade a standalone DB to an MPS DB", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + }, + Description: ` +Checks if the chain config isMPS parameter value. +If false, it upgrades the DB to be MPS enabled (builds the trie of private states) and if successful sets isMPS to true. +If true, exits displaying an error message that the DB is already MPS.`, + Category: "BLOCKCHAIN COMMANDS", + } + exportCommand = cli.Command{ + Action: utils.MigrateFlags(exportChain), + Name: "export", + Usage: "Export blockchain into file", + ArgsUsage: " [ ]", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.CacheFlag, + utils.SyncModeFlag, + }, + Category: "BLOCKCHAIN COMMANDS", + Description: ` +Requires a first argument of the file to write to. +Optional second and third arguments control the first and +last block to write. In this mode, the file will be appended +if already existing. 
If the file ends with .gz, the output will +be gzipped.`, + } + importPreimagesCommand = cli.Command{ + Action: utils.MigrateFlags(importPreimages), + Name: "import-preimages", + Usage: "Import the preimage database from an RLP stream", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.CacheFlag, + utils.SyncModeFlag, + }, + Category: "BLOCKCHAIN COMMANDS", + Description: ` + The import-preimages command imports hash preimages from an RLP encoded stream.`, + } + exportPreimagesCommand = cli.Command{ + Action: utils.MigrateFlags(exportPreimages), + Name: "export-preimages", + Usage: "Export the preimage database into an RLP stream", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.CacheFlag, + utils.SyncModeFlag, + }, + Category: "BLOCKCHAIN COMMANDS", + Description: ` +The export-preimages command export hash preimages to an RLP encoded stream`, + } + copydbCommand = cli.Command{ + Action: utils.MigrateFlags(copyDb), + Name: "copydb", + Usage: "Create a local chain from a target chaindata folder", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.CacheFlag, + utils.SyncModeFlag, + utils.FakePoWFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.TxLookupLimitFlag, + utils.GoerliFlag, + utils.YoloV3Flag, + }, + Category: "BLOCKCHAIN COMMANDS", + Description: ` +The first argument must be the directory containing the blockchain to download from`, + } + dumpCommand = cli.Command{ + Action: utils.MigrateFlags(dump), + Name: "dump", + Usage: "Dump a specific block from storage", + ArgsUsage: "[ | ]...", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.CacheFlag, + utils.SyncModeFlag, + utils.IterativeOutputFlag, + utils.ExcludeCodeFlag, + utils.ExcludeStorageFlag, + utils.IncludeIncompletesFlag, + }, + Category: "BLOCKCHAIN COMMANDS", + Description: ` +The arguments are interpreted as block numbers or hashes. +Use "ethereum dump 0" to dump the genesis block.`, + } +) + +// In the regular Genesis / ChainConfig struct, due to the way go deserializes +// json, IsQuorum defaults to false (when not specified). Here we specify it as +// a pointer so we can make the distinction and default unspecified to true. +func getIsQuorum(file io.Reader) bool { + altGenesis := new(struct { + Config *struct { + IsQuorum *bool `json:"isQuorum"` + } `json:"config"` + }) + + if err := json.NewDecoder(file).Decode(altGenesis); err != nil { + utils.Fatalf("invalid genesis file: %v", err) + } + + // unspecified defaults to true + return altGenesis.Config.IsQuorum == nil || *altGenesis.Config.IsQuorum +} + +// initGenesis will initialise the given JSON format genesis file and writes it as +// the zero'd block (i.e. genesis) or will fail hard if it can't succeed. 
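+// A minimal invocation (illustrative only; the datadir and genesis paths below are
+// examples, not fixed values) looks like:
+//
+//	geth --datadir ./node init ./network/testnet/genesis.json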
+func initGenesis(ctx *cli.Context) error { + // Make sure we have a valid genesis JSON + genesisPath := ctx.Args().First() + if len(genesisPath) == 0 { + utils.Fatalf("Must supply path to genesis JSON file") + } + file, err := os.Open(genesisPath) + if err != nil { + utils.Fatalf("Failed to read genesis file: %v", err) + } + defer file.Close() + + genesis := new(core.Genesis) + if err := json.NewDecoder(file).Decode(genesis); err != nil { + utils.Fatalf("invalid genesis file: %v", err) + } + + // Quorum + file.Seek(0, 0) + genesis.Config.IsQuorum = getIsQuorum(file) + + // check the data given as a part of newMaxConfigData to ensure that + // its in expected order + err = genesis.Config.CheckMaxCodeConfigData() + if err != nil { + utils.Fatalf("maxCodeSize data invalid: %v", err) + } + // End Quorum + + // Open and initialise both full and light databases + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + for _, name := range []string{"chaindata", "lightchaindata"} { + chaindb, err := stack.OpenDatabase(name, 0, 0, "") + if err != nil { + utils.Fatalf("Failed to open database: %v", err) + } + _, hash, err := core.SetupGenesisBlock(chaindb, genesis) + if err != nil { + utils.Fatalf("Failed to write genesis block: %v", err) + } + chaindb.Close() + log.Info("Successfully wrote genesis state", "database", name, "hash", hash) + } + return nil +} + +func dumpGenesis(ctx *cli.Context) error { + genesis := utils.MakeGenesis(ctx) + if genesis == nil { + genesis = core.DefaultGenesisBlock() + } + if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil { + utils.Fatalf("could not encode genesis") + } + return nil +} + +func importChain(ctx *cli.Context) error { + if len(ctx.Args()) < 1 { + utils.Fatalf("This command requires an argument.") + } + // Start metrics export if enabled + utils.SetupMetrics(ctx) + // Start system runtime metrics collection + go metrics.CollectProcessMetrics(3 * time.Second) + + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chain, db := utils.MakeChain(ctx, stack, false, true) + defer db.Close() + + // Start periodically gathering memory profiles + var peakMemAlloc, peakMemSys uint64 + go func() { + stats := new(runtime.MemStats) + for { + runtime.ReadMemStats(stats) + if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc { + atomic.StoreUint64(&peakMemAlloc, stats.Alloc) + } + if atomic.LoadUint64(&peakMemSys) < stats.Sys { + atomic.StoreUint64(&peakMemSys, stats.Sys) + } + time.Sleep(5 * time.Second) + } + }() + // Import the chain + start := time.Now() + + var importErr error + + if len(ctx.Args()) == 1 { + if err := utils.ImportChain(chain, ctx.Args().First()); err != nil { + importErr = err + log.Error("Import error", "err", err) + } + } else { + for _, arg := range ctx.Args() { + if err := utils.ImportChain(chain, arg); err != nil { + importErr = err + log.Error("Import error", "file", arg, "err", err) + } + } + } + chain.Stop() + fmt.Printf("Import done in %v.\n\n", time.Since(start)) + + // Output pre-compaction stats mostly to see the import trashing + showLeveldbStats(db) + + // Print the memory statistics used by the importing + mem := new(runtime.MemStats) + runtime.ReadMemStats(mem) + + fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024) + fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024) + fmt.Printf("Allocations: %.3f million\n", 
float64(mem.Mallocs)/1000000) + fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs)) + + if ctx.GlobalBool(utils.NoCompactionFlag.Name) { + return nil + } + + // Compact the entire database to more accurately measure disk io and print the stats + start = time.Now() + fmt.Println("Compacting entire database...") + if err := db.Compact(nil, nil); err != nil { + utils.Fatalf("Compaction failed: %v", err) + } + fmt.Printf("Compaction done in %v.\n\n", time.Since(start)) + + showLeveldbStats(db) + return importErr +} + +func mpsdbUpgrade(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + // initialise the tx manager with the dummy tx mgr + private.P = ¬inuse.DBUpgradePrivateTransactionManager{} + + chain, db := utils.MakeChain(ctx, stack, false, true) + + if chain.Config().IsMPS { + utils.Fatalf("The database is already upgraded to support multiple private states.") + } + + currentBlockNumber := chain.CurrentBlock().Number().Int64() + fmt.Printf("Current block number %v\n", currentBlockNumber) + + return mps.UpgradeDB(db, chain) +} + +func exportChain(ctx *cli.Context) error { + if len(ctx.Args()) < 1 { + utils.Fatalf("This command requires an argument.") + } + + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chain, _ := utils.MakeChain(ctx, stack, true, true) + start := time.Now() + + var err error + fp := ctx.Args().First() + if len(ctx.Args()) < 3 { + err = utils.ExportChain(chain, fp) + } else { + // This can be improved to allow for numbers larger than 9223372036854775807 + first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64) + last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64) + if ferr != nil || lerr != nil { + utils.Fatalf("Export error in parsing parameters: block number not an integer\n") + } + if first < 0 || last < 0 { + utils.Fatalf("Export error: block number must be greater than 0\n") + } + err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last)) + } + + if err != nil { + utils.Fatalf("Export error: %v\n", err) + } + fmt.Printf("Export done in %v\n", time.Since(start)) + return nil +} + +// importPreimages imports preimage data from the specified file. +func importPreimages(ctx *cli.Context) error { + if len(ctx.Args()) < 1 { + utils.Fatalf("This command requires an argument.") + } + + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + db := utils.MakeChainDatabase(ctx, stack) + start := time.Now() + + if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil { + utils.Fatalf("Import error: %v\n", err) + } + fmt.Printf("Import done in %v\n", time.Since(start)) + return nil +} + +// exportPreimages dumps the preimage data to specified json file in streaming way. 
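+// Illustrative usage (the file name is an example, not a required path); the exported
+// stream can be fed back in with import-preimages:
+//
+//	geth --datadir ./node export-preimages preimages.rlp
+//	geth --datadir ./node import-preimages preimages.rlp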
+func exportPreimages(ctx *cli.Context) error { + if len(ctx.Args()) < 1 { + utils.Fatalf("This command requires an argument.") + } + + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + db := utils.MakeChainDatabase(ctx, stack) + start := time.Now() + + if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil { + utils.Fatalf("Export error: %v\n", err) + } + fmt.Printf("Export done in %v\n", time.Since(start)) + return nil +} + +func copyDb(ctx *cli.Context) error { + // Ensure we have a source chain directory to copy + if len(ctx.Args()) < 1 { + utils.Fatalf("Source chaindata directory path argument missing") + } + if len(ctx.Args()) < 2 { + utils.Fatalf("Source ancient chain directory path argument missing") + } + // Initialize a new chain for the running node to sync into + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chain, chainDb := utils.MakeChain(ctx, stack, false, false) + syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode) + + var syncBloom *trie.SyncBloom + if syncMode == downloader.FastSync { + syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb) + } + dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil) + + // Create a source peer to satisfy downloader requests from + db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "") + if err != nil { + return err + } + hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false }) + if err != nil { + return err + } + peer := downloader.NewFakePeer("local", db, hc, dl) + if err = dl.RegisterPeer("local", 63, peer); err != nil { + return err + } + // Synchronise with the simulated peer + start := time.Now() + + currentHeader := hc.CurrentHeader() + if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil { + return err + } + for dl.Synchronising() { + time.Sleep(10 * time.Millisecond) + } + fmt.Printf("Database copy done in %v\n", time.Since(start)) + + // Compact the entire database to remove any sync overhead + start = time.Now() + fmt.Println("Compacting entire database...") + if err = db.Compact(nil, nil); err != nil { + utils.Fatalf("Compaction failed: %v", err) + } + fmt.Printf("Compaction done in %v.\n\n", time.Since(start)) + return nil +} + +func dump(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chain, chainDb := utils.MakeChain(ctx, stack, true, false) + defer chainDb.Close() + for _, arg := range ctx.Args() { + var block *types.Block + if hashish(arg) { + block = chain.GetBlockByHash(common.HexToHash(arg)) + } else { + num, _ := strconv.Atoi(arg) + block = chain.GetBlockByNumber(uint64(num)) + } + if block == nil { + fmt.Println("{}") + utils.Fatalf("block not found") + } else { + state, err := state.New(block.Root(), state.NewDatabase(chainDb), nil) + if err != nil { + utils.Fatalf("could not create new state: %v", err) + } + excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name) + excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name) + includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name) + if ctx.Bool(utils.IterativeOutputFlag.Name) { + state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout)) + } else { + if includeMissing { + fmt.Printf("If you want to include accounts with missing preimages, you need iterative 
output, since" + + " otherwise the accounts will overwrite each other in the resulting mapping.") + } + fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false)) + } + } + } + return nil +} + +// hashish returns true for strings that look like hashes. +func hashish(x string) bool { + _, err := strconv.Atoi(x) + return err != nil +} diff --git a/src/cmd/geth/config.go b/src/cmd/geth/config.go new file mode 100644 index 00000000..f5036a68 --- /dev/null +++ b/src/cmd/geth/config.go @@ -0,0 +1,358 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "bufio" + "errors" + "fmt" + "math/big" + "os" + "reflect" + "unicode" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common/http" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/extension/privacyExtension" + "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/private" + "github.com/ethereum/go-ethereum/private/engine" + "github.com/naoina/toml" + "gopkg.in/urfave/cli.v1" +) + +var ( + dumpConfigCommand = cli.Command{ + Action: utils.MigrateFlags(dumpConfig), + Name: "dumpconfig", + Usage: "Show configuration values", + ArgsUsage: "", + Flags: append(nodeFlags, rpcFlags...), + Category: "MISCELLANEOUS COMMANDS", + Description: `The dumpconfig command shows configuration values.`, + } + + configFileFlag = cli.StringFlag{ + Name: "config", + Usage: "TOML configuration file", + } +) + +// These settings ensure that TOML keys use the same names as Go struct fields. +var tomlSettings = toml.Config{ + NormFieldName: func(rt reflect.Type, key string) string { + return key + }, + FieldToKey: func(rt reflect.Type, field string) string { + return field + }, + MissingField: func(rt reflect.Type, field string) error { + link := "" + if unicode.IsUpper(rune(rt.Name()[0])) && rt.PkgPath() != "main" { + link = fmt.Sprintf(", see https://godoc.org/%s#%s for available fields", rt.PkgPath(), rt.Name()) + } + return fmt.Errorf("field '%s' is not defined in %s%s", field, rt.String(), link) + }, +} + +type ethstatsConfig struct { + URL string `toml:",omitempty"` +} + +type gethConfig struct { + Eth ethconfig.Config + Node node.Config + Ethstats ethstatsConfig + Metrics metrics.Config +} + +func loadConfig(file string, cfg *gethConfig) error { + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + + err = tomlSettings.NewDecoder(bufio.NewReader(f)).Decode(cfg) + // Add file name to errors that have a line number. 
+ if _, ok := err.(*toml.LineError); ok { + err = errors.New(file + ", " + err.Error()) + } + return err +} + +func defaultNodeConfig() node.Config { + cfg := node.DefaultConfig + cfg.Name = clientIdentifier + cfg.Version = params.VersionWithCommit(gitCommit, gitDate) + cfg.HTTPModules = append(cfg.HTTPModules, "eth") + cfg.WSModules = append(cfg.WSModules, "eth") + cfg.IPCPath = "geth.ipc" + return cfg +} + +// makeConfigNode loads geth configuration and creates a blank node instance. +func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { + // Quorum: Must occur before setQuorumConfig, as it needs an initialised PTM to be enabled + // Extension Service and Multitenancy feature validation also depend on PTM availability + if err := quorumInitialisePrivacy(ctx); err != nil { + utils.Fatalf("Error initialising Private Transaction Manager: %s", err.Error()) + } + + // Load defaults. + cfg := gethConfig{ + Eth: ethconfig.Defaults, + Node: defaultNodeConfig(), + Metrics: metrics.DefaultConfig, + } + + // Load config file. + if file := ctx.GlobalString(configFileFlag.Name); file != "" { + if err := loadConfig(file, &cfg); err != nil { + utils.Fatalf("%v", err) + } + } + + // Apply flags. + utils.SetNodeConfig(ctx, &cfg.Node) + stack, err := node.New(&cfg.Node) + if err != nil { + utils.Fatalf("Failed to create the protocol stack: %v", err) + } + utils.SetEthConfig(ctx, stack, &cfg.Eth) + if ctx.GlobalIsSet(utils.EthStatsURLFlag.Name) { + cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name) + } + applyMetricConfig(ctx, &cfg) + + return stack, cfg +} + +// makeFullNode loads geth configuration and creates the Ethereum backend. +func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { + stack, cfg := makeConfigNode(ctx) + if ctx.GlobalIsSet(utils.OverrideBerlinFlag.Name) { + cfg.Eth.OverrideBerlin = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideBerlinFlag.Name)) + } + + //Must occur before registering the extension service, as it needs an initialised PTM to be enabled + if err := quorumInitialisePrivacy(ctx); err != nil { + utils.Fatalf("Error initialising Private Transaction Manager: %s", err.Error()) + } + + // Quorum - returning `ethService` too for the Raft and extension service + backend, ethService := utils.RegisterEthService(stack, &cfg.Eth) + + // Quorum + // plugin service must be after eth service so that eth service will be stopped gradually if any of the plugin + // fails to start + if cfg.Node.Plugins != nil { + utils.RegisterPluginService(stack, &cfg.Node, ctx.Bool(utils.PluginSkipVerifyFlag.Name), ctx.Bool(utils.PluginLocalVerifyFlag.Name), ctx.String(utils.PluginPublicKeyFlag.Name)) + } + + if cfg.Node.IsPermissionEnabled() { + utils.RegisterPermissionService(stack, ctx.Bool(utils.RaftDNSEnabledFlag.Name), backend.ChainConfig().ChainID) + } + + if ctx.GlobalBool(utils.RaftModeFlag.Name) { + utils.RegisterRaftService(stack, ctx, &cfg.Node, ethService) + } + + if private.IsQuorumPrivacyEnabled() { + utils.RegisterExtensionService(stack, ethService) + } + // End Quorum + + // Configure GraphQL if requested + if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) { + utils.RegisterGraphQLService(stack, backend, cfg.Node) + } + // Add the Ethereum Stats daemon if requested. + if cfg.Ethstats.URL != "" { + utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL) + } + return stack, backend +} + +// dumpConfig is the dumpconfig command. 
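+// Example (illustrative): the emitted TOML can be captured and fed back in via the
+// --config flag, e.g.
+//
+//	geth --datadir ./node dumpconfig > geth.toml
+//	geth --config geth.toml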
+func dumpConfig(ctx *cli.Context) error { + _, cfg := makeConfigNode(ctx) + comment := "" + + if cfg.Eth.Genesis != nil { + cfg.Eth.Genesis = nil + comment += "# Note: this config doesn't contain the genesis block.\n\n" + } + + out, err := tomlSettings.Marshal(&cfg) + if err != nil { + return err + } + + dump := os.Stdout + if ctx.NArg() > 0 { + dump, err = os.OpenFile(ctx.Args().Get(0), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + defer dump.Close() + } + dump.WriteString(comment) + dump.Write(out) + + return nil +} + +func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) { + if ctx.GlobalIsSet(utils.MetricsEnabledFlag.Name) { + cfg.Metrics.Enabled = ctx.GlobalBool(utils.MetricsEnabledFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsEnabledExpensiveFlag.Name) { + cfg.Metrics.EnabledExpensive = ctx.GlobalBool(utils.MetricsEnabledExpensiveFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsHTTPFlag.Name) { + cfg.Metrics.HTTP = ctx.GlobalString(utils.MetricsHTTPFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsPortFlag.Name) { + cfg.Metrics.Port = ctx.GlobalInt(utils.MetricsPortFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsEnableInfluxDBFlag.Name) { + cfg.Metrics.EnableInfluxDB = ctx.GlobalBool(utils.MetricsEnableInfluxDBFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBEndpointFlag.Name) { + cfg.Metrics.InfluxDBEndpoint = ctx.GlobalString(utils.MetricsInfluxDBEndpointFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBDatabaseFlag.Name) { + cfg.Metrics.InfluxDBDatabase = ctx.GlobalString(utils.MetricsInfluxDBDatabaseFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBUsernameFlag.Name) { + cfg.Metrics.InfluxDBUsername = ctx.GlobalString(utils.MetricsInfluxDBUsernameFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBPasswordFlag.Name) { + cfg.Metrics.InfluxDBPassword = ctx.GlobalString(utils.MetricsInfluxDBPasswordFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBTagsFlag.Name) { + cfg.Metrics.InfluxDBTags = ctx.GlobalString(utils.MetricsInfluxDBTagsFlag.Name) + } +} + +// quorumValidateEthService checks quorum features that depend on the ethereum service +func quorumValidateEthService(stack *node.Node, isRaft bool) { + var ethereum *eth.Ethereum + + err := stack.Lifecycle(ðereum) + if err != nil { + utils.Fatalf("Error retrieving Ethereum service: %v", err) + } + + quorumValidateConsensus(ethereum, isRaft) + + quorumValidatePrivacyEnhancements(ethereum) +} + +// quorumValidateConsensus checks if a consensus was used. The node is killed if consensus was not used +func quorumValidateConsensus(ethereum *eth.Ethereum, isRaft bool) { + if !isRaft && ethereum.BlockChain().Config().Istanbul == nil && ethereum.BlockChain().Config().Clique == nil { + utils.Fatalf("Consensus not specified. 
Exiting!!") + } +} + +// quorumValidatePrivacyEnhancements checks if privacy enhancements are configured the transaction manager supports +// the PrivacyEnhancements feature +func quorumValidatePrivacyEnhancements(ethereum *eth.Ethereum) { + privacyEnhancementsBlock := ethereum.BlockChain().Config().PrivacyEnhancementsBlock + if privacyEnhancementsBlock != nil { + log.Info("Privacy enhancements is configured to be enabled from block ", "height", privacyEnhancementsBlock) + if !private.P.HasFeature(engine.PrivacyEnhancements) { + utils.Fatalf("Cannot start quorum with privacy enhancements enabled while the transaction manager does not support it") + } + } +} + +// configure and set up quorum transaction privacy +func quorumInitialisePrivacy(ctx *cli.Context) error { + cfg, err := QuorumSetupPrivacyConfiguration(ctx) + if err != nil { + return err + } + + err = private.InitialiseConnection(cfg) + if err != nil { + return err + } + privacyExtension.Init() + + return nil +} + +// Get private transaction manager configuration +func QuorumSetupPrivacyConfiguration(ctx *cli.Context) (http.Config, error) { + // get default configuration + cfg, err := private.GetLegacyEnvironmentConfig() + if err != nil { + return http.Config{}, err + } + + // override the config with command line parameters + if ctx.GlobalIsSet(utils.QuorumPTMUnixSocketFlag.Name) { + cfg.SetSocket(ctx.GlobalString(utils.QuorumPTMUnixSocketFlag.Name)) + } + if ctx.GlobalIsSet(utils.QuorumPTMUrlFlag.Name) { + cfg.SetHttpUrl(ctx.GlobalString(utils.QuorumPTMUrlFlag.Name)) + } + if ctx.GlobalIsSet(utils.QuorumPTMTimeoutFlag.Name) { + cfg.SetTimeout(ctx.GlobalUint(utils.QuorumPTMTimeoutFlag.Name)) + } + if ctx.GlobalIsSet(utils.QuorumPTMDialTimeoutFlag.Name) { + cfg.SetDialTimeout(ctx.GlobalUint(utils.QuorumPTMDialTimeoutFlag.Name)) + } + if ctx.GlobalIsSet(utils.QuorumPTMHttpIdleTimeoutFlag.Name) { + cfg.SetHttpIdleConnTimeout(ctx.GlobalUint(utils.QuorumPTMHttpIdleTimeoutFlag.Name)) + } + if ctx.GlobalIsSet(utils.QuorumPTMHttpWriteBufferSizeFlag.Name) { + cfg.SetHttpWriteBufferSize(ctx.GlobalInt(utils.QuorumPTMHttpWriteBufferSizeFlag.Name)) + } + if ctx.GlobalIsSet(utils.QuorumPTMHttpReadBufferSizeFlag.Name) { + cfg.SetHttpReadBufferSize(ctx.GlobalInt(utils.QuorumPTMHttpReadBufferSizeFlag.Name)) + } + if ctx.GlobalIsSet(utils.QuorumPTMTlsModeFlag.Name) { + cfg.SetTlsMode(ctx.GlobalString(utils.QuorumPTMTlsModeFlag.Name)) + } + if ctx.GlobalIsSet(utils.QuorumPTMTlsRootCaFlag.Name) { + cfg.SetTlsRootCA(ctx.GlobalString(utils.QuorumPTMTlsRootCaFlag.Name)) + } + if ctx.GlobalIsSet(utils.QuorumPTMTlsClientCertFlag.Name) { + cfg.SetTlsClientCert(ctx.GlobalString(utils.QuorumPTMTlsClientCertFlag.Name)) + } + if ctx.GlobalIsSet(utils.QuorumPTMTlsClientKeyFlag.Name) { + cfg.SetTlsClientKey(ctx.GlobalString(utils.QuorumPTMTlsClientKeyFlag.Name)) + } + if ctx.GlobalIsSet(utils.QuorumPTMTlsInsecureSkipVerify.Name) { + cfg.SetTlsInsecureSkipVerify(ctx.Bool(utils.QuorumPTMTlsInsecureSkipVerify.Name)) + } + + if err = cfg.Validate(); err != nil { + return cfg, err + } + return cfg, nil +} diff --git a/src/cmd/geth/config_test.go b/src/cmd/geth/config_test.go new file mode 100644 index 00000000..ebd5c96a --- /dev/null +++ b/src/cmd/geth/config_test.go @@ -0,0 +1,697 @@ +package main + +import ( + "flag" + "fmt" + "io/ioutil" + "math/big" + "os" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/istanbul" + 
"github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/netutil" + "github.com/naoina/toml" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/urfave/cli.v1" +) + +func TestFlagsConfig(t *testing.T) { + flags := []interface{}{ + utils.DataDirFlag, + utils.RaftLogDirFlag, + utils.AncientFlag, + utils.MinFreeDiskSpaceFlag, + utils.KeyStoreDirFlag, + utils.NoUSBFlag, + utils.USBFlag, + utils.SmartCardDaemonPathFlag, + utils.NetworkIdFlag, + utils.MainnetFlag, + utils.GoerliFlag, + utils.YoloV3Flag, + utils.RinkebyFlag, + utils.RopstenFlag, + utils.DeveloperFlag, + utils.DeveloperPeriodFlag, + utils.IdentityFlag, + utils.DocRootFlag, + utils.ExitWhenSyncedFlag, + utils.IterativeOutputFlag, + utils.ExcludeStorageFlag, + utils.IncludeIncompletesFlag, + utils.ExcludeCodeFlag, + utils.SyncModeFlag, + utils.GCModeFlag, + utils.SnapshotFlag, + utils.TxLookupLimitFlag, + utils.LightKDFFlag, + utils.DeprecatedAuthorizationListFlag, + utils.AuthorizationListFlag, + utils.BloomFilterSizeFlag, + utils.OverrideBerlinFlag, + utils.LightServeFlag, + utils.LightIngressFlag, + utils.LightEgressFlag, + utils.LightMaxPeersFlag, + utils.UltraLightServersFlag, + utils.UltraLightFractionFlag, + utils.UltraLightOnlyAnnounceFlag, + utils.LightNoPruneFlag, + utils.LightNoSyncServeFlag, + utils.EthashCacheDirFlag, + utils.EthashCachesInMemoryFlag, + utils.EthashCachesOnDiskFlag, + utils.EthashCachesLockMmapFlag, + utils.EthashDatasetDirFlag, + utils.EthashDatasetsInMemoryFlag, + utils.EthashDatasetsOnDiskFlag, + utils.EthashDatasetsLockMmapFlag, + utils.TxPoolLocalsFlag, + utils.TxPoolNoLocalsFlag, + utils.TxPoolJournalFlag, + utils.TxPoolRejournalFlag, + utils.TxPoolPriceLimitFlag, + utils.TxPoolPriceBumpFlag, + utils.TxPoolAccountSlotsFlag, + utils.TxPoolGlobalSlotsFlag, + utils.TxPoolAccountQueueFlag, + utils.TxPoolGlobalQueueFlag, + utils.TxPoolLifetimeFlag, + utils.CacheFlag, + utils.CacheDatabaseFlag, + utils.CacheTrieFlag, + utils.CacheTrieJournalFlag, + utils.CacheTrieRejournalFlag, + utils.CacheGCFlag, + utils.CacheSnapshotFlag, + utils.CacheNoPrefetchFlag, + utils.CachePreimagesFlag, + utils.MiningEnabledFlag, + utils.MinerThreadsFlag, + utils.MinerNotifyFlag, + utils.MinerGasTargetFlag, + utils.MinerGasLimitFlag, + utils.MinerGasPriceFlag, + utils.MinerEtherbaseFlag, + utils.MinerExtraDataFlag, + utils.MinerRecommitIntervalFlag, + utils.MinerNoVerfiyFlag, + utils.UnlockedAccountFlag, + utils.PasswordFileFlag, + utils.ExternalSignerFlag, + utils.VMEnableDebugFlag, + utils.InsecureUnlockAllowedFlag, + utils.RPCGlobalGasCapFlag, + utils.RPCGlobalTxFeeCapFlag, + utils.EthStatsURLFlag, + utils.FakePoWFlag, + utils.NoCompactionFlag, + utils.RPCClientToken, + utils.RPCClientTLSCert, + utils.RPCClientTLSCaCert, + utils.RPCClientTLSCipherSuites, + utils.RPCClientTLSInsecureSkipVerify, + utils.IPCDisabledFlag, + utils.IPCPathFlag, + utils.HTTPEnabledFlag, + utils.HTTPListenAddrFlag, + utils.HTTPPortFlag, + utils.HTTPCORSDomainFlag, + utils.HTTPVirtualHostsFlag, + utils.HTTPApiFlag, + utils.HTTPPathPrefixFlag, + utils.GraphQLEnabledFlag, + utils.GraphQLCORSDomainFlag, + utils.GraphQLVirtualHostsFlag, + utils.WSEnabledFlag, + utils.WSListenAddrFlag, + utils.WSPortFlag, + utils.WSApiFlag, + utils.WSAllowedOriginsFlag, + utils.WSPathPrefixFlag, + utils.ExecFlag, + utils.PreloadJSFlag, + utils.AllowUnprotectedTxs, + utils.MaxPeersFlag, + utils.MaxPendingPeersFlag, + utils.ListenPortFlag, + 
utils.BootnodesFlag, + utils.NodeKeyFileFlag, + utils.NodeKeyHexFlag, + utils.NATFlag, + utils.NoDiscoverFlag, + utils.DiscoveryV5Flag, + utils.NetrestrictFlag, + utils.DNSDiscoveryFlag, + utils.JSpathFlag, + utils.GpoBlocksFlag, + utils.GpoPercentileFlag, + utils.GpoMaxGasPriceFlag, + utils.MetricsEnabledFlag, + utils.MetricsEnabledExpensiveFlag, + utils.MetricsHTTPFlag, + utils.MetricsPortFlag, + utils.MetricsEnableInfluxDBFlag, + utils.MetricsInfluxDBEndpointFlag, + utils.MetricsInfluxDBDatabaseFlag, + utils.MetricsInfluxDBUsernameFlag, + utils.MetricsInfluxDBPasswordFlag, + utils.MetricsInfluxDBTagsFlag, + utils.EWASMInterpreterFlag, + utils.EVMInterpreterFlag, + utils.EVMCallTimeOutFlag, + utils.QuorumImmutabilityThreshold, + utils.RaftModeFlag, + utils.RaftBlockTimeFlag, + utils.RaftJoinExistingFlag, + utils.EmitCheckpointsFlag, + utils.RaftPortFlag, + utils.RaftDNSEnabledFlag, + utils.EnableNodePermissionFlag, + utils.AllowedFutureBlockTimeFlag, + utils.PluginSettingsFlag, + utils.PluginLocalVerifyFlag, + utils.PluginPublicKeyFlag, + utils.PluginSkipVerifyFlag, + utils.AccountPluginNewAccountConfigFlag, + utils.IstanbulRequestTimeoutFlag, + utils.IstanbulBlockPeriodFlag, + utils.MultitenancyFlag, + utils.RevertReasonFlag, + utils.PrivateCacheTrieJournalFlag, + utils.QuorumEnablePrivacyMarker, + utils.QuorumPTMUnixSocketFlag, + utils.QuorumPTMUrlFlag, + utils.QuorumPTMTimeoutFlag, + utils.QuorumPTMDialTimeoutFlag, + utils.QuorumPTMHttpIdleTimeoutFlag, + utils.QuorumPTMHttpWriteBufferSizeFlag, + utils.QuorumPTMHttpReadBufferSizeFlag, + utils.QuorumPTMTlsModeFlag, + utils.QuorumPTMTlsRootCaFlag, + utils.QuorumPTMTlsClientCertFlag, + utils.QuorumPTMTlsClientKeyFlag, + utils.QuorumPTMTlsInsecureSkipVerify, + } + nodeKeyFile, err := ioutil.TempFile("/tmp", "nodekey") + require.NoError(t, err) + defer os.Remove(nodeKeyFile.Name()) + + _, err = nodeKeyFile.WriteString("0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF\n") + require.NoError(t, err) + + err = nodeKeyFile.Close() + require.NoError(t, err) + + set := flag.NewFlagSet("dumpconfig", flag.ContinueOnError) + for _, f := range flags { + switch f := f.(type) { + case utils.DirectoryFlag: + set.String(f.Name, f.Value.String()+"/custom", f.Usage) + case cli.BoolFlag: + set.Bool(f.Name, true, f.Usage) + case cli.BoolTFlag: + set.Bool(f.Name, false, f.Usage) + case cli.StringFlag: + switch f { + case utils.BootnodesFlag: + set.String(f.Name, "", f.Usage) + case utils.GCModeFlag: + set.String(f.Name, "archive", f.Usage) + case utils.NodeKeyHexFlag: // either hex or file + // set.String(f.Name, "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF", f.Usage) // either nodeKeyHex or nodeKeyFile + case utils.NodeKeyFileFlag: + set.String(f.Name, nodeKeyFile.Name(), f.Usage) + case utils.NetrestrictFlag: + set.String(f.Name, "127.0.0.0/16, 23.23.23.23/24,", f.Usage) // TOML problem + case utils.AuthorizationListFlag: + set.String(f.Name, "1=0x0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF,2=0x0123456789ABCDEF0123456789ABCDE00123456789ABCDEF0123456789ABCDEF", f.Usage) + default: + set.String(f.Name, f.Value+"_custom", f.Usage) + } + case cli.Uint64Flag: + set.Uint64(f.Name, f.Value+10, f.Usage) + case cli.IntFlag: + set.Int(f.Name, f.Value+10, f.Usage) + case utils.TextMarshalerFlag: + set.String(f.Name, "light", f.Usage) + case cli.Int64Flag: + set.Int64(f.Name, f.Value+10, f.Usage) + case cli.DurationFlag: + set.Duration(f.Name, f.Value+5*time.Minute, f.Usage) + case utils.BigFlag: + 
set.Uint64(f.Name, f.Value.Uint64()+10, f.Usage) + case cli.Float64Flag: + set.Float64(f.Name, f.Value+0.1, f.Name) + case cli.UintFlag: + set.Uint(f.Name, f.Value+10, f.Usage) + default: + t.Log(fmt.Sprintf("unknown %t", f)) + t.Fail() + } + } + action := utils.MigrateFlags(dumpConfig) + app := &cli.App{ + Name: "dumpconfig", + Usage: "dump config", + Action: action, + } + + ctx := cli.NewContext(app, set, nil) + + out, err := ioutil.TempFile("/tmp", "gethCfg") + require.NoError(t, err) + defer out.Close() + defer os.Remove(out.Name()) + + bak := os.Stdout + defer func() { os.Stdout = bak }() + os.Stdout = out + + err = action(ctx) + require.NoError(t, err) + + out2, err := removeComment(out.Name()) + require.NoError(t, err) + defer os.Remove(out2.Name()) + + t.Log(out2.Name()) + val, err := ioutil.ReadFile(out2.Name()) + require.NoError(t, err) + t.Log(string(val)) + + cfg := &gethConfig{} + err = loadConfig(out2.Name(), cfg) + require.NoError(t, err) + + // [Eth] + eth := cfg.Eth + assert.Equal(t, uint64(1), eth.NetworkId) // mainnet true + assert.Equal(t, downloader.FastSync, eth.SyncMode) + assert.Equal(t, []string{"enrtree://AKA3AM6LPBYEUDMVNU3BSVQJ5AD45Y7YPOHJLEF6W26QOE4VTUDPE@all.mainnet.ethdisco.net"}, eth.EthDiscoveryURLs) + assert.Equal(t, false, eth.NoPruning) + assert.Equal(t, false, eth.NoPrefetch) + assert.Equal(t, 100, eth.LightPeers) + assert.Equal(t, 75, eth.UltraLightFraction) + assert.Equal(t, 768, eth.DatabaseCache) + assert.Equal(t, "", eth.DatabaseFreezer) + assert.Equal(t, 256, eth.TrieCleanCache) + assert.Equal(t, "triecache", eth.TrieCleanCacheJournal) + assert.Equal(t, time.Duration(3600000000000), eth.TrieCleanCacheRejournal) + assert.Equal(t, 256, eth.TrieDirtyCache) + assert.Equal(t, time.Duration(3600000000000), eth.TrieTimeout) + assert.Equal(t, 0, eth.SnapshotCache) + assert.Equal(t, false, eth.EnablePreimageRecording) + assert.Equal(t, "", eth.EWASMInterpreter) + assert.Equal(t, "", eth.EVMInterpreter) + assert.Equal(t, uint64(25000000), eth.RPCGasCap) + assert.Equal(t, float64(1), eth.RPCTxFeeCap) + // Quorum + assert.Equal(t, time.Duration(15000000000), eth.EVMCallTimeOut) + assert.Equal(t, "privatetriecache", eth.PrivateTrieCleanCacheJournal) + // End Quorum + + // [Eth.Miner] + miner := cfg.Eth.Miner + assert.Equal(t, uint64(700000000), miner.GasFloor) + assert.Equal(t, uint64(800000000), miner.GasCeil) + assert.Equal(t, big.NewInt(1000000000), miner.GasPrice) + assert.Equal(t, time.Duration(3000000000), miner.Recommit) + assert.Equal(t, false, miner.Noverify) + assert.Equal(t, uint64(0), miner.AllowedFutureBlockTime) + + // [Eth.GPO] + gpo := cfg.Eth.GPO + assert.Equal(t, 2, gpo.Blocks) + assert.Equal(t, 60, gpo.Percentile) + + // [Eth.TxPool] + txPool := cfg.Eth.TxPool + assert.Equal(t, []common.Address{}, txPool.Locals) + assert.Equal(t, false, txPool.NoLocals) + assert.Equal(t, "transactions.rlp", txPool.Journal) + assert.Equal(t, time.Duration(3600000000000), txPool.Rejournal) + assert.Equal(t, uint64(1), txPool.PriceLimit) + assert.Equal(t, uint64(10), txPool.PriceBump) + assert.Equal(t, uint64(16), txPool.AccountSlots) + assert.Equal(t, uint64(4096), txPool.GlobalSlots) + assert.Equal(t, uint64(64), txPool.AccountQueue) + assert.Equal(t, uint64(1024), txPool.GlobalQueue) + assert.Equal(t, time.Duration(10800000000000), txPool.Lifetime) + assert.Equal(t, uint64(64), txPool.TransactionSizeLimit) + assert.Equal(t, uint64(24), txPool.MaxCodeSize) + + // [Node] + node := cfg.Node + assert.Equal(t, "", node.DataDir) + assert.Equal(t, false, 
node.InsecureUnlockAllowed) + assert.Equal(t, false, node.NoUSB) + assert.Equal(t, "", node.IPCPath) + assert.Equal(t, "127.0.0.1", node.HTTPHost) + assert.Equal(t, 8545, node.HTTPPort) + assert.Equal(t, []string(nil), node.HTTPCors) + assert.Equal(t, []string{"localhost"}, node.HTTPVirtualHosts) + assert.Equal(t, []string{"net", "web3", "eth"}, node.HTTPModules) + assert.Equal(t, "127.0.0.1", node.WSHost) + assert.Equal(t, 8546, node.WSPort) + assert.Equal(t, []string(nil), node.WSOrigins) + assert.Equal(t, []string{"net", "web3", "eth"}, node.WSModules) + assert.Equal(t, []string(nil), node.GraphQLCors) + assert.Equal(t, []string{"localhost"}, node.GraphQLVirtualHosts) + assert.Equal(t, false, node.EnableNodePermission) + + // [Node.P2P] + p2p := cfg.Node.P2P + assert.Equal(t, 0, p2p.MaxPeers) + assert.Equal(t, true, p2p.NoDiscovery) + + assert.Equal(t, bootNodes(t).Nodes, p2p.BootstrapNodes) + //assert.Equal(t, bootNodesV5(t).Nodes, p2p.BootstrapNodesV5) + assert.Equal(t, ":0", p2p.ListenAddr) + assert.Equal(t, false, p2p.EnableMsgEvents) + + type NetRestrictType struct { + NetRestrict *netutil.Netlist + } + var netRestrict NetRestrictType + err = toml.Unmarshal([]byte(`NetRestrict = ["127.0.0.0/16", "23.23.23.0/24"]`), &netRestrict) + require.NoError(t, err) + assert.Equal(t, netRestrict.NetRestrict, p2p.NetRestrict) + + // [Node.HTTPTimeouts] + httpTimeouts := cfg.Node.HTTPTimeouts + assert.Equal(t, time.Duration(30000000000), httpTimeouts.ReadTimeout) + assert.Equal(t, time.Duration(30000000000), httpTimeouts.WriteTimeout) + assert.Equal(t, time.Duration(120000000000), httpTimeouts.IdleTimeout) + + // QUORUM + // [Eth.Istanbul] + quorumIstanbul := eth.Istanbul + assert.Equal(t, uint64(10000), quorumIstanbul.RequestTimeout) + assert.Equal(t, uint64(1), quorumIstanbul.BlockPeriod) + assert.Equal(t, uint64(30000), quorumIstanbul.Epoch) + assert.Equal(t, big.NewInt(0), quorumIstanbul.Ceil2Nby3Block) + assert.Equal(t, istanbul.RoundRobin, quorumIstanbul.ProposerPolicy.Id) // conflict with genesis? 
+ // END QUORUM +} + +type BootNodesV5Type struct { + Nodes []*enode.Node +} + +func bootNodesV5(t *testing.T) BootNodesV5Type { + var bootNodesV5 BootNodesV5Type + err := toml.Unmarshal([]byte(`Nodes = ["enode://30b7ab30a01c124a6cceca36863ece12c4f5fa68e3ba9b0b51407ccc002eeed3b3102d20a88f1c1d3c3154e2449317b8ef95090e77b312d5cc39354f86d5d606@52.176.7.10:30303", "enode://865a63255b3bb68023b6bffd5095118fcc13e79dcf014fe4e47e065c350c7cc72af2e53eff895f11ba1bbb6a2b33271c1116ee870f266618eadfc2e78aa7349c@52.176.100.77:30303", "enode://6332792c4a00e3e4ee0926ed89e0d27ef985424d97b6a45bf0f23e51f0dcb5e66b875777506458aea7af6f9e4ffb69f43f3778ee73c81ed9d34c51c4b16b0b0f@52.232.243.152:30303", "enode://94c15d1b9e2fe7ce56e458b9a3b672ef11894ddedd0c6f247e0f1d3487f52b66208fb4aeb8179fce6e3a749ea93ed147c37976d67af557508d199d9594c35f09@192.81.208.223:30303"]`), &bootNodesV5) + require.NoError(t, err) + return bootNodesV5 +} + +type BootNodesType struct { + Nodes []*enode.Node +} + +func bootNodes(t *testing.T) BootNodesType { + var bootNodes BootNodesType + err := toml.Unmarshal([]byte(`Nodes = ["enode://30b7ab30a01c124a6cceca36863ece12c4f5fa68e3ba9b0b51407ccc002eeed3b3102d20a88f1c1d3c3154e2449317b8ef95090e77b312d5cc39354f86d5d606@52.176.7.10:30303", "enode://865a63255b3bb68023b6bffd5095118fcc13e79dcf014fe4e47e065c350c7cc72af2e53eff895f11ba1bbb6a2b33271c1116ee870f266618eadfc2e78aa7349c@52.176.100.77:30303", "enode://6332792c4a00e3e4ee0926ed89e0d27ef985424d97b6a45bf0f23e51f0dcb5e66b875777506458aea7af6f9e4ffb69f43f3778ee73c81ed9d34c51c4b16b0b0f@52.232.243.152:30303", "enode://94c15d1b9e2fe7ce56e458b9a3b672ef11894ddedd0c6f247e0f1d3487f52b66208fb4aeb8179fce6e3a749ea93ed147c37976d67af557508d199d9594c35f09@192.81.208.223:30303"]`), &bootNodes) + require.NoError(t, err) + return bootNodes +} + +func removeComment(name string) (*os.File, error) { + file, err := ioutil.ReadFile(name) + if err != nil { + return nil, fmt.Errorf("read file: %w", err) + } + out, err := ioutil.TempFile("/tmp", "gethCfg") + if err != nil { + return nil, fmt.Errorf("create temp file: %w", err) + } + defer out.Close() + text := string(file) + lines := strings.Split(text, "\n") + first := true + for _, line := range lines { + if strings.Index(line, "#") != 0 && !(first && line == "") { + line = strings.Replace(line, "e+00", ".0", 1) + line = strings.Replace(line, "[[", "[", 1) + line = strings.Replace(line, "]]", "]", 1) + _, err = out.WriteString(line + "\n") + if err != nil { + return nil, fmt.Errorf("write line: %w", err) + } + first = false + } + } + return out, nil +} + +func TestLoadAndDumpGethConfig(t *testing.T) { + out, err := ioutil.TempFile("/tmp", "gethCfg") + require.NoError(t, err) + defer out.Close() + _, err = out.WriteString(`[Eth] +NetworkId = 1337 +SyncMode = "full" +EthDiscoveryURLs = ["enrtree://AKA3AM6LPBYEUDMVNU3BSVQJ5AD45Y7YPOHJLEF6W26QOE4VTUDPE@all.mainnet.ethdisco.net"] +SnapDiscoveryURLs = [] +NoPruning = false +NoPrefetch = false +LightPeers = 100 +UltraLightFraction = 75 +DatabaseCache = 768 +DatabaseFreezer = "" +TrieCleanCache = 256 +TrieCleanCacheJournal = "triecache-test" +TrieCleanCacheRejournal = 3600000000000 +TrieDirtyCache = 256 +TrieTimeout = 3600000000000 +SnapshotCache = 0 +Preimages = true +EnablePreimageRecording = false +EWASMInterpreter = "" +EVMInterpreter = "" +RPCGasCap = 25000000 +RPCTxFeeCap = 1e+00 +RaftMode = true +EnableNodePermission = true +EVMCallTimeOut = 3600000000000 +PrivateTrieCleanCacheJournal = "privatetriecache-test" + +[Eth.Miner] +GasFloor = 700000000 +GasCeil = 800000000 +GasPrice = 0 
+Recommit = 3000000000 +Noverify = false +AllowedFutureBlockTime = 0 + +[Eth.GPO] +Blocks = 20 +Percentile = 60 +MaxPrice = 500000000000 + +[Eth.TxPool] +Locals = [] +NoLocals = false +Journal = "transactions.rlp" +Rejournal = 3600000000000 +PriceLimit = 1 +PriceBump = 10 +AccountSlots = 16 +GlobalSlots = 4096 +AccountQueue = 64 +GlobalQueue = 1024 +Lifetime = 10800000000000 +TransactionSizeLimit = 64 +MaxCodeSize = 24 + +[Eth.Istanbul] +RequestTimeout = 10000 +BlockPeriod = 5 +ProposerPolicy = "id = 0\n" +Epoch = 30000 +Ceil2Nby3Block = 0 +TestQBFTBlock = 0 + +[Node] +UserIdent = "_custom" +DataDir = "/data" +RaftLogDir = "" +InsecureUnlockAllowed = true +NoUSB = true +IPCPath = "geth.ipc" +HTTPHost = "0.0.0.0" +HTTPPort = 8545 +HTTPCors = ["'*'"] +HTTPVirtualHosts = ["'*'"] +HTTPModules = ["admin", "db", "eth", "debug", "miner", "net", "txpool", "personal", "web3", "quorum", "istanbul"] +WSHost = "0.0.0.0" +WSPort = 8546 +WSOrigins = ["'*'"] +WSModules = ["admin", "db", "eth", "debug", "miner", "net", "txpool", "personal", "web3", "quorum", "istanbul"] +GraphQLCors = ["'*'"] +GraphQLVirtualHosts = ["'*'"] +EnableNodePermission = true + +[Node.P2P] +MaxPeers = 50 +NoDiscovery = true +BootstrapNodes = ["enode://30b7ab30a01c124a6cceca36863ece12c4f5fa68e3ba9b0b51407ccc002eeed3b3102d20a88f1c1d3c3154e2449317b8ef95090e77b312d5cc39354f86d5d606@52.176.7.10:30303", "enode://865a63255b3bb68023b6bffd5095118fcc13e79dcf014fe4e47e065c350c7cc72af2e53eff895f11ba1bbb6a2b33271c1116ee870f266618eadfc2e78aa7349c@52.176.100.77:30303", "enode://6332792c4a00e3e4ee0926ed89e0d27ef985424d97b6a45bf0f23e51f0dcb5e66b875777506458aea7af6f9e4ffb69f43f3778ee73c81ed9d34c51c4b16b0b0f@52.232.243.152:30303", "enode://94c15d1b9e2fe7ce56e458b9a3b672ef11894ddedd0c6f247e0f1d3487f52b66208fb4aeb8179fce6e3a749ea93ed147c37976d67af557508d199d9594c35f09@192.81.208.223:30303"] +BootstrapNodesV5 = ["enode://30b7ab30a01c124a6cceca36863ece12c4f5fa68e3ba9b0b51407ccc002eeed3b3102d20a88f1c1d3c3154e2449317b8ef95090e77b312d5cc39354f86d5d606@52.176.7.10:30303", "enode://865a63255b3bb68023b6bffd5095118fcc13e79dcf014fe4e47e065c350c7cc72af2e53eff895f11ba1bbb6a2b33271c1116ee870f266618eadfc2e78aa7349c@52.176.100.77:30303", "enode://6332792c4a00e3e4ee0926ed89e0d27ef985424d97b6a45bf0f23e51f0dcb5e66b875777506458aea7af6f9e4ffb69f43f3778ee73c81ed9d34c51c4b16b0b0f@52.232.243.152:30303", "enode://94c15d1b9e2fe7ce56e458b9a3b672ef11894ddedd0c6f247e0f1d3487f52b66208fb4aeb8179fce6e3a749ea93ed147c37976d67af557508d199d9594c35f09@192.81.208.223:30303"] +StaticNodes = [] +TrustedNodes = [] +NetRestrict = ["127.0.0.0/16", "23.23.23.0/24"] +ListenAddr = ":30303" +EnableMsgEvents = false + +[Node.HTTPTimeouts] +ReadTimeout = 30000000000 +WriteTimeout = 30000000000 +IdleTimeout = 120000000000 + +[Metrics] +HTTP = "127.0.0.1" +Port = 6060 +InfluxDBEndpoint = "http://localhost:8086" +InfluxDBDatabase = "geth" +InfluxDBUsername = "test" +InfluxDBPassword = "test" +InfluxDBTags = "host=localhost" +`) + require.NoError(t, err) + err = out.Close() + require.NoError(t, err) + cfg := &gethConfig{} + + err = loadConfig(out.Name(), cfg) + require.NoError(t, err) + + testConfig(t, cfg) + + out, err = ioutil.TempFile("/tmp", "gethCfg") + require.NoError(t, err) + + err = tomlSettings.NewEncoder(out).Encode(cfg) + require.NoError(t, err) + + cfg = &gethConfig{} + err = loadConfig(out.Name(), cfg) + require.NoError(t, err) + + testConfig(t, cfg) +} + +func testConfig(t *testing.T, cfg *gethConfig) { + // [Eth] + eth := cfg.Eth + assert.Equal(t, uint64(1337), eth.NetworkId) + 
assert.Equal(t, downloader.FullSync, eth.SyncMode) + assert.Equal(t, []string{"enrtree://AKA3AM6LPBYEUDMVNU3BSVQJ5AD45Y7YPOHJLEF6W26QOE4VTUDPE@all.mainnet.ethdisco.net"}, eth.EthDiscoveryURLs) + assert.Equal(t, false, eth.NoPruning) + assert.Equal(t, false, eth.NoPrefetch) + assert.Equal(t, 100, eth.LightPeers) + assert.Equal(t, 75, eth.UltraLightFraction) + assert.Equal(t, 768, eth.DatabaseCache) + assert.Equal(t, "", eth.DatabaseFreezer) + assert.Equal(t, 256, eth.TrieCleanCache) + assert.Equal(t, "triecache-test", eth.TrieCleanCacheJournal) + assert.Equal(t, time.Duration(3600000000000), eth.TrieCleanCacheRejournal) + assert.Equal(t, 256, eth.TrieDirtyCache) + assert.Equal(t, time.Duration(3600000000000), eth.TrieTimeout) + assert.Equal(t, 0, eth.SnapshotCache) + assert.Equal(t, false, eth.EnablePreimageRecording) + assert.Equal(t, "", eth.EWASMInterpreter) + assert.Equal(t, "", eth.EVMInterpreter) + assert.Equal(t, uint64(25000000), eth.RPCGasCap) + assert.Equal(t, float64(1), eth.RPCTxFeeCap) + // Quorum + assert.Equal(t, time.Duration(3600000000000), eth.EVMCallTimeOut) + assert.Equal(t, "privatetriecache-test", eth.PrivateTrieCleanCacheJournal) + assert.Equal(t, true, eth.EnableNodePermission) + // End Quorum + + // [Eth.Miner] + miner := eth.Miner + assert.Equal(t, uint64(700000000), miner.GasFloor) + assert.Equal(t, uint64(800000000), miner.GasCeil) + assert.Equal(t, big.NewInt(0), miner.GasPrice) + assert.Equal(t, time.Duration(3000000000), miner.Recommit) + assert.Equal(t, false, miner.Noverify) + assert.Equal(t, uint64(0), miner.AllowedFutureBlockTime) + + // [Eth.GPO] + gpo := eth.GPO + assert.Equal(t, 20, gpo.Blocks) + assert.Equal(t, 60, gpo.Percentile) + + // [Eth.TxPool] + txPool := eth.TxPool + assert.Equal(t, []common.Address{}, txPool.Locals) + assert.Equal(t, false, txPool.NoLocals) + assert.Equal(t, "transactions.rlp", txPool.Journal) + assert.Equal(t, time.Duration(3600000000000), txPool.Rejournal) + assert.Equal(t, uint64(1), txPool.PriceLimit) + assert.Equal(t, uint64(10), txPool.PriceBump) + assert.Equal(t, uint64(16), txPool.AccountSlots) + assert.Equal(t, uint64(4096), txPool.GlobalSlots) + assert.Equal(t, uint64(64), txPool.AccountQueue) + assert.Equal(t, uint64(1024), txPool.GlobalQueue) + assert.Equal(t, time.Duration(10800000000000), txPool.Lifetime) + assert.Equal(t, uint64(64), txPool.TransactionSizeLimit) + assert.Equal(t, uint64(24), txPool.MaxCodeSize) + + // [Node] + node := cfg.Node + assert.Equal(t, "/data", node.DataDir) + assert.Equal(t, true, node.InsecureUnlockAllowed) + assert.Equal(t, true, node.NoUSB) + assert.Equal(t, "geth.ipc", node.IPCPath) + assert.Equal(t, "0.0.0.0", node.HTTPHost) + assert.Equal(t, 8545, node.HTTPPort) + assert.Equal(t, []string{"'*'"}, node.HTTPCors) + assert.Equal(t, []string{"'*'"}, node.HTTPVirtualHosts) + assert.Equal(t, []string{"admin", "db", "eth", "debug", "miner", "net", "txpool", "personal", "web3", "quorum", "istanbul"}, node.HTTPModules) + assert.Equal(t, "0.0.0.0", node.WSHost) + assert.Equal(t, 8546, node.WSPort) + assert.Equal(t, []string{"'*'"}, node.WSOrigins) + assert.Equal(t, []string{"admin", "db", "eth", "debug", "miner", "net", "txpool", "personal", "web3", "quorum", "istanbul"}, node.WSModules) + assert.Equal(t, []string{"'*'"}, node.GraphQLCors) + assert.Equal(t, []string{"'*'"}, node.GraphQLVirtualHosts) + assert.Equal(t, true, node.EnableNodePermission) + + // [Node.P2P] + p2p := cfg.Node.P2P + assert.Equal(t, 50, p2p.MaxPeers) + assert.Equal(t, true, p2p.NoDiscovery) + assert.Equal(t, 
bootNodes(t).Nodes, p2p.BootstrapNodes) + assert.Equal(t, bootNodesV5(t).Nodes, p2p.BootstrapNodesV5) + + /*assert.Equal(t, []*enode.Node{}, p2p.BootstrapNodes) + if p2p.BootstrapNodesV5 != nil { + assert.Equal(t, []*enode.Node{}, p2p.BootstrapNodesV5) + }*/ + assert.Equal(t, ":30303", p2p.ListenAddr) + assert.Equal(t, false, p2p.EnableMsgEvents) + + // [Node.HTTPTimeouts] + httpTimeouts := cfg.Node.HTTPTimeouts + assert.Equal(t, time.Duration(30000000000), httpTimeouts.ReadTimeout) + assert.Equal(t, time.Duration(30000000000), httpTimeouts.WriteTimeout) + assert.Equal(t, time.Duration(120000000000), httpTimeouts.IdleTimeout) + + // QUORUM + // [Eth.Quorum.Istanbul] + istanbul := cfg.Eth.Istanbul + assert.Equal(t, uint64(10000), istanbul.RequestTimeout) + assert.Equal(t, uint64(5), istanbul.BlockPeriod) + assert.Equal(t, uint64(30000), istanbul.Epoch) + assert.Equal(t, big.NewInt(0), istanbul.Ceil2Nby3Block) + // END QUORUM +} diff --git a/src/cmd/geth/consolecmd.go b/src/cmd/geth/consolecmd.go new file mode 100644 index 00000000..75ca1e0f --- /dev/null +++ b/src/cmd/geth/consolecmd.go @@ -0,0 +1,353 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/console" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/plugin/security" + "github.com/ethereum/go-ethereum/rpc" + "gopkg.in/urfave/cli.v1" +) + +var ( + consoleFlags = []cli.Flag{utils.JSpathFlag, utils.ExecFlag, utils.PreloadJSFlag} + rpcClientFlags = []cli.Flag{utils.RPCClientToken, utils.RPCClientTLSCert, utils.RPCClientTLSCaCert, utils.RPCClientTLSCipherSuites, utils.RPCClientTLSInsecureSkipVerify} + + consoleCommand = cli.Command{ + Action: utils.MigrateFlags(localConsole), + Name: "console", + Usage: "Start an interactive JavaScript environment", + Flags: append(append(nodeFlags, rpcFlags...), consoleFlags...), + Category: "CONSOLE COMMANDS", + Description: ` +The Geth console is an interactive shell for the JavaScript runtime environment +which exposes a node admin interface as well as the Ðapp JavaScript API. +See https://geth.ethereum.org/docs/interface/javascript-console.`, + } + + attachCommand = cli.Command{ + Action: utils.MigrateFlags(remoteConsole), + Name: "attach", + Usage: "Start an interactive JavaScript environment (connect to node)", + ArgsUsage: "[endpoint]", + Flags: append(append(consoleFlags, utils.DataDirFlag), rpcClientFlags...), + Category: "CONSOLE COMMANDS", + Description: ` +The Geth console is an interactive shell for the JavaScript runtime environment +which exposes a node admin interface as well as the Ðapp JavaScript API. 
+See https://geth.ethereum.org/docs/interface/javascript-console. +This command allows to open a console on a running geth node.`, + } + + javascriptCommand = cli.Command{ + Action: utils.MigrateFlags(ephemeralConsole), + Name: "js", + Usage: "Execute the specified JavaScript files", + ArgsUsage: " [jsfile...]", + Flags: append(nodeFlags, consoleFlags...), + Category: "CONSOLE COMMANDS", + Description: ` +The JavaScript VM exposes a node admin interface as well as the Ðapp +JavaScript API. See https://geth.ethereum.org/docs/interface/javascript-console`, + } +) + +// Quorum +// +// read tls client configuration from command line arguments +// +// only for HTTPS or WSS +func readTLSClientConfig(endpoint string, ctx *cli.Context) (*tls.Config, bool, error) { + if !strings.HasPrefix(endpoint, "https://") && !strings.HasPrefix(endpoint, "wss://") { + return nil, false, nil + } + hasCustomTls := false + insecureSkipVerify := ctx.Bool(utils.RPCClientTLSInsecureSkipVerify.Name) + tlsConfig := &tls.Config{ + InsecureSkipVerify: insecureSkipVerify, + } + var certFile, caFile string + if !insecureSkipVerify { + var certPem, caPem []byte + certFile, caFile = ctx.String(utils.RPCClientTLSCert.Name), ctx.String(utils.RPCClientTLSCaCert.Name) + var err error + if certFile != "" { + if certPem, err = ioutil.ReadFile(certFile); err != nil { + return nil, true, err + } + } + if caFile != "" { + if caPem, err = ioutil.ReadFile(caFile); err != nil { + return nil, true, err + } + } + if len(certPem) != 0 || len(caPem) != 0 { + certPool, err := x509.SystemCertPool() + if err != nil { + certPool = x509.NewCertPool() + } + if len(certPem) != 0 { + certPool.AppendCertsFromPEM(certPem) + } + if len(caPem) != 0 { + certPool.AppendCertsFromPEM(caPem) + } + tlsConfig.RootCAs = certPool + hasCustomTls = true + } + } else { + hasCustomTls = true + } + cipherSuitesInput := ctx.String(utils.RPCClientTLSCipherSuites.Name) + cipherSuitesStrings := strings.FieldsFunc(cipherSuitesInput, func(r rune) bool { + return r == ',' + }) + if len(cipherSuitesStrings) > 0 { + cipherSuiteList := make(security.CipherSuiteList, len(cipherSuitesStrings)) + for i, s := range cipherSuitesStrings { + cipherSuiteList[i] = security.CipherSuite(strings.TrimSpace(s)) + } + cipherSuites, err := cipherSuiteList.ToUint16Array() + if err != nil { + return nil, true, err + } + tlsConfig.CipherSuites = cipherSuites + hasCustomTls = true + } + if !hasCustomTls { + return nil, false, nil + } + return tlsConfig, hasCustomTls, nil +} + +// localConsole starts a new geth node, attaching a JavaScript console to it at the +// same time. 
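readTLSClientConfig above builds a client-side tls.Config from CLI flags, extending the system certificate pool with caller-supplied PEM material. A minimal standalone sketch of that pattern (illustrative only, not part of this change; the "ca.pem" path is a placeholder):

package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	caPem, err := ioutil.ReadFile("ca.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	pool, err := x509.SystemCertPool()
	if err != nil {
		pool = x509.NewCertPool() // fall back to an empty pool, as readTLSClientConfig does
	}
	pool.AppendCertsFromPEM(caPem)
	httpClient := &http.Client{
		Transport: &http.Transport{TLSClientConfig: &tls.Config{RootCAs: pool}},
	}
	_ = httpClient // hand a client like this to the HTTPS RPC dialer
}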
+func localConsole(ctx *cli.Context) error { + // Create and start the node based on the CLI flags + prepare(ctx) + stack, backend := makeFullNode(ctx) + startNode(ctx, stack, backend) + defer stack.Close() + + // Attach to the newly started node and start the JavaScript console + client, err := stack.Attach() + if err != nil { + utils.Fatalf("Failed to attach to the inproc geth: %v", err) + } + config := console.Config{ + DataDir: utils.MakeDataDir(ctx), + DocRoot: ctx.GlobalString(utils.JSpathFlag.Name), + Client: client, + Preload: utils.MakeConsolePreloads(ctx), + } + + console, err := console.New(config) + if err != nil { + utils.Fatalf("Failed to start the JavaScript console: %v", err) + } + defer console.Stop(false) + + // If only a short execution was requested, evaluate and return + if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" { + console.Evaluate(script) + return nil + } + // Otherwise print the welcome screen and enter interactive mode + console.Welcome() + console.Interactive() + + return nil +} + +// remoteConsole will connect to a remote geth instance, attaching a JavaScript +// console to it. +func remoteConsole(ctx *cli.Context) error { + // Attach to a remotely running geth instance and start the JavaScript console + endpoint := ctx.Args().First() + if endpoint == "" { + path := node.DefaultDataDir() + if ctx.GlobalIsSet(utils.DataDirFlag.Name) { + path = ctx.GlobalString(utils.DataDirFlag.Name) + } + if path != "" { + if ctx.GlobalBool(utils.RopstenFlag.Name) { + // Maintain compatibility with older Geth configurations storing the + // Ropsten database in `testnet` instead of `ropsten`. + legacyPath := filepath.Join(path, "testnet") + if _, err := os.Stat(legacyPath); !os.IsNotExist(err) { + path = legacyPath + } else { + path = filepath.Join(path, "ropsten") + } + } else if ctx.GlobalBool(utils.RinkebyFlag.Name) { + path = filepath.Join(path, "rinkeby") + } else if ctx.GlobalBool(utils.GoerliFlag.Name) { + path = filepath.Join(path, "goerli") + } else if ctx.GlobalBool(utils.YoloV3Flag.Name) { + path = filepath.Join(path, "yolo-v3") + } + } + endpoint = fmt.Sprintf("%s/geth.ipc", path) + } + client, err := dialRPC(endpoint, ctx) + if err != nil { + utils.Fatalf("Unable to attach to remote geth: %v", err) + } + config := console.Config{ + DataDir: utils.MakeDataDir(ctx), + DocRoot: ctx.GlobalString(utils.JSpathFlag.Name), + Client: client, + Preload: utils.MakeConsolePreloads(ctx), + } + + consl, err := console.New(config) + if err != nil { + utils.Fatalf("Failed to start the JavaScript console: %v", err) + } + defer consl.Stop(false) + + if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" { + consl.Evaluate(script) + return nil + } + + // Otherwise print the welcome screen and enter interactive mode + consl.Welcome() + consl.Interactive() + + return nil +} + +// dialRPC returns a RPC client which connects to the given endpoint. +// The check for empty endpoint implements the defaulting logic +// for "geth attach" and "geth monitor" with no argument. +// +// Quorum: passing the cli context to build security-aware client: +// 1. Custom TLS configuration +// 2. Access Token awareness via rpc.HttpCredentialsProviderFunc +// 3. 
PSI awareness from environment variable and endpoint query param +func dialRPC(endpoint string, ctx *cli.Context) (*rpc.Client, error) { + if endpoint == "" { + endpoint = node.DefaultIPCEndpoint(clientIdentifier) + } else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") { + // Backwards compatibility with geth < 1.5 which required + // these prefixes. + endpoint = endpoint[4:] + } + var ( + client *rpc.Client + err error + dialCtx = context.Background() + ) + tlsConfig, hasCustomTls, tlsReadErr := readTLSClientConfig(endpoint, ctx) + if tlsReadErr != nil { + return nil, tlsReadErr + } + if token := ctx.String(utils.RPCClientToken.Name); token != "" { + var f rpc.HttpCredentialsProviderFunc = func(ctx context.Context) (string, error) { + return token, nil + } + // it's important that f MUST BE OF TYPE rpc.HttpCredentialsProviderFunc + dialCtx = rpc.WithCredentialsProvider(dialCtx, f) + } + if hasCustomTls { + u, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + switch u.Scheme { + case "https": + customHttpClient := &http.Client{ + Transport: http.DefaultTransport, + } + customHttpClient.Transport.(*http.Transport).TLSClientConfig = tlsConfig + client, _ = rpc.DialHTTPWithClient(endpoint, customHttpClient) + case "wss": + client, _ = rpc.DialWebsocketWithCustomTLS(dialCtx, endpoint, "", tlsConfig) + default: + log.Warn("unsupported scheme for custom TLS which is only for HTTPS/WSS", "scheme", u.Scheme) + client, _ = rpc.DialContext(dialCtx, endpoint) + } + } else { + client, err = rpc.DialContext(dialCtx, endpoint) + } + if err != nil { + return nil, err + } + // enrich clients with provider functions to populate HTTP request header + if f := rpc.CredentialsProviderFromContext(dialCtx); f != nil { + client = client.WithHTTPCredentials(f) + } + return client, nil +} + +// ephemeralConsole starts a new geth node, attaches an ephemeral JavaScript +// console to it, executes each of the files specified as arguments and tears +// everything down. +func ephemeralConsole(ctx *cli.Context) error { + // Create and start the node based on the CLI flags + stack, backend := makeFullNode(ctx) + startNode(ctx, stack, backend) + defer stack.Close() + + // Attach to the newly started node and start the JavaScript console + client, err := stack.Attach() + if err != nil { + utils.Fatalf("Failed to attach to the inproc geth: %v", err) + } + config := console.Config{ + DataDir: utils.MakeDataDir(ctx), + DocRoot: ctx.GlobalString(utils.JSpathFlag.Name), + Client: client, + Preload: utils.MakeConsolePreloads(ctx), + } + + console, err := console.New(config) + if err != nil { + utils.Fatalf("Failed to start the JavaScript console: %v", err) + } + defer console.Stop(false) + + // Evaluate each of the specified JavaScript files + for _, file := range ctx.Args() { + if err = console.Execute(file); err != nil { + utils.Fatalf("Failed to execute %s: %v", file, err) + } + } + + go func() { + stack.Wait() + console.Stop(false) + }() + console.Stop(true) + + return nil +} diff --git a/src/cmd/geth/consolecmd_test.go b/src/cmd/geth/consolecmd_test.go new file mode 100644 index 00000000..76ebbd32 --- /dev/null +++ b/src/cmd/geth/consolecmd_test.go @@ -0,0 +1,322 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of go-ethereum. 
+// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "crypto/rand" + "crypto/tls" + "flag" + "io/ioutil" + "math/big" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/params" + testifyassert "github.com/stretchr/testify/assert" + "gopkg.in/urfave/cli.v1" +) + +const ( + ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 istanbul:1.0 miner:1.0 net:1.0 personal:1.0 rpc:1.0 txpool:1.0 web3:1.0" + httpAPIs = "admin:1.0 eth:1.0 net:1.0 rpc:1.0 web3:1.0" + nodeKey = "b68c0338aa4b266bf38ebe84c6199ae9fac8b29f32998b3ed2fbeafebe8d65c9" +) + +var genesis = `{ + "config": { + "chainId": 2017, + "homesteadBlock": 1, + "eip150Block": 2, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 3, + "eip158Block": 3, + "istanbul": { + "epoch": 30000, + "policy": 0 + } + }, + "nonce": "0x0", + "timestamp": "0x0", + "gasLimit": "0x47b760", + "difficulty": "0x1", + "mixHash": "0x63746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": { + "491937757d1b26e29c507b8d4c0b233c2747e68d": { + "balance": "0x446c3b15f9926687d2c40534fdb564000000000000" + } + }, + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000" +} +` + +// spawns geth with the given command line args, using a set of flags to minimise +// memory and disk IO. If the args don't set --datadir, the +// child g gets a temporary data directory. +func runMinimalGeth(t *testing.T, args ...string) *testgeth { + // --ropsten to make the 'writing genesis to disk' faster (no accounts): it is disabled for Quorum compatibility purpose + // --networkid=1337 to avoid cache bump + // --syncmode=full to avoid allocating fast sync bloom + allArgs := []string{ /*"--ropsten",*/ "--networkid", "1337", "--syncmode=full", "--port", "0", + "--nat", "none", "--nodiscover", "--maxpeers", "0", "--cache", "64"} + return runGeth(t, append(allArgs, args...)...) +} + +// Tests that a node embedded within a console can be started up properly and +// then terminated by closing the input stream. 
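The genesis literal above is consumed by setupIstanbul further down via geth init. A standalone sketch (illustrative only, not part of this change; the JSON below is a trimmed copy of the literal) that sanity-checks the document parses and carries the expected chainId:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

const genesisJSON = `{"config":{"chainId":2017,"istanbul":{"epoch":30000,"policy":0}},"difficulty":"0x1","gasLimit":"0x47b760"}`

func main() {
	var g struct {
		Config struct {
			ChainID int `json:"chainId"`
		} `json:"config"`
	}
	if err := json.Unmarshal([]byte(genesisJSON), &g); err != nil {
		log.Fatal(err)
	}
	fmt.Println("chainId:", g.Config.ChainID) // chainId: 2017
}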
+func TestConsoleWelcome(t *testing.T) { + defer SetResetPrivateConfig("ignore")() + coinbase := "0x491937757d1b26e29c507b8d4c0b233c2747e68d" + + datadir := setupIstanbul(t) + defer os.RemoveAll(datadir) + + // Start a geth console, make sure it's cleaned up and terminate the console + geth := runMinimalGeth(t, "--datadir", datadir, "--miner.etherbase", coinbase, "console") + + // Gather all the infos the welcome message needs to contain + geth.SetTemplateFunc("goos", func() string { return runtime.GOOS }) + geth.SetTemplateFunc("goarch", func() string { return runtime.GOARCH }) + geth.SetTemplateFunc("gover", runtime.Version) + geth.SetTemplateFunc("gethver", func() string { return params.VersionWithMeta }) + geth.SetTemplateFunc("quorumver", func() string { return params.QuorumVersion }) + geth.SetTemplateFunc("niltime", func() string { + return time.Unix(0, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)") + }) + geth.SetTemplateFunc("apis", func() string { return ipcAPIs }) + + // Verify the actual welcome message to the required template + geth.Expect(` +Welcome to the Geth JavaScript console! + +instance: Geth/v{{gethver}}(quorum-v{{quorumver}})/{{goos}}-{{goarch}}/{{gover}} +coinbase: {{.Etherbase}} +at block: 0 ({{niltime}}) + datadir: {{.Datadir}} + modules: {{apis}} + +To exit, press ctrl-d +> {{.InputLine "exit"}} +`) + geth.ExpectExit() +} + +// Tests that a console can be attached to a running node via various means. +func TestAttachWelcome(t *testing.T) { + var ( + ipc string + httpPort string + wsPort string + ) + defer SetResetPrivateConfig("ignore")() + // Configure the instance for IPC attachment + coinbase := "0x491937757d1b26e29c507b8d4c0b233c2747e68d" + + datadir := setupIstanbul(t) + defer os.RemoveAll(datadir) + if runtime.GOOS == "windows" { + ipc = `\\.\pipe\geth` + strconv.Itoa(trulyRandInt(100000, 999999)) + } else { + ipc = filepath.Join(datadir, "geth.ipc") + } + // And HTTP + WS attachment + p := trulyRandInt(1024, 65533) // Yeah, sometimes this will fail, sorry :P + httpPort = strconv.Itoa(p) + wsPort = strconv.Itoa(p + 1) + geth := runMinimalGeth(t, "--datadir", datadir, "--miner.etherbase", coinbase, + "--ipcpath", ipc, + "--http", "--http.port", httpPort, "--http.api", "admin,eth,net,web3", + "--ws", "--ws.port", wsPort, "--ws.api", "admin,eth,net,web3") + t.Run("ipc", func(t *testing.T) { + waitForEndpoint(t, ipc, 3*time.Second) + testAttachWelcome(t, geth, "ipc:"+ipc, ipcAPIs) + }) + t.Run("http", func(t *testing.T) { + endpoint := "http://127.0.0.1:" + httpPort + waitForEndpoint(t, endpoint, 3*time.Second) + testAttachWelcome(t, geth, endpoint, httpAPIs) + }) + t.Run("ws", func(t *testing.T) { + endpoint := "ws://127.0.0.1:" + wsPort + waitForEndpoint(t, endpoint, 3*time.Second) + testAttachWelcome(t, geth, endpoint, httpAPIs) + }) +} + +func TestHTTPAttachWelcome(t *testing.T) { + defer SetResetPrivateConfig("ignore")() + coinbase := "0x491937757d1b26e29c507b8d4c0b233c2747e68d" + port := strconv.Itoa(trulyRandInt(1024, 65536)) // Yeah, sometimes this will fail, sorry :P + + datadir := setupIstanbul(t) + defer os.RemoveAll(datadir) + + geth := runGeth(t, + "--datadir", datadir, "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", + "--miner.etherbase", coinbase, "--http", "--http.port", port, "--http.api", "admin,eth,net,web3") + + endpoint := "http://127.0.0.1:" + port + waitForEndpoint(t, endpoint, 3*time.Second) + testAttachWelcome(t, geth, endpoint, httpAPIs) +} + +func TestWSAttachWelcome(t *testing.T) { + defer 
SetResetPrivateConfig("ignore")() + coinbase := "0x491937757d1b26e29c507b8d4c0b233c2747e68d" + port := strconv.Itoa(trulyRandInt(1024, 65536)) // Yeah, sometimes this will fail, sorry :P + + datadir := setupIstanbul(t) + defer os.RemoveAll(datadir) + + geth := runGeth(t, + "--datadir", datadir, "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", + "--miner.etherbase", coinbase, "--ws", "--ws.port", port, "--ws.api", "admin,eth,net,web3") + + endpoint := "ws://127.0.0.1:" + port + waitForEndpoint(t, endpoint, 3*time.Second) + testAttachWelcome(t, geth, endpoint, httpAPIs) +} + +func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) { + // Attach to a running geth note and terminate immediately + attach := runGeth(t, "attach", endpoint) + defer attach.ExpectExit() + attach.CloseStdin() + + // Gather all the infos the welcome message needs to contain + attach.SetTemplateFunc("goos", func() string { return runtime.GOOS }) + attach.SetTemplateFunc("goarch", func() string { return runtime.GOARCH }) + attach.SetTemplateFunc("gover", runtime.Version) + attach.SetTemplateFunc("gethver", func() string { return params.VersionWithMeta }) + attach.SetTemplateFunc("quorumver", func() string { return params.QuorumVersion }) + attach.SetTemplateFunc("etherbase", func() string { return geth.Etherbase }) + attach.SetTemplateFunc("niltime", func() string { + return time.Unix(0, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)") + }) + attach.SetTemplateFunc("ipc", func() bool { + return strings.HasPrefix(endpoint, "ipc") || strings.Contains(apis, "admin") + }) + attach.SetTemplateFunc("datadir", func() string { return geth.Datadir }) + attach.SetTemplateFunc("apis", func() string { return apis }) + + // Verify the actual welcome message to the required template + attach.Expect(` +Welcome to the Geth JavaScript console! + +instance: Geth/v{{gethver}}(quorum-v{{quorumver}})/{{goos}}-{{goarch}}/{{gover}} +coinbase: {{etherbase}} +at block: 0 ({{niltime}}){{if ipc}} + datadir: {{datadir}}{{end}} + modules: {{apis}} + +To exit, press ctrl-d +> {{.InputLine "exit" }} +`) + attach.ExpectExit() +} + +// trulyRandInt generates a crypto random integer used by the console tests to +// not clash network ports with other tests running cocurrently. +func trulyRandInt(lo, hi int) int { + num, _ := rand.Int(rand.Reader, big.NewInt(int64(hi-lo))) + return int(num.Int64()) + lo +} + +// setupIstanbul creates a temporary directory and copies nodekey and genesis.json. 
+// It initializes istanbul by calling geth init +func setupIstanbul(t *testing.T) string { + datadir := tmpdir(t) + gethPath := filepath.Join(datadir, "geth") + os.Mkdir(gethPath, 0700) + + // Initialize the data directory with the custom genesis block + json := filepath.Join(datadir, "genesis.json") + if err := ioutil.WriteFile(json, []byte(genesis), 0600); err != nil { + t.Fatalf("failed to write genesis file: %v", err) + } + + nodeKeyFile := filepath.Join(gethPath, "nodekey") + if err := ioutil.WriteFile(nodeKeyFile, []byte(nodeKey), 0600); err != nil { + t.Fatalf("failed to write nodekey file: %v", err) + } + + runGeth(t, "--datadir", datadir, "init", json).WaitExit() + + return datadir +} + +func TestReadTLSClientConfig_whenCustomizeTLSCipherSuites(t *testing.T) { + assert := testifyassert.New(t) + + flagSet := new(flag.FlagSet) + flagSet.Bool(utils.RPCClientTLSInsecureSkipVerify.Name, true, "") + flagSet.String(utils.RPCClientTLSCipherSuites.Name, "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "") + ctx := cli.NewContext(nil, flagSet, nil) + + tlsConf, ok, err := readTLSClientConfig("https://arbitraryendpoint", ctx) + + assert.NoError(err) + assert.True(ok, "has custom TLS client configuration") + assert.True(tlsConf.InsecureSkipVerify) + assert.Len(tlsConf.CipherSuites, 2) + assert.Contains(tlsConf.CipherSuites, tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) + assert.Contains(tlsConf.CipherSuites, tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384) +} + +func TestReadTLSClientConfig_whenTypicalTLS(t *testing.T) { + assert := testifyassert.New(t) + + flagSet := new(flag.FlagSet) + ctx := cli.NewContext(nil, flagSet, nil) + + tlsConf, ok, err := readTLSClientConfig("https://arbitraryendpoint", ctx) + + assert.NoError(err) + assert.False(ok, "no custom TLS client configuration") + assert.Nil(tlsConf, "no custom TLS config is set") +} + +func TestReadTLSClientConfig_whenTLSInsecureFlagSet(t *testing.T) { + assert := testifyassert.New(t) + + flagSet := new(flag.FlagSet) + flagSet.Bool(utils.RPCClientTLSInsecureSkipVerify.Name, true, "") + ctx := cli.NewContext(nil, flagSet, nil) + + tlsConf, ok, err := readTLSClientConfig("https://arbitraryendpoint", ctx) + + assert.NoError(err) + assert.True(ok, "has custom TLS client configuration") + assert.True(tlsConf.InsecureSkipVerify) + assert.Len(tlsConf.CipherSuites, 0) +} + +func SetResetPrivateConfig(value string) func() { + existingValue := os.Getenv("PRIVATE_CONFIG") + os.Setenv("PRIVATE_CONFIG", value) + return func() { + os.Setenv("PRIVATE_CONFIG", existingValue) + } +} diff --git a/src/cmd/geth/dao_test.go b/src/cmd/geth/dao_test.go new file mode 100644 index 00000000..4fd650c9 --- /dev/null +++ b/src/cmd/geth/dao_test.go @@ -0,0 +1,155 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
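SetResetPrivateConfig above saves the current PRIVATE_CONFIG value, overrides it, and returns a closure that restores it, which the tests defer. A minimal standalone sketch of that save-and-restore pattern (illustrative only, not part of this change; setEnvForTest is a hypothetical helper name):

package main

import (
	"fmt"
	"os"
)

// setEnvForTest overrides an environment variable and returns a function that
// restores the previous value; callers typically defer the returned closure.
func setEnvForTest(key, value string) func() {
	previous := os.Getenv(key)
	os.Setenv(key, value)
	return func() { os.Setenv(key, previous) }
}

func main() {
	restore := setEnvForTest("PRIVATE_CONFIG", "ignore")
	defer restore()
	fmt.Println(os.Getenv("PRIVATE_CONFIG")) // ignore
}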
+ +package main + +import ( + "io/ioutil" + "math/big" + "os" + "path/filepath" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/params" +) + +// Genesis block for nodes which don't care about the DAO fork (i.e. not configured) +var daoOldGenesis = `{ + "alloc" : {}, + "coinbase" : "0x0000000000000000000000000000000000000000", + "difficulty" : "0x20000", + "extraData" : "", + "gasLimit" : "0x2fefd8", + "nonce" : "0x0000000000000042", + "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000", + "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp" : "0x00", + "config" : { + "homesteadBlock" : 0 + } +}` + +// Genesis block for nodes which actively oppose the DAO fork +var daoNoForkGenesis = `{ + "alloc" : {}, + "coinbase" : "0x0000000000000000000000000000000000000000", + "difficulty" : "0x20000", + "extraData" : "", + "gasLimit" : "0x2fefd8", + "nonce" : "0x0000000000000042", + "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000", + "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp" : "0x00", + "config" : { + "homesteadBlock" : 0, + "daoForkBlock" : 314, + "daoForkSupport" : false + } +}` + +// Genesis block for nodes which actively support the DAO fork +var daoProForkGenesis = `{ + "alloc" : {}, + "coinbase" : "0x0000000000000000000000000000000000000000", + "difficulty" : "0x20000", + "extraData" : "", + "gasLimit" : "0x2fefd8", + "nonce" : "0x0000000000000042", + "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000", + "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp" : "0x00", + "config" : { + "homesteadBlock" : 0, + "daoForkBlock" : 314, + "daoForkSupport" : true + } +}` + +var daoGenesisHash = common.HexToHash("5e1fc79cb4ffa4739177b5408045cd5d51c6cf766133f23f7cd72ee1f8d790e0") +var daoGenesisForkBlock = big.NewInt(314) + +// TestDAOForkBlockNewChain tests that the DAO hard-fork number and the nodes support/opposition is correctly +// set in the database after various initialization procedures and invocations. 
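testDAOForkBlockNewChain below validates config.DAOForkBlock with nil-aware *big.Int comparisons. A standalone sketch of that comparison pattern (illustrative only, not part of this change; forkBlockMatches is a hypothetical helper):

package main

import (
	"fmt"
	"math/big"
)

// forkBlockMatches treats two nil fork blocks as equal and otherwise compares
// values, mirroring the have-nil/want-nil checks in the test below.
func forkBlockMatches(have, want *big.Int) bool {
	if have == nil || want == nil {
		return have == nil && want == nil
	}
	return have.Cmp(want) == 0
}

func main() {
	fmt.Println(forkBlockMatches(big.NewInt(314), big.NewInt(314))) // true
	fmt.Println(forkBlockMatches(nil, big.NewInt(314)))             // false
	fmt.Println(forkBlockMatches(nil, nil))                         // true
}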
+func TestDAOForkBlockNewChain(t *testing.T) { + for i, arg := range []struct { + genesis string + expectBlock *big.Int + expectVote bool + }{ + // Test DAO Default Mainnet + {"", params.MainnetChainConfig.DAOForkBlock, true}, + // test DAO Init Old Privnet + {daoOldGenesis, nil, false}, + // test DAO Default No Fork Privnet + {daoNoForkGenesis, daoGenesisForkBlock, false}, + // test DAO Default Pro Fork Privnet + {daoProForkGenesis, daoGenesisForkBlock, true}, + } { + testDAOForkBlockNewChain(t, i, arg.genesis, arg.expectBlock, arg.expectVote) + } +} + +func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBlock *big.Int, expectVote bool) { + defer SetResetPrivateConfig("ignore")() + // Create a temporary data directory to use and inspect later + datadir := tmpdir(t) + defer os.RemoveAll(datadir) + + // Start a Geth instance with the requested flags set and immediately terminate + if genesis != "" { + json := filepath.Join(datadir, "genesis.json") + if err := ioutil.WriteFile(json, []byte(genesis), 0600); err != nil { + t.Fatalf("test %d: failed to write genesis file: %v", test, err) + } + runGeth(t, "--datadir", datadir, "--networkid", "1337", "init", json).WaitExit() + } else { + // Force chain initialization + args := []string{"--port", "0", "--networkid", "1337", "--maxpeers", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--datadir", datadir} + runGeth(t, append(args, []string{"--exec", "2+2", "console"}...)...).WaitExit() + } + // Retrieve the DAO config flag from the database + path := filepath.Join(datadir, "geth", "chaindata") + db, err := rawdb.NewLevelDBDatabase(path, 0, 0, "") + if err != nil { + t.Fatalf("test %d: failed to open test database: %v", test, err) + } + defer db.Close() + + genesisHash := common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") + if genesis != "" { + genesisHash = daoGenesisHash + } + config := rawdb.ReadChainConfig(db, genesisHash) + if config == nil { + t.Errorf("test %d: failed to retrieve chain config: %v", test, err) + return // we want to return here, the other checks can't make it past this point (nil panic). + } + // Validate the DAO hard-fork block number against the expected value + if config.DAOForkBlock == nil { + if expectBlock != nil { + t.Errorf("test %d: dao hard-fork block mismatch: have nil, want %v", test, expectBlock) + } + } else if expectBlock == nil { + t.Errorf("test %d: dao hard-fork block mismatch: have %v, want nil", test, config.DAOForkBlock) + } else if config.DAOForkBlock.Cmp(expectBlock) != 0 { + t.Errorf("test %d: dao hard-fork block mismatch: have %v, want %v", test, config.DAOForkBlock, expectBlock) + } + if config.DAOForkSupport != expectVote { + t.Errorf("test %d: dao hard-fork support mismatch: have %v, want %v", test, config.DAOForkSupport, expectVote) + } +} diff --git a/src/cmd/geth/dbcmd.go b/src/cmd/geth/dbcmd.go new file mode 100644 index 00000000..8c91ca56 --- /dev/null +++ b/src/cmd/geth/dbcmd.go @@ -0,0 +1,341 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/console/prompt" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/ethdb/leveldb" + "github.com/ethereum/go-ethereum/log" + "github.com/syndtr/goleveldb/leveldb/opt" + "gopkg.in/urfave/cli.v1" +) + +var ( + removedbCommand = cli.Command{ + Action: utils.MigrateFlags(removeDB), + Name: "removedb", + Usage: "Remove blockchain and state databases", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + }, + Category: "DATABASE COMMANDS", + Description: ` +Remove blockchain and state databases`, + } + dbCommand = cli.Command{ + Name: "db", + Usage: "Low level database operations", + ArgsUsage: "", + Category: "DATABASE COMMANDS", + Subcommands: []cli.Command{ + dbInspectCmd, + dbStatCmd, + dbCompactCmd, + dbGetCmd, + dbDeleteCmd, + dbPutCmd, + }, + } + dbInspectCmd = cli.Command{ + Action: utils.MigrateFlags(inspect), + Name: "inspect", + ArgsUsage: " ", + + Usage: "Inspect the storage size for each type of data in the database", + Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`, + } + dbStatCmd = cli.Command{ + Action: dbStats, + Name: "stats", + Usage: "Print leveldb statistics", + } + dbCompactCmd = cli.Command{ + Action: dbCompact, + Name: "compact", + Usage: "Compact leveldb database. WARNING: May take a very long time", + Description: `This command performs a database compaction. +WARNING: This operation may take a very long time to finish, and may cause database +corruption if it is aborted during execution'!`, + } + dbGetCmd = cli.Command{ + Action: dbGet, + Name: "get", + Usage: "Show the value of a database key", + ArgsUsage: "", + Description: "This command looks up the specified database key from the database.", + } + dbDeleteCmd = cli.Command{ + Action: dbDelete, + Name: "delete", + Usage: "Delete a database key (WARNING: may corrupt your database)", + ArgsUsage: "", + Description: `This command deletes the specified database key from the database. +WARNING: This is a low-level operation which may cause database corruption!`, + } + dbPutCmd = cli.Command{ + Action: dbPut, + Name: "put", + Usage: "Set the value of a database key (WARNING: may corrupt your database)", + ArgsUsage: " ", + Description: `This command sets a given database key to the given value. 
+WARNING: This is a low-level operation which may cause database corruption!`, + } +) + +func removeDB(ctx *cli.Context) error { + stack, config := makeConfigNode(ctx) + + // Remove the full node state database + path := stack.ResolvePath("chaindata") + if common.FileExist(path) { + confirmAndRemoveDB(path, "full node state database") + } else { + log.Info("Full node state database missing", "path", path) + } + // Remove the full node ancient database + path = config.Eth.DatabaseFreezer + switch { + case path == "": + path = filepath.Join(stack.ResolvePath("chaindata"), "ancient") + case !filepath.IsAbs(path): + path = config.Node.ResolvePath(path) + } + if common.FileExist(path) { + confirmAndRemoveDB(path, "full node ancient database") + } else { + log.Info("Full node ancient database missing", "path", path) + } + // Remove the light node database + path = stack.ResolvePath("lightchaindata") + if common.FileExist(path) { + confirmAndRemoveDB(path, "light node database") + } else { + log.Info("Light node database missing", "path", path) + } + return nil +} + +// confirmAndRemoveDB prompts the user for a last confirmation and removes the +// folder if accepted. +func confirmAndRemoveDB(database string, kind string) { + confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database)) + switch { + case err != nil: + utils.Fatalf("%v", err) + case !confirm: + log.Info("Database deletion skipped", "path", database) + default: + start := time.Now() + filepath.Walk(database, func(path string, info os.FileInfo, err error) error { + // If we're at the top level folder, recurse into + if path == database { + return nil + } + // Delete all the files, but not subfolders + if !info.IsDir() { + os.Remove(path) + return nil + } + return filepath.SkipDir + }) + log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start))) + } +} + +func inspect(ctx *cli.Context) error { + var ( + prefix []byte + start []byte + ) + if ctx.NArg() > 2 { + return fmt.Errorf("Max 2 arguments: %v", ctx.Command.ArgsUsage) + } + if ctx.NArg() >= 1 { + if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil { + return fmt.Errorf("failed to hex-decode 'prefix': %v", err) + } else { + prefix = d + } + } + if ctx.NArg() >= 2 { + if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil { + return fmt.Errorf("failed to hex-decode 'start': %v", err) + } else { + start = d + } + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + _, chainDb := utils.MakeChain(ctx, stack, true, false) + defer chainDb.Close() + + return rawdb.InspectDatabase(chainDb, prefix, start) +} + +func showLeveldbStats(db ethdb.Stater) { + if stats, err := db.Stat("leveldb.stats"); err != nil { + log.Warn("Failed to read database stats", "error", err) + } else { + fmt.Println(stats) + } + if ioStats, err := db.Stat("leveldb.iostats"); err != nil { + log.Warn("Failed to read database iostats", "error", err) + } else { + fmt.Println(ioStats) + } +} + +func dbStats(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + path := stack.ResolvePath("chaindata") + db, err := leveldb.NewCustom(path, "", func(options *opt.Options) { + options.ReadOnly = true + }) + if err != nil { + return err + } + showLeveldbStats(db) + err = db.Close() + if err != nil { + log.Info("Close err", "error", err) + } + return nil +} + +func dbCompact(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + path := stack.ResolvePath("chaindata") + 
cache := ctx.GlobalInt(utils.CacheFlag.Name) * ctx.GlobalInt(utils.CacheDatabaseFlag.Name) / 100 + db, err := leveldb.NewCustom(path, "", func(options *opt.Options) { + options.OpenFilesCacheCapacity = utils.MakeDatabaseHandles() + options.BlockCacheCapacity = cache / 2 * opt.MiB + options.WriteBuffer = cache / 4 * opt.MiB // Two of these are used internally + }) + if err != nil { + return err + } + showLeveldbStats(db) + log.Info("Triggering compaction") + err = db.Compact(nil, nil) + if err != nil { + log.Info("Compact err", "error", err) + } + showLeveldbStats(db) + log.Info("Closing db") + err = db.Close() + if err != nil { + log.Info("Close err", "error", err) + } + log.Info("Exiting") + return err +} + +// dbGet shows the value of a given database key +func dbGet(ctx *cli.Context) error { + if ctx.NArg() != 1 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + path := stack.ResolvePath("chaindata") + db, err := leveldb.NewCustom(path, "", func(options *opt.Options) { + options.ReadOnly = true + }) + if err != nil { + return err + } + defer db.Close() + key, err := hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + log.Info("Could not decode the key", "error", err) + return err + } + data, err := db.Get(key) + if err != nil { + log.Info("Get operation failed", "error", err) + return err + } + fmt.Printf("key %#x:\n\t%#x\n", key, data) + return nil +} + +// dbDelete deletes a key from the database +func dbDelete(ctx *cli.Context) error { + if ctx.NArg() != 1 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + db := utils.MakeChainDatabase(ctx, stack) + defer db.Close() + key, err := hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + log.Info("Could not decode the key", "error", err) + return err + } + if err = db.Delete(key); err != nil { + log.Info("Delete operation returned an error", "error", err) + return err + } + return nil +} + +// dbPut overwrite a value in the database +func dbPut(ctx *cli.Context) error { + if ctx.NArg() != 2 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + db := utils.MakeChainDatabase(ctx, stack) + defer db.Close() + var ( + key []byte + value []byte + data []byte + err error + ) + key, err = hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + log.Info("Could not decode the key", "error", err) + return err + } + value, err = hexutil.Decode(ctx.Args().Get(1)) + if err != nil { + log.Info("Could not decode the value", "error", err) + return err + } + data, err = db.Get(key) + if err == nil { + fmt.Printf("Previous value:\n%#x\n", data) + } + return db.Put(key, value) +} diff --git a/src/cmd/geth/genesis_test.go b/src/cmd/geth/genesis_test.go new file mode 100644 index 00000000..3b744f30 --- /dev/null +++ b/src/cmd/geth/genesis_test.go @@ -0,0 +1,189 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/cespare/cp" +) + +var customGenesisTests = []struct { + genesis string + query string + result string +}{ + // Genesis file with an empty chain configuration (ensure missing fields work) + { + genesis: `{ + "alloc" : {}, + "coinbase" : "0x0000000000000000000000000000000000000000", + "difficulty" : "0x20000", + "extraData" : "", + "gasLimit" : "0x2fefd8", + "nonce" : "0x0000000000001338", + "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000", + "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp" : "0x00", + "config" : {"isQuorum":false } + }`, + query: "eth.getBlock(0).nonce", + result: "0x0000000000001338", + }, + // Genesis file with specific chain configurations + { + genesis: `{ + "alloc" : {}, + "coinbase" : "0x0000000000000000000000000000000000000000", + "difficulty" : "0x20000", + "extraData" : "", + "gasLimit" : "0x2fefd8", + "nonce" : "0x0000000000000042", + "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000", + "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp" : "0x00", + "config" : { + "homesteadBlock" : 42, + "daoForkBlock" : 141, + "daoForkSupport" : true, + "isQuorum" : false + }, + }`, + query: "eth.getBlock(0).nonce", + result: "0x0000000000000042", + }, +} + +// Tests that initializing Geth with a custom genesis block and chain definitions +// work properly. +func TestCustomGenesis(t *testing.T) { + defer SetResetPrivateConfig("ignore")() + for i, tt := range customGenesisTests { + // Create a temporary data directory to use and inspect later + datadir := tmpdir(t) + defer os.RemoveAll(datadir) + + // copy the node key and static-nodes.json so that geth can start with the raft consensus + gethDir := filepath.Join(datadir, "geth") + sourceNodeKey := filepath.Join("testdata", "geth") + if err := cp.CopyAll(gethDir, sourceNodeKey); err != nil { + t.Fatal(err) + } + + // Initialize the data directory with the custom genesis block + json := filepath.Join(datadir, "genesis.json") + if err := ioutil.WriteFile(json, []byte(tt.genesis), 0600); err != nil { + t.Fatalf("test %d: failed to write genesis file: %v", i, err) + } + runGeth(t, "--datadir", datadir, "init", json).WaitExit() + + // Query the custom genesis block + geth := runGeth(t, "--networkid", "1337", "--syncmode=full", + "--datadir", datadir, "--maxpeers", "0", "--port", "0", + "--nodiscover", "--nat", "none", "--ipcdisable", + "--raft", + "--exec", tt.query, "console") + geth.ExpectRegexp(tt.result) + geth.ExpectExit() + } +} + +func TestCustomGenesisUpgradeWithPrivacyEnhancementsBlock(t *testing.T) { + defer SetResetPrivateConfig("ignore")() + // Create a temporary data directory to use and inspect later + datadir := tmpdir(t) + defer os.RemoveAll(datadir) + + // copy the node key and static-nodes.json so that geth can start with the raft consensus + gethDir := filepath.Join(datadir, "geth") + sourceNodeKey := filepath.Join("testdata", "geth") + if err := cp.CopyAll(gethDir, sourceNodeKey); err != nil { + t.Fatal(err) + } + + genesisContentWithoutPrivacyEnhancements := + `{ + "alloc" : {}, + "coinbase" : "0x0000000000000000000000000000000000000000", + "difficulty" : "0x20000", + "extraData" : 
"", + "gasLimit" : "0x2fefd8", + "nonce" : "0x0000000000000042", + "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000", + "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp" : "0x00", + "config" : { + "homesteadBlock" : 42, + "daoForkBlock" : 141, + "daoForkSupport" : true, + "isQuorum" : false + } + }` + + // Initialize the data directory with the custom genesis block + json := filepath.Join(datadir, "genesis.json") + if err := ioutil.WriteFile(json, []byte(genesisContentWithoutPrivacyEnhancements), 0600); err != nil { + t.Fatalf("failed to write genesis file: %v", err) + } + geth := runGeth(t, "--datadir", datadir, "init", json) + geth.WaitExit() + + genesisContentWithPrivacyEnhancements := + `{ + "alloc" : {}, + "coinbase" : "0x0000000000000000000000000000000000000000", + "difficulty" : "0x20000", + "extraData" : "", + "gasLimit" : "0x2fefd8", + "nonce" : "0x0000000000000042", + "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000", + "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp" : "0x00", + "config" : { + "homesteadBlock" : 42, + "daoForkBlock" : 141, + "privacyEnhancementsBlock" : 1000, + "daoForkSupport" : true, + "isQuorum" : false + } + }` + + if err := ioutil.WriteFile(json, []byte(genesisContentWithPrivacyEnhancements), 0600); err != nil { + t.Fatalf("failed to write genesis file: %v", err) + } + geth = runGeth(t, "--datadir", datadir, "init", json) + geth.WaitExit() + + expectedText := "Privacy enhancements have been enabled from block height 1000. Please ensure your privacy manager is upgraded and supports privacy enhancements" + + result := strings.TrimSpace(geth.StderrText()) + if !strings.Contains(result, expectedText) { + geth.Fatalf("bad stderr text. 
want '%s', got '%s'", expectedText, result) + } + + // start quorum - it should fail the transaction manager PrivacyEnhancements feature validation + geth = runGeth(t, + "--datadir", datadir, "--maxpeers", "0", "--port", "0", + "--nodiscover", "--nat", "none", "--ipcdisable", + "--raft", "console") + geth.ExpectRegexp("Cannot start quorum with privacy enhancements enabled while the transaction manager does not support it") + geth.ExpectExit() +} diff --git a/src/cmd/geth/les_test.go b/src/cmd/geth/les_test.go new file mode 100644 index 00000000..cc0f0952 --- /dev/null +++ b/src/cmd/geth/les_test.go @@ -0,0 +1,224 @@ +package main + +import ( + "context" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/rpc" +) + +type gethrpc struct { + name string + rpc *rpc.Client + geth *testgeth + nodeInfo *p2p.NodeInfo +} + +func (g *gethrpc) killAndWait() { + g.geth.Kill() + g.geth.WaitExit() +} + +func (g *gethrpc) callRPC(result interface{}, method string, args ...interface{}) { + if err := g.rpc.Call(&result, method, args...); err != nil { + g.geth.Fatalf("callRPC %v: %v", method, err) + } +} + +func (g *gethrpc) addPeer(peer *gethrpc) { + g.geth.Logf("%v.addPeer(%v)", g.name, peer.name) + enode := peer.getNodeInfo().Enode + peerCh := make(chan *p2p.PeerEvent) + sub, err := g.rpc.Subscribe(context.Background(), "admin", peerCh, "peerEvents") + if err != nil { + g.geth.Fatalf("subscribe %v: %v", g.name, err) + } + defer sub.Unsubscribe() + g.callRPC(nil, "admin_addPeer", enode) + dur := 14 * time.Second + timeout := time.After(dur) + select { + case ev := <-peerCh: + g.geth.Logf("%v received event: type=%v, peer=%v", g.name, ev.Type, ev.Peer) + case err := <-sub.Err(): + g.geth.Fatalf("%v sub error: %v", g.name, err) + case <-timeout: + g.geth.Error("timeout adding peer after", dur) + } +} + +// Use this function instead of `g.nodeInfo` directly +func (g *gethrpc) getNodeInfo() *p2p.NodeInfo { + if g.nodeInfo != nil { + return g.nodeInfo + } + g.nodeInfo = &p2p.NodeInfo{} + g.callRPC(&g.nodeInfo, "admin_nodeInfo") + return g.nodeInfo +} + +func (g *gethrpc) waitSynced() { + // Check if it's synced now + var result interface{} + g.callRPC(&result, "eth_syncing") + syncing, ok := result.(bool) + if ok && !syncing { + g.geth.Logf("%v already synced", g.name) + return + } + + // Actually wait, subscribe to the event + ch := make(chan interface{}) + sub, err := g.rpc.Subscribe(context.Background(), "eth", ch, "syncing") + if err != nil { + g.geth.Fatalf("%v syncing: %v", g.name, err) + } + defer sub.Unsubscribe() + timeout := time.After(4 * time.Second) + select { + case ev := <-ch: + g.geth.Log("'syncing' event", ev) + syncing, ok := ev.(bool) + if ok && !syncing { + break + } + g.geth.Log("Other 'syncing' event", ev) + case err := <-sub.Err(): + g.geth.Fatalf("%v notification: %v", g.name, err) + break + case <-timeout: + g.geth.Fatalf("%v timeout syncing", g.name) + break + } +} + +// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into +// account the set data folders as well as the designated platform we're currently +// running on. 
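addPeer and waitSynced above both subscribe to an event channel and guard the wait with a timeout so a misbehaving node cannot hang the test. A minimal standalone sketch of that select-with-timeout pattern (illustrative only, not part of this change; waitForEvent is a hypothetical helper):

package main

import (
	"fmt"
	"time"
)

// waitForEvent blocks until an event arrives or the timeout elapses.
func waitForEvent(events <-chan string, timeout time.Duration) (string, bool) {
	select {
	case ev := <-events:
		return ev, true
	case <-time.After(timeout):
		return "", false
	}
}

func main() {
	events := make(chan string, 1)
	events <- "peer added"
	if ev, ok := waitForEvent(events, 2*time.Second); ok {
		fmt.Println("got event:", ev) // got event: peer added
	} else {
		fmt.Println("timed out waiting for event")
	}
}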
+func ipcEndpoint(ipcPath, datadir string) string { + // On windows we can only use plain top-level pipes + if runtime.GOOS == "windows" { + if strings.HasPrefix(ipcPath, `\\.\pipe\`) { + return ipcPath + } + return `\\.\pipe\` + ipcPath + } + // Resolve names into the data directory full paths otherwise + if filepath.Base(ipcPath) == ipcPath { + if datadir == "" { + return filepath.Join(os.TempDir(), ipcPath) + } + return filepath.Join(datadir, ipcPath) + } + return ipcPath +} + +// nextIPC ensures that each ipc pipe gets a unique name. +// On linux, it works well to use ipc pipes all over the filesystem (in datadirs), +// but windows require pipes to sit in "\\.\pipe\". Therefore, to run several +// nodes simultaneously, we need to distinguish between them, which we do by +// the pipe filename instead of folder. +var nextIPC = uint32(0) + +func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc { + ipcName := fmt.Sprintf("geth-%d.ipc", atomic.AddUint32(&nextIPC, 1)) + args = append([]string{"--networkid=42", "--port=0", "--ipcpath", ipcName}, args...) + t.Logf("Starting %v with rpc: %v", name, args) + + g := &gethrpc{ + name: name, + geth: runGeth(t, args...), + } + // wait before we can attach to it. TODO: probe for it properly + time.Sleep(1 * time.Second) + var err error + ipcpath := ipcEndpoint(ipcName, g.geth.Datadir) + if g.rpc, err = rpc.Dial(ipcpath); err != nil { + t.Fatalf("%v rpc connect to %v: %v", name, ipcpath, err) + } + return g +} + +func initGeth(t *testing.T) string { + args := []string{"--networkid=42", "init", "./testdata/clique.json"} + t.Logf("Initializing geth: %v ", args) + g := runGeth(t, args...) + datadir := g.Datadir + g.WaitExit() + return datadir +} + +func startLightServer(t *testing.T) *gethrpc { + datadir := initGeth(t) + t.Logf("Importing keys to geth") + runGeth(t, "--datadir", datadir, "--password", "./testdata/password.txt", "account", "import", "./testdata/key.prv", "--lightkdf").WaitExit() + account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105" + server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1", "--verbosity=4") + return server +} + +func startClient(t *testing.T, name string) *gethrpc { + datadir := initGeth(t) + return startGethWithIpc(t, name, "--datadir", datadir, "--nodiscover", "--syncmode=light", "--nat=extip:127.0.0.1", "--verbosity=4") +} + +func TestPriorityClient(t *testing.T) { + // Quorum + t.Skip("skipping test in Quorum (no support for light sync mode).") + // End Quorum + + lightServer := startLightServer(t) + defer lightServer.killAndWait() + + // Start client and add lightServer as peer + freeCli := startClient(t, "freeCli") + defer freeCli.killAndWait() + freeCli.addPeer(lightServer) + + var peers []*p2p.PeerInfo + freeCli.callRPC(&peers, "admin_peers") + if len(peers) != 1 { + t.Errorf("Expected: # of client peers == 1, actual: %v", len(peers)) + return + } + + // Set up priority client, get its nodeID, increase its balance on the lightServer + prioCli := startClient(t, "prioCli") + defer prioCli.killAndWait() + // 3_000_000_000 once we move to Go 1.13 + tokens := uint64(3000000000) + lightServer.callRPC(nil, "les_addBalance", prioCli.getNodeInfo().ID, tokens) + prioCli.addPeer(lightServer) + + // Check if priority client is actually syncing and the regular client got kicked out + prioCli.callRPC(&peers, 
"admin_peers") + if len(peers) != 1 { + t.Errorf("Expected: # of prio peers == 1, actual: %v", len(peers)) + } + + nodes := map[string]*gethrpc{ + lightServer.getNodeInfo().ID: lightServer, + freeCli.getNodeInfo().ID: freeCli, + prioCli.getNodeInfo().ID: prioCli, + } + time.Sleep(1 * time.Second) + lightServer.callRPC(&peers, "admin_peers") + peersWithNames := make(map[string]string) + for _, p := range peers { + peersWithNames[nodes[p.ID].name] = p.ID + } + if _, freeClientFound := peersWithNames[freeCli.name]; freeClientFound { + t.Error("client is still a peer of lightServer", peersWithNames) + } + if _, prioClientFound := peersWithNames[prioCli.name]; !prioClientFound { + t.Error("prio client is not among lightServer peers", peersWithNames) + } +} diff --git a/src/cmd/geth/main.go b/src/cmd/geth/main.go new file mode 100644 index 00000000..15acb686 --- /dev/null +++ b/src/cmd/geth/main.go @@ -0,0 +1,549 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +// geth is the official command-line client for Ethereum. +package main + +import ( + "fmt" + "math" + "os" + godebug "runtime/debug" + "sort" + "strconv" + "strings" + "time" + + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/accounts/pluggable" + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/console/prompt" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/internal/debug" + "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/internal/flags" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/permission" + "github.com/ethereum/go-ethereum/plugin" + gopsutil "github.com/shirou/gopsutil/mem" + "gopkg.in/urfave/cli.v1" +) + +const ( + clientIdentifier = "geth" // Client identifier to advertise over the network +) + +var ( + // Git SHA1 commit hash of the release (set via linker flags) + gitCommit = "" + gitDate = "" + // The app that holds all commands and flags. 
+ app = flags.NewApp(gitCommit, gitDate, "the go-ethereum command line interface") + // flags that configure the node + nodeFlags = []cli.Flag{ + utils.IdentityFlag, + utils.UnlockedAccountFlag, + utils.PasswordFileFlag, + utils.BootnodesFlag, + utils.DataDirFlag, + utils.RaftLogDirFlag, + utils.AncientFlag, + utils.MinFreeDiskSpaceFlag, + utils.KeyStoreDirFlag, + utils.ExternalSignerFlag, + utils.NoUSBFlag, + utils.USBFlag, + utils.SmartCardDaemonPathFlag, + utils.OverrideBerlinFlag, + utils.EthashCacheDirFlag, + utils.EthashCachesInMemoryFlag, + utils.EthashCachesOnDiskFlag, + utils.EthashCachesLockMmapFlag, + utils.EthashDatasetDirFlag, + utils.EthashDatasetsInMemoryFlag, + utils.EthashDatasetsOnDiskFlag, + utils.EthashDatasetsLockMmapFlag, + utils.TxPoolLocalsFlag, + utils.TxPoolNoLocalsFlag, + utils.TxPoolJournalFlag, + utils.TxPoolRejournalFlag, + utils.TxPoolPriceLimitFlag, + utils.TxPoolPriceBumpFlag, + utils.TxPoolAccountSlotsFlag, + utils.TxPoolGlobalSlotsFlag, + utils.TxPoolAccountQueueFlag, + utils.TxPoolGlobalQueueFlag, + utils.TxPoolLifetimeFlag, + utils.SyncModeFlag, + utils.ExitWhenSyncedFlag, + utils.GCModeFlag, + utils.SnapshotFlag, + utils.TxLookupLimitFlag, + utils.LightServeFlag, + utils.LightIngressFlag, + utils.LightEgressFlag, + utils.LightMaxPeersFlag, + utils.LightNoPruneFlag, + utils.LightKDFFlag, + utils.UltraLightServersFlag, + utils.UltraLightFractionFlag, + utils.UltraLightOnlyAnnounceFlag, + utils.LightNoSyncServeFlag, + utils.AuthorizationListFlag, + utils.BloomFilterSizeFlag, + utils.CacheFlag, + utils.CacheDatabaseFlag, + utils.CacheTrieFlag, + utils.CacheTrieJournalFlag, + utils.CacheTrieRejournalFlag, + utils.CacheGCFlag, + utils.CacheSnapshotFlag, + utils.CacheNoPrefetchFlag, + utils.CachePreimagesFlag, + utils.ListenPortFlag, + utils.MaxPeersFlag, + utils.MaxPendingPeersFlag, + utils.MiningEnabledFlag, + utils.MinerThreadsFlag, + utils.MinerNotifyFlag, + utils.MinerGasTargetFlag, + utils.MinerGasLimitFlag, + utils.MinerGasPriceFlag, + utils.MinerEtherbaseFlag, + utils.MinerExtraDataFlag, + utils.MinerRecommitIntervalFlag, + utils.MinerNoVerfiyFlag, + utils.NATFlag, + utils.NoDiscoverFlag, + utils.DiscoveryV5Flag, + utils.NetrestrictFlag, + utils.NodeKeyFileFlag, + utils.NodeKeyHexFlag, + utils.DNSDiscoveryFlag, + utils.MainnetFlag, + utils.DeveloperFlag, + utils.DeveloperPeriodFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.YoloV3Flag, + utils.VMEnableDebugFlag, + utils.NetworkIdFlag, + utils.EthStatsURLFlag, + utils.FakePoWFlag, + utils.NoCompactionFlag, + utils.GpoBlocksFlag, + utils.GpoPercentileFlag, + utils.GpoMaxGasPriceFlag, + utils.EWASMInterpreterFlag, + utils.EVMInterpreterFlag, + configFileFlag, + // Quorum + utils.PrivateCacheTrieJournalFlag, + utils.QuorumImmutabilityThreshold, + utils.EnableNodePermissionFlag, + utils.RaftModeFlag, + utils.RaftBlockTimeFlag, + utils.RaftJoinExistingFlag, + utils.RaftPortFlag, + utils.RaftDNSEnabledFlag, + utils.EmitCheckpointsFlag, + utils.IstanbulRequestTimeoutFlag, + utils.IstanbulBlockPeriodFlag, + utils.PluginSettingsFlag, + utils.PluginSkipVerifyFlag, + utils.PluginLocalVerifyFlag, + utils.PluginPublicKeyFlag, + utils.AllowedFutureBlockTimeFlag, + utils.EVMCallTimeOutFlag, + utils.MultitenancyFlag, + utils.RevertReasonFlag, + utils.QuorumEnablePrivacyMarker, + utils.QuorumPTMUnixSocketFlag, + utils.QuorumPTMUrlFlag, + utils.QuorumPTMTimeoutFlag, + utils.QuorumPTMDialTimeoutFlag, + utils.QuorumPTMHttpIdleTimeoutFlag, + utils.QuorumPTMHttpWriteBufferSizeFlag, + 
utils.QuorumPTMHttpReadBufferSizeFlag, + utils.QuorumPTMTlsModeFlag, + utils.QuorumPTMTlsRootCaFlag, + utils.QuorumPTMTlsClientCertFlag, + utils.QuorumPTMTlsClientKeyFlag, + utils.QuorumPTMTlsInsecureSkipVerify, + // End-Quorum + } + + rpcFlags = []cli.Flag{ + utils.HTTPEnabledFlag, + utils.HTTPListenAddrFlag, + utils.HTTPPortFlag, + utils.HTTPCORSDomainFlag, + utils.HTTPVirtualHostsFlag, + utils.LegacyRPCEnabledFlag, + utils.LegacyRPCListenAddrFlag, + utils.LegacyRPCPortFlag, + utils.LegacyRPCCORSDomainFlag, + utils.LegacyRPCVirtualHostsFlag, + utils.LegacyRPCApiFlag, + utils.GraphQLEnabledFlag, + utils.GraphQLCORSDomainFlag, + utils.GraphQLVirtualHostsFlag, + utils.HTTPApiFlag, + utils.HTTPPathPrefixFlag, + utils.WSEnabledFlag, + utils.WSListenAddrFlag, + utils.WSPortFlag, + utils.WSApiFlag, + utils.WSAllowedOriginsFlag, + utils.WSPathPrefixFlag, + utils.IPCDisabledFlag, + utils.IPCPathFlag, + utils.InsecureUnlockAllowedFlag, + utils.RPCGlobalGasCapFlag, + utils.RPCGlobalTxFeeCapFlag, + utils.AllowUnprotectedTxs, + } + + metricsFlags = []cli.Flag{ + utils.MetricsEnabledFlag, + utils.MetricsEnabledExpensiveFlag, + utils.MetricsHTTPFlag, + utils.MetricsPortFlag, + utils.MetricsEnableInfluxDBFlag, + utils.MetricsInfluxDBEndpointFlag, + utils.MetricsInfluxDBDatabaseFlag, + utils.MetricsInfluxDBUsernameFlag, + utils.MetricsInfluxDBPasswordFlag, + utils.MetricsInfluxDBTagsFlag, + } +) + +func init() { + // Initialize the CLI app and start Geth + app.Action = geth + app.HideVersion = true // we have a command to print the version + app.Copyright = "Copyright 2013-2021 The go-ethereum Authors" + app.Commands = []cli.Command{ + // See chaincmd.go: + initCommand, + mpsdbUpgradeCommand, + importCommand, + exportCommand, + importPreimagesCommand, + exportPreimagesCommand, + copydbCommand, + removedbCommand, + dumpCommand, + dumpGenesisCommand, + // See accountcmd.go: + accountCommand, + walletCommand, + // See consolecmd.go: + consoleCommand, + attachCommand, + javascriptCommand, + // See misccmd.go: + makecacheCommand, + makedagCommand, + versionCommand, + versionCheckCommand, + licenseCommand, + // See config.go + dumpConfigCommand, + // see dbcmd.go + dbCommand, + // See cmd/utils/flags_legacy.go + utils.ShowDeprecated, + // See snapshot.go + snapshotCommand, + } + sort.Sort(cli.CommandsByName(app.Commands)) + + app.Flags = append(app.Flags, nodeFlags...) + app.Flags = append(app.Flags, rpcFlags...) + app.Flags = append(app.Flags, consoleFlags...) + app.Flags = append(app.Flags, debug.Flags...) + app.Flags = append(app.Flags, metricsFlags...) + + app.Before = func(ctx *cli.Context) error { + return debug.Setup(ctx) + } + app.After = func(ctx *cli.Context) error { + debug.Exit() + prompt.Stdin.Close() // Resets terminal mode. + return nil + } +} + +func main() { + if err := app.Run(os.Args); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +// prepare manipulates memory cache allowance and setups metric system. +// This function should be called before launching devp2p stack. +func prepare(ctx *cli.Context) { + // If we're running a known preset, log it for convenience. 
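Concretely, the preset detection in the switch below maps the network flag onto a startup log line, for example:

    // geth --goerli   -> "Starting Geth on Görli testnet..."
    // geth --dev      -> "Starting Geth in ephemeral dev mode..."
    // geth            -> "Starting Geth on Ethereum mainnet..."  (no preset and no --networkid given)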
+ switch { + case ctx.GlobalIsSet(utils.RopstenFlag.Name): + log.Info("Starting Geth on Ropsten testnet...") + + case ctx.GlobalIsSet(utils.RinkebyFlag.Name): + log.Info("Starting Geth on Rinkeby testnet...") + + case ctx.GlobalIsSet(utils.GoerliFlag.Name): + log.Info("Starting Geth on Görli testnet...") + + case ctx.GlobalIsSet(utils.YoloV3Flag.Name): + log.Info("Starting Geth on YOLOv3 testnet...") + + case ctx.GlobalIsSet(utils.DeveloperFlag.Name): + log.Info("Starting Geth in ephemeral dev mode...") + + case !ctx.GlobalIsSet(utils.NetworkIdFlag.Name): + log.Info("Starting Geth on Ethereum mainnet...") + } + // If we're a full node on mainnet without --cache specified, bump default cache allowance + if ctx.GlobalString(utils.SyncModeFlag.Name) != "light" && !ctx.GlobalIsSet(utils.CacheFlag.Name) && !ctx.GlobalIsSet(utils.NetworkIdFlag.Name) { + // Make sure we're not on any supported preconfigured testnet either + if !ctx.GlobalIsSet(utils.RopstenFlag.Name) && !ctx.GlobalIsSet(utils.RinkebyFlag.Name) && !ctx.GlobalIsSet(utils.GoerliFlag.Name) && !ctx.GlobalIsSet(utils.DeveloperFlag.Name) { + // Nope, we're really on mainnet. Bump that cache up! + log.Info("Bumping default cache on mainnet", "provided", ctx.GlobalInt(utils.CacheFlag.Name), "updated", 4096) + ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(4096)) + } + } + // If we're running a light client on any network, drop the cache to some meaningfully low amount + if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" && !ctx.GlobalIsSet(utils.CacheFlag.Name) { + log.Info("Dropping default light client cache", "provided", ctx.GlobalInt(utils.CacheFlag.Name), "updated", 128) + ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(128)) + } + // Cap the cache allowance and tune the garbage collector + mem, err := gopsutil.VirtualMemory() + if err == nil { + if 32<<(^uintptr(0)>>63) == 32 && mem.Total > 2*1024*1024*1024 { + log.Warn("Lowering memory allowance on 32bit arch", "available", mem.Total/1024/1024, "addressable", 2*1024) + mem.Total = 2 * 1024 * 1024 * 1024 + } + allowance := int(mem.Total / 1024 / 1024 / 3) + if cache := ctx.GlobalInt(utils.CacheFlag.Name); cache > allowance { + log.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance) + ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(allowance)) + } + } + // Ensure Go's GC ignores the database cache for trigger percentage + cache := ctx.GlobalInt(utils.CacheFlag.Name) + gogc := math.Max(20, math.Min(100, 100/(float64(cache)/1024))) + + log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) + godebug.SetGCPercent(int(gogc)) + + // Start metrics export if enabled + utils.SetupMetrics(ctx) + + // Start system runtime metrics collection + go metrics.CollectProcessMetrics(3 * time.Second) +} + +// geth is the main entry point into the system if no special subcommand is ran. +// It creates a default node based on the command line arguments and runs it in +// blocking mode, waiting for it to be shut down. +func geth(ctx *cli.Context) error { + if args := ctx.Args(); len(args) > 0 { + return fmt.Errorf("invalid command: %q", args[0]) + } + + prepare(ctx) + stack, backend := makeFullNode(ctx) + defer stack.Close() + + startNode(ctx, stack, backend) + stack.Wait() + return nil +} + +// startNode boots up the system node and all registered protocols, after which +// it unlocks any requested accounts, and starts the RPC/IPC interfaces and the +// miner. 
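To make prepare's cache-driven GC tuning above concrete, a few worked values of gogc = max(20, min(100, 100/(cache/1024))):

    // cache =  1024 MB -> 100/1  = 100  -> GOGC 100 (Go's default trigger)
    // cache =  4096 MB -> 100/4  =  25  -> GOGC 25  (collect more often so heap overhead stays near the cache budget)
    // cache = 16384 MB -> 100/16 = 6.25 -> clamped up to the floor of 20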
+func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) { + log.DoEmitCheckpoints = ctx.GlobalBool(utils.EmitCheckpointsFlag.Name) + debug.Memsize.Add("node", stack) + + // raft mode does not support --exitwhensynced + if ctx.GlobalBool(utils.ExitWhenSyncedFlag.Name) && ctx.GlobalBool(utils.RaftModeFlag.Name) { + utils.Fatalf("raft consensus does not support --exitwhensynced") + } + + // Start up the node itself + utils.StartNode(ctx, stack) + + // Now that the plugin manager has been started we register the account plugin with the corresponding account backend. All other account management is disabled when using External Signer + if !ctx.IsSet(utils.ExternalSignerFlag.Name) && stack.PluginManager().IsEnabled(plugin.AccountPluginInterfaceName) { + b := stack.AccountManager().Backends(pluggable.BackendType)[0].(*pluggable.Backend) + if err := stack.PluginManager().AddAccountPluginToBackend(b); err != nil { + log.Error("failed to setup account plugin", "err", err) + } + } + + // Unlock any account specifically requested + unlockAccounts(ctx, stack) + + // Register wallet event handlers to open and auto-derive wallets + events := make(chan accounts.WalletEvent, 16) + stack.AccountManager().Subscribe(events) + + // Create a client to interact with local geth node. + rpcClient, err := stack.Attach() + if err != nil { + utils.Fatalf("Failed to attach to self: %v", err) + } + ethClient := ethclient.NewClient(rpcClient) + + // Quorum + if ctx.GlobalBool(utils.MultitenancyFlag.Name) && !stack.PluginManager().IsEnabled(plugin.SecurityPluginInterfaceName) { + utils.Fatalf("multitenancy requires RPC Security Plugin to be configured") + } + // End Quorum + + go func() { + // Open any wallets already attached + for _, wallet := range stack.AccountManager().Wallets() { + if err := wallet.Open(""); err != nil { + log.Warn("Failed to open wallet", "url", wallet.URL(), "err", err) + } + } + // Listen for wallet event till termination + for event := range events { + switch event.Kind { + case accounts.WalletArrived: + if err := event.Wallet.Open(""); err != nil { + log.Warn("New wallet appeared, failed to open", "url", event.Wallet.URL(), "err", err) + } + case accounts.WalletOpened: + status, _ := event.Wallet.Status() + log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", status) + + var derivationPaths []accounts.DerivationPath + if event.Wallet.URL().Scheme == "ledger" { + derivationPaths = append(derivationPaths, accounts.LegacyLedgerBaseDerivationPath) + } + derivationPaths = append(derivationPaths, accounts.DefaultBaseDerivationPath) + + event.Wallet.SelfDerive(derivationPaths, ethClient) + + case accounts.WalletDropped: + log.Info("Old wallet dropped", "url", event.Wallet.URL()) + event.Wallet.Close() + } + } + }() + + // Spawn a standalone goroutine for status synchronization monitoring, + // close the node when synchronization is complete if user required. 
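The client attached above via stack.Attach is wrapped in ethclient; an external tool gets the same handle by dialing the node's IPC endpoint. A minimal sketch, assuming a hypothetical IPC path:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/ethereum/go-ethereum/ethclient"
    )

    func main() {
        // The IPC path below is a placeholder; use the node's actual --ipcpath.
        client, err := ethclient.Dial("/tmp/quorum-node/geth.ipc")
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        head, err := client.HeaderByNumber(context.Background(), nil) // nil selects the latest block
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("latest block:", head.Number)
    }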
+ if ctx.GlobalBool(utils.ExitWhenSyncedFlag.Name) { + go func() { + sub := stack.EventMux().Subscribe(downloader.DoneEvent{}) + defer sub.Unsubscribe() + for { + event := <-sub.Chan() + if event == nil { + continue + } + done, ok := event.Data.(downloader.DoneEvent) + if !ok { + continue + } + if timestamp := time.Unix(int64(done.Latest.Time), 0); time.Since(timestamp) < 10*time.Minute { + log.Info("Synchronisation completed", "latestnum", done.Latest.Number, "latesthash", done.Latest.Hash(), + "age", common.PrettyAge(timestamp)) + stack.Close() + } + } + }() + } + + // Quorum + // + // checking if permissions is enabled and staring the permissions service + if stack.Config().EnableNodePermission { + stack.Server().SetIsNodePermissioned(permission.IsNodePermissioned) + if stack.IsPermissionEnabled() { + var permissionService *permission.PermissionCtrl + if err := stack.Lifecycle(&permissionService); err != nil { + utils.Fatalf("Permission service not runnning: %v", err) + } + if err := permissionService.AfterStart(); err != nil { + utils.Fatalf("Permission service post construct failure: %v", err) + } + } + } + + // Start auxiliary services if enabled + if ctx.GlobalBool(utils.MiningEnabledFlag.Name) || ctx.GlobalBool(utils.DeveloperFlag.Name) { + // Mining only makes sense if a full Ethereum node is running + if ctx.GlobalString(utils.SyncModeFlag.Name) == "light" { + utils.Fatalf("Light clients do not support mining") + } + ethBackend, ok := backend.(*eth.EthAPIBackend) + if !ok { + utils.Fatalf("Ethereum service not running: %v", err) + } + // Set the gas price to the limits from the CLI and start mining + gasprice := utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name) + ethBackend.TxPool().SetGasPrice(gasprice) + // start mining + threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name) + if err := ethBackend.StartMining(threads); err != nil { + utils.Fatalf("Failed to start mining: %v", err) + } + } + + // checks quorum features that depend on the ethereum service + quorumValidateEthService(stack, ctx.GlobalBool(utils.RaftModeFlag.Name)) +} + +// unlockAccounts unlocks any account specifically requested. +func unlockAccounts(ctx *cli.Context, stack *node.Node) { + var unlocks []string + inputs := strings.Split(ctx.GlobalString(utils.UnlockedAccountFlag.Name), ",") + for _, input := range inputs { + if trimmed := strings.TrimSpace(input); trimmed != "" { + unlocks = append(unlocks, trimmed) + } + } + // Short circuit if there is no account to unlock. + if len(unlocks) == 0 { + return + } + // If insecure account unlocking is not allowed if node's APIs are exposed to external. + // Print warning log to user and skip unlocking. + if !stack.Config().InsecureUnlockAllowed && stack.Config().ExtRPCEnabled() { + utils.Fatalf("Account unlock with HTTP access is forbidden!") + } + ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) + passwords := utils.MakePasswordList(ctx) + for i, account := range unlocks { + unlockAccount(ks, account, i, passwords) + } +} diff --git a/src/cmd/geth/misccmd.go b/src/cmd/geth/misccmd.go new file mode 100644 index 00000000..bc276dac --- /dev/null +++ b/src/cmd/geth/misccmd.go @@ -0,0 +1,169 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "fmt" + "os" + "runtime" + "strconv" + "strings" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/params" + "gopkg.in/urfave/cli.v1" +) + +var ( + VersionCheckUrlFlag = cli.StringFlag{ + Name: "check.url", + Usage: "URL to use when checking vulnerabilities", + Value: "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json", + } + VersionCheckVersionFlag = cli.StringFlag{ + Name: "check.version", + Usage: "Version to check", + Value: fmt.Sprintf("Geth/v%v/%v-%v/%v", + params.VersionWithCommit(gitCommit, gitDate), + runtime.GOOS, runtime.GOARCH, runtime.Version()), + } + makecacheCommand = cli.Command{ + Action: utils.MigrateFlags(makecache), + Name: "makecache", + Usage: "Generate ethash verification cache (for testing)", + ArgsUsage: " ", + Category: "MISCELLANEOUS COMMANDS", + Description: ` +The makecache command generates an ethash cache in . + +This command exists to support the system testing project. +Regular users do not need to execute it. +`, + } + makedagCommand = cli.Command{ + Action: utils.MigrateFlags(makedag), + Name: "makedag", + Usage: "Generate ethash mining DAG (for testing)", + ArgsUsage: " ", + Category: "MISCELLANEOUS COMMANDS", + Description: ` +The makedag command generates an ethash DAG in . + +This command exists to support the system testing project. +Regular users do not need to execute it. +`, + } + versionCommand = cli.Command{ + Action: utils.MigrateFlags(version), + Name: "version", + Usage: "Print version numbers", + ArgsUsage: " ", + Category: "MISCELLANEOUS COMMANDS", + Description: ` +The output of this command is supposed to be machine-readable. +`, + } + versionCheckCommand = cli.Command{ + Action: utils.MigrateFlags(versionCheck), + Flags: []cli.Flag{ + VersionCheckUrlFlag, + VersionCheckVersionFlag, + }, + Name: "version-check", + Usage: "Checks (online) whether the current version suffers from any known security vulnerabilities", + ArgsUsage: "", + Category: "MISCELLANEOUS COMMANDS", + Description: ` +The version-check command fetches vulnerability-information from https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json, +and displays information about any security vulnerabilities that affect the currently executing version. +`, + } + licenseCommand = cli.Command{ + Action: utils.MigrateFlags(license), + Name: "license", + Usage: "Display license information", + ArgsUsage: " ", + Category: "MISCELLANEOUS COMMANDS", + } +) + +// makecache generates an ethash verification cache into the provided folder. +func makecache(ctx *cli.Context) error { + args := ctx.Args() + if len(args) != 2 { + utils.Fatalf(`Usage: geth makecache `) + } + block, err := strconv.ParseUint(args[0], 0, 64) + if err != nil { + utils.Fatalf("Invalid block number: %v", err) + } + ethash.MakeCache(block, args[1]) + + return nil +} + +// makedag generates an ethash mining DAG into the provided folder. 
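Both generators take a block number and an output directory as positional arguments; illustrative invocations (the values are arbitrary examples):

    // geth makecache 11550000 /tmp/ethash-cache    (verification cache for the epoch containing block 11550000)
    // geth makedag   11550000 /tmp/ethash-dag      (full mining DAG for the same epoch)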
+func makedag(ctx *cli.Context) error { + args := ctx.Args() + if len(args) != 2 { + utils.Fatalf(`Usage: geth makedag `) + } + block, err := strconv.ParseUint(args[0], 0, 64) + if err != nil { + utils.Fatalf("Invalid block number: %v", err) + } + ethash.MakeDataset(block, args[1]) + + return nil +} + +func version(ctx *cli.Context) error { + fmt.Println(strings.Title(clientIdentifier)) + fmt.Println("Version:", params.VersionWithMeta) + if gitCommit != "" { + fmt.Println("Git Commit:", gitCommit) + } + if gitDate != "" { + fmt.Println("Git Commit Date:", gitDate) + } + fmt.Println("Quorum Version:", params.QuorumVersion) + fmt.Println("Architecture:", runtime.GOARCH) + fmt.Println("Network Id:", ethconfig.Defaults.NetworkId) + fmt.Println("Go Version:", runtime.Version()) + fmt.Println("Operating System:", runtime.GOOS) + fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH")) + fmt.Printf("GOROOT=%s\n", runtime.GOROOT()) + return nil +} + +func license(_ *cli.Context) error { + fmt.Println(`Geth is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Geth is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with geth. If not, see .`) + return nil +} diff --git a/src/cmd/geth/run_test.go b/src/cmd/geth/run_test.go new file mode 100644 index 00000000..527c38a6 --- /dev/null +++ b/src/cmd/geth/run_test.go @@ -0,0 +1,126 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/docker/docker/pkg/reexec" + "github.com/ethereum/go-ethereum/internal/cmdtest" + "github.com/ethereum/go-ethereum/rpc" +) + +func tmpdir(t *testing.T) string { + dir, err := ioutil.TempDir("", "geth-test") + if err != nil { + t.Fatal(err) + } + return dir +} + +type testgeth struct { + *cmdtest.TestCmd + + // template variables for expect + Datadir string + Etherbase string +} + +func init() { + // Run the app if we've been exec'd as "geth-test" in runGeth. + reexec.Register("geth-test", func() { + if err := app.Run(os.Args); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + os.Exit(0) + }) +} + +func TestMain(m *testing.M) { + // check if we have been reexec'd + if reexec.Init() { + return + } + os.Exit(m.Run()) +} + +// spawns geth with the given command line args. If the args don't set --datadir, the +// child g gets a temporary data directory. 
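The "geth-test" registration above uses the docker reexec helper: the test binary re-executes itself under a registered name, so child processes run the real CLI entry point while TestMain short-circuits in the child. A stripped-down sketch of that mechanism, with a hypothetical command name:

    package main

    import (
        "fmt"
        "os"

        "github.com/docker/docker/pkg/reexec"
    )

    func init() {
        // The handler runs instead of main() when the binary is re-executed
        // with os.Args[0] set to the registered name.
        reexec.Register("toy-child", func() {
            fmt.Println("running as child with args:", os.Args[1:])
            os.Exit(0)
        })
    }

    func main() {
        if reexec.Init() { // true only in the re-executed child
            return
        }
        cmd := reexec.Command("toy-child", "--flag") // re-runs this same binary under the registered name
        cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
        if err := cmd.Run(); err != nil {
            fmt.Fprintln(os.Stderr, err)
        }
    }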
+func runGeth(t *testing.T, args ...string) *testgeth { + tt := &testgeth{} + tt.TestCmd = cmdtest.NewTestCmd(t, tt) + for i, arg := range args { + switch arg { + case "--datadir": + if i < len(args)-1 { + tt.Datadir = args[i+1] + } + case "--miner.etherbase": + if i < len(args)-1 { + tt.Etherbase = args[i+1] + } + } + } + if tt.Datadir == "" { + tt.Datadir = tmpdir(t) + tt.Cleanup = func() { os.RemoveAll(tt.Datadir) } + args = append([]string{"--datadir", tt.Datadir}, args...) + // Remove the temporary datadir if something fails below. + defer func() { + if t.Failed() { + tt.Cleanup() + } + }() + } + + // Boot "geth". This actually runs the test binary but the TestMain + // function will prevent any tests from running. + tt.Run("geth-test", args...) + + return tt +} + +// waitForEndpoint attempts to connect to an RPC endpoint until it succeeds. +func waitForEndpoint(t *testing.T, endpoint string, timeout time.Duration) { + probe := func() bool { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c, err := rpc.DialContext(ctx, endpoint) + if c != nil { + _, err = c.SupportedModules() + c.Close() + } + return err == nil + } + + start := time.Now() + for { + if probe() { + return + } + if time.Since(start) > timeout { + t.Fatal("endpoint", endpoint, "did not open within", timeout) + } + time.Sleep(200 * time.Millisecond) + } +} diff --git a/src/cmd/geth/snapshot.go b/src/cmd/geth/snapshot.go new file mode 100644 index 00000000..09cc9ca9 --- /dev/null +++ b/src/cmd/geth/snapshot.go @@ -0,0 +1,439 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "bytes" + "errors" + "time" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/state/pruner" + "github.com/ethereum/go-ethereum/core/state/snapshot" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + cli "gopkg.in/urfave/cli.v1" +) + +var ( + // emptyRoot is the known root hash of an empty trie. + emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + + // emptyCode is the known hash of the empty EVM bytecode. 
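Both sentinels are the well-known empty-state constants used throughout the state code:

    // emptyRoot = Keccak256(RLP(""))  = 0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421
    // emptyCode = Keccak256(nil)      = 0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470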
+ emptyCode = crypto.Keccak256(nil) +) + +var ( + snapshotCommand = cli.Command{ + Name: "snapshot", + Usage: "A set of commands based on the snapshot", + Category: "MISCELLANEOUS COMMANDS", + Description: "", + Subcommands: []cli.Command{ + { + Name: "prune-state", + Usage: "Prune stale ethereum state data based on the snapshot", + ArgsUsage: "", + Action: utils.MigrateFlags(pruneState), + Category: "MISCELLANEOUS COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.CacheTrieJournalFlag, + utils.BloomFilterSizeFlag, + }, + Description: ` +geth snapshot prune-state +will prune historical state data with the help of the state snapshot. +All trie nodes and contract codes that do not belong to the specified +version state will be deleted from the database. After pruning, only +two version states are available: genesis and the specific one. + +The default pruning target is the HEAD-127 state. + +WARNING: It's necessary to delete the trie clean cache after the pruning. +If you specify another directory for the trie clean cache via "--cache.trie.journal" +during the use of Geth, please also specify it here for correct deletion. Otherwise +the trie clean cache with default directory will be deleted. +`, + }, + { + Name: "verify-state", + Usage: "Recalculate state hash based on the snapshot for verification", + ArgsUsage: "", + Action: utils.MigrateFlags(verifyState), + Category: "MISCELLANEOUS COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + }, + Description: ` +geth snapshot verify-state +will traverse the whole accounts and storages set based on the specified +snapshot and recalculate the root hash of state for verification. +In other words, this command does the snapshot to trie conversion. +`, + }, + { + Name: "traverse-state", + Usage: "Traverse the state with given root hash for verification", + ArgsUsage: "", + Action: utils.MigrateFlags(traverseState), + Category: "MISCELLANEOUS COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + }, + Description: ` +geth snapshot traverse-state +will traverse the whole state from the given state root and will abort if any +referenced trie node or contract code is missing. This command can be used for +state integrity verification. The default checking target is the HEAD state. + +It's also usable without snapshot enabled. +`, + }, + { + Name: "traverse-rawstate", + Usage: "Traverse the state with given root hash for verification", + ArgsUsage: "", + Action: utils.MigrateFlags(traverseRawState), + Category: "MISCELLANEOUS COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + }, + Description: ` +geth snapshot traverse-rawstate +will traverse the whole state from the given root and will abort if any referenced +trie node or contract code is missing. This command can be used for state integrity +verification. The default checking target is the HEAD state. It's basically identical +to traverse-state, but the check granularity is smaller. + +It's also usable without snapshot enabled. 
+`, + }, + }, + } +) + +func pruneState(ctx *cli.Context) error { + stack, config := makeConfigNode(ctx) + defer stack.Close() + + chain, chaindb := utils.MakeChain(ctx, stack, true, false) + defer chaindb.Close() + + //Quorum + if chain.Config().IsQuorum { + log.Error("Can not prune state when using GoQuorum as this has an impact on private state") + return errors.New("prune-state is not available when IsQuorum is enabled") + } + + pruner, err := pruner.NewPruner(chaindb, chain.CurrentBlock().Header(), stack.ResolvePath(""), stack.ResolvePath(config.Eth.TrieCleanCacheJournal), ctx.GlobalUint64(utils.BloomFilterSizeFlag.Name)) + if err != nil { + log.Error("Failed to open snapshot tree", "error", err) + return err + } + if ctx.NArg() > 1 { + log.Error("Too many arguments given") + return errors.New("too many arguments") + } + var targetRoot common.Hash + if ctx.NArg() == 1 { + targetRoot, err = parseRoot(ctx.Args()[0]) + if err != nil { + log.Error("Failed to resolve state root", "error", err) + return err + } + } + if err = pruner.Prune(targetRoot); err != nil { + log.Error("Failed to prune state", "error", err) + return err + } + return nil +} + +func verifyState(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chain, chaindb := utils.MakeChain(ctx, stack, true, false) + defer chaindb.Close() + + snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, chain.CurrentBlock().Root(), false, false, false) + if err != nil { + log.Error("Failed to open snapshot tree", "error", err) + return err + } + if ctx.NArg() > 1 { + log.Error("Too many arguments given") + return errors.New("too many arguments") + } + var root = chain.CurrentBlock().Root() + if ctx.NArg() == 1 { + root, err = parseRoot(ctx.Args()[0]) + if err != nil { + log.Error("Failed to resolve state root", "error", err) + return err + } + } + if err := snaptree.Verify(root); err != nil { + log.Error("Failed to verfiy state", "error", err) + return err + } + log.Info("Verified the state") + return nil +} + +// traverseState is a helper function used for pruning verification. +// Basically it just iterates the trie, ensure all nodes and associated +// contract codes are present. 
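Illustrative invocations of the subcommands wired up above (the root argument is a placeholder, not a real hash):

    // geth snapshot prune-state                  (prune to the default HEAD-127 target; refused when IsQuorum is set)
    // geth snapshot verify-state 0x56e8...b421   (recompute the state root from the snapshot and compare)
    // geth snapshot traverse-state               (walk the HEAD state, checking trie nodes and contract code)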
+func traverseState(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chain, chaindb := utils.MakeChain(ctx, stack, true, false) + defer chaindb.Close() + + if ctx.NArg() > 1 { + log.Error("Too many arguments given") + return errors.New("too many arguments") + } + // Use the HEAD root as the default + head := chain.CurrentBlock() + if head == nil { + log.Error("Head block is missing") + return errors.New("head block is missing") + } + var ( + root common.Hash + err error + ) + if ctx.NArg() == 1 { + root, err = parseRoot(ctx.Args()[0]) + if err != nil { + log.Error("Failed to resolve state root", "error", err) + return err + } + log.Info("Start traversing the state", "root", root) + } else { + root = head.Root() + log.Info("Start traversing the state", "root", root, "number", head.NumberU64()) + } + triedb := trie.NewDatabase(chaindb) + t, err := trie.NewSecure(root, triedb) + if err != nil { + log.Error("Failed to open trie", "root", root, "error", err) + return err + } + var ( + accounts int + slots int + codes int + lastReport time.Time + start = time.Now() + ) + accIter := trie.NewIterator(t.NodeIterator(nil)) + for accIter.Next() { + accounts += 1 + var acc state.Account + if err := rlp.DecodeBytes(accIter.Value, &acc); err != nil { + log.Error("Invalid account encountered during traversal", "error", err) + return err + } + if acc.Root != emptyRoot { + storageTrie, err := trie.NewSecure(acc.Root, triedb) + if err != nil { + log.Error("Failed to open storage trie", "root", acc.Root, "error", err) + return err + } + storageIter := trie.NewIterator(storageTrie.NodeIterator(nil)) + for storageIter.Next() { + slots += 1 + } + if storageIter.Err != nil { + log.Error("Failed to traverse storage trie", "root", acc.Root, "error", storageIter.Err) + return storageIter.Err + } + } + if !bytes.Equal(acc.CodeHash, emptyCode) { + code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash)) + if len(code) == 0 { + log.Error("Code is missing", "hash", common.BytesToHash(acc.CodeHash)) + return errors.New("missing code") + } + codes += 1 + } + if time.Since(lastReport) > time.Second*8 { + log.Info("Traversing state", "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start))) + lastReport = time.Now() + } + } + if accIter.Err != nil { + log.Error("Failed to traverse state trie", "root", root, "error", accIter.Err) + return accIter.Err + } + log.Info("State is complete", "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} + +// traverseRawState is a helper function used for pruning verification. +// Basically it just iterates the trie, ensure all nodes and associated +// contract codes are present. It's basically identical to traverseState +// but it will check each trie node. 
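traverseState above treats every leaf value as an RLP-encoded account. A minimal, self-contained sketch of just that decode step (the leaf bytes are assumed to come from a state-trie iterator such as trie.NewIterator):

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/core/state"
        "github.com/ethereum/go-ethereum/rlp"
    )

    // decodeAccount mirrors the per-leaf decode done in traverseState; leafBlob
    // is assumed to be the RLP value taken from a state-trie leaf.
    func decodeAccount(leafBlob []byte) (*state.Account, error) {
        var acc state.Account
        if err := rlp.DecodeBytes(leafBlob, &acc); err != nil {
            return nil, err
        }
        // acc.Root is the storage-trie root; acc.CodeHash is the Keccak256 of the contract code.
        return &acc, nil
    }

    func main() {
        leaf := []byte{} // placeholder; real bytes would come from the trie iterator
        if acc, err := decodeAccount(leaf); err != nil {
            fmt.Println("decode failed:", err)
        } else {
            fmt.Println("storage root:", acc.Root)
        }
    }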
+func traverseRawState(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chain, chaindb := utils.MakeChain(ctx, stack, true, false) + defer chaindb.Close() + + if ctx.NArg() > 1 { + log.Error("Too many arguments given") + return errors.New("too many arguments") + } + // Use the HEAD root as the default + head := chain.CurrentBlock() + if head == nil { + log.Error("Head block is missing") + return errors.New("head block is missing") + } + var ( + root common.Hash + err error + ) + if ctx.NArg() == 1 { + root, err = parseRoot(ctx.Args()[0]) + if err != nil { + log.Error("Failed to resolve state root", "error", err) + return err + } + log.Info("Start traversing the state", "root", root) + } else { + root = head.Root() + log.Info("Start traversing the state", "root", root, "number", head.NumberU64()) + } + triedb := trie.NewDatabase(chaindb) + t, err := trie.NewSecure(root, triedb) + if err != nil { + log.Error("Failed to open trie", "root", root, "error", err) + return err + } + var ( + nodes int + accounts int + slots int + codes int + lastReport time.Time + start = time.Now() + ) + accIter := t.NodeIterator(nil) + for accIter.Next(true) { + nodes += 1 + node := accIter.Hash() + + if node != (common.Hash{}) { + // Check the present for non-empty hash node(embedded node doesn't + // have their own hash). + blob := rawdb.ReadTrieNode(chaindb, node) + if len(blob) == 0 { + log.Error("Missing trie node(account)", "hash", node) + return errors.New("missing account") + } + } + // If it's a leaf node, yes we are touching an account, + // dig into the storage trie further. + if accIter.Leaf() { + accounts += 1 + var acc state.Account + if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil { + log.Error("Invalid account encountered during traversal", "error", err) + return errors.New("invalid account") + } + if acc.Root != emptyRoot { + storageTrie, err := trie.NewSecure(acc.Root, triedb) + if err != nil { + log.Error("Failed to open storage trie", "root", acc.Root, "error", err) + return errors.New("missing storage trie") + } + storageIter := storageTrie.NodeIterator(nil) + for storageIter.Next(true) { + nodes += 1 + node := storageIter.Hash() + + // Check the present for non-empty hash node(embedded node doesn't + // have their own hash). + if node != (common.Hash{}) { + blob := rawdb.ReadTrieNode(chaindb, node) + if len(blob) == 0 { + log.Error("Missing trie node(storage)", "hash", node) + return errors.New("missing storage") + } + } + // Bump the counter if it's leaf node. 
+ if storageIter.Leaf() { + slots += 1 + } + } + if storageIter.Error() != nil { + log.Error("Failed to traverse storage trie", "root", acc.Root, "error", storageIter.Error()) + return storageIter.Error() + } + } + if !bytes.Equal(acc.CodeHash, emptyCode) { + code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash)) + if len(code) == 0 { + log.Error("Code is missing", "account", common.BytesToHash(accIter.LeafKey())) + return errors.New("missing code") + } + codes += 1 + } + if time.Since(lastReport) > time.Second*8 { + log.Info("Traversing state", "nodes", nodes, "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start))) + lastReport = time.Now() + } + } + } + if accIter.Error() != nil { + log.Error("Failed to traverse state trie", "root", root, "error", accIter.Error()) + return accIter.Error() + } + log.Info("State is complete", "nodes", nodes, "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} + +func parseRoot(input string) (common.Hash, error) { + var h common.Hash + if err := h.UnmarshalText([]byte(input)); err != nil { + return h, err + } + return h, nil +} diff --git a/src/cmd/geth/testdata/blockchain.blocks b/src/cmd/geth/testdata/blockchain.blocks new file mode 100644 index 0000000000000000000000000000000000000000..d29453d3e53b4382cc3ff4254e1931182a6c630d GIT binary patch literal 23287 zcmeI4cT^O~+wW&!$QcAll2t&EAWQgs;ax6Q`^v;ZRi&O@(%$x)pl)W zv`OEiAgJ8pY1=#M9#UDFxG*QO<#g#?$#=2xaY*FKN_jl> z>qj+EW3KW~CS{EOSfCR64=w-yKRW;o0zm|DE%hS;)!y9olLk0~s;VYU*@*ENu}@zq zzlnL;+5d4-YoCABy-klaokcmMTN}lfvQ4$1nwo7Ki*4vvU~8Gof5XHEZK2S2-{PL_ zoQEc-U}pSaeucZx-qSrs-(zT~pjFLEnGx3SE(u}lyL$0KknvB-6G;aC_5IE2^;B5VX% zgBU^hy^j2gMZpjz#O!UgC*V*HO@s+)ZsiG-yoiz~ou=ea@+wNwo!PyBlKLn~AH^+* zlDAQk?_!8PN+M8Fs7-+fCF4=@x<`)%N>-z!W-Hfglx#&wGu69}C^?RjmZ942D7lD| zE>(;JC*Wuhl)Oikr;3t5?adE^3mhi> z>oG_;dJkyXr}(L&6gZ3wWpv#CoA7hF1~{zYN98?m^ooPZ@qVgm1`bpEQDhSigZ^@U zG;BBq{U7;gR0j>7{9!l?$Bg}v5<0j*)D`^KJwTI#!vcS#)Znn63Y#4i4*!wua$r#Y zVF)=e82m8A!!gQ!q%z?c@;_1~aP*~Ln_dscgdFChy1@tnheB}u)clP0(Woe*0?m`=tW3Un}@24{8Ka_WR>hXuqV;|7g_T_T``33H#|_ z@PdpWQEE}(iKAdBK%$i4=&0Kh_5GTSpg-~;cwk=Fk36&=dEbwlKg>{f*_%QH$;aTC6ts++p+r)BUKwrLo*XVs zjj@dTm(NXt-NEjM`qQAAlCFDjF^5S2OhSGAVnw(S8q+fxmgcA0iO%+EW9#zp#<-ivB{>}gN7n2bb?Z0mnk0%csV{Bd42E0u&&j1+kID275akH;C zo=IMu{^FG3PbbUzwZ|XbWGb3M?q#Bh6#oh%zlbqtoW<4Kn z`SxWIC8MNHA_e97b8ff}*XgX>8gUxeey_B^m6jwLxYF`hT?Q9g9}z&#y~u*xavICg zngn@gF$&E$l+kx^$Ld&|qMhdO$GP&5%cori0C9V{E%Pl?C^P`p$`xlEzeuBG)2A%& z!WDD9UBK()?5jxJKycwi11y{;vNveUFB9OPs_o=|gy1QlEtsYrl$d#Pb1%igf)p^1 z#!_NU^S5Ds!;1Y_Kv;V&KC&W@zKJo!pyRR@$yKvM&IsAo)t?P{lEcpLr5238EbicN zheqFjPJ&g!&lo+PEPYJQCRD@uongz9ZFDoKx6A9P_oVCg^ zHR&nN0Xn@svN*pv135#WA8@9Y3UW4q06g#M&ADhsE$%%V;dMqBmD&z61}y4loz8QX zDiyk6bO#xV&6LhlxyKH2hISNZ;F_8SNU47nJI-8Yqi-XzI&}{7172o`XZYql$?(XC zxUA-Up>~R8-TH9GXvR&-oJ}GwE;-Q|4nrs)0i~FSY%5uadCI<*&*)y$Q zpAM6=z2&GiddyV}zQ^T~$2>xhFZADG>{Sy9=ZIi-tv;tUE%t0SlttKZ9q6~NS%T^5o-E=rM5DUC1@YqD+}u9l7B&bIA6wqcLO zIr}E#9xk_QOVdQTKU`bOiuj|Z-Qsbd$_O~1S95PpoQY9)Gg`#qx%E6{St-|2k*TJ? 
zrK6E(MF;SQo-{mbhxJu#+QOcCQAtfmis5dGDarbLi`QzP|?gg5r z_aD7evQqCH-xfhTIbSq~q&t;1Z#c2d4RVHY6ldVOW?G;k#v8K(+E_dA{7G^&tf_6t za%3l!rbKl+~UAoX*tt^EA1=aoO#aHw4Ja$^R*lj;(1BukTY{@ z1Cw43rO0wiZt{r0mMCt&b#1oCL&~y^mrZr0uvCsYo)zaT)W{3%RASGke}vwKD_+3I7(d*6d3TRE*KRxmH$2gC<82S;yvErybKC5kF8Ul`Q{BdY zkmF_hgIR?dVkPNsB?#(R7f5pbpw*LU<{PJ+e{%+MhQRvHnJLKG3Ifahk>|)jSd0~ffIp14t=t3AW6gheGJom<1kTcAqI0L_b&;ZYE%!H?c7B>i-pq)2LS5mkD@tz} z3Mf4+4>`M>II-qYcX_X2`07%}-PsHaiqJYe9mlj-qYYQ_?&|`_A>XC`=ZhTwoqc7trFojzyb25}h2%@wms#D-0HSgoA`@2N#} zKeI_-SVX{g;=H^?bnf-w^1^e?KAySE>=HTU*KUcw#^IrA{mmK3Sq=7g&d5N{))4@e zS+8n!YUv#8GAu#q*B&pQ3zwz@azMqG0Pb4kLG53wcg(0eH_f}CL;#ToeZjTRud zesYF#1)Wvuxj}pAwc>XH=V`ISVoD3P(J^b@*x{T4>^0@N7c0&J0_-*0>;Y$j#lI<) zG;sUv>GSah(k$Xwqc{r!pZ;YxNhqab#8lL>9}hTEE@&*Ub8yBMrqWgjNtAZRIyxPb zvxkN04x{JD8PhYtBq|+>0nEuHY zrhb*`if?yrIy;D??>mkHEi{j7%%jXceVW-bUE@){AQ0i2piAH;@U%Ax^6K6u*?%_fF;!f;n#s0=XXp4ato!C4mg!S^4Y7@#zO!hWj-w9DXuj}jsDf$@El8ceQ=brJDY+s_yk%o=wd z1HPOc>RXa_^;fJAXys|{v`g<0bdw}_OXjJo7|7aYlS2;Cjk^}36Hip2weobY?^)>u zC<(MtsfV@tWd(2rJ4Of}lf$70Q#YC4%`1~|x)uzbAB@IY(R=;L$4Yxl^YY_jd_w|2 zP(u_;G)X1nU8u0dEptIiZo4K}kFooT_SE1m)EDy`kj2KBbvb2^Dko35mcYJE9{6Gz zOyPEp!Pj*{K8~Kx{x^pphmyDl99kNI9DYFnqx?5yN$vH^lTN-r{ZSFCuGEfkhvET^ z_T}~m-PbOi1dtg|HcrbBwc3Cj;{00c%lSr;!F%C?!z`k?rxUbC{GWvr`OIo* zRahjE$*}>_ITJe^f_9~aGfAdD3U6CHHnf zFxxthKkepytM=s!oSOnrZ_-zfC~J<%p)z^xBc7yOeq#i@t4D6C-dl*nlFtT8%Z)os z+cZWG0W5BH$mgh>3PXv?<7oe$ju)jRy zP+QyaV_TdGs}Il9yp266AiL?A66Y{R>!1T5g+Eo+4fw9$kkY(vkZo3==OJrf7i#K>7uFj!k6xg}v z+?y_%k>Gd%?c+Vue&q(5Mq)Bq;y?$>8$o>UrFedG2y$41|9w1E13BD90JSE2>N9G!@P`53j}sa zXs_nHUx*`kMwGhH%=^ZD4mwfDG$9^kMwCfG{sb^?7c~4rlbRqTZ8EdTUu58nMs2E{ zQ$fZwiE)y4_1V@Tha8k$iKS@J{2SQMhXiPbI%OW!A}1_f z=P!+=eol}*UleEk&g1$yq4x=?b(&n+uSE_ytbfAJif2gx82}>~g>=W>LA=C;MPQ-f+}<1C8lSB;erUJIr%KE_+QWmHdoWT`)(UpuD6gIa_v| zE${Gt+qK{0A;_U5;dc&mP#o5yApq}{^Sw@5K@0evpRP7ndyGhtk`%Syb&gCmVbkg@ z6)QyMrl#2kF|ZJV91EWLY2p&6yj3u>CtwN5SZhV zEtP=Mh3UqI)kk*j=o@0*-i1mMJ5S$G=Pq_vZa4WdVzokn9=u4$kYK^p+*vF78_#;3 zY~>jF7uNZltS{C-7NX9T`pqH8A%f^Thy5UjFa+>?+lFc>9jZEXDnRev%mNd-2~k4U z7h}j~-z$@dwr>}akOWS#oq*(dkVC?wI0PMz^IxHdW-sP4@QhPfwyWk~rir zrQLv`je8AE(=|EwEZ&bRzOC1blnUG@*xV2>0T$Df#iH_nBbcg?E4$eQ_E6ntgTqo&<7B93d95~q#ItnR-71E&SbNv06ZkxrVHuU z+I;kajCLEv;ej_KrCZ~wz1wLbF?GsYtRclNv1Z9@SOYc%ofl}hmNO&qkIA9;(k?0A z7pb+oNDkgFP80X{F6}cW*lkhf`kHb|C3S=X_Dx1?drzFtM21+Ue zc;Ka0kBb0i;vvOeD;2R1X>&EZ96T5;=iLNI)$l`huFE|G!jd|W8y8*Gt|#$Pf$yn_ zkKiy6^irb*FbSG5)MjMM9SW?FgT;|f(OV*yF!nN*Egs?tN|X8)11xZ(izSR|@q&Sr6IJ~5;)VUK!Tdz7>4!JT(=M6l@f zRB!5|E;LF(u0T5DY&XZj7;&PTUBIh1WACK2ECt>+xr<}biu=6$iqkhK*^|mbWJV{x z(xv{JGmtY$((jysUTXD(D6d-<{KsC>QVk5E_&dX&wx+c(_Lf!1i_$pn!eti*HFc4a zayC8BlxL$s&Pa~p?7Np*No)!0P+qhdjV7jZSE*n~J;ArDCM06l6OeOr@n@>b0U_Zz zyz|Yq#JT1kY{EOO${&gQ-^m1BFQt-kJ^eu~iTsc=iV=aXm6L|nYYQcWvobP*cB55U zrw9X6;!Q^+6-L!FkI5O^8g;XvzD5~=03bC##Mwi=#ebV~;|l?-9w$umme3afcwU)A zuor%Q{Lx1qJ(|^kQLN`}<*nrez(=M>f~KGE0Mz+cOx_1s&n7KAg3ISVG)Ft7?M!X? zr8ma`78( zr;xZOL5ZZ%^&Xp*zI^cAjr1taKz|h)An{?jIE~>5Rxi1$9L-qv>xmmh(r86I-yAKT zm_5m^AP3l7*iIq*l7ejphn45#7m(Oz`1#ENC1yz<{CcVeSLQ>`WH?GPWL}tm3UPBj z>pe`{{MwsUX{uDy&lfXeGqSUB;h3D|Wdt!uV>J8SZ}T$FG#Hrn^XF7~>-}WU_5$IU z+;o2~z$|#JAZ#p*!H7(I=XI+ys|X(bTc$_Dy;V=yLi$YgrvdM(q_NtW&P(}k`iUE9)>&bXW$+cVMkzw1cv=&*H zM;~nad=tjQxhn$T|Fjefw}LMFbn0K&hUQv)(bN|Q#c^w zY?tVM%BP@Y!eZzH9YCk_`catNFoy1)sOH2nS>){;Sj|}(=O7_#eXJUDs!l+|fmdeU zx~CG3rocnU^1N`07=txQtg6}#vWHz3Rg(XkGZbeS5c2Pwf&MD>Liu|%!_kh2pZt`}JWm3!OKTSQW^buFRg>OVskEX~HPQJraT8xyYt3F7 zmoldLkh51+wO!tQlv$Q~PQ#{82(P(J^iskK6dm0zu)E7oi5=HpC4rW#zfY}@FA^K! 
z+G?USEgo`6^qqCcAHewaHyvqY#tM^rTY}*sXL+B@zI;ZP^pvW)4RhswzUKo>Vt2tsQ!_ z5+j9Tn6vQbrE}nSv4iK$j9fjdTU$&-t=DWs@Xd_GdoM(!U1FR)tLaPFx39fxdCc*Q ztwltbw<-K;*TXR4Q!ddT?%sS^79`^TUgU}1w~a|$Uf_P+U>>9O66;G!b!ihyhPm!B zSHGI%o!9Mq+vKuiO&UPdw2*v7cajXDO#kXF_4Tj*+~pfrt^lk41N9oh*!HtOaVGf> z-*5iUWdBVo82)nRzbW*WGN`>P{>Ju;3GE<-;tX-}fHST0AZIiPAYU+;^CD$=3YUU( zjcxF)Hgmnvq>OuE#a>e78-lOYl#$DRU!T4H*7ySCjN&NH!0iQSfeW$cRJC#`(A78+ zY(iQ~jE7I?M`Bdn74DbaV%K00s{}~#H7C7mJG0=F1HH99x7)a!0jZr=8L#@uXs=`r z2%b9ROjIgOxKuK$Ua!tG(hX7JK1}^0>BJ+ljO=V(q+atp=P@}`siZbChcK^(Z%dq2 zyvz5|aD>Y!N)COSX@O?Q>;j7*!1S;z4iI_h;o*+8HXSKv+_AD{v9d;YwfDw{uh^jy za{wg|gjUyS@d97)iy@ZOxHTT^LM;(2De=$V*)YDhi)cS__DdK1UrR{-^~?-^HT_2x z#D7!@9M5Vf4>;2l0y(2c03(n6-_y9c+~k77I-+?WM;O;(Q}3=x*KwUywj8_BRF0fj zP3E?0ywD7CcJe6Bz)dJ<0I8{R7vqUxr>5=h(KG#LqN+)=lov*?Nzdn)0K3y$UVv)S z%UD^`SEYhy?}mPRFx9{lF)l4x9+Uq$J;l+RDn<8@Ga5n$9U&6|PxyEy4*Rgj1Bw{~ z=AmaZx%G-!cGdNaWXI&JGr>y2wTVw)?(LF&zm6b7G7!cB(px>3V^XH19>cK!7_2zvHIqWC3j2TAAv`moKA=p+?Bw%G4Omo)EZZo?EqSf3IHv4C z6e<9JYpSGQ+tWq$Xm;4L=_O!j_U3}=PKavEYZhTXW4`d`M2+G8={sy(4GtMEt<|;v zPjhx;kAs<#e=WiASJQuFL4c(s!RW|<;!KkI`@D$;a>k4R(C$oeH|U!>-T9zUS(Ik( z6c)8{%>o*Ai}P_=DKjIL6|&x_x+=skEdu0>>L|{@&3b46pr$Pcmnma7@mgAdGG89Lr zXu2{}&LH-FW0-7Umtlh+P%1X^_HT;~yt*8kTq1VGN6E%@x%4SEevA5fUJ{naNSN`>Qqiou#_Yi9T`xZA!xpjXK;`+4g|1$JwlGwv&n278Vi?- zbuyxH_(<*b>ZTB0Qo^Yb^DQZ4&Q4Xi0tS2)CJn@^XXi)NW0le3--k&P?TV_ZgcEQ#Ah zND!vZU>fG&iB7lVy9pt#nh}n%Vh1W@_ab@;;-Y`|D&A!Hy9y6yCfJL8BmnE6JpgjXjQ~!ldN)ao)@M4sd6^U{&rO=bnB-IbNJOng` z$QjL1oPisP(Et%Q@@})T)#&+sy%Zl6Ib`BJ8Ap<{r}Dh3|Ac}$?#u3$tudD#wylqEhW#Ed?&W=WRFrY4_K>rS>wWsTqTNr)oljcKM>gi1tMyU!t_RVj;t zb2(+Y8=kKzp6N{$xLZX|@izPd|q)9m0PZoRp6#aISE{n-qqWgMowWIcolu|8vrR1p4 zDr!`-&&NsdZ#g^U3Gt_{JhcDG{5MI4Uu}|y$shI~>M-x`e*anu0Tz=4qd$i~b0$f5 zz?mK+$e92Fu>SD6sBFt{>22{sf)ztMOCv-!?IN>2@&13YK)Gw~#6MLScc2^t?L6njbTGBg6OzL<} z1`p`w9Fwz$IEkGI>6?8@Ebh-Y_&Xz`qAG}9sc1?yCaasSOu1D9LJX_jas}@t0%6I6 zL8-IZ$sGX%M$8kRIkHSNadYzR02ayDiW1I`?!ZpxihF+Cq*k#fVKM7`d);`Nzud;& zZR-Cu&VC`}7kn6gNr4Fj7)gQ=1bPtv5&ft|67(CwLeL*@X0QZub`iDPOMQd%lOb8{ zl!48ut?HT<@qwF)F?|MbuSB#lV6srS-lD;YH~scNgb0COL-{GDE3yuVdJX? zRyLDdv{drVvi0ZF0Iu~=kOW8GV#!Ou$azGV_Dgh=QYth^KUEptg;NtHR*N=MjMuU| zv&||DWaIi+%v@MkKFuOXH(}gX)!5il%|1u2-T+sIc&JkT={x%WjrAGBAH9DdP#yn0 zFdT{_=FLBfpu~^zhgJkAB*Ey<;Rk1^9293Y4ByAIWRNp41aN)C_0_(HhV~2G@i@ll zh-*t`144O_xj1IRVtTz9{AA=1CO4#*5OD_NjQ%Ljz^z?r06Sd>yZ0s*R*w+rI9niz z1I `385`.", + "description": "A mining flaw could cause miners to erroneously calculate PoW, due to an index overflow, if DAG size is exceeding the maximum 32 bit unsigned value.\n\nThis occurred on the ETC chain on 2020-11-06. 
This is likely to trigger for ETH mainnet around block `11550000`/epoch `385`, slated to occur early January 2021.\n\nThis issue is relevant only for miners, non-mining nodes are unaffected, since non-mining nodes use a smaller verification cache instead of a full DAG.", + "links": [ + "https://github.com/ethereum/go-ethereum/pull/21793", + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://github.com/ethereum/go-ethereum/commit/567d41d9363706b4b13ce0903804e8acf214af49" + ], + "introduced": "v1.6.0", + "fixed": "v1.9.24", + "published": "2020-11-12", + "severity": "Medium", + "check": "Geth\\/v1\\.(6|7|8)\\..*|Geth\\/v1\\.9\\.2(1|2|3)-.*" + }, + { + "name": "GoCrash", + "uid": "GETH-2020-02", + "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing, due to an underlying bug in Go (CVE-2020-28362) versions < `1.15.5`, or `<1.14.12`", + "description": "The DoS issue can be used to crash all Geth nodes during block processing, the effects of which would be that a major part of the Ethereum network went offline.\n\nOutside of Go-Ethereum, the issue is most likely relevant for all forks of Geth (such as TurboGeth or ETC’s core-geth) which is built with versions of Go which contains the vulnerability.", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://groups.google.com/g/golang-announce/c/NpBGTTmKzpM", + "https://github.com/golang/go/issues/42552" + ], + "fixed": "v1.9.24", + "published": "2020-11-12", + "severity": "Critical", + "check": "Geth.*\\/go1\\.(11(.*)|12(.*)|13(.*)|14|14\\.(\\d|10|11|)|15|15\\.[0-4])$" + }, + { + "name": "ShallowCopy", + "uid": "GETH-2020-03", + "summary": "A consensus flaw in Geth, related to `datacopy` precompile", + "description": "Geth erroneously performed a 'shallow' copy when the precompiled `datacopy` (at `0x00...04`) was invoked. 
An attacker could deploy a contract that uses the shallow copy to corrupt the contents of the `RETURNDATA`, thus causing a consensus failure.", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/" + ], + "introduced": "v1.9.7", + "fixed": "v1.9.17", + "published": "2020-11-12", + "severity": "Critical", + "check": "Geth\\/v1\\.9\\.(7|8|9|10|11|12|13|14|15|16).*$" + }, + { + "name": "GethCrash", + "uid": "GETH-2020-04", + "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing", + "description": "Full details to be disclosed at a later date", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/" + ], + "introduced": "v1.9.16", + "fixed": "v1.9.18", + "published": "2020-11-12", + "severity": "Critical", + "check": "Geth\\/v1\\.9.(16|17).*$" + } +] diff --git a/src/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.1 b/src/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.1 new file mode 100644 index 00000000..f9066d4f --- /dev/null +++ b/src/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.1 @@ -0,0 +1,4 @@ +untrusted comment: signature from minisign secret key +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: timestamp:1605618622 file:vulnerabilities.json +osAPs4QPdDkmiWQxqeMIzYv/b+ZGxJ+19Sbrk1Cpq4t2gHBT+lqFtwL3OCzKWWyjGRTmHfsVGBYpzEdPRQ0/BQ== diff --git a/src/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.2 b/src/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.2 new file mode 100644 index 00000000..a89a83d2 --- /dev/null +++ b/src/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.2 @@ -0,0 +1,4 @@ +untrusted comment: Here's a comment +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: Here's a trusted comment +3CnkIuz9MEDa7uNyGZAbKZhuirwfiqm7E1uQHrd2SiO4Y8+Akw9vs052AyKw0s5nhbYHCZE2IMQdHNjKwxEGAQ== diff --git a/src/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.3 b/src/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.3 new file mode 100644 index 00000000..6fd33b19 --- /dev/null +++ b/src/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.3 @@ -0,0 +1,4 @@ +untrusted comment: One more (untrusted) comment +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: Here's a trusted comment +3CnkIuz9MEDa7uNyGZAbKZhuirwfiqm7E1uQHrd2SiO4Y8+Akw9vs052AyKw0s5nhbYHCZE2IMQdHNjKwxEGAQ== diff --git a/src/cmd/geth/testdata/vcheck/minisign.pub b/src/cmd/geth/testdata/vcheck/minisign.pub new file mode 100644 index 00000000..183dce5f --- /dev/null +++ b/src/cmd/geth/testdata/vcheck/minisign.pub @@ -0,0 +1,2 @@ +untrusted comment: minisign public key 284E00B52C269624 +RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp diff --git a/src/cmd/geth/testdata/vcheck/minisign.sec b/src/cmd/geth/testdata/vcheck/minisign.sec new file mode 100644 index 00000000..5c50715b --- /dev/null +++ b/src/cmd/geth/testdata/vcheck/minisign.sec @@ -0,0 +1,2 @@ +untrusted comment: minisign encrypted secret key +RWRTY0Iyz8kmPMKrqk6DCtlO9a33akKiaOQG1aLolqDxs52qvPoAAAACAAAAAAAAAEAAAAAArEiggdvyn6+WzTprirLtgiYQoU+ihz/HyGgjhuF+Pz2ddMduyCO+xjCHeq+vgVVW039fbsI8hW6LRGJZLBKV5/jdxCXAVVQE7qTQ6xpEdO0z8Z731/pV1hlspQXG2PNd16NMtwd9dWw= diff --git a/src/cmd/geth/testdata/vcheck/signify-sigs/data.json.sig 
b/src/cmd/geth/testdata/vcheck/signify-sigs/data.json.sig new file mode 100644 index 00000000..3d5fcacf --- /dev/null +++ b/src/cmd/geth/testdata/vcheck/signify-sigs/data.json.sig @@ -0,0 +1,2 @@ +untrusted comment: verify with ./signifykey.pub +RWSKLNhZb0KdAbhRUhW2LQZXdnwttu2SYhM9EuC4mMgOJB85h7/YIPupf8/ldTs4N8e9Y/fhgdY40q5LQpt5IFC62fq0v8U1/w8= diff --git a/src/cmd/geth/testdata/vcheck/signifykey.pub b/src/cmd/geth/testdata/vcheck/signifykey.pub new file mode 100644 index 00000000..328f973a --- /dev/null +++ b/src/cmd/geth/testdata/vcheck/signifykey.pub @@ -0,0 +1,2 @@ +untrusted comment: signify public key +RWSKLNhZb0KdATtRT7mZC/bybI3t3+Hv/O2i3ye04Dq9fnT9slpZ1a2/ diff --git a/src/cmd/geth/testdata/vcheck/signifykey.sec b/src/cmd/geth/testdata/vcheck/signifykey.sec new file mode 100644 index 00000000..3279a2e5 --- /dev/null +++ b/src/cmd/geth/testdata/vcheck/signifykey.sec @@ -0,0 +1,2 @@ +untrusted comment: signify secret key +RWRCSwAAACpLQDLawSQCtI7eAVIvaiHzjTsTyJsfV5aKLNhZb0KdAWeICXJGa93/bHAcsY6jUh9I8RdEcDWEoGxmaXZC+IdVBPxDpkix9fBRGEUdKWHi3dOfqME0YRzErWI5AVg3cRw= diff --git a/src/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.1 b/src/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.1 new file mode 100644 index 00000000..f9066d4f --- /dev/null +++ b/src/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.1 @@ -0,0 +1,4 @@ +untrusted comment: signature from minisign secret key +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: timestamp:1605618622 file:vulnerabilities.json +osAPs4QPdDkmiWQxqeMIzYv/b+ZGxJ+19Sbrk1Cpq4t2gHBT+lqFtwL3OCzKWWyjGRTmHfsVGBYpzEdPRQ0/BQ== diff --git a/src/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.2 b/src/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.2 new file mode 100644 index 00000000..a89a83d2 --- /dev/null +++ b/src/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.2 @@ -0,0 +1,4 @@ +untrusted comment: Here's a comment +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: Here's a trusted comment +3CnkIuz9MEDa7uNyGZAbKZhuirwfiqm7E1uQHrd2SiO4Y8+Akw9vs052AyKw0s5nhbYHCZE2IMQdHNjKwxEGAQ== diff --git a/src/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.3 b/src/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.3 new file mode 100644 index 00000000..6fd33b19 --- /dev/null +++ b/src/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.3 @@ -0,0 +1,4 @@ +untrusted comment: One more (untrusted) comment +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: Here's a trusted comment +3CnkIuz9MEDa7uNyGZAbKZhuirwfiqm7E1uQHrd2SiO4Y8+Akw9vs052AyKw0s5nhbYHCZE2IMQdHNjKwxEGAQ== diff --git a/src/cmd/geth/testdata/vcheck/vulnerabilities.json b/src/cmd/geth/testdata/vcheck/vulnerabilities.json new file mode 100644 index 00000000..36509f95 --- /dev/null +++ b/src/cmd/geth/testdata/vcheck/vulnerabilities.json @@ -0,0 +1,70 @@ +[ + { + "name": "CorruptedDAG", + "uid": "GETH-2020-01", + "summary": "Mining nodes will generate erroneous PoW on epochs > `385`.", + "description": "A mining flaw could cause miners to erroneously calculate PoW, due to an index overflow, if DAG size is exceeding the maximum 32 bit unsigned value.\n\nThis occurred on the ETC chain on 2020-11-06. 
This is likely to trigger for ETH mainnet around block `11550000`/epoch `385`, slated to occur early January 2021.\n\nThis issue is relevant only for miners, non-mining nodes are unaffected, since non-mining nodes use a smaller verification cache instead of a full DAG.", + "links": [ + "https://github.com/ethereum/go-ethereum/pull/21793", + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://github.com/ethereum/go-ethereum/commit/567d41d9363706b4b13ce0903804e8acf214af49", + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-v592-xf75-856p" + ], + "introduced": "v1.6.0", + "fixed": "v1.9.24", + "published": "2020-11-12", + "severity": "Medium", + "CVE": "CVE-2020-26240", + "check": "Geth\\/v1\\.(6|7|8)\\..*|Geth\\/v1\\.9\\.\\d-.*|Geth\\/v1\\.9\\.1.*|Geth\\/v1\\.9\\.2(0|1|2|3)-.*" + }, + { + "name": "Denial of service due to Go CVE-2020-28362", + "uid": "GETH-2020-02", + "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing, due to an underlying bug in Go (CVE-2020-28362) versions < `1.15.5`, or `<1.14.12`", + "description": "The DoS issue can be used to crash all Geth nodes during block processing, the effects of which would be that a major part of the Ethereum network went offline.\n\nOutside of Go-Ethereum, the issue is most likely relevant for all forks of Geth (such as TurboGeth or ETC’s core-geth) which is built with versions of Go which contains the vulnerability.", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://groups.google.com/g/golang-announce/c/NpBGTTmKzpM", + "https://github.com/golang/go/issues/42552", + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-m6gx-rhvj-fh52" + ], + "introduced": "v0.0.0", + "fixed": "v1.9.24", + "published": "2020-11-12", + "severity": "Critical", + "CVE": "CVE-2020-28362", + "check": "Geth.*\\/go1\\.(11(.*)|12(.*)|13(.*)|14|14\\.(\\d|10|11|)|15|15\\.[0-4])$" + }, + { + "name": "ShallowCopy", + "uid": "GETH-2020-03", + "summary": "A consensus flaw in Geth, related to `datacopy` precompile", + "description": "Geth erroneously performed a 'shallow' copy when the precompiled `datacopy` (at `0x00...04`) was invoked. 
An attacker could deploy a contract that uses the shallow copy to corrupt the contents of the `RETURNDATA`, thus causing a consensus failure.", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-69v6-xc2j-r2jf" + ], + "introduced": "v1.9.7", + "fixed": "v1.9.17", + "published": "2020-11-12", + "severity": "Critical", + "CVE": "CVE-2020-26241", + "check": "Geth\\/v1\\.9\\.(7|8|9|10|11|12|13|14|15|16).*$" + }, + { + "name": "GethCrash", + "uid": "GETH-2020-04", + "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing", + "description": "Full details to be disclosed at a later date", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-jm5c-rv3w-w83m" + ], + "introduced": "v1.9.16", + "fixed": "v1.9.18", + "published": "2020-11-12", + "severity": "Critical", + "CVE": "CVE-2020-26242", + "check": "Geth\\/v1\\.9.(16|17).*$" + } +] diff --git a/src/cmd/geth/testdata/wrong-passwords.txt b/src/cmd/geth/testdata/wrong-passwords.txt new file mode 100644 index 00000000..7d1e338b --- /dev/null +++ b/src/cmd/geth/testdata/wrong-passwords.txt @@ -0,0 +1,3 @@ +wrong +wrong +wrong diff --git a/src/cmd/geth/usage.go b/src/cmd/geth/usage.go new file mode 100644 index 00000000..2fc096b0 --- /dev/null +++ b/src/cmd/geth/usage.go @@ -0,0 +1,384 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +// Contains the geth command usage template and generator. + +package main + +import ( + "io" + "sort" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/internal/debug" + "github.com/ethereum/go-ethereum/internal/flags" + "gopkg.in/urfave/cli.v1" +) + +// Quorum +var quorumAccountFlagGroup = "QUORUM ACCOUNT" + +// End Quorum + +// AppHelpFlagGroups is the application flags, grouped by functionality. 
+var AppHelpFlagGroups = []flags.FlagGroup{ + { + Name: "ETHEREUM", + Flags: []cli.Flag{ + configFileFlag, + utils.DataDirFlag, + utils.AncientFlag, + utils.MinFreeDiskSpaceFlag, + utils.KeyStoreDirFlag, + utils.USBFlag, + utils.SmartCardDaemonPathFlag, + utils.NetworkIdFlag, + utils.MainnetFlag, + utils.GoerliFlag, + utils.RinkebyFlag, + utils.YoloV3Flag, + utils.RopstenFlag, + utils.SyncModeFlag, + utils.ExitWhenSyncedFlag, + utils.GCModeFlag, + utils.TxLookupLimitFlag, + utils.EthStatsURLFlag, + utils.IdentityFlag, + utils.LightKDFFlag, + utils.AuthorizationListFlag, + }, + }, + { + Name: "LIGHT CLIENT", + Flags: []cli.Flag{ + utils.LightServeFlag, + utils.LightIngressFlag, + utils.LightEgressFlag, + utils.LightMaxPeersFlag, + utils.UltraLightServersFlag, + utils.UltraLightFractionFlag, + utils.UltraLightOnlyAnnounceFlag, + utils.LightNoPruneFlag, + utils.LightNoSyncServeFlag, + }, + }, + { + Name: "DEVELOPER CHAIN", + Flags: []cli.Flag{ + utils.DeveloperFlag, + utils.DeveloperPeriodFlag, + }, + }, + { + Name: "ETHASH", + Flags: []cli.Flag{ + utils.EthashCacheDirFlag, + utils.EthashCachesInMemoryFlag, + utils.EthashCachesOnDiskFlag, + utils.EthashCachesLockMmapFlag, + utils.EthashDatasetDirFlag, + utils.EthashDatasetsInMemoryFlag, + utils.EthashDatasetsOnDiskFlag, + utils.EthashDatasetsLockMmapFlag, + }, + }, + { + Name: "TRANSACTION POOL", + Flags: []cli.Flag{ + utils.TxPoolLocalsFlag, + utils.TxPoolNoLocalsFlag, + utils.TxPoolJournalFlag, + utils.TxPoolRejournalFlag, + utils.TxPoolPriceLimitFlag, + utils.TxPoolPriceBumpFlag, + utils.TxPoolAccountSlotsFlag, + utils.TxPoolGlobalSlotsFlag, + utils.TxPoolAccountQueueFlag, + utils.TxPoolGlobalQueueFlag, + utils.TxPoolLifetimeFlag, + }, + }, + { + Name: "PERFORMANCE TUNING", + Flags: []cli.Flag{ + utils.CacheFlag, + utils.CacheDatabaseFlag, + utils.CacheTrieFlag, + utils.CacheTrieJournalFlag, + utils.CacheTrieRejournalFlag, + utils.CacheGCFlag, + utils.CacheSnapshotFlag, + utils.CacheNoPrefetchFlag, + utils.CachePreimagesFlag, + }, + }, + { + Name: "ACCOUNT", + Flags: []cli.Flag{ + utils.UnlockedAccountFlag, + utils.PasswordFileFlag, + utils.ExternalSignerFlag, + utils.InsecureUnlockAllowedFlag, + }, + }, + { + Name: "API AND CONSOLE", + Flags: []cli.Flag{ + utils.IPCDisabledFlag, + utils.IPCPathFlag, + utils.HTTPEnabledFlag, + utils.HTTPListenAddrFlag, + utils.HTTPPortFlag, + utils.HTTPApiFlag, + utils.HTTPPathPrefixFlag, + utils.HTTPCORSDomainFlag, + utils.HTTPVirtualHostsFlag, + utils.WSEnabledFlag, + utils.WSListenAddrFlag, + utils.WSPortFlag, + utils.WSApiFlag, + utils.WSPathPrefixFlag, + utils.WSAllowedOriginsFlag, + utils.GraphQLEnabledFlag, + utils.GraphQLCORSDomainFlag, + utils.GraphQLVirtualHostsFlag, + utils.RPCGlobalGasCapFlag, + utils.RPCGlobalTxFeeCapFlag, + utils.AllowUnprotectedTxs, + utils.JSpathFlag, + utils.ExecFlag, + utils.PreloadJSFlag, + utils.RPCClientToken, + utils.RPCClientTLSInsecureSkipVerify, + utils.RPCClientTLSCert, + utils.RPCClientTLSCaCert, + utils.RPCClientTLSCipherSuites, + }, + }, + { + Name: "NETWORKING", + Flags: []cli.Flag{ + utils.BootnodesFlag, + utils.DNSDiscoveryFlag, + utils.ListenPortFlag, + utils.MaxPeersFlag, + utils.MaxPendingPeersFlag, + utils.NATFlag, + utils.NoDiscoverFlag, + utils.DiscoveryV5Flag, + utils.NetrestrictFlag, + utils.NodeKeyFileFlag, + utils.NodeKeyHexFlag, + }, + }, + { + Name: "MINER", + Flags: []cli.Flag{ + utils.MiningEnabledFlag, + utils.MinerThreadsFlag, + utils.MinerNotifyFlag, + utils.MinerGasPriceFlag, + utils.MinerGasTargetFlag, + utils.MinerGasLimitFlag, + 
utils.MinerEtherbaseFlag, + utils.MinerExtraDataFlag, + utils.MinerRecommitIntervalFlag, + utils.MinerNoVerfiyFlag, + }, + }, + { + Name: "GAS PRICE ORACLE", + Flags: []cli.Flag{ + utils.GpoBlocksFlag, + utils.GpoPercentileFlag, + utils.GpoMaxGasPriceFlag, + }, + }, + { + Name: "VIRTUAL MACHINE", + Flags: []cli.Flag{ + utils.VMEnableDebugFlag, + utils.EVMInterpreterFlag, + utils.EWASMInterpreterFlag, + // Quorum - timout for calls + utils.EVMCallTimeOutFlag, + }, + }, + { + Name: "LOGGING AND DEBUGGING", + Flags: append([]cli.Flag{ + utils.FakePoWFlag, + utils.NoCompactionFlag, + }, debug.Flags...), + }, + { + Name: "METRICS AND STATS", + Flags: metricsFlags, + }, + { + Name: "ALIASED (deprecated)", + Flags: []cli.Flag{ + utils.NoUSBFlag, + utils.LegacyRPCEnabledFlag, + utils.LegacyRPCListenAddrFlag, + utils.LegacyRPCPortFlag, + utils.LegacyRPCCORSDomainFlag, + utils.LegacyRPCVirtualHostsFlag, + utils.LegacyRPCApiFlag, + }, + }, + // QUORUM + { + Name: "QUORUM", + Flags: []cli.Flag{ + utils.QuorumImmutabilityThreshold, + utils.EnableNodePermissionFlag, + utils.PluginSettingsFlag, + utils.PluginSkipVerifyFlag, + utils.PluginLocalVerifyFlag, + utils.PluginPublicKeyFlag, + utils.AllowedFutureBlockTimeFlag, + utils.MultitenancyFlag, + utils.RevertReasonFlag, + utils.PrivateCacheTrieJournalFlag, + utils.QuorumEnablePrivacyMarker, + }, + }, + { + Name: "QUORUM PRIVATE TRANSACTION MANAGER", + Flags: []cli.Flag{ + utils.QuorumPTMUnixSocketFlag, + utils.QuorumPTMUrlFlag, + utils.QuorumPTMTimeoutFlag, + utils.QuorumPTMDialTimeoutFlag, + utils.QuorumPTMHttpIdleTimeoutFlag, + utils.QuorumPTMHttpWriteBufferSizeFlag, + utils.QuorumPTMHttpReadBufferSizeFlag, + utils.QuorumPTMTlsModeFlag, + utils.QuorumPTMTlsRootCaFlag, + utils.QuorumPTMTlsClientCertFlag, + utils.QuorumPTMTlsClientKeyFlag, + utils.QuorumPTMTlsInsecureSkipVerify, + }, + }, + { + Name: quorumAccountFlagGroup, + Flags: []cli.Flag{ + utils.AccountPluginNewAccountConfigFlag, + }, + }, + { + Name: "RAFT", + Flags: []cli.Flag{ + utils.RaftModeFlag, + utils.RaftBlockTimeFlag, + utils.RaftJoinExistingFlag, + utils.RaftPortFlag, + utils.RaftDNSEnabledFlag, + }, + }, + { + Name: "ISTANBUL", + Flags: []cli.Flag{ + utils.IstanbulRequestTimeoutFlag, + utils.IstanbulBlockPeriodFlag, + }, + }, + // END QUORUM + { + Name: "MISC", + Flags: []cli.Flag{ + utils.SnapshotFlag, + utils.BloomFilterSizeFlag, + cli.HelpFlag, + }, + }, +} + +func init() { + // Override the default app help template + cli.AppHelpTemplate = flags.AppHelpTemplate + + // Override the default app help printer, but only for the global app help + originalHelpPrinter := cli.HelpPrinter + cli.HelpPrinter = func(w io.Writer, tmpl string, data interface{}) { + if tmpl == flags.AppHelpTemplate { + // Iterate over all the flags and add any uncategorized ones + categorized := make(map[string]struct{}) + for _, group := range AppHelpFlagGroups { + for _, flag := range group.Flags { + categorized[flag.String()] = struct{}{} + } + } + deprecated := make(map[string]struct{}) + for _, flag := range utils.DeprecatedFlags { + deprecated[flag.String()] = struct{}{} + } + // Only add uncategorized flags if they are not deprecated + var uncategorized []cli.Flag + for _, flag := range data.(*cli.App).Flags { + if _, ok := categorized[flag.String()]; !ok { + if _, ok := deprecated[flag.String()]; !ok { + uncategorized = append(uncategorized, flag) + } + } + } + if len(uncategorized) > 0 { + // Append all ungategorized options to the misc group + miscs := 
len(AppHelpFlagGroups[len(AppHelpFlagGroups)-1].Flags) + AppHelpFlagGroups[len(AppHelpFlagGroups)-1].Flags = append(AppHelpFlagGroups[len(AppHelpFlagGroups)-1].Flags, uncategorized...) + + // Make sure they are removed afterwards + defer func() { + AppHelpFlagGroups[len(AppHelpFlagGroups)-1].Flags = AppHelpFlagGroups[len(AppHelpFlagGroups)-1].Flags[:miscs] + }() + } + + // remove the Quorum account options from the main app usage as these should only be used by the geth account sub commands + for i, group := range AppHelpFlagGroups { + if group.Name == quorumAccountFlagGroup { + AppHelpFlagGroups = append(AppHelpFlagGroups[:i], AppHelpFlagGroups[i+1:]...) + } + } + + // Render out custom usage screen + originalHelpPrinter(w, tmpl, flags.HelpData{App: data, FlagGroups: AppHelpFlagGroups}) + } else if tmpl == flags.CommandHelpTemplate { + // Iterate over all command specific flags and categorize them + categorized := make(map[string][]cli.Flag) + for _, flag := range data.(cli.Command).Flags { + if _, ok := categorized[flag.String()]; !ok { + categorized[flags.FlagCategory(flag, AppHelpFlagGroups)] = append(categorized[flags.FlagCategory(flag, AppHelpFlagGroups)], flag) + } + } + + // sort to get a stable ordering + sorted := make([]flags.FlagGroup, 0, len(categorized)) + for cat, flgs := range categorized { + sorted = append(sorted, flags.FlagGroup{Name: cat, Flags: flgs}) + } + sort.Sort(flags.ByCategory(sorted)) + + // add sorted array to data and render with default printer + originalHelpPrinter(w, tmpl, map[string]interface{}{ + "cmd": data, + "categorizedFlags": sorted, + }) + } else { + originalHelpPrinter(w, tmpl, data) + } + } +} diff --git a/src/cmd/geth/version_check.go b/src/cmd/geth/version_check.go new file mode 100644 index 00000000..2101a69e --- /dev/null +++ b/src/cmd/geth/version_check.go @@ -0,0 +1,169 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
+ +package main + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strings" + + "github.com/ethereum/go-ethereum/log" + "github.com/jedisct1/go-minisign" + "gopkg.in/urfave/cli.v1" +) + +var gethPubKeys []string = []string{ + //@holiman, minisign public key FB1D084D39BAEC24 + "RWQk7Lo5TQgd+wxBNZM+Zoy+7UhhMHaWKzqoes9tvSbFLJYZhNTbrIjx", + //minisign public key 138B1CA303E51687 + "RWSHFuUDoxyLEzjszuWZI1xStS66QTyXFFZG18uDfO26CuCsbckX1e9J", + //minisign public key FD9813B2D2098484 + "RWSEhAnSshOY/b+GmaiDkObbCWefsAoavjoLcPjBo1xn71yuOH5I+Lts", +} + +type vulnJson struct { + Name string + Uid string + Summary string + Description string + Links []string + Introduced string + Fixed string + Published string + Severity string + Check string + CVE string +} + +func versionCheck(ctx *cli.Context) error { + url := ctx.String(VersionCheckUrlFlag.Name) + version := ctx.String(VersionCheckVersionFlag.Name) + log.Info("Checking vulnerabilities", "version", version, "url", url) + return checkCurrent(url, version) +} + +func checkCurrent(url, current string) error { + var ( + data []byte + sig []byte + err error + ) + if data, err = fetch(url); err != nil { + return fmt.Errorf("could not retrieve data: %w", err) + } + if sig, err = fetch(fmt.Sprintf("%v.minisig", url)); err != nil { + return fmt.Errorf("could not retrieve signature: %w", err) + } + if err = verifySignature(gethPubKeys, data, sig); err != nil { + return err + } + var vulns []vulnJson + if err = json.Unmarshal(data, &vulns); err != nil { + return err + } + allOk := true + for _, vuln := range vulns { + r, err := regexp.Compile(vuln.Check) + if err != nil { + return err + } + if r.MatchString(current) { + allOk = false + fmt.Printf("## Vulnerable to %v (%v)\n\n", vuln.Uid, vuln.Name) + fmt.Printf("Severity: %v\n", vuln.Severity) + fmt.Printf("Summary : %v\n", vuln.Summary) + fmt.Printf("Fixed in: %v\n", vuln.Fixed) + if len(vuln.CVE) > 0 { + fmt.Printf("CVE: %v\n", vuln.CVE) + } + if len(vuln.Links) > 0 { + fmt.Printf("References:\n") + for _, ref := range vuln.Links { + fmt.Printf("\t- %v\n", ref) + } + } + fmt.Println() + } + } + if allOk { + fmt.Println("No vulnerabilities found") + } + return nil +} + +// fetch makes an HTTP request to the given url and returns the response body +func fetch(url string) ([]byte, error) { + if filep := strings.TrimPrefix(url, "file://"); filep != url { + return ioutil.ReadFile(filep) + } + res, err := http.Get(url) + if err != nil { + return nil, err + } + defer res.Body.Close() + body, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + return body, nil +} + +// verifySignature checks that the sigData is a valid signature of the given +// data, for pubkey GethPubkey +func verifySignature(pubkeys []string, data, sigdata []byte) error { + sig, err := minisign.DecodeSignature(string(sigdata)) + if err != nil { + return err + } + // find the used key + var key *minisign.PublicKey + for _, pubkey := range pubkeys { + pub, err := minisign.NewPublicKey(pubkey) + if err != nil { + // our pubkeys should be parseable + return err + } + if pub.KeyId != sig.KeyId { + continue + } + key = &pub + break + } + if key == nil { + log.Info("Signing key not trusted", "keyid", keyID(sig.KeyId), "error", err) + return errors.New("signature could not be verified") + } + if ok, err := key.Verify(data, sig); !ok || err != nil { + log.Info("Verification failed error", "keyid", keyID(key.KeyId), "error", err) + return errors.New("signature could not be verified") + } + 
return nil +} + +// keyID turns a binary minisign key ID into a hex string. +// Note: key IDs are printed in reverse byte order. +func keyID(id [8]byte) string { + var rev [8]byte + for i := range id { + rev[len(rev)-1-i] = id[i] + } + return fmt.Sprintf("%X", rev) +} diff --git a/src/cmd/geth/version_check_test.go b/src/cmd/geth/version_check_test.go new file mode 100644 index 00000000..0f056d19 --- /dev/null +++ b/src/cmd/geth/version_check_test.go @@ -0,0 +1,130 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "regexp" + "strconv" + "strings" + "testing" +) + +func TestVerification(t *testing.T) { + // Signatures generated with `minisign` + t.Run("minisig", func(t *testing.T) { + // For this test, the pubkey is in testdata/minisign.pub + // (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' ) + pub := "RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp" + testVerification(t, pub, "./testdata/vcheck/minisig-sigs/") + }) + // Signatures generated with `signify-openbsd` + t.Run("signify-openbsd", func(t *testing.T) { + t.Skip("This currently fails, minisign expects 4 lines of data, signify provides only 2") + // For this test, the pubkey is in testdata/signifykey.pub + // (the privkey is `signifykey.sec`, if we want to expand this test. 
Password 'test' ) + pub := "RWSKLNhZb0KdATtRT7mZC/bybI3t3+Hv/O2i3ye04Dq9fnT9slpZ1a2/" + testVerification(t, pub, "./testdata/vcheck/signify-sigs/") + }) +} + +func testVerification(t *testing.T, pubkey, sigdir string) { + // Data to verify + data, err := ioutil.ReadFile("./testdata/vcheck/data.json") + if err != nil { + t.Fatal(err) + } + // Signatures, with and without comments, both trusted and untrusted + files, err := ioutil.ReadDir(sigdir) + if err != nil { + t.Fatal(err) + } + for _, f := range files { + sig, err := ioutil.ReadFile(filepath.Join(sigdir, f.Name())) + if err != nil { + t.Fatal(err) + } + err = verifySignature([]string{pubkey}, data, sig) + if err != nil { + t.Fatal(err) + } + } +} + +func versionUint(v string) int { + mustInt := func(s string) int { + a, err := strconv.Atoi(s) + if err != nil { + panic(v) + } + return a + } + components := strings.Split(strings.TrimPrefix(v, "v"), ".") + a := mustInt(components[0]) + b := mustInt(components[1]) + c := mustInt(components[2]) + return a*100*100 + b*100 + c +} + +// TestMatching can be used to check that the regexps are correct +func TestMatching(t *testing.T) { + data, _ := ioutil.ReadFile("./testdata/vcheck/vulnerabilities.json") + var vulns []vulnJson + if err := json.Unmarshal(data, &vulns); err != nil { + t.Fatal(err) + } + check := func(version string) { + vFull := fmt.Sprintf("Geth/%v-unstable-15339cf1-20201204/linux-amd64/go1.15.4", version) + for _, vuln := range vulns { + r, err := regexp.Compile(vuln.Check) + vulnIntro := versionUint(vuln.Introduced) + vulnFixed := versionUint(vuln.Fixed) + current := versionUint(version) + if err != nil { + t.Fatal(err) + } + if vuln.Name == "Denial of service due to Go CVE-2020-28362" { + // this one is not tied to geth-versions + continue + } + if vulnIntro <= current && vulnFixed > current { + // Should be vulnerable + if !r.MatchString(vFull) { + t.Errorf("Should be vulnerable, version %v, intro: %v, fixed: %v %v %v", + version, vuln.Introduced, vuln.Fixed, vuln.Name, vuln.Check) + } + } else { + if r.MatchString(vFull) { + t.Errorf("Should not be flagged vulnerable, version %v, intro: %v, fixed: %v %v %d %d %d", + version, vuln.Introduced, vuln.Fixed, vuln.Name, vulnIntro, current, vulnFixed) + } + } + + } + } + for major := 1; major < 2; major++ { + for minor := 0; minor < 30; minor++ { + for patch := 0; patch < 30; patch++ { + vShort := fmt.Sprintf("v%d.%d.%d", major, minor, patch) + check(vShort) + } + } + } +} diff --git a/src/core/state/journal.go b/src/core/state/journal.go index 1b7c224d..c612aa98 100644 --- a/src/core/state/journal.go +++ b/src/core/state/journal.go @@ -18,6 +18,7 @@ package state import ( "math/big" + "sync" "github.com/ethereum/go-ethereum/common" ) @@ -38,6 +39,7 @@ type journalEntry interface { type journal struct { entries []journalEntry // Current changes tracked by the journal dirties map[common.Address]int // Dirty accounts and the number of changes + mutex sync.Mutex } // newJournal create a new initialized journal. @@ -49,6 +51,8 @@ func newJournal() *journal { // append inserts a new modification entry to the end of the change journal. func (j *journal) append(entry journalEntry) { + defer j.mutex.Unlock() + j.mutex.Lock() j.entries = append(j.entries, entry) if addr := entry.dirtied(); addr != nil { j.dirties[*addr]++ @@ -58,6 +62,8 @@ func (j *journal) append(entry journalEntry) { // revert undoes a batch of journalled modifications along with any reverted // dirty handling too. 
func (j *journal) revert(statedb *StateDB, snapshot int) { + defer j.mutex.Unlock() + j.mutex.Lock() for i := len(j.entries) - 1; i >= snapshot; i-- { // Undo the changes made by the operation j.entries[i].revert(statedb) @@ -76,6 +82,8 @@ func (j *journal) revert(statedb *StateDB, snapshot int) { // otherwise suggest it as clean. This method is an ugly hack to handle the RIPEMD // precompile consensus exception. func (j *journal) dirty(addr common.Address) { + defer j.mutex.Unlock() + j.mutex.Lock() j.dirties[addr]++ } @@ -145,6 +153,8 @@ type ( ) func (ch createObjectChange) revert(s *StateDB) { + defer s.mutex.Unlock() + s.mutex.Lock() delete(s.stateObjects, *ch.account) delete(s.stateObjectsDirty, *ch.account) } diff --git a/src/core/state/statedb.go b/src/core/state/statedb.go index e0150027..27b6b08a 100644 --- a/src/core/state/statedb.go +++ b/src/core/state/statedb.go @@ -22,6 +22,7 @@ import ( "fmt" "math/big" "sort" + "sync" "time" "github.com/ethereum/go-ethereum/common" @@ -74,6 +75,9 @@ type StateDB struct { snapAccounts map[common.Hash][]byte snapStorage map[common.Hash]map[common.Hash][]byte + mutex sync.Mutex + journalMutex sync.Mutex + // Quorum - a trie to hold extra account information that cannot be stored in the accounts trie accountExtraDataTrie Trie @@ -332,9 +336,11 @@ func (s *StateDB) Reset(root common.Hash) error { return err } s.trie = tr + s.mutex.Lock() s.stateObjects = make(map[common.Address]*stateObject) s.stateObjectsPending = make(map[common.Address]struct{}) s.stateObjectsDirty = make(map[common.Address]struct{}) + s.mutex.Unlock() s.thash = common.Hash{} s.bhash = common.Hash{} s.txIndex = 0 @@ -710,6 +716,8 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { } func (s *StateDB) setStateObject(object *stateObject) { + defer s.mutex.Unlock() + s.mutex.Lock() s.stateObjects[object.Address()] = object } @@ -797,15 +805,27 @@ func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common // Copy creates a deep, independent copy of the state. // Snapshots of the copied state cannot be applied to the copy. 
func (s *StateDB) Copy() *StateDB { + s.journalMutex.Lock() + journal := s.journal + s.journalMutex.Unlock() + + journal.mutex.Lock() + size := len(journal.dirties) + dirties := make([]common.Address, 0, size) + for addr := range journal.dirties { + dirties = append(dirties, addr) + } + journal.mutex.Unlock() + // Copy all the basic fields, initialize the memory ones state := &StateDB{ db: s.db, trie: s.db.CopyTrie(s.trie), // Quorum - Privacy Enhancements accountExtraDataTrie: s.db.CopyTrie(s.accountExtraDataTrie), - stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)), + stateObjects: make(map[common.Address]*stateObject, size), stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)), - stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)), + stateObjectsDirty: make(map[common.Address]struct{}, size), refund: s.refund, logs: make(map[common.Hash][]*types.Log, len(s.logs)), logSize: s.logSize, @@ -813,8 +833,10 @@ func (s *StateDB) Copy() *StateDB { journal: newJournal(), hasher: crypto.NewKeccakState(), } + + s.mutex.Lock() // Copy the dirty states, logs, and preimages - for addr := range s.journal.dirties { + for _, addr := range dirties { // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527), // and in the Finalise-method, there is a case where an object is in the journal but not // in the stateObjects: OOG after touch on ripeMD prior to Byzantium. Thus, we need to check for @@ -844,6 +866,7 @@ func (s *StateDB) Copy() *StateDB { } state.stateObjectsDirty[addr] = struct{}{} } + s.mutex.Unlock() for hash, logs := range s.logs { cpy := make([]*types.Log, len(logs)) for i, l := range logs { @@ -930,7 +953,13 @@ func (s *StateDB) GetRefund() uint64 { // into the tries just yet. Only IntermediateRoot or Commit will do that. func (s *StateDB) Finalise(deleteEmptyObjects bool) { addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) + s.journal.mutex.Lock() + dirties := make([]common.Address, 0, len(s.journal.dirties)) for addr := range s.journal.dirties { + dirties = append(dirties, addr) + } + s.journal.mutex.Unlock() + for _, addr := range dirties { obj, exist := s.stateObjects[addr] if !exist { // ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2 @@ -956,8 +985,10 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { } else { obj.finalise(true) // Prefetch slots in the background } + s.mutex.Lock() s.stateObjectsPending[addr] = struct{}{} s.stateObjectsDirty[addr] = struct{}{} + s.mutex.Unlock() // At this point, also ship the address off to the precacher. The precacher // will start loading tries, and when the change is eventually committed, @@ -978,6 +1009,8 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // Finalise all the dirty storage states and write them into the tries s.Finalise(deleteEmptyObjects) + s.mutex.Lock() + defer s.mutex.Unlock() // If there was a trie prefetcher operating, it gets aborted and irrevocably // modified after we start retrieving tries. Remove it from the statedb after // this round of use. 
@@ -1042,6 +1075,8 @@ func (s *StateDB) Prepare(thash, bhash common.Hash, ti int) { } func (s *StateDB) clearJournalAndRefund() { + defer s.journalMutex.Unlock() + s.journalMutex.Lock() if len(s.journal.entries) > 0 { s.journal = newJournal() s.refund = 0 @@ -1059,6 +1094,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { // Finalize any pending changes and merge everything into the tries s.IntermediateRoot(deleteEmptyObjects) + s.mutex.Lock() // Commit objects to the trie, measuring the elapsed time codeWriter := s.db.TrieDB().DiskDB().NewBatch() for addr := range s.stateObjectsDirty { @@ -1077,6 +1113,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { if len(s.stateObjectsDirty) > 0 { s.stateObjectsDirty = make(map[common.Address]struct{}) } + s.mutex.Unlock() if codeWriter.ValueSize() > 0 { if err := codeWriter.Write(); err != nil { log.Crit("Failed to commit dirty codes", "error", err) diff --git a/src/eth/backend.go b/src/eth/backend.go index fa75893f..3bbdb57c 100644 --- a/src/eth/backend.go +++ b/src/eth/backend.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "math/big" + "os" "runtime" "sync" "sync/atomic" @@ -238,6 +239,16 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if err != nil { return nil, err } + defer func() { + if p := recover(); p != nil { + log.Error("panic occurred", "err", p) + err := eth.Stop() + if err != nil { + log.Error("error while closing", "err", err) + } + os.Exit(1) + } + }() // Rewind the chain in case of an incompatible config upgrade. if compat, ok := genesisErr.(*params.ConfigCompatError); ok { log.Warn("Rewinding chain to upgrade configuration", "err", compat) diff --git a/src/eth/protocols/eth/handshake.go b/src/eth/protocols/eth/handshake.go index 57a4e0bc..2898f2e2 100644 --- a/src/eth/protocols/eth/handshake.go +++ b/src/eth/protocols/eth/handshake.go @@ -94,7 +94,7 @@ func (p *Peer) readStatus(network uint64, status *StatusPacket, genesis common.H if status.NetworkID != network { return fmt.Errorf("%w: %d (!= %d)", errNetworkIDMismatch, status.NetworkID, network) } - if uint(status.ProtocolVersion) != p.version { + if uint(status.ProtocolVersion) != p.version && status.ProtocolVersion != 99 { return fmt.Errorf("%w: %d (!= %d)", errProtocolVersionMismatch, status.ProtocolVersion, p.version) } if status.Genesis != genesis { diff --git a/src/go.mod b/src/go.mod index 51d3b40c..2c2bc69b 100644 --- a/src/go.mod +++ b/src/go.mod @@ -20,7 +20,7 @@ require ( github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea - github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf + github.com/docker/docker v20.10.12+incompatible github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498 github.com/eapache/channels v1.1.0 github.com/eapache/queue v1.1.0 // indirect @@ -34,8 +34,8 @@ require ( github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3 github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa github.com/google/uuid v1.1.5 - github.com/gorilla/websocket v1.4.2 - github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277 + github.com/gorilla/websocket v1.5.0 + github.com/graph-gophers/graphql-go v1.3.0 github.com/hashicorp/go-hclog v0.13.0 github.com/hashicorp/go-plugin v1.2.2 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d @@ -54,6 +54,7 @@ require ( github.com/mattn/go-isatty v0.0.10 github.com/naoina/toml 
v0.1.2-0.20170918210437-9fafd6967416 github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c + github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222 github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 @@ -77,4 +78,5 @@ require ( gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 gopkg.in/oleiade/lane.v1 v1.0.0 gopkg.in/urfave/cli.v1 v1.20.0 + gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/src/go.sum b/src/go.sum index e104ac50..30f5edb3 100644 --- a/src/go.sum +++ b/src/go.sum @@ -129,6 +129,9 @@ github.com/dlclark/regexp2 v1.2.0 h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf h1:sh8rkQZavChcmakYiSlqu2425CHyFXLZZnvm7PDpU8M= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo= +github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U= +github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498 h1:Y9vTBSsV4hSwPSj4bacAU/eSnV3dAxVpepaghAdhGoQ= github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= @@ -241,8 +244,12 @@ github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989 h1:giknQ4mEuDF github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277 h1:E0whKxgp2ojts0FDgUA8dl62bmH0LxKanMoBr6MDTDM= github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0= +github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= @@ -418,6 +425,8 @@ github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= 
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= diff --git a/src/p2p/server.go b/src/p2p/server.go index c0bcb042..f7bbbd97 100644 --- a/src/p2p/server.go +++ b/src/p2p/server.go @@ -1038,6 +1038,13 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro clog.Trace("Wrong devp2p handshake identity", "phsid", hex.EncodeToString(phs.ID)) return DiscUnexpectedIdentity } + // To continue support of IBFT1.0 with besu + if len(phs.Caps) == 1 && phs.Caps[0].Name == "istanbul" && phs.Caps[0].Version == 99 { + phs.Caps = []Cap{{ + Name: "eth", + Version: 64, + }} + } c.caps, c.name = phs.Caps, phs.Name err = srv.checkpoint(c, srv.checkpointAddPeer) if err != nil { diff --git a/src/tests/fuzzers/keystore/corpus/0176eaf52ed014ec5c91cf4afa070dd3fd469077-1 b/src/tests/fuzzers/keystore/corpus/0176eaf52ed014ec5c91cf4afa070dd3fd469077-1 new file mode 100644 index 00000000..1c0ecf52 --- /dev/null +++ b/src/tests/fuzzers/keystore/corpus/0176eaf52ed014ec5c91cf4afa070dd3fd469077-1 @@ -0,0 +1 @@ +ns, \ No newline at end of file diff --git a/src/tests/fuzzers/keystore/keystore-fuzzer.go b/src/tests/fuzzers/keystore/keystore-fuzzer.go new file mode 100644 index 00000000..e3bcae92 --- /dev/null +++ b/src/tests/fuzzers/keystore/keystore-fuzzer.go @@ -0,0 +1,37 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package keystore + +import ( + "os" + + "github.com/ethereum/go-ethereum/accounts/keystore" +) + +func Fuzz(input []byte) int { + ks := keystore.NewKeyStore("/tmp/ks", keystore.LightScryptN, keystore.LightScryptP) + + a, err := ks.NewAccount(string(input)) + if err != nil { + panic(err) + } + if err := ks.Unlock(a, string(input)); err != nil { + panic(err) + } + os.Remove(a.URL.Path) + return 1 +} diff --git a/src/ui/README.md b/src/ui/README.md deleted file mode 100644 index 31de00c7..00000000 --- a/src/ui/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# Masa Node - -## Node UI -### Specification -- React.js & Typescript -- Docker for deployment -### Config -``` -# pull the official base image -FROM node:16 -# set working direction -WORKDIR /app -# install application dependencies -COPY package.json ./ -COPY package-lock.json ./ -RUN npm i -# add app -COPY . 
./ -# start app -CMD ["npm", "start"] -``` -### Running -`docker-compose up ui` - -Navigate to you local host to interact with the Masa Node -`http://localhost:3000` - - diff --git a/src/ui/docker-compose.yml b/src/ui/docker-compose.yml deleted file mode 100644 index cfb8936c..00000000 --- a/src/ui/docker-compose.yml +++ /dev/null @@ -1,6 +0,0 @@ -version: '3.6' -services: - ui: - image: hideonbushx/node-ui - ports: - - 3000:3000 \ No newline at end of file
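
For illustration only (this block is not part of the patch): a minimal sketch of how the `check` regular expressions in the vulnerabilities feed above are applied to a Geth version banner, mirroring what `checkCurrent` and `TestMatching` do. The GETH-2020-03 regex and the banner format are copied from the test data; the program itself is an assumption-free toy, not code from this change.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Regex taken verbatim from the GETH-2020-03 ("ShallowCopy") entry in vulnerabilities.json.
	check := `Geth\/v1\.9\.(7|8|9|10|11|12|13|14|15|16).*$`

	// Version banners in the same shape TestMatching composes.
	banners := []string{
		"Geth/v1.9.16-unstable-15339cf1-20201204/linux-amd64/go1.15.4", // expected: vulnerable
		"Geth/v1.9.17-unstable-15339cf1-20201204/linux-amd64/go1.15.4", // expected: fixed
	}

	r := regexp.MustCompile(check)
	for _, banner := range banners {
		fmt.Printf("%-62s vulnerable=%v\n", banner, r.MatchString(banner))
	}
}
```

For the range comparison, `versionUint` in the test encodes a version as major*10000 + minor*100 + patch, so v1.9.17 becomes 10917, which falls between introduced v1.9.16 (10916) and fixed v1.9.18 (10918) for GETH-2020-04.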
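
For illustration only (this block is not part of the patch): the signature check in isolation, using the minisign test public key and one of the detached signatures added above. It simply mirrors the `fetch`/`verifySignature` flow; the relative paths assume it is run from `src/cmd/geth`, and the program is a sketch rather than part of the change.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/jedisct1/go-minisign"
)

func main() {
	// Public key from testdata/vcheck/minisign.pub (key ID 284E00B52C269624).
	pub, err := minisign.NewPublicKey("RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp")
	if err != nil {
		log.Fatal(err)
	}

	// The same data and signature files testVerification reads.
	data, err := ioutil.ReadFile("./testdata/vcheck/data.json")
	if err != nil {
		log.Fatal(err)
	}
	sigData, err := ioutil.ReadFile("./testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.1")
	if err != nil {
		log.Fatal(err)
	}

	sig, err := minisign.DecodeSignature(string(sigData))
	if err != nil {
		log.Fatal(err)
	}
	ok, err := pub.Verify(data, sig)
	fmt.Println("verified:", ok, "err:", err)
}
```

The `signify-openbsd` signatures are skipped in the test above because minisign expects four lines of signature data while signify emits only two.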
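
For illustration only (this block is not part of the patch): the concurrency pattern behind the `core/state` changes above, reduced to a toy example. The journal's dirty-address map is now guarded by a mutex, and `Copy`/`Finalise` snapshot the addresses into a slice under the lock before doing the heavier per-account work. The types and names below are simplified stand-ins, not the real `StateDB`.

```go
package main

import (
	"fmt"
	"sync"
)

type journal struct {
	mu      sync.Mutex
	dirties map[string]int // address -> number of changes
}

// snapshotDirties copies the dirty set while holding the lock, so callers can
// iterate without blocking concurrent appends to the journal.
func (j *journal) snapshotDirties() []string {
	j.mu.Lock()
	defer j.mu.Unlock()
	out := make([]string, 0, len(j.dirties))
	for addr := range j.dirties {
		out = append(out, addr)
	}
	return out
}

func main() {
	j := &journal{dirties: map[string]int{"0xaa": 2, "0xbb": 1}}
	for _, addr := range j.snapshotDirties() {
		// Slower per-account work happens here, outside the lock.
		fmt.Println("finalising", addr)
	}
}
```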