diff --git a/.circleci/config.yml b/.circleci/config.yml index f460c959abb..7ee41eb7b35 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,45 +1,9 @@ version: 2.1 jobs: - go-sqlite: - docker: - - image: smartcontract/builder:1.0.29 - steps: - - checkout - - run: echo $CACHE_VERSION > cache.version - - restore_cache: - name: Restore Go Vendor Cache - key: go-mod-{{ checksum "go.sum" }} - - run: go mod download - - save_cache: - name: Save Go Vendor Cache - key: go-mod-{{ checksum "go.sum" }} - paths: - - '/go/pkg/mod' - - run: ./tools/ci/init_gcloud - - run: ./tools/ci/go_test | tee /tmp/go_test.txt - - store_artifacts: - path: /tmp/go_test.txt - go-sqlite-race: - resource_class: large - docker: - - image: smartcontract/builder:1.0.29 - steps: - - checkout - - run: echo $CACHE_VERSION > cache.version - - restore_cache: - name: Restore Go Vendor Cache - key: go-mod-{{ checksum "go.sum" }} - - save_cache: - name: Save Go Vendor Cache - key: go-mod-{{ checksum "go.sum" }} - paths: - - '/go/pkg/mod' - - run: ./tools/ci/gorace_test | tee /tmp/gorace_test.txt - - store_artifacts: - path: /tmp/gorace_test.txt go-postgres: + resource_class: large docker: - - image: smartcontract/builder:1.0.29 + - image: smartcontract/builder:1.0.31 - image: circleci/postgres:11-alpine environment: POSTGRES_USER: circleci_postgres @@ -71,18 +35,9 @@ jobs: - run: go test -v -p 3 -parallel 2 ./... 
rust: docker: - - image: smartcontract/builder:1.0.29 + - image: smartcontract/builder:1.0.31 steps: - checkout - - run: echo $CACHE_VERSION > cache.version - - restore_cache: - name: Restore Go Vendor Cache - key: go-mod-{{ checksum "go.sum" }}-v2 - - save_cache: - name: Save Go Vendor Cache - key: go-mod-{{ checksum "go.sum" }}-v2 - paths: - - '/go/pkg/mod' - run: ./tools/ci/rust_test geth-postgres: resource_class: 2xlarge @@ -95,6 +50,21 @@ jobs: steps: - checkout - run: cd ./tools/docker && timeout --foreground 1200s ./compose test + - store_artifacts: + path: ./tools/docker/logs + geth-postgres-ts: + resource_class: 2xlarge + environment: + GETH_MODE: true + machine: + image: ubuntu-1604:201903-01 + docker_layer_caching: true + working_directory: ~/chainlink + steps: + - checkout + - run: cd ./tools/docker && timeout --foreground 1200s ./compose test:ts + - store_artifacts: + path: ./tools/docker/logs parity-postgres: resource_class: 2xlarge machine: @@ -104,10 +74,23 @@ jobs: steps: - checkout - run: cd ./tools/docker && timeout --foreground 1200s ./compose test + - store_artifacts: + path: ./tools/docker/logs + parity-postgres-ts: + resource_class: 2xlarge + machine: + image: ubuntu-1604:201903-01 + docker_layer_caching: true + working_directory: ~/chainlink + steps: + - checkout + - run: cd ./tools/docker && timeout --foreground 1200s ./compose test:ts + - store_artifacts: + path: ./tools/docker/logs solidity: resource_class: xlarge docker: - - image: smartcontract/builder:1.0.29 + - image: smartcontract/builder:1.0.31 steps: - checkout - run: echo $CACHE_VERSION > cache.version @@ -126,7 +109,7 @@ jobs: path: ./integration/logs styleguide: docker: - - image: smartcontract/builder:1.0.29 + - image: smartcontract/builder:1.0.31 steps: - checkout - run: echo $CACHE_VERSION > cache.version @@ -142,7 +125,7 @@ jobs: - run: yarn workspace @chainlink/styleguide run test json-api-client: docker: - - image: smartcontract/builder:1.0.29 + - image: 
smartcontract/builder:1.0.31 steps: - checkout - run: echo $CACHE_VERSION > cache.version @@ -159,7 +142,7 @@ jobs: operator-ui: resource_class: xlarge docker: - - image: smartcontract/builder:1.0.29 + - image: smartcontract/builder:1.0.31 steps: - checkout - run: echo $CACHE_VERSION > cache.version @@ -217,7 +200,7 @@ jobs: path: ./integration/logs feeds: docker: - - image: smartcontract/builder:1.0.29 + - image: smartcontract/builder:1.0.31 steps: - checkout - run: echo $CACHE_VERSION > cache.version @@ -230,6 +213,9 @@ jobs: key: v{{ checksum "cache.version" }}-yarn-vendor-{{ checksum "yarn.lock" }} paths: - /usr/local/share/.cache/yarn + - run: + name: Run dependency setup + command: yarn workspace @chainlink/feeds run setup - run: name: Run Tests command: yarn workspace @chainlink/feeds run test @@ -251,8 +237,8 @@ jobs: path: ./integration/forks/logs build-publish-explorer: machine: - image: circleci/classic:201808-01 - docker_layer_caching: true + image: circleci/classic:201808-01 + docker_layer_caching: true steps: - checkout - run: @@ -266,24 +252,11 @@ jobs: name: Docker push, if applicable command: | tools/ci/push_explorer "${CIRCLE_BRANCH}" "${CIRCLE_TAG}" - build-explorer: - machine: - image: circleci/classic:201808-01 - docker_layer_caching: true - steps: - - checkout - - run: - name: Docker login - command: | - echo "$DOCKERHUB_PASS" | docker login -u "$DOCKERHUB_USER" --password-stdin - - run: - name: Docker build - command: docker build -f explorer/Dockerfile -t smartcontract/explorer:circleci . 
build-publish-chainlink: resource_class: large machine: - image: circleci/classic:201808-01 - docker_layer_caching: true + image: circleci/classic:201808-01 + docker_layer_caching: true steps: - checkout - run: @@ -298,26 +271,11 @@ jobs: name: Docker push, if applicable command: | tools/ci/push_chainlink "${CIRCLE_BRANCH}" "${CIRCLE_TAG}" - build-chainlink: - resource_class: xlarge - machine: - image: circleci/classic:201808-01 - docker_layer_caching: true - steps: - - checkout - - run: - name: Docker login - command: | - echo "$DOCKERHUB_PASS" | docker login -u "$DOCKERHUB_USER" --password-stdin - - run: - name: Docker build - command: | - DOCKER_TAG=circleci make docker build-chainlink-sgx: resource_class: xlarge machine: - image: circleci/classic:201808-01 - docker_layer_caching: true + image: circleci/classic:201808-01 + docker_layer_caching: true steps: - checkout - run: @@ -330,7 +288,7 @@ jobs: SGX_ENABLED=yes DOCKER_TAG=circleci make docker reportcoverage: docker: - - image: smartcontract/builder:1.0.29 + - image: smartcontract/builder:1.0.31 steps: - checkout - run: ./tools/ci/init_gcloud @@ -347,7 +305,7 @@ jobs: resource_class: large working_directory: ~/chainlink docker: - - image: smartcontract/builder:1.0.29 + - image: smartcontract/builder:1.0.31 steps: - checkout - run: echo $CACHE_VERSION > cache.version @@ -364,7 +322,7 @@ jobs: prepublish_npm: resource_class: xlarge docker: - - image: smartcontract/builder:1.0.29 + - image: smartcontract/builder:1.0.31 steps: - checkout - run: echo $CACHE_VERSION > cache.version @@ -393,10 +351,6 @@ workflows: - /^release\/.*/ - master - /^hotfix\/.*/ - - go-sqlite: - filters: # all branches, and /^v../ tags for build-publish... 
- tags: - only: /^v.*/ - go-postgres: filters: tags: @@ -409,10 +363,18 @@ workflows: filters: tags: only: /^v.*/ + - geth-postgres-ts: + filters: + tags: + only: /^v.*/ - parity-postgres: filters: tags: only: /^v.*/ + - parity-postgres-ts: + filters: + tags: + only: /^v.*/ - styleguide: filters: tags: @@ -445,57 +407,35 @@ workflows: filters: tags: only: /^v.*/ - - build-explorer: - filters: - branches: - ignore: - - develop - - /^release\/.*/ - - master - build-publish-explorer: requires: - styleguide - json-api-client - explorer filters: - branches: - only: - - develop - - /^release\/.*/ tags: - only: /^explorer-v.*/ # handles final versioned releases - - build-chainlink: - filters: - branches: - ignore: - - develop - - /^release\/.*/ - - master + only: /^v.*/ - build-chainlink-sgx: filters: tags: only: /^v.*/ - build-publish-chainlink: requires: - - go-sqlite - go-postgres - solidity - geth-postgres + - geth-postgres-ts - parity-postgres + - parity-postgres-ts - styleguide - json-api-client - operator-ui - rust filters: - branches: - only: - - develop - - /^release\/.*/ tags: - only: /^v.*/ # handles final versioned releases + only: /^v.*/ - reportcoverage: requires: - - go-sqlite - go-postgres - solidity - operator-ui diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000000..55712c19f1d --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "typescript.tsdk": "node_modules/typescript/lib" +} \ No newline at end of file diff --git a/README.md b/README.md index 21b32a8b653..93bd0d3b40f 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ Examples of how to utilize and integrate Chainlinks can be found in the [example ## Install -1. [Install Go 1.12+](https://golang.org/doc/install#install), and add your GOPATH's [bin directory to your PATH](https://golang.org/doc/code.html#GOPATH) +1. 
[Install Go 1.13+](https://golang.org/doc/install#install), and add your GOPATH's [bin directory to your PATH](https://golang.org/doc/code.html#GOPATH) 2. Install [NodeJS](https://nodejs.org/en/download/package-manager/) & [Yarn](https://yarnpkg.com/lang/en/docs/install/) 3. Download Chainlink: `git clone https://github.com/smartcontractkit/chainlink && cd chainlink` 4. Build and install Chainlink: `make install` @@ -49,7 +49,7 @@ Ethereum node versions currently tested and supported: To start your Chainlink node, simply run: ```bash -$ chainlink local node +chainlink local node ``` By default this will start on port 6688, where it exposes a [REST API](https://github.com/smartcontractkit/chainlink/wiki/REST-API). @@ -57,13 +57,13 @@ By default this will start on port 6688, where it exposes a [REST API](https://g Once your node has started, you can view your current jobs with: ```bash -$ chainlink jobspecs +chainlink jobspecs ``` View details of a specific job with: ```bash -$ chainlink show $JOB_ID +chainlink show "$JOB_ID" ``` To find out more about the Chainlink CLI, you can always run `chainlink help`. @@ -102,13 +102,13 @@ For the latest information on setting up a development environment, see the [gui ### Build your current version ```bash -$ go build -o chainlink ./core/ +go build -o chainlink ./core/ ``` - Run the binary: ```bash -$ ./chainlink +./chainlink ``` ### Test @@ -118,14 +118,14 @@ $ ./chainlink 2. Build contracts: ```bash -$ yarn -$ yarn setup:contracts +yarn +yarn setup:contracts ``` 3. Ready for testing: ```bash -$ go test -parallel=1 ./... +go test -parallel=1 ./... ``` ### Solidity Development @@ -134,14 +134,14 @@ $ go test -parallel=1 ./... 2. Install the dependencies: ```bash -$ cd evm -$ yarn install +cd evm +yarn install ``` 3. 
Run tests: ```bash -$ yarn run test-sol +yarn run test-sol ``` ### Use of Go Generate diff --git a/VERSION b/VERSION index e7c7d3cc3c8..a3df0a6959e 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.7.8 +0.8.0 diff --git a/base.Dockerfile b/base.Dockerfile new file mode 100644 index 00000000000..c39733c3de2 --- /dev/null +++ b/base.Dockerfile @@ -0,0 +1,6 @@ +FROM smartcontract/builder:1.0.31 + +COPY . . +RUN yarn +RUN go mod download +RUN make install-chainlink diff --git a/belt/package.json b/belt/package.json index 9e0205774f3..a02ac30b91c 100644 --- a/belt/package.json +++ b/belt/package.json @@ -27,11 +27,11 @@ "cli-ux": "^5.4.4", "debug": "^4.1.1", "execa": "^4.0.0", - "inquirer": "^7.0.4", + "inquirer": "^7.0.5", "shelljs": "^0.8.3", "ts-generator": "^0.0.8", "tslib": "^1", - "typechain": "^1.0.4", + "typechain": "^1.0.5", "typechain-target-ethers": "^1.0.3" }, "devDependencies": { @@ -40,11 +40,11 @@ "@types/inquirer": "^6.5.0", "@types/jest": "^25.1.1", "@types/mock-fs": "^4.10.0", - "@types/node": "^10", + "@types/node": "^13", "@types/shelljs": "^0.8.6", "globby": "^10", "jest": "^25.1.0", - "mock-fs": "^4.10.4", + "mock-fs": "^4.11.0", "ts-jest": "^25.2.0", "ts-node": "^8", "typescript": "^3.7.5" diff --git a/core/adapters/adapter.go b/core/adapters/adapter.go index 5da6d3f364d..0b58510c192 100644 --- a/core/adapters/adapter.go +++ b/core/adapters/adapter.go @@ -25,6 +25,10 @@ var ( TaskTypeEthTx = models.MustNewTaskType("ethtx") // TaskTypeEthTxABIEncode is the identifier for the EthTxABIEncode adapter. TaskTypeEthTxABIEncode = models.MustNewTaskType("ethtxabiencode") + // TaskTypeHTTPGetWithUnrestrictedNetworkAccess is the identifier for the HTTPGet adapter, with local/private IP access enabled. + TaskTypeHTTPGetWithUnrestrictedNetworkAccess = models.MustNewTaskType("httpgetwithunrestrictednetworkaccess") + // TaskTypeHTTPPostWithUnrestrictedNetworkAccess is the identifier for the HTTPPost adapter, with local/private IP access enabled. 
+ TaskTypeHTTPPostWithUnrestrictedNetworkAccess = models.MustNewTaskType("httppostwithunrestrictednetworkaccess") // TaskTypeHTTPGet is the identifier for the HTTPGet adapter. TaskTypeHTTPGet = models.MustNewTaskType("httpget") // TaskTypeHTTPPost is the identifier for the HTTPPost adapter. @@ -52,6 +56,7 @@ var ( // BaseAdapter is the minimum interface required to create an adapter. Only core // adapters have this minimum requirement. type BaseAdapter interface { + TaskType() models.TaskType Perform(models.RunInput, *store.Store) models.RunOutput } @@ -101,6 +106,12 @@ func For(task models.TaskSpec, config orm.ConfigReader, orm *orm.ORM) (*Pipeline case TaskTypeEthTxABIEncode: ba = &EthTxABIEncode{} err = unmarshalParams(task.Params, ba) + case TaskTypeHTTPGetWithUnrestrictedNetworkAccess: + ba = &HTTPGet{AllowUnrestrictedNetworkAccess: true} + err = unmarshalParams(task.Params, ba) + case TaskTypeHTTPPostWithUnrestrictedNetworkAccess: + ba = &HTTPPost{AllowUnrestrictedNetworkAccess: true} + err = unmarshalParams(task.Params, ba) case TaskTypeHTTPGet: ba = &HTTPGet{} err = unmarshalParams(task.Params, ba) diff --git a/core/adapters/bridge.go b/core/adapters/bridge.go index fd4d6d96b8a..38916365b59 100644 --- a/core/adapters/bridge.go +++ b/core/adapters/bridge.go @@ -22,6 +22,11 @@ type Bridge struct { Params models.JSON } +// TaskType returns the bridges defined type. +func (ba *Bridge) TaskType() models.TaskType { + return ba.Name +} + // Perform sends a POST request containing the JSON of the input to the // external adapter specified in the BridgeType. // diff --git a/core/adapters/compare.go b/core/adapters/compare.go index e18925bba50..4331470ded5 100644 --- a/core/adapters/compare.go +++ b/core/adapters/compare.go @@ -22,6 +22,11 @@ var ( ErrValueNotSpecified = errors.New("Value not specified") ) +// TaskType returns the type of Adapter. 
+func (c *Compare) TaskType() models.TaskType { + return TaskTypeCompare +} + // Perform uses the Operator to check the run's result against the // specified Value. func (c *Compare) Perform(input models.RunInput, _ *store.Store) models.RunOutput { diff --git a/core/adapters/compare_test.go b/core/adapters/compare_test.go index 2e8886970fc..5492e484261 100644 --- a/core/adapters/compare_test.go +++ b/core/adapters/compare_test.go @@ -347,7 +347,7 @@ func TestCompare_Perform(t *testing.T) { Operator: "gte", Value: "2.12", }, - true, + false, }, { "less than integer as string", @@ -803,7 +803,6 @@ func TestCompare_Perform(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - t.Parallel() input := cltest.NewRunInputWithResult(test.input) adapter := test.adapter result := adapter.Perform(input, nil) @@ -879,8 +878,6 @@ func TestCompareError_Perform(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - t.Parallel() - input := cltest.NewRunInputWithResult(test.input) adapter := test.adapter result := adapter.Perform(input, nil) diff --git a/core/adapters/copy.go b/core/adapters/copy.go index 99de8ea0e3a..d5b40928a0f 100644 --- a/core/adapters/copy.go +++ b/core/adapters/copy.go @@ -11,6 +11,11 @@ type Copy struct { CopyPath JSONPath `json:"copyPath"` } +// TaskType returns the type of Adapter. 
+func (c *Copy) TaskType() models.TaskType { + return TaskTypeCopy +} + // Perform returns the copied values from the desired mapping within the `data` JSON object func (c *Copy) Perform(input models.RunInput, store *store.Store) models.RunOutput { data, err := models.JSON{}.Add("result", input.Data().String()) diff --git a/core/adapters/copy_test.go b/core/adapters/copy_test.go index 9e1826fabef..a93e90e0d9c 100644 --- a/core/adapters/copy_test.go +++ b/core/adapters/copy_test.go @@ -80,16 +80,14 @@ func TestCopy_Perform(t *testing.T) { } for _, tt := range tests { - test := tt - t.Run(test.name, func(t *testing.T) { - t.Parallel() - input := cltest.NewRunInputWithString(t, test.input) - adapter := adapters.Copy{CopyPath: test.copyPath} + t.Run(tt.name, func(t *testing.T) { + input := cltest.NewRunInputWithString(t, tt.input) + adapter := adapters.Copy{CopyPath: tt.copyPath} result := adapter.Perform(input, nil) - assert.Equal(t, test.wantData, result.Data().String()) - assert.Equal(t, test.wantStatus, result.Status()) + assert.Equal(t, tt.wantData, result.Data().String()) + assert.Equal(t, tt.wantStatus, result.Status()) - assert.Equal(t, test.wantResultError, result.Error()) + assert.Equal(t, tt.wantResultError, result.Error()) }) } } diff --git a/core/adapters/doc.go b/core/adapters/doc.go index a2d62f79cdd..39c9d2b4cb1 100644 --- a/core/adapters/doc.go +++ b/core/adapters/doc.go @@ -20,11 +20,28 @@ // The HTTPGet adapter is used to grab the JSON data from the given URL. // { "type": "HTTPGet", "params": {"get": "https://some-api-example.net/api" }} // +// NOTE: For security, since the URL is untrusted, HTTPGet imposes some +// restrictions on which IPs may be fetched. Local network and multicast IPs +// are disallowed by default and attempting to connect will result in an error. +// +// // HTTPPost // // Sends a POST request to the specified URL and will return the response. 
// { "type": "HTTPPost", "params": {"post": "https://weiwatchers.com/api" }} // +// NOTE: For security, since the URL is untrusted, HTTPPost imposes some +// restrictions on which IPs may be fetched. Local network and multicast IPs +// are disallowed by default and attempting to connect will result in an error. +// +// HTTPGetWithUnrestrictedNetworkAccess +// +// Identical to HTTPGet except there are no IP restrictions. Use with caution. +// +// HTTPPostWithUnrestrictedNetworkAccess +// +// Identical to HTTPPost except there are no IP restrictions. Use with caution. +// // JSONParse // // The JSONParse adapter will obtain the value(s) for the given field(s). @@ -83,16 +100,64 @@ // // Random // -// Random adapter generates a number between 0 and 2**256-1 -// WARNING: The random adapter as implemented is not verifiable. -// Outputs from this adapters are not verifiable onchain as a fairly-drawn random samples. -// As a result, the oracle potentially has complete discretion to instead deliberately choose -// values with favorable onchain outcomes. Don't use it for a lottery, for instance, unless -// you fully trust the oracle not to pick its own tickets. -// We intend to either improve it in the future, or introduce a verifiable alternative. -// For now it is provided as an alternative to making web requests for random numbers, -// which is similarly unverifiable and has additional possible points of failure. -// { "type": "Random" } +// Random adapter generates proofs of randomness verifiable against a public key +// +// WARNING: The Random apdater's output is NOT the randomness you are looking +// WARNING: for! The node must send the output onchain for verification by the +// WARNING: method VRFCoordinator.sol#fulfillRandomnessRequest, which will +// WARNING: pass the actual random output back to the consuming contract. 
+// WARNING: Don't use the output of this adapter in any other way, unless you +// WARNING: thoroughly understand the cryptography in use here, and the exact +// WARNING: security guarantees it provides. See notes in VRFCoordinator.sol +// WARNING: for more info. +// +// WARNING: This system guarantees that the oracle cannot independently +// WARNING: concoct a random output to suit itself, but it does not protect +// WARNING: against collusion between the oracle and the provider of the seed +// WARNING: the oracle uses to generate the randomness. It also does not +// WARNING: protect against the oracle simply refusing to respond to a +// WARNING: randomness request, if it doesn't like the output it would be +// WARNING: required to provide. Solutions to these limitations are planned. +// +// Here is an example of a Random task specification. For an example of a full +// jobspec using this, see ../internal/testdata/randomness_job.json. +// +// { +// "type": "Random", +// "params": { +// "publicKey": +// "0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179800" +// } +// } +// +// The publicKey must be the concatenation of the hex representation of the +// secp256k1 point's x-ordinate as a uint256, followed by 00 if the y-ordinate +// is even, or 01 if it's odd. (Note that this is NOT an RFC 5480 section 2.2 +// public-key representation. DO NOT prefix with 0x02, 0x03 or 0x04.) +// +// The chainlink node must know the corresponding secret key. Such a key pair +// can be created with the `chainlink local vrf create` command, and exported to +// a keystore with `vrf export KEYSTORE_PATH`. +// +// E.g. `chainlink local vrf create -p PASSWORD_FILE` will log the public key +// under the field "public id". +// +// To see the public keys which have already been imported, use the command +// `chainlink local vrf list`. See `chainlink local vrf help` for more +// key-manipulation commands.
+// +// The adapter output should be passed via EthTx to VRFCoordinator.sol's method +// fulfillRandomnessRequest. +// +// A "random" task must be initiated by a "randomnesslog" initiator which +// explicitly specifies which ethereum address the logs will be emitted from, +// such as +// +// {"initiators": [{"type": "randomnesslog","address": "0xvrfCoordinatorAddr"}]} +// +// This prevents the node from responding to potentially hostile log requests +// from other contracts, which could be crafted to prematurely reveal the random +// output if someone learns a prospective input seed prior to its use in the VRF. // // EthTxABIEncode // diff --git a/core/adapters/eth_bool.go b/core/adapters/eth_bool.go index fc337b39c73..9041dbeeba7 100644 --- a/core/adapters/eth_bool.go +++ b/core/adapters/eth_bool.go @@ -13,6 +13,11 @@ var evmTrue = "0x000000000000000000000000000000000000000000000000000000000000000 // EthBool holds no fields type EthBool struct{} +// TaskType returns the type of Adapter. +func (e *EthBool) TaskType() models.TaskType { + return TaskTypeEthBool +} + // Perform returns the abi encoding for a boolean // // For example, after converting the result false to hex encoded Ethereum diff --git a/core/adapters/eth_format.go b/core/adapters/eth_format.go index 22a107b9636..ec418decafc 100644 --- a/core/adapters/eth_format.go +++ b/core/adapters/eth_format.go @@ -12,6 +12,11 @@ import ( // EthBytes32 holds no fields. type EthBytes32 struct{} +// TaskType returns the type of Adapter. +func (e *EthBytes32) TaskType() models.TaskType { + return TaskTypeEthBytes32 +} + // Perform returns the hex value of the first 32 bytes of a string // so that it is in the proper format to be written to the blockchain. // @@ -33,6 +38,11 @@ func (*EthBytes32) Perform(input models.RunInput, _ *store.Store) models.RunOutp // EthInt256 holds no fields type EthInt256 struct{} +// TaskType returns the type of Adapter. 
+func (e *EthInt256) TaskType() models.TaskType { + return TaskTypeEthInt256 +} + // Perform returns the hex value of a given string so that it // is in the proper format to be written to the blockchain. // @@ -51,6 +61,11 @@ func (*EthInt256) Perform(input models.RunInput, _ *store.Store) models.RunOutpu // EthUint256 holds no fields. type EthUint256 struct{} +// TaskType returns the type of Adapter. +func (e *EthUint256) TaskType() models.TaskType { + return TaskTypeEthUint256 +} + // Perform returns the hex value of a given string so that it // is in the proper format to be written to the blockchain. // diff --git a/core/adapters/eth_tx.go b/core/adapters/eth_tx.go index dbce3332e48..1418be8ba94 100644 --- a/core/adapters/eth_tx.go +++ b/core/adapters/eth_tx.go @@ -33,6 +33,11 @@ type EthTx struct { GasLimit uint64 `json:"gasLimit"` } +// TaskType returns the type of Adapter. +func (e *EthTx) TaskType() models.TaskType { + return TaskTypeEthTx +} + // Perform creates the run result for the transaction if the existing run result // is not currently pending. Then it confirms the transaction was confirmed on // the blockchain. 
@@ -90,7 +95,7 @@ func createTxRunResult( gasLimit, ) if err != nil { - return models.NewRunOutputPendingConfirmationsWithData(models.JSON{}) + return models.NewRunOutputPendingConfirmationsWithData(input.Data()) } output, err := models.JSON{}.Add("result", tx.Hash.String()) @@ -114,7 +119,12 @@ func createTxRunResult( ) if state == strpkg.Safe { - return addReceiptToResult(receipt, input, output) + // I don't see how the receipt could possibly be nil here, but handle it just in case + if receipt == nil { + err := errors.New("missing receipt for transaction") + return models.NewRunOutputError(err) + } + return addReceiptToResult(*receipt, input, output) } return models.NewRunOutputPendingConfirmationsWithData(output) @@ -156,14 +166,22 @@ func ensureTxRunResult(input models.RunInput, str *strpkg.Store) models.RunOutpu } if state == strpkg.Safe { - return addReceiptToResult(receipt, input, output) + // FIXME: Receipt can definitely be nil here, although I don't really know how + // it can be "Safe" without a receipt... maybe we should just keep + // waiting for confirmations instead? 
+ if receipt == nil { + err := errors.New("missing receipt for transaction") + return models.NewRunOutputError(err) + } + + return addReceiptToResult(*receipt, input, output) } return models.NewRunOutputPendingConfirmationsWithData(output) } func addReceiptToResult( - receipt *eth.TxReceipt, + receipt eth.TxReceipt, input models.RunInput, data models.JSON, ) models.RunOutput { @@ -176,12 +194,7 @@ func addReceiptToResult( } } - if receipt == nil { - err := errors.New("missing receipt for transaction") - return models.NewRunOutputError(err) - } - - receipts = append(receipts, *receipt) + receipts = append(receipts, receipt) var err error data, err = data.Add("ethereumReceipts", receipts) if err != nil { diff --git a/core/adapters/eth_tx_abi_encode.go b/core/adapters/eth_tx_abi_encode.go index 340f5df15ca..01bbb2a7875 100644 --- a/core/adapters/eth_tx_abi_encode.go +++ b/core/adapters/eth_tx_abi_encode.go @@ -31,6 +31,11 @@ type EthTxABIEncode struct { GasLimit uint64 `json:"gasLimit"` } +// TaskType returns the type of Adapter. +func (etx *EthTxABIEncode) TaskType() models.TaskType { + return TaskTypeEthTxABIEncode +} + // UnmarshalJSON for custom JSON unmarshal that is strict, i.e. doesn't // accept spurious fields. (In particular, we wan't to ensure that we don't // get spurious fields in the FunctionABI, so that users don't get any wrong diff --git a/core/adapters/eth_tx_internal_test.go b/core/adapters/eth_tx_internal_test.go deleted file mode 100644 index dd31a857607..00000000000 --- a/core/adapters/eth_tx_internal_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package adapters - -import ( - "chainlink/core/store/models" - "github.com/stretchr/testify/assert" - "testing" -) - -// In pathological cases, the receipt can be nil. 
-// Need to ensure we don't panic in this case and return errored output instead -func TestEthTxAdapter_addReceiptToResult(t *testing.T) { - t.Parallel() - - j := models.JSON{} - input := *models.NewRunInput(models.NewID(), j, models.RunStatusUnstarted) - - output := addReceiptToResult(nil, input, j) - assert.True(t, output.HasError()) - assert.EqualError(t, output.Error(), "missing receipt for transaction") -} diff --git a/core/adapters/http.go b/core/adapters/http.go index abedf4cbfef..906e2e38703 100644 --- a/core/adapters/http.go +++ b/core/adapters/http.go @@ -2,6 +2,7 @@ package adapters import ( "bytes" + "context" "encoding/json" "errors" "fmt" @@ -11,19 +12,36 @@ import ( "net/url" "path" "strings" + "time" "chainlink/core/store" "chainlink/core/store/models" "chainlink/core/utils" + + "github.com/avast/retry-go" ) // HTTPGet requires a URL which is used for a GET request when the adapter is called. type HTTPGet struct { - URL models.WebURL `json:"url"` - GET models.WebURL `json:"get"` - Headers http.Header `json:"headers"` - QueryParams QueryParameters `json:"queryParams"` - ExtendedPath ExtendedPath `json:"extPath"` + URL models.WebURL `json:"url"` + GET models.WebURL `json:"get"` + Headers http.Header `json:"headers"` + QueryParams QueryParameters `json:"queryParams"` + ExtendedPath ExtendedPath `json:"extPath"` + AllowUnrestrictedNetworkAccess bool `json:"-"` +} + +// HTTPRequestConfig holds the configurable settings for an http request +type HTTPRequestConfig struct { + timeout time.Duration + maxAttempts uint + sizeLimit int64 + allowUnrestrictedNetworkAccess bool +} + +// TaskType returns the type of Adapter. 
+func (hga *HTTPGet) TaskType() models.TaskType { + return TaskTypeHTTPGet } // Perform ensures that the adapter's URL responds to a GET request without @@ -33,7 +51,9 @@ func (hga *HTTPGet) Perform(input models.RunInput, store *store.Store) models.Ru if err != nil { return models.NewRunOutputError(err) } - return sendRequest(input, request, store.Config.DefaultHTTPLimit()) + httpConfig := defaultHTTPConfig(store) + httpConfig.allowUnrestrictedNetworkAccess = hga.AllowUnrestrictedNetworkAccess + return sendRequest(input, request, httpConfig) } // GetURL retrieves the GET field if set otherwise returns the URL field @@ -58,12 +78,18 @@ func (hga *HTTPGet) GetRequest() (*http.Request, error) { // HTTPPost requires a URL which is used for a POST request when the adapter is called. type HTTPPost struct { - URL models.WebURL `json:"url"` - POST models.WebURL `json:"post"` - Headers http.Header `json:"headers"` - QueryParams QueryParameters `json:"queryParams"` - Body *string `json:"body,omitempty"` - ExtendedPath ExtendedPath `json:"extPath"` + URL models.WebURL `json:"url"` + POST models.WebURL `json:"post"` + Headers http.Header `json:"headers"` + QueryParams QueryParameters `json:"queryParams"` + Body *string `json:"body,omitempty"` + ExtendedPath ExtendedPath `json:"extPath"` + AllowUnrestrictedNetworkAccess bool `json:"-"` +} + +// TaskType returns the type of Adapter. 
+func (hpa *HTTPPost) TaskType() models.TaskType { + return TaskTypeHTTPPost } // Perform ensures that the adapter's URL responds to a POST request without @@ -73,7 +99,9 @@ func (hpa *HTTPPost) Perform(input models.RunInput, store *store.Store) models.R if err != nil { return models.NewRunOutputError(err) } - return sendRequest(input, request, store.Config.DefaultHTTPLimit()) + httpConfig := defaultHTTPConfig(store) + httpConfig.allowUnrestrictedNetworkAccess = hpa.AllowUnrestrictedNetworkAccess + return sendRequest(input, request, httpConfig) } // GetURL retrieves the POST field if set otherwise returns the URL field @@ -132,19 +160,24 @@ func setHeaders(request *http.Request, headers http.Header, contentType string) } } -func sendRequest(input models.RunInput, request *http.Request, limit int64) models.RunOutput { +func sendRequest(input models.RunInput, request *http.Request, config HTTPRequestConfig) models.RunOutput { tr := &http.Transport{ DisableCompression: true, } + if !config.allowUnrestrictedNetworkAccess { + tr.DialContext = restrictedDialContext + } client := &http.Client{Transport: tr} - response, err := client.Do(request) + + response, err := withRetry(client, request, config) + if err != nil { return models.NewRunOutputError(err) } defer response.Body.Close() - source := newMaxBytesReader(response.Body, limit) + source := newMaxBytesReader(response.Body, config.sizeLimit) bytes, err := ioutil.ReadAll(source) if err != nil { return models.NewRunOutputError(err) @@ -158,6 +191,34 @@ func sendRequest(input models.RunInput, request *http.Request, limit int64) mode return models.NewRunOutputCompleteWithResult(responseBody) } +func withRetry( + client *http.Client, + originalRequest *http.Request, + config HTTPRequestConfig, +) (*http.Response, error) { + var response *http.Response + err := retry.Do( + func() error { + ctx, cancel := context.WithTimeout(context.Background(), config.timeout) + defer cancel() + requestWithTimeout := 
originalRequest.Clone(ctx) + + r, err := client.Do(requestWithTimeout) + if err != nil { + return err + } + response = r + return nil + }, + retry.Attempts(config.maxAttempts), + ) + + if err != nil { + return nil, err + } + return response, nil +} + // maxBytesReader is inspired by // https://github.com/gin-contrib/size/blob/master/size.go type maxBytesReader struct { @@ -286,3 +347,12 @@ func (ep *ExtendedPath) UnmarshalJSON(input []byte) error { *ep = ExtendedPath(values) return err } + +func defaultHTTPConfig(store *store.Store) HTTPRequestConfig { + return HTTPRequestConfig{ + store.Config.DefaultHTTPTimeout(), + store.Config.DefaultMaxHTTPAttempts(), + store.Config.DefaultHTTPLimit(), + false, + } +} diff --git a/core/adapters/http_allowed_ips.go b/core/adapters/http_allowed_ips.go new file mode 100644 index 00000000000..8c7329a2913 --- /dev/null +++ b/core/adapters/http_allowed_ips.go @@ -0,0 +1,75 @@ +package adapters + +import ( + "context" + "fmt" + "net" + "time" +) + +var privateIPBlocks []*net.IPNet + +func init() { + for _, cidr := range []string{ + "127.0.0.0/8", // IPv4 loopback + "10.0.0.0/8", // RFC1918 + "172.16.0.0/12", // RFC1918 + "192.168.0.0/16", // RFC1918 + "169.254.0.0/16", // RFC3927 link-local + "::1/128", // IPv6 loopback + "fe80::/10", // IPv6 link-local + "fc00::/7", // IPv6 unique local addr + } { + _, block, err := net.ParseCIDR(cidr) + if err != nil { + panic(fmt.Errorf("parse error on %q: %v", cidr, err)) + } + privateIPBlocks = append(privateIPBlocks, block) + } +} + +func isRestrictedIP(ip net.IP) bool { + if !ip.IsGlobalUnicast() || + ip.IsLoopback() || + ip.IsLinkLocalUnicast() || + ip.IsLinkLocalMulticast() || + ip.IsInterfaceLocalMulticast() || + ip.IsUnspecified() || + ip.Equal(net.IPv4bcast) || + ip.Equal(net.IPv4allsys) || + ip.Equal(net.IPv4allrouter) || + ip.Equal(net.IPv4zero) || + ip.IsMulticast() { + return true + } + + for _, block := range privateIPBlocks { + if block.Contains(ip) { + return true + } + } + return 
false +} + +// restrictedDialContext wraps the Dialer such that after successful connection, +// we check the IP. +// If the resolved IP is restricted, close the connection and return an error. +func restrictedDialContext(ctx context.Context, network, address string) (net.Conn, error) { + con, err := (&net.Dialer{ + // Defaults from GoLang standard http package + // https://golang.org/pkg/net/http/#RoundTripper + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext(ctx, network, address) + if err == nil { + // If a connection could be established, ensure its not local or private + a, _ := con.RemoteAddr().(*net.TCPAddr) + + if isRestrictedIP(a.IP) { + defer con.Close() + return nil, fmt.Errorf("disallowed IP %s. Connections to local/private and multicast networks are disabled by default for security reasons. If you really want to allow this, consider using the httpgetwithunrestrictednetworkaccess or httppostwithunrestrictednetworkaccess adapter instead", a.IP.String()) + } + } + return con, err +} diff --git a/core/adapters/http_allowed_ips_test.go b/core/adapters/http_allowed_ips_test.go new file mode 100644 index 00000000000..d7af8413116 --- /dev/null +++ b/core/adapters/http_allowed_ips_test.go @@ -0,0 +1,44 @@ +package adapters + +import ( + "github.com/stretchr/testify/assert" + "net" + "testing" +) + +func TestHttpAllowedIPS_isRestrictedIP(t *testing.T) { + t.Parallel() + + tests := []struct { + ip net.IP + isRestricted bool + }{ + {net.ParseIP("1.1.1.1"), false}, + {net.ParseIP("216.239.32.10"), false}, + {net.ParseIP("2001:4860:4860::8888"), false}, + {net.ParseIP("127.0.0.1"), true}, + {net.ParseIP("255.255.255.255"), true}, + {net.ParseIP("224.0.0.1"), true}, + {net.ParseIP("224.0.0.2"), true}, + {net.ParseIP("224.1.1.1"), true}, + {net.ParseIP("0.0.0.0"), true}, + {net.ParseIP("192.168.0.1"), true}, + {net.ParseIP("192.168.1.255"), true}, + {net.ParseIP("255.255.255.255"), true}, + {net.ParseIP("10.0.0.1"), true}, 
+ {net.ParseIP("::1"), true}, + {net.ParseIP("fd57:03f9:9ef5:8a81::1"), true}, + {net.ParseIP("FD00::1"), true}, + {net.ParseIP("FF02::1"), true}, + {net.ParseIP("FE80:0000:0000:0000:abcd:abcd:abcd:abcd"), true}, + {net.IP{0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01}, true}, + {net.IP{0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01}, true}, + {net.IP{0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x02}, true}, + } + + for _, test := range tests { + t.Run(test.ip.String(), func(t *testing.T) { + assert.Equal(t, test.isRestricted, isRestrictedIP(test.ip)) + }) + } +} diff --git a/core/adapters/http_test.go b/core/adapters/http_test.go index 8c74ccefb3f..305b3ed85b8 100644 --- a/core/adapters/http_test.go +++ b/core/adapters/http_test.go @@ -83,9 +83,10 @@ func TestHTTPGet_Perform(t *testing.T) { defer cleanup() hga := adapters.HTTPGet{ - URL: cltest.WebURL(t, mock.URL), - Headers: test.headers, - QueryParams: test.queryParams, + URL: cltest.WebURL(t, mock.URL), + Headers: test.headers, + QueryParams: test.queryParams, + AllowUnrestrictedNetworkAccess: true, } assert.Equal(t, test.queryParams, hga.QueryParams) @@ -111,8 +112,12 @@ func TestHTTP_TooLarge(t *testing.T) { verb string factory func(models.WebURL) adapters.BaseAdapter }{ - {"GET", func(url models.WebURL) adapters.BaseAdapter { return &adapters.HTTPGet{URL: url} }}, - {"POST", func(url models.WebURL) adapters.BaseAdapter { return &adapters.HTTPPost{URL: url} }}, + {"GET", func(url models.WebURL) adapters.BaseAdapter { + return &adapters.HTTPGet{URL: url, AllowUnrestrictedNetworkAccess: true} + }}, + {"POST", func(url models.WebURL) adapters.BaseAdapter { + return &adapters.HTTPPost{URL: url, AllowUnrestrictedNetworkAccess: true} + }}, } for _, test := range tests { t.Run(test.verb, func(t *testing.T) { @@ -131,6 +136,38 @@ func TestHTTP_TooLarge(t *testing.T) { } } +func TestHTTP_PerformWithRestrictedIP(t *testing.T) { + cfg := orm.NewConfig() + store := &store.Store{Config: cfg} + + 
tests := []struct { + verb string + factory func(models.WebURL) adapters.BaseAdapter + }{ + {"GET", func(url models.WebURL) adapters.BaseAdapter { + return &adapters.HTTPGet{URL: url, AllowUnrestrictedNetworkAccess: false} + }}, + {"POST", func(url models.WebURL) adapters.BaseAdapter { + return &adapters.HTTPPost{URL: url, AllowUnrestrictedNetworkAccess: false} + }}, + } + for _, test := range tests { + t.Run(test.verb, func(t *testing.T) { + input := cltest.NewRunInputWithResult("inputValue") + payload := "" + mock, _ := cltest.NewHTTPMockServer(t, http.StatusOK, test.verb, payload) + defer mock.Close() + + h := test.factory(cltest.WebURL(t, mock.URL)) + result := h.Perform(input, store) + + require.Error(t, result.Error()) + assert.Contains(t, result.Error().Error(), "disallowed IP") + assert.Equal(t, "", result.Result().String()) + }) + } +} + func stringRef(str string) *string { return &str } @@ -263,10 +300,11 @@ func TestHttpPost_Perform(t *testing.T) { defer cleanup() hpa := adapters.HTTPPost{ - URL: cltest.WebURL(t, mock.URL), - Headers: test.headers, - QueryParams: test.queryParams, - Body: test.body, + URL: cltest.WebURL(t, mock.URL), + Headers: test.headers, + QueryParams: test.queryParams, + Body: test.body, + AllowUnrestrictedNetworkAccess: true, } assert.Equal(t, test.queryParams, hpa.QueryParams) @@ -653,3 +691,15 @@ func TestHTTP_BuildingURL(t *testing.T) { }) } } + +func TestHTTP_JSONDeserializationDoesNotSetAllowUnrestrictedNetworkAccess(t *testing.T) { + hga := adapters.HTTPGet{} + err := json.Unmarshal([]byte(`{"allowUnrestrictedNetworkAccess": true}`), &hga) + require.NoError(t, err) + assert.False(t, hga.AllowUnrestrictedNetworkAccess) + + hpa := adapters.HTTPPost{} + err = json.Unmarshal([]byte(`{"allowUnrestrictedNetworkAccess": true}`), &hpa) + require.NoError(t, err) + assert.False(t, hpa.AllowUnrestrictedNetworkAccess) +} diff --git a/core/adapters/json_parse.go b/core/adapters/json_parse.go index 63090a157c2..bf4b5e0ee6c 100644 --- 
a/core/adapters/json_parse.go +++ b/core/adapters/json_parse.go @@ -20,6 +20,11 @@ type JSONParse struct { Path JSONPath `json:"path"` } +// TaskType returns the type of Adapter. +func (jpa *JSONParse) TaskType() models.TaskType { + return TaskTypeJSONParse +} + // Perform returns the value associated to the desired field for a // given JSON object. // diff --git a/core/adapters/multiply.go b/core/adapters/multiply.go index ed1a54e8911..ac65d2a88b4 100644 --- a/core/adapters/multiply.go +++ b/core/adapters/multiply.go @@ -1,34 +1,17 @@ package adapters import ( - "encoding/json" - "math/big" + "chainlink/core/store/models" - "chainlink/core/utils" + "github.com/shopspring/decimal" ) // Multiply holds the a number to multiply the given value by. type Multiply struct { - Times *big.Float `json:"-"` + Times *decimal.Decimal `json:"times,omitempty"` } -type jsonMultiply struct { - Times *utils.BigFloat `json:"times,omitempty"` -} - -// MarshalJSON implements the json.Marshal interface. -func (ma Multiply) MarshalJSON() ([]byte, error) { - jsonObj := jsonMultiply{Times: (*utils.BigFloat)(ma.Times)} - return json.Marshal(jsonObj) -} - -// UnmarshalJSON implements the json.Unmarshal interface. -func (ma *Multiply) UnmarshalJSON(buf []byte) error { - var jsonObj jsonMultiply - err := json.Unmarshal(buf, &jsonObj) - if err != nil { - return err - } - ma.Times = jsonObj.Times.Value() - return nil +// TaskType returns the type of Adapter. 
+func (m *Multiply) TaskType() models.TaskType { + return TaskTypeMultiply } diff --git a/core/adapters/multiply_perform.go b/core/adapters/multiply_perform.go index 483a55a572a..ba0c6005f79 100644 --- a/core/adapters/multiply_perform.go +++ b/core/adapters/multiply_perform.go @@ -5,8 +5,9 @@ package adapters import ( "chainlink/core/store" "chainlink/core/store/models" - "fmt" - "math/big" + + "github.com/pkg/errors" + "github.com/shopspring/decimal" ) // Perform returns the input's "result" field, multiplied times the adapter's @@ -16,13 +17,12 @@ import ( // set to "100", the result's value will be "9999.4". func (ma *Multiply) Perform(input models.RunInput, _ *store.Store) models.RunOutput { val := input.Result() - i, ok := (&big.Float{}).SetString(val.String()) - if !ok { - return models.NewRunOutputError(fmt.Errorf("cannot parse into big.Float: %v", val.String())) + dec, err := decimal.NewFromString(val.String()) + if err != nil { + return models.NewRunOutputError(errors.Wrapf(err, "cannot parse into big.Float: %v", val.String())) } - if ma.Times != nil { - i.Mul(i, ma.Times) + dec = dec.Mul(*ma.Times) } - return models.NewRunOutputCompleteWithResult(i.String()) + return models.NewRunOutputCompleteWithResult(dec.String()) } diff --git a/core/adapters/multiply_test.go b/core/adapters/multiply_test.go index 0613c3e3ba1..a279a429adc 100644 --- a/core/adapters/multiply_test.go +++ b/core/adapters/multiply_test.go @@ -2,16 +2,22 @@ package adapters_test import ( "encoding/json" - "math/big" "testing" "chainlink/core/adapters" "chainlink/core/internal/cltest" + "github.com/shopspring/decimal" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func mustDecimal(t *testing.T, arg string) *decimal.Decimal { + ret, err := decimal.NewFromString(arg) + require.NoError(t, err) + return &ret +} + func TestMultiply_Marshal(t *testing.T) { tests := []struct { name string @@ -20,13 +26,18 @@ func TestMultiply_Marshal(t *testing.T) { }{ { "w/ value", 
- adapters.Multiply{Times: big.NewFloat(3.142)}, + adapters.Multiply{Times: mustDecimal(t, "3.142")}, `{"times":"3.142"}`, }, { - "w/ value", - adapters.Multiply{Times: big.NewFloat(5)}, - `{"times":"5"}`, + "w/ large value", + adapters.Multiply{Times: mustDecimal(t, "1000000000000000000")}, + `{"times":"1000000000000000000"}`, + }, + { + "w/ large float", + adapters.Multiply{Times: mustDecimal(t, "100000000000000000000.23")}, + `{"times":"100000000000000000000.23"}`, }, { "w/o value", @@ -52,8 +63,23 @@ func TestMultiply_Unmarshal(t *testing.T) { }{ { "w/ value", - `{"Times": 5}`, - adapters.Multiply{Times: big.NewFloat(5)}, + `{"Times": 1.23}`, + adapters.Multiply{Times: mustDecimal(t, "1.23")}, + }, + { + "w/ large value", + `{"Times": 1000000000000000000}`, + adapters.Multiply{Times: mustDecimal(t, "1000000000000000000")}, + }, + { + "w/ large string", + `{"Times": 100000000000000000000.23}`, + adapters.Multiply{Times: mustDecimal(t, "100000000000000000000.23")}, + }, + { + "w/ large float", + `{"Times": 100000000000000000000.23}`, + adapters.Multiply{Times: mustDecimal(t, "100000000000000000000.23")}, }, { "w/o value", @@ -75,22 +101,23 @@ func TestMultiply_Unmarshal(t *testing.T) { func TestMultiply_Perform(t *testing.T) { tests := []struct { name string - Times *big.Float + Times decimal.Decimal json string want string }{ - {"by 100", big.NewFloat(100), `{"result":"1.23"}`, "123"}, - {"float", big.NewFloat(100), `{"result":1.23}`, "123"}, - {"negative", big.NewFloat(-5), `{"result":"1.23"}`, "-6.15"}, - {"no times parameter", nil, `{"result":"3.14"}`, "3.14"}, - {"zero", big.NewFloat(0), `{"result":"1.23"}`, "0"}, + {"by 100", *mustDecimal(t, "100"), `{"result":"1.23"}`, "123"}, + {"float", *mustDecimal(t, "100"), `{"result":1.23}`, "123"}, + {"negative", *mustDecimal(t, "-5"), `{"result":"1.23"}`, "-6.15"}, + {"no times parameter", *mustDecimal(t, "1"), `{"result":"3.14"}`, "3.14"}, + {"zero", *mustDecimal(t, "0"), `{"result":"1.23"}`, "0"}, + {"large 
value", *mustDecimal(t, "1000000000000000000"), `{"result":"1.23"}`, "1230000000000000000"}, } for _, tt := range tests { test := tt t.Run(test.name, func(t *testing.T) { input := cltest.NewRunInputWithString(t, test.json) - adapter := adapters.Multiply{Times: test.Times} + adapter := adapters.Multiply{Times: &test.Times} result := adapter.Perform(input, nil) require.NoError(t, result.Error()) @@ -102,18 +129,17 @@ func TestMultiply_Perform(t *testing.T) { func TestMultiply_Perform_Failure(t *testing.T) { tests := []struct { name string - Times *big.Float + Times decimal.Decimal json string - want string }{ - {"object", big.NewFloat(100), `{"result":{"foo":"bar"}}`, ""}, + {"object", *mustDecimal(t, "100"), `{"result":{"foo":"bar"}}`}, } for _, tt := range tests { test := tt t.Run(test.name, func(t *testing.T) { input := cltest.NewRunInputWithString(t, test.json) - adapter := adapters.Multiply{Times: test.Times} + adapter := adapters.Multiply{Times: &test.Times} result := adapter.Perform(input, nil) require.Error(t, result.Error()) }) diff --git a/core/adapters/no_op.go b/core/adapters/no_op.go index b8816807f96..94e8d69442a 100644 --- a/core/adapters/no_op.go +++ b/core/adapters/no_op.go @@ -8,6 +8,11 @@ import ( // NoOp adapter type holds no fields type NoOp struct{} +// TaskType returns the type of Adapter. +func (noa *NoOp) TaskType() models.TaskType { + return TaskTypeNoOp +} + // Perform returns the input func (noa *NoOp) Perform(input models.RunInput, _ *store.Store) models.RunOutput { val := input.Result().Value() @@ -17,6 +22,11 @@ func (noa *NoOp) Perform(input models.RunInput, _ *store.Store) models.RunOutput // NoOpPend adapter type holds no fields type NoOpPend struct{} +// TaskType returns the type of Adapter. +func (noa *NoOpPend) TaskType() models.TaskType { + return TaskTypeNoOpPend +} + // Perform on this adapter type returns an empty RunResult with an // added field for the status to indicate the task is Pending. 
func (noa *NoOpPend) Perform(_ models.RunInput, _ *store.Store) models.RunOutput { diff --git a/core/adapters/quotient.go b/core/adapters/quotient.go index b2401eb0350..d11f35774c7 100644 --- a/core/adapters/quotient.go +++ b/core/adapters/quotient.go @@ -15,6 +15,11 @@ type Quotient struct { Dividend *big.Float `json:"-"` } +// TaskType returns the type of Adapter. +func (q *Quotient) TaskType() models.TaskType { + return TaskTypeQuotient +} + type jsonQuotient struct { Dividend *utils.BigFloat `json:"dividend,omitempty"` } diff --git a/core/adapters/random.go b/core/adapters/random.go index a7e5a4168d4..cc2bc92ee99 100644 --- a/core/adapters/random.go +++ b/core/adapters/random.go @@ -1,23 +1,113 @@ package adapters import ( - "crypto/rand" + "fmt" "math/big" "chainlink/core/store" "chainlink/core/store/models" + "chainlink/core/store/models/vrfkey" + "chainlink/core/utils" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" + "github.com/tidwall/gjson" ) -// Random adapter type holds no fields -type Random struct{} +// Random adapter type implements VRF calculation in its Perform method. +// +// The VRFCoordinator.sol contract and its integration with the chainlink node +// will handle interaction with the Random adapter, but if you need to interact +// with it directly, its input to should be a JSON object with "seed" and +// "keyHash" fields containing the input seed as a hex-represented uint256, and +// the keccak256 hash of the UNCOMPRESSED REPRESENTATION(*) of the public key +// E.g., given the input +// +// { +// "seed": +// "0x0000000000000000000000000000000000000000000000000000000000000001", +// "keyHash": +// "0xc0a6c424ac7157ae408398df7e5f4552091a69125d5dfcb7b8c2659029395bdf", +// } +// +// the adapter will return a proof for the VRF output given seed 1, as long as +// the keccak256 hash of its public key matches the hash in the input. +// Otherwise, it will error. 
+// +// The adapter returns the hex representation of a solidity bytes array which +// can be verified on-chain by VRF.sol#randomValueFromVRFProof. (I.e., it is the +// proof expected by that method, prepended by its length as a uint256.) +// +// (*) I.e., the 64-byte concatenation of the point's x- and y- ordinates as +// uint256's +type Random struct { + // Compressed hex representation public key used in Random's VRF proofs + // + // This is just a hex string because Random is instantiated by json.Unmarshal. + // (See adapters.For function.) + PublicKey string `json:"publicKey"` +} + +// TaskType returns the type of Adapter. +func (ra *Random) TaskType() models.TaskType { + return TaskTypeRandom +} -// Perform returns a random uint256 number in 0 | 2**256-1 range -func (ra *Random) Perform(input models.RunInput, _ *store.Store) models.RunOutput { - b := make([]byte, 32) - _, err := rand.Read(b) +// Perform returns the the proof for the VRF output given seed, or an error. +func (ra *Random) Perform(input models.RunInput, store *store.Store) models.RunOutput { + key, err := getKey(ra, input) + if err != nil { + return models.NewRunOutputError(errors.Wrapf(err, "bad key for vrf task")) + } + seed, err := getSeed(input) + if err != nil { + return models.NewRunOutputError(errors.Wrap(err, "bad seed for vrf task")) + } + solidityProof, err := store.VRFKeyStore.GenerateProof(key, seed) if err != nil { return models.NewRunOutputError(err) } - ran := new(big.Int).SetBytes(b) - return models.NewRunOutputCompleteWithResult(ran.String()) + ethereumByteArray := fmt.Sprintf("0x%x", utils.EVMEncodeBytes(solidityProof[:])) + return models.NewRunOutputCompleteWithResult(ethereumByteArray) +} + +// getSeed returns the numeric seed for the vrf task, or an error +func getSeed(input models.RunInput) (*big.Int, error) { + rawSeed, err := extractHex(input, "seed") + if err != nil { + return nil, err + } + seed := big.NewInt(0).SetBytes(rawSeed) + if err := utils.CheckUint256(seed); 
err != nil { + return nil, err + } + return seed, nil +} + +// getKey returns the public key for the VRF, or an error. +func getKey(ra *Random, input models.RunInput) (*vrfkey.PublicKey, error) { + hash, err := extractHex(input, "keyHash") + if err != nil { + return nil, err + } + key, err := vrfkey.NewPublicKeyFromHex(ra.PublicKey) + if err != nil { + return nil, errors.Wrapf(err, "could not parse %v as public key", ra.PublicKey) + } + if key.Hash() != common.BytesToHash(hash) { + return nil, fmt.Errorf( + "this task's keyHash %x does not match the input hash %x", key.Hash(), hash) + } + return key, nil +} + +// extractHex returns the bytes corresponding to the string input at the key +// field, or an error. +func extractHex(input models.RunInput, key string) ([]byte, error) { + rawValue := input.Data().Get(key) + if rawValue.Type != gjson.String { + return nil, fmt.Errorf("%s %#+v is not a hex string", key, rawValue) + } + return hexutil.Decode(rawValue.String()) } diff --git a/core/adapters/random_test.go b/core/adapters/random_test.go index cdcc11b4aa0..76fade3000b 100644 --- a/core/adapters/random_test.go +++ b/core/adapters/random_test.go @@ -5,17 +5,67 @@ import ( "testing" "chainlink/core/adapters" + "chainlink/core/internal/cltest" + "chainlink/core/services/signatures/secp256k1" + "chainlink/core/services/vrf/generated/solidity_verifier_wrapper" "chainlink/core/store/models" + "chainlink/core/utils" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +var suite = secp256k1.NewBlakeKeccackSecp256k1() + +// NB: For changes to the VRF solidity code to be reflected here, "go generate" +// must be run in 
core/services/vrf. +func vrfVerifier(t *testing.T) *solidity_verifier_wrapper.VRFTestHelper { + ethereumKey, err := crypto.GenerateKey() + require.NoError(t, err) + auth := bind.NewKeyedTransactor(ethereumKey) + genesisData := core.GenesisAlloc{auth.From: {Balance: big.NewInt(1000000000)}} + gasLimit := eth.DefaultConfig.Miner.GasCeil + backend := backends.NewSimulatedBackend(genesisData, gasLimit) + _, _, verifier, err := solidity_verifier_wrapper.DeployVRFTestHelper(auth, backend) + require.NoError(t, err) + backend.Commit() + return verifier +} + func TestRandom_Perform(t *testing.T) { - adapter := adapters.Random{} - result := adapter.Perform(models.RunInput{}, nil) - require.NoError(t, result.Error()) - res := new(big.Int) - res, ok := res.SetString(result.Result().String(), 10) - assert.True(t, ok) + store, cleanup := cltest.NewStore(t) + defer cleanup() + publicKey := cltest.StoredVRFKey(t, store) + adapter := adapters.Random{PublicKey: publicKey.String()} + jsonInput, err := models.JSON{}.Add("seed", "0x10") + require.NoError(t, err) // Can't fail + jsonInput, err = jsonInput.Add("keyHash", publicKey.Hash().Hex()) + require.NoError(t, err) // Can't fail + input := models.NewRunInput(&models.ID{}, jsonInput, models.RunStatusUnstarted) + result := adapter.Perform(*input, store) + require.NoError(t, result.Error(), "while running random adapter") + proof := hexutil.MustDecode(result.Result().String()) + // Check that proof is a solidity bytes array containing the actual proof + length := big.NewInt(0).SetBytes(proof[:utils.EVMWordByteLen]).Uint64() + require.Equal(t, length, uint64(len(proof)-utils.EVMWordByteLen)) + actualProof := proof[utils.EVMWordByteLen:] + randomOutput, err := vrfVerifier(t).RandomValueFromVRFProof(nil, actualProof) + require.NoError(t, err, "proof was invalid") + expected := common.HexToHash( + "c0a5642a409290ac65d9d44a4c52e53f31921ff1b7d235c585193a18190c82f1") + assert.Equal(t, expected, common.BigToHash(randomOutput), + "unexpected 
VRF output; perhas vrfkey.json or the output hashing function "+ + "in RandomValueFromVRFProof has changed?") + jsonInput, err = jsonInput.Add("keyHash", common.Hash{}) + require.NoError(t, err) + input = models.NewRunInput(&models.ID{}, jsonInput, models.RunStatusUnstarted) + result = adapter.Perform(*input, store) + require.Error(t, result.Error(), "must reject if keyHash doesn't match") } diff --git a/core/adapters/sleep.go b/core/adapters/sleep.go index 2746c40b220..c8d67e5416d 100644 --- a/core/adapters/sleep.go +++ b/core/adapters/sleep.go @@ -14,6 +14,11 @@ type Sleep struct { Until models.AnyTime `json:"until"` } +// TaskType returns the type of Adapter. +func (adapter *Sleep) TaskType() models.TaskType { + return TaskTypeSleep +} + // Perform returns the input RunResult after waiting for the specified Until parameter. func (adapter *Sleep) Perform(input models.RunInput, str *store.Store) models.RunOutput { duration := adapter.Duration() diff --git a/core/adapters/wasm.go b/core/adapters/wasm.go index bce4e33dc3f..202e248786d 100644 --- a/core/adapters/wasm.go +++ b/core/adapters/wasm.go @@ -14,6 +14,11 @@ type Wasm struct { WasmT string `json:"wasmt"` } +// TaskType returns the type of Adapter. +func (wasm *Wasm) TaskType() models.TaskType { + return TaskTypeWasm +} + // Perform ships the wasm representation to the SGX enclave where it is evaluated. func (wasm *Wasm) Perform(input models.RunInput, _ *store.Store) models.RunOutput { err := fmt.Errorf("Wasm is not supported without SGX") diff --git a/core/adapters/wasm_sgx.go b/core/adapters/wasm_sgx.go index 405a2d254ec..642e7909313 100644 --- a/core/adapters/wasm_sgx.go +++ b/core/adapters/wasm_sgx.go @@ -25,6 +25,11 @@ type Wasm struct { Wasm string `json:"wasm"` } +// TaskType returns the type of Adapter. +func (wasm *Wasm) TaskType() models.TaskType { + return TaskTypeWasm +} + // Perform ships the wasm representation to the SGX enclave where it is evaluated. 
func (wasm *Wasm) Perform(input models.RunInput, _ *store.Store) models.RunOutput { adapterJSON, err := json.Marshal(wasm) diff --git a/core/chainlink-sgx.Dockerfile b/core/chainlink-sgx.Dockerfile index 3ec6b319626..7edd5bb8817 100644 --- a/core/chainlink-sgx.Dockerfile +++ b/core/chainlink-sgx.Dockerfile @@ -1,5 +1,5 @@ # Build Chainlink with SGX -FROM smartcontract/builder:1.0.29 as builder +FROM smartcontract/builder:1.0.31 as builder WORKDIR /chainlink COPY GNUmakefile VERSION ./ @@ -17,7 +17,7 @@ COPY styleguide/package.json ./styleguide/ COPY tools/json-api-client/package.json ./tools/json-api-client/ COPY tools/local-storage/package.json ./tools/local-storage/ COPY tools/redux/package.json ./tools/redux/ -COPY tools/ts-test-helpers/package.json ./tools/ts-test-helpers/ +COPY tools/ts-helpers/package.json ./tools/ts-helpers/ COPY belt/package.json ./belt/ COPY belt/bin ./belt/bin COPY evm-test-helpers/package.json ./evm-test-helpers/ @@ -37,7 +37,7 @@ COPY styleguide ./styleguide COPY tools/json-api-client ./tools/json-api-client COPY tools/local-storage ./tools/local-storage COPY tools/redux ./tools/redux -COPY tools/ts-test-helpers ./tools/ts-test-helpers +COPY tools/ts-helpers ./tools/ts-helpers COPY belt ./belt COPY belt/bin ./belt/bin COPY evm-test-helpers ./evm-test-helpers diff --git a/core/chainlink.Dockerfile b/core/chainlink.Dockerfile index cab016211e0..6b952d5039f 100644 --- a/core/chainlink.Dockerfile +++ b/core/chainlink.Dockerfile @@ -1,5 +1,5 @@ # Build Chainlink -FROM smartcontract/builder:1.0.29 as builder +FROM smartcontract/builder:1.0.31 as builder # Have to reintroduce ENV vars from builder image ENV PATH /go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin @@ -16,7 +16,7 @@ COPY styleguide/package.json ./styleguide/ COPY tools/json-api-client/package.json ./tools/json-api-client/ COPY tools/local-storage/package.json ./tools/local-storage/ COPY tools/redux/package.json ./tools/redux/ -COPY 
tools/ts-test-helpers/package.json ./tools/ts-test-helpers/ +COPY tools/ts-helpers/package.json ./tools/ts-helpers/ COPY belt/package.json ./belt/ COPY belt/bin ./belt/bin COPY evm-test-helpers/package.json ./evm-test-helpers/ @@ -38,7 +38,7 @@ COPY styleguide ./styleguide COPY tools/json-api-client ./tools/json-api-client COPY tools/local-storage ./tools/local-storage COPY tools/redux ./tools/redux -COPY tools/ts-test-helpers ./tools/ts-test-helpers +COPY tools/ts-helpers ./tools/ts-helpers COPY belt ./belt COPY belt/bin ./belt/bin COPY evm-test-helpers ./evm-test-helpers diff --git a/core/cmd/app.go b/core/cmd/app.go index 7b61eb882f8..4c5e4e95dba 100644 --- a/core/cmd/app.go +++ b/core/cmd/app.go @@ -3,6 +3,7 @@ package cmd import ( "fmt" "os" + "regexp" "chainlink/core/store" @@ -242,6 +243,10 @@ func NewApp(client *Client) *cli.App { Name: "password, p", Usage: "text file holding the password for the node's account", }, + cli.StringFlag{ + Name: "vrfpassword, vp", + Usage: "textfile holding the password for the vrf keys; enables chainlink VRF oracle", + }, cli.Int64Flag{ Name: "replay-from-block, r", Usage: "historical block height from which to replay log-initiated jobs", @@ -251,6 +256,60 @@ func NewApp(client *Client) *cli.App { Usage: "Run the chainlink node", Action: client.RunNode, }, + cli.Command{ + Name: "vrf", + Usage: format(`Local commands for administering the database of VRF proof + keys. 
These commands will not affect the extant in-memory keys of + any live node.`), + Hidden: !client.Config.Dev(), + Subcommands: cli.Commands{ + { + Name: "create", + Usage: format(`Create a VRF key, encrypted with password from the + password file, and store it in the database.`), + Flags: flags("password, p"), + Action: client.CreateVRFKey, + }, + { + Name: "import", + Usage: "Import key from keyfile.", + Flags: append(flags("password, p"), flags("file, f")...), + Action: client.ImportVRFKey, + }, + { + Name: "export", + Usage: "Export key to keyfile.", + Flags: append(flags("file, f"), flags("publicKey, pk")...), + Action: client.ExportVRFKey, + }, + { + Name: "delete", + Usage: "Remove key from database, if present", + Flags: flags("publicKey, pk"), + Action: client.DeleteVRFKey, + }, + { + Name: "list", Usage: "List the public keys in the db", + Action: client.ListKeys, + }, + { + Name: "", + }, + { + Name: "xxxCreateWeakKeyPeriodYesIReallyKnowWhatIAmDoingAndDoNotCareAboutThisKeyMaterialFallingIntoTheWrongHandsExclamationPointExclamationPointExclamationPointExclamationPointIAmAMasochistExclamationPointExclamationPointExclamationPointExclamationPointExclamationPoint", + Usage: format(` + For testing purposes ONLY! DO NOT USE FOR ANY OTHER PURPOSE! + + Creates a key with weak key-devrivation-function parameters, so that it can be + decrypted quickly during tests. As a result, it would be cheap to brute-force + the encryption password for the key, if the ciphertext fell into the wrong + hands!`), + Flags: append(flags("password, p"), flags("file, f")...), + Action: client.CreateAndExportWeakVRFKey, + Hidden: !client.Config.Dev(), // For when this suite gets promoted out of dev mode + }, + }, + }, { Name: "rebroadcast-transactions", Usage: "manually rebroadcast txs matching nonce range with the specified gas price. This is useful in emergencies e.g. 
high gas prices and/or network congestion to forcibly clear out the pending TX queue", @@ -373,6 +432,15 @@ func NewApp(client *Client) *cli.App { }, }, }...) - return app } + +var whitespace = regexp.MustCompile(`\s+`) + +// format returns result of replacing all whitespace in s with a single space +func format(s string) string { + return string(whitespace.ReplaceAll([]byte(s), []byte(" "))) +} + +// flags is an abbreviated way to express a CLI flag +func flags(s string) []cli.Flag { return []cli.Flag{cli.StringFlag{Name: s}} } diff --git a/core/cmd/client.go b/core/cmd/client.go index 84552210085..f9335e2236f 100644 --- a/core/cmd/client.go +++ b/core/cmd/client.go @@ -298,7 +298,7 @@ type DiskCookieStore struct { // Save stores a cookie. func (d DiskCookieStore) Save(cookie *http.Cookie) error { - return ioutil.WriteFile(d.cookiePath(), []byte(cookie.String()), 0660) + return ioutil.WriteFile(d.cookiePath(), []byte(cookie.String()), 0600) } // Retrieve returns any Saved cookies. diff --git a/core/cmd/key_store_authenticator.go b/core/cmd/key_store_authenticator.go index 00b16b67e2d..ae64c78fac6 100644 --- a/core/cmd/key_store_authenticator.go +++ b/core/cmd/key_store_authenticator.go @@ -1,16 +1,19 @@ package cmd import ( - "errors" "fmt" + "github.com/pkg/errors" + "chainlink/core/store" + "chainlink/core/utils" ) // KeyStoreAuthenticator implements the Authenticate method for the store and // a password string. 
type KeyStoreAuthenticator interface { Authenticate(*store.Store, string) (string, error) + AuthenticateVRFKey(*store.Store, string) error } // TerminalKeyStoreAuthenticator contains fields for prompting the user and an @@ -77,10 +80,34 @@ func (auth TerminalKeyStoreAuthenticator) promptAndCreateAccount(store *store.St func createAccount(store *store.Store, password string) error { _, err := store.KeyStore.NewAccount(password) if err != nil { - return err - } - if err := store.SyncDiskKeyStoreToDB(); err != nil { - return err + return errors.Wrapf(err, "while creating ethereum keys") } return checkPassword(store, password) } + +// AuthenticateVRFKey creates an encrypted VRF key protected by password in +// store's db if db store has no extant keys. It unlocks at least one VRF key +// with given password, or returns an error. password must be non-trivial, as an +// empty password signifies that the VRF oracle functionality is disabled. +func (auth TerminalKeyStoreAuthenticator) AuthenticateVRFKey(store *store.Store, + password string) error { + if password == "" { + return fmt.Errorf("VRF password must be non-trivial") + } + keys, err := store.VRFKeyStore.Get(nil) + if err != nil { + return errors.Wrapf(err, "while checking for extant VRF keys") + } + if len(keys) == 0 { + fmt.Println( + "There are no VRF keys; creating a new key encrypted with given password") + if _, err := store.VRFKeyStore.CreateKey(password); err != nil { + return errors.Wrapf(err, "while creating a new encrypted VRF key") + } + } + return errors.Wrapf(utils.JustError(store.VRFKeyStore.Unlock(password)), + "there are VRF keys in the DB, but that password did not unlock any of "+ + "them... please check the password in the file specified by vrfpassword"+ + ". 
You can add and delete VRF keys in the DB using the "+ + "`chainlink local vrf` subcommands") +} diff --git a/core/cmd/local_client.go b/core/cmd/local_client.go index 8887ad64bc6..f11f59ee855 100644 --- a/core/cmd/local_client.go +++ b/core/cmd/local_client.go @@ -1,14 +1,16 @@ package cmd import ( - "errors" "fmt" "io" "io/ioutil" "math/big" "os" + "path/filepath" "strings" + "github.com/pkg/errors" + "chainlink/core/logger" "chainlink/core/services/chainlink" strpkg "chainlink/core/store" @@ -21,13 +23,21 @@ import ( "go.uber.org/zap/zapcore" ) +// ownerPermsMask are the file permission bits reserved for owner. +const ownerPermsMask = os.FileMode(0700) + // RunNode starts the Chainlink core. func (cli *Client) RunNode(c *clipkg.Context) error { + err := cli.Config.Validate() + if err != nil { + return err + } + updateConfig(cli.Config, c.Bool("debug"), c.Int64("replay-from-block")) logger.SetLogger(cli.Config.CreateProductionLogger()) logger.Infow("Starting Chainlink Node " + strpkg.Version + " at commit " + strpkg.Sha) - err := InitEnclave() + err = InitEnclave() if err != nil { return cli.errorOut(fmt.Errorf("error initializing SGX enclave: %+v", err)) } @@ -38,6 +48,9 @@ func (cli *Client) RunNode(c *clipkg.Context) error { logIfNonceOutOfSync(store) }) store := app.GetStore() + if err := checkFilePermissions(cli.Config.RootDir()); err != nil { + logger.Warn(err) + } pwd, err := passwordFromFile(c.String("password")) if err != nil { return cli.errorOut(fmt.Errorf("error reading password: %+v", err)) @@ -46,6 +59,17 @@ func (cli *Client) RunNode(c *clipkg.Context) error { if err != nil { return cli.errorOut(fmt.Errorf("error authenticating keystore: %+v", err)) } + if len(c.String("vrfpassword")) != 0 { + vrfpwd, err := passwordFromFile(c.String("vrfpassword")) + if err != nil { + return cli.errorOut(errors.Wrapf(err, + "error reading VRF password from vrfpassword file \"%s\"", + c.String("vrfpassword"))) + } + if err := 
cli.KeyStoreAuthenticator.AuthenticateVRFKey(store, vrfpwd); err != nil { + return cli.errorOut(errors.Wrapf(err, "while authenticating with VRF password")) + } + } var user models.User if _, err = NewFileAPIInitializer(c.String("api")).Initialize(store); err != nil && err != errNoCredentialFile { @@ -74,6 +98,37 @@ func loggedStop(app chainlink.Application) { logger.WarnIf(app.Stop()) } +func checkFilePermissions(rootDir string) error { + errorMsg := "%s has overly permissive file permissions, should be atleast %s" + keysDir := filepath.Join(rootDir, "tempkeys") + protectedFiles := []string{"secret", "cookie"} + err := filepath.Walk(keysDir, + func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + fileMode := info.Mode().Perm() + if fileMode&^ownerPermsMask != 0 { + return fmt.Errorf(errorMsg, path, ownerPermsMask) + } + return nil + }) + if err != nil { + return err + } + for _, fileName := range protectedFiles { + fileInfo, err := os.Lstat(filepath.Join(rootDir, fileName)) + if err != nil { + return err + } + perm := fileInfo.Mode().Perm() + if perm&^ownerPermsMask != 0 { + return fmt.Errorf(errorMsg, fileName, ownerPermsMask) + } + } + return nil +} + func passwordFromFile(pwdFile string) (string, error) { if len(pwdFile) == 0 { return "", nil diff --git a/core/cmd/local_client_vrf.go b/core/cmd/local_client_vrf.go new file mode 100644 index 00000000000..7c1e45b2897 --- /dev/null +++ b/core/cmd/local_client_vrf.go @@ -0,0 +1,222 @@ +package cmd + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + + "github.com/pkg/errors" + clipkg "github.com/urfave/cli" + + "chainlink/core/logger" + "chainlink/core/store" + "chainlink/core/store/models/vrfkey" + "chainlink/core/utils" +) + +func vRFKeyStore(cli *Client) *store.VRFKeyStore { + return cli.AppFactory.NewApplication(cli.Config).GetStore().VRFKeyStore +} + +// CreateVRFKey creates a key in the VRF keystore, protected by the password in +// the password file +func (cli 
*Client) CreateVRFKey(c *clipkg.Context) error { + password, err := getPassword(c) + if err != nil { + return err + } + key, err := vRFKeyStore(cli).CreateKey(string(password)) + if err != nil { + return errors.Wrapf(err, "while creating new account") + } + fmt.Printf(`Created keypair, with public key + +%s + +The following command will export the encrypted secret key from the db to : + +chainlink local vrf export -f -pk %s +`, key, key) + return nil +} + +// CreateAndExportWeakVRFKey creates a key in the VRF keystore, protected by the +// password in the password file, but with weak key-derivation-function +// parameters, which makes it cheaper for testing, but also more vulnerable to +// bruteforcing of the encyrpted key material. For testing purposes only! +// +// The key is only stored at the specified file location, not stored in the DB. +func (cli *Client) CreateAndExportWeakVRFKey(c *clipkg.Context) error { + password, err := getPassword(c) + if err != nil { + return err + } + key, err := vRFKeyStore(cli).CreateWeakInMemoryEncryptedKeyXXXTestingOnly( + string(password)) + if err != nil { + return errors.Wrapf(err, "while creating testing key") + } + if !c.IsSet("file") || !noFileToOverwrite(c.String("file")) { + errmsg := "must specify path to key file which does not already exist" + fmt.Println(errmsg) + return fmt.Errorf(errmsg) + } + fmt.Println("Don't use this key for anything sensitive!") + return key.WriteToDisk(c.String("file")) +} + +// getPassword retrieves the password from the file specified on the CL, or errors +func getPassword(c *clipkg.Context) ([]byte, error) { + if !c.IsSet("password") { + return nil, fmt.Errorf("must specify password file") + } + rawPassword, err := passwordFromFile(c.String("password")) + if err != nil { + return nil, errors.Wrapf(err, "could not read password from file %s", + c.String("password")) + } + return []byte(rawPassword), nil +} + +// getPasswordAndKeyFile retrieves the password and key json from the files +// 
specified on the CL, or errors +func getPasswordAndKeyFile(c *clipkg.Context) (password []byte, keyjson []byte, err error) { + password, err = getPassword(c) + if err != nil { + return nil, nil, err + } + if !c.IsSet("file") { + return nil, nil, fmt.Errorf("must specify key file") + } + keypath := c.String("file") + keyjson, err = ioutil.ReadFile(keypath) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to read file %s", keypath) + } + return password, keyjson, nil +} + +// ImportVRFKey reads a file into an EncryptedSecretKey in the db +func (cli *Client) ImportVRFKey(c *clipkg.Context) error { + password, keyjson, err := getPasswordAndKeyFile(c) + if err != nil { + return err + } + if err := vRFKeyStore(cli).Import(keyjson, string(password)); err != nil { + if err == store.MatchingVRFKeyError { + fmt.Println(`The database already has an entry for that public key.`) + var key struct{ PublicKey string } + if err := json.Unmarshal(keyjson, &key); err != nil { + fmt.Println("could not extract public key from json input") + return errors.Wrapf(err, "while extracting public key from %s", keyjson) + } + fmt.Printf(`If you want to import the new key anyway, delete the old key with the command + + %s + +(but maybe back it up first, with %s.) +`, + fmt.Sprintf("chainlink local delete -pk %s", key.PublicKey), + fmt.Sprintf("`chainlink local export -f -pk %s`", + key.PublicKey)) + return errors.Wrap(err, "while attempting to import key from CL") + } + return err + } + return nil +} + +// ExportVRFKey saves encrypted copy of VRF key with given public key to +// requested file path. +func (cli *Client) ExportVRFKey(c *clipkg.Context) error { + encryptedKey, err := getKeys(cli, c) + if err != nil { + return err + } + if !c.IsSet("file") { + return fmt.Errorf("must specify file to export to") // Or could default to stdout? + } + keypath := c.String("file") + _, err = os.Stat(keypath) + if err == nil { + return fmt.Errorf( + "refusing to overwrite existing file %s. 
Please move it or change the save path", + keypath) + } + if !os.IsNotExist(err) { + return errors.Wrapf(err, "while checking whether file %s exists", keypath) + } + if err := encryptedKey.WriteToDisk(keypath); err != nil { + return errors.Wrapf(err, "could not save %#+v to %s", encryptedKey, keypath) + } + return nil +} + +// getKeys retrieves the keys for an ExportVRFKey request +func getKeys(cli *Client, c *clipkg.Context) (*vrfkey.EncryptedSecretKey, error) { + publicKey, err := getPublicKey(c) + if err != nil { + return nil, err + } + enckey, err := vRFKeyStore(cli).GetSpecificKey(publicKey) + if err != nil { + return nil, errors.Wrapf(err, + "while retrieving keys with matching public key %s", publicKey.String()) + } + return enckey, nil +} + +// DeleteVRFKey deletes the VRF key with given public key from the db +// +// Since this runs in an independent process from any chainlink node, it cannot +// cause running nodes to forget the key, if they already have it unlocked. +func (cli *Client) DeleteVRFKey(c *clipkg.Context) error { + publicKey, err := getPublicKey(c) + if err != nil { + return err + } + if err := vRFKeyStore(cli).Delete(publicKey); err != nil { + if err == store.AttemptToDeleteNonExistentKeyFromDB { + fmt.Printf("There is already no entry in the DB for %s\n", publicKey) + } + return err + } + return nil +} + +func getPublicKey(c *clipkg.Context) (*vrfkey.PublicKey, error) { + if !c.IsSet("publicKey") { + return nil, fmt.Errorf("must specify public key") + } + publicKey, err := vrfkey.NewPublicKeyFromHex(c.String("publicKey")) + if err != nil { + return nil, errors.Wrap(err, "failed to parse public key") + } + return publicKey, nil +} + +// ListKeys Lists the keys in the db +func (cli *Client) ListKeys(c *clipkg.Context) error { + keys, err := vRFKeyStore(cli).ListKeys() + if err != nil { + return err + } + // TODO(alx) Figure out how to make a nice box out of this, like the other + // commands do. 
+ fmt.Println( + `******************************************************************** +Public keys of encrypted keys in database +********************************************************************`) + for _, key := range keys { + fmt.Println(key) + } + fmt.Println( + "********************************************************************") + logger.Infow("keys", "keys", keys) + return nil +} + +func noFileToOverwrite(path string) bool { + return os.IsNotExist(utils.JustError(os.Stat(path))) +} diff --git a/core/eth/client.go b/core/eth/client.go index 15eb94b36de..2c55d6be05b 100644 --- a/core/eth/client.go +++ b/core/eth/client.go @@ -2,9 +2,7 @@ package eth import ( "context" - "fmt" "math/big" - "strings" "chainlink/core/assets" "chainlink/core/utils" @@ -12,43 +10,38 @@ import ( ethereum "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/pkg/errors" - "github.com/shopspring/decimal" -) - -const ( - // FluxAggregatorName is the name of Chainlink's Ethereum contract for - // aggregating numerical data such as prices. - FluxAggregatorName = "FluxAggregator" ) //go:generate mockery -name Client -output ../internal/mocks/ -case=underscore // Client is the interface used to interact with an ethereum node. 
type Client interface { + CallerSubscriber LogSubscriber GetNonce(address common.Address) (uint64, error) GetEthBalance(address common.Address) (*assets.Eth, error) GetERC20Balance(address common.Address, contractAddress common.Address) (*big.Int, error) - GetAggregatorPrice(address common.Address, precision int32) (decimal.Decimal, error) - GetAggregatorRound(address common.Address) (*big.Int, error) - GetLatestSubmission(aggregatorAddress common.Address, oracleAddress common.Address) (*big.Int, *big.Int, error) SendRawTx(hex string) (common.Hash, error) GetTxReceipt(hash common.Hash) (*TxReceipt, error) + GetBlockHeight() (uint64, error) GetBlockByNumber(hex string) (BlockHeader, error) GetChainID() (*big.Int, error) - SubscribeToNewHeads(channel chan<- BlockHeader) (Subscription, error) + SubscribeToNewHeads(ctx context.Context, channel chan<- BlockHeader) (Subscription, error) } // LogSubscriber encapsulates only the methods needed for subscribing to ethereum log events. type LogSubscriber interface { GetLogs(q ethereum.FilterQuery) ([]Log, error) - SubscribeToLogs(channel chan<- Log, q ethereum.FilterQuery) (Subscription, error) + SubscribeToLogs(ctx context.Context, channel chan<- Log, q ethereum.FilterQuery) (Subscription, error) } //go:generate mockery -name Subscription -output ../internal/mocks/ -case=underscore // Subscription holds the methods for an ethereum log subscription. +// +// The Unsubscribe method cancels the sending of events. You must call Unsubscribe in all +// cases to ensure that resources related to the subscription are released. It can be +// called any number of times. type Subscription interface { Err() <-chan error Unsubscribe() @@ -60,6 +53,8 @@ type CallerSubscriberClient struct { CallerSubscriber } +var _ Client = (*CallerSubscriberClient)(nil) + //go:generate mockery -name CallerSubscriber -output ../internal/mocks/ -case=underscore // CallerSubscriber implements the Call and Subscribe functions. 
Call performs @@ -118,126 +113,6 @@ func (client *CallerSubscriberClient) GetERC20Balance(address common.Address, co return numLinkBigInt, nil } -var dec10 = decimal.NewFromInt(10) - -func newBigIntFromString(arg string) (*big.Int, error) { - if arg == "0x" { - // Oddly a legal value for zero - arg = "0x0" - } - ret, ok := new(big.Int).SetString(arg, 0) - if !ok { - return nil, fmt.Errorf("cannot convert '%s' to big int", arg) - } - return ret, nil -} - -func newDecimalFromString(arg string) (decimal.Decimal, error) { - if strings.HasPrefix(arg, "0x") { - // decimal package does not parse Hex values - value, err := newBigIntFromString(arg) - if err != nil { - return decimal.Zero, fmt.Errorf("cannot convert '%s' to decimal", arg) - } - return decimal.NewFromString(value.Text(10)) - } - return decimal.NewFromString(arg) -} - -// GetAggregatorPrice returns the current price at the given address. -func (client *CallerSubscriberClient) GetAggregatorPrice(address common.Address, precision int32) (decimal.Decimal, error) { - aggregator, err := GetV6Contract(FluxAggregatorName) - if err != nil { - return decimal.Decimal{}, errors.Wrap(err, "unable to get contract "+FluxAggregatorName) - } - data, err := aggregator.EncodeMessageCall("latestAnswer") - if err != nil { - return decimal.Decimal{}, errors.Wrap(err, "unable to encode latestAnswer message for contract "+FluxAggregatorName) - } - - var result string - args := CallArgs{ - To: address, - Data: data, - } - err = client.Call(&result, "eth_call", args, "latest") - if err != nil { - return decimal.Decimal{}, errors.Wrap(err, fmt.Sprintf("unable to fetch aggregator price from %s", address.Hex())) - } - raw, err := newDecimalFromString(result) - if err != nil { - return decimal.Decimal{}, errors.Wrap(err, fmt.Sprintf("unable to fetch aggregator price from %s", address.Hex())) - } - precisionDivisor := dec10.Pow(decimal.NewFromInt32(precision)) - return raw.Div(precisionDivisor), nil -} - -// GetAggregatorRound returns the 
latest round at the given address. -func (client *CallerSubscriberClient) GetAggregatorRound(address common.Address) (*big.Int, error) { - aggregator, err := GetV6Contract(FluxAggregatorName) - if err != nil { - return nil, errors.Wrap(err, "unable to get contract "+FluxAggregatorName) - } - data, err := aggregator.EncodeMessageCall("latestRound") - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("unable to fetch aggregator round from %s", address.Hex())) - } - - var result string - args := CallArgs{To: address, Data: data} - err = client.Call(&result, "eth_call", args, "latest") - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("unable to fetch aggregator round from %s", address.Hex())) - } - - round, err := newBigIntFromString(result) - if err != nil { - return nil, errors.Wrapf( - fmt.Errorf("unable to parse int from %s", result), - "unable to fetch aggregator round from %s", address.Hex()) - } - return round, nil -} - -// GetLatestSubmission returns the latest submission as a tuple, (answer, round) -// for a given oracle address. 
-func (client *CallerSubscriberClient) GetLatestSubmission(aggregatorAddress common.Address, oracleAddress common.Address) (*big.Int, *big.Int, error) { - errMessage := fmt.Sprintf("unable to fetch latest submission for %s from %s", oracleAddress.Hex(), aggregatorAddress.Hex()) - aggregator, err := GetV6Contract(FluxAggregatorName) - if err != nil { - return nil, nil, errors.Wrap(err, "unable to get contract "+FluxAggregatorName) - } - data, err := aggregator.EncodeMessageCall("latestSubmission", oracleAddress) - if err != nil { - return nil, nil, errors.Wrap(err, errMessage+"- unable to encode message call") - } - - var result string - args := CallArgs{To: aggregatorAddress, Data: data} - err = client.Call(&result, "eth_call", args, "latest") - if err != nil { - return nil, nil, errors.Wrap(err, errMessage+"- unable to call client") - } - - method, exists := aggregator.ABI.Methods["latestSubmission"] - if !exists { - return nil, nil, errors.New(errMessage + "- cannot find method latestSubmission on ABI") - } - - resultBytes, err := hexutil.Decode(result) - if err != nil { - return nil, nil, errors.Wrap(err, errMessage+"- unable to decode result") - } - - values, err := method.Outputs.UnpackValues(resultBytes) - if err != nil { - return nil, nil, errors.Wrap(err, errMessage+"- unable to unpack values") - } - latestAnswer := values[0].(*big.Int) - lastReportedRound := values[1].(*big.Int) - return latestAnswer, lastReportedRound, nil -} - // SendRawTx sends a signed transaction to the transaction pool. 
func (client *CallerSubscriberClient) SendRawTx(hex string) (common.Hash, error) { result := common.Hash{} @@ -252,6 +127,12 @@ func (client *CallerSubscriberClient) GetTxReceipt(hash common.Hash) (*TxReceipt return &receipt, err } +func (client *CallerSubscriberClient) GetBlockHeight() (uint64, error) { + var height hexutil.Uint64 + err := client.Call(&height, "eth_blockNumber") + return uint64(height), err +} + // GetBlockByNumber returns the block for the passed hex, or "latest", "earliest", "pending". func (client *CallerSubscriberClient) GetBlockByNumber(hex string) (BlockHeader, error) { var header BlockHeader @@ -275,21 +156,23 @@ func (client *CallerSubscriberClient) GetChainID() (*big.Int, error) { // SubscribeToLogs registers a subscription for push notifications of logs // from a given address. +// +// Inspired by the eth client's SubscribeToLogs: +// https://github.com/ethereum/go-ethereum/blob/762f3a48a00da02fe58063cb6ce8dc2d08821f15/ethclient/ethclient.go#L359 func (client *CallerSubscriberClient) SubscribeToLogs( + ctx context.Context, channel chan<- Log, q ethereum.FilterQuery, ) (Subscription, error) { - // https://github.com/ethereum/go-ethereum/blob/762f3a48a00da02fe58063cb6ce8dc2d08821f15/ethclient/ethclient.go#L359 - ctx := context.Background() sub, err := client.Subscribe(ctx, channel, "logs", utils.ToFilterArg(q)) return sub, err } // SubscribeToNewHeads registers a subscription for push notifications of new blocks. 
func (client *CallerSubscriberClient) SubscribeToNewHeads( + ctx context.Context, channel chan<- BlockHeader, ) (Subscription, error) { - ctx := context.Background() sub, err := client.Subscribe(ctx, channel, "newHeads") return sub, err } diff --git a/core/eth/client_test.go b/core/eth/client_test.go index 79bf8a8800e..22f647e2c99 100644 --- a/core/eth/client_test.go +++ b/core/eth/client_test.go @@ -13,8 +13,6 @@ import ( "chainlink/core/utils" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/shopspring/decimal" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -200,137 +198,3 @@ func TestCallerSubscriberClient_GetERC20Balance(t *testing.T) { }) } } - -func TestCallerSubscriberClient_GetAggregatorPrice(t *testing.T) { - address := cltest.NewAddress() - - // aggregatorLatestAnswerID is the first 4 bytes of the keccak256 of - // Chainlink's aggregator latestAnswer function. - const aggregatorLatestAnswerID = "50d25bcd" - aggregatorLatestAnswerSelector := eth.HexToFunctionSelector(aggregatorLatestAnswerID) - - expectedCallArgs := eth.CallArgs{ - To: address, - Data: aggregatorLatestAnswerSelector.Bytes(), - } - - tests := []struct { - name, response string - precision int32 - expectation decimal.Decimal - }{ - {"hex - Zero", "0x", 2, decimal.NewFromFloat(0)}, - {"hex", "0x0100", 2, decimal.NewFromFloat(2.56)}, - {"decimal", "10000000000000", 11, decimal.NewFromInt(100)}, - {"large decimal", "52050000000000000000", 11, decimal.RequireFromString("520500000")}, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - caller := new(mocks.CallerSubscriber) - ethClient := ð.CallerSubscriberClient{CallerSubscriber: caller} - - caller.On("Call", mock.Anything, "eth_call", expectedCallArgs, "latest").Return(nil). 
- Run(func(args mock.Arguments) { - res := args.Get(0).(*string) - *res = test.response - }) - result, err := ethClient.GetAggregatorPrice(address, test.precision) - require.NoError(t, err) - assert.True(t, test.expectation.Equal(result)) - caller.AssertExpectations(t) - }) - } -} - -func TestCallerSubscriberClient_GetAggregatorRound(t *testing.T) { - address := cltest.NewAddress() - - const aggregatorLatestRoundID = "668a0f02" - aggregatorLatestRoundSelector := eth.HexToFunctionSelector(aggregatorLatestRoundID) - - expectedCallArgs := eth.CallArgs{ - To: address, - Data: aggregatorLatestRoundSelector.Bytes(), - } - large, ok := new(big.Int).SetString("52050000000000000000", 10) - require.True(t, ok) - - tests := []struct { - name, response string - expectation *big.Int - }{ - {"zero", "0", big.NewInt(0)}, - {"small", "12", big.NewInt(12)}, - {"large", "52050000000000000000", large}, - {"hex zero default", "0x", big.NewInt(0)}, - {"hex zero", "0x0", big.NewInt(0)}, - {"hex", "0x0100", big.NewInt(256)}, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - caller := new(mocks.CallerSubscriber) - ethClient := ð.CallerSubscriberClient{CallerSubscriber: caller} - - caller.On("Call", mock.Anything, "eth_call", expectedCallArgs, "latest").Return(nil). 
- Run(func(args mock.Arguments) { - res := args.Get(0).(*string) - *res = test.response - }) - result, err := ethClient.GetAggregatorRound(address) - require.NoError(t, err) - assert.Equal(t, test.expectation, result) - caller.AssertExpectations(t) - }) - } -} - -func TestCallerSubscriberClient_GetLatestSubmission(t *testing.T) { - caller := new(mocks.CallerSubscriber) - ethClient := ð.CallerSubscriberClient{CallerSubscriber: caller} - aggregatorAddress := cltest.NewAddress() - oracleAddress := cltest.NewAddress() - - const aggregatorLatestSubmission = "bb07bacd" - aggregatorLatestSubmissionSelector := eth.HexToFunctionSelector(aggregatorLatestSubmission) - - callData := utils.ConcatBytes(aggregatorLatestSubmissionSelector.Bytes(), oracleAddress.Hash().Bytes()) - - expectedCallArgs := eth.CallArgs{ - To: aggregatorAddress, - Data: callData, - } - - tests := []struct { - name string - answer int64 - round int64 - expectedAnswer *big.Int - expectedRound *big.Int - }{ - {"zero", 0, 0, big.NewInt(0), big.NewInt(0)}, - {"small", 8, 12, big.NewInt(8), big.NewInt(12)}, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - caller.On("Call", mock.Anything, "eth_call", expectedCallArgs, "latest").Return(nil). 
- Run(func(args mock.Arguments) { - res := args.Get(0).(*string) - answerBytes, err := utils.EVMWordSignedBigInt(big.NewInt(test.answer)) - require.NoError(t, err) - roundBytes, err := utils.EVMWordBigInt(big.NewInt(test.round)) - require.NoError(t, err) - *res = hexutil.Encode(append(answerBytes, roundBytes...)) - }) - answer, round, err := ethClient.GetLatestSubmission(aggregatorAddress, oracleAddress) - require.NoError(t, err) - assert.Equal(t, test.expectedAnswer.String(), answer.String()) - assert.Equal(t, test.expectedRound.String(), round.String()) - caller.AssertExpectations(t) - }) - } -} diff --git a/core/eth/contracts.go b/core/eth/contracts.go index bac7c328bb3..caa3b1d0988 100644 --- a/core/eth/contracts.go +++ b/core/eth/contracts.go @@ -1,10 +1,11 @@ package eth import ( - "chainlink/core/logger" "fmt" "strings" + "chainlink/core/logger" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/gobuffalo/packr" @@ -12,12 +13,19 @@ import ( "github.com/tidwall/gjson" ) +type ContractCodec interface { + ABI() *abi.ABI + GetMethodID(method string) ([]byte, error) + EncodeMessageCall(method string, args ...interface{}) ([]byte, error) + UnpackLog(out interface{}, event string, log Log) error +} + // Contract holds the solidity contract's parsed ABI -type Contract struct { - ABI abi.ABI +type contractCodec struct { + abi abi.ABI } -func getContract(name string, box packr.Box) (*Contract, error) { +func getContractCodec(name string, box packr.Box) (ContractCodec, error) { jsonFile, err := box.Find(name + ".json") if err != nil { return nil, errors.Wrap(err, "unable to read contract JSON") @@ -29,7 +37,7 @@ func getContract(name string, box packr.Box) (*Contract, error) { return nil, err } - return &Contract{abiParsed}, nil + return &contractCodec{abiParsed}, nil } // GetContract loads the contract JSON file from ../../evm-contracts/abi/v0.4 @@ -38,9 +46,9 @@ func getContract(name string, box packr.Box) 
(*Contract, error) { // NB: These contracts can be built by running // yarn setup:contracts // in the base project directory. -func GetContract(name string) (*Contract, error) { +func GetContractCodec(name string) (ContractCodec, error) { box := packr.NewBox("../../evm-contracts/abi/v0.4") - return getContract(name, box) + return getContractCodec(name, box) } // GetV6Contract loads the contract JSON file from ../../evm-contracts/abi/v0.6 @@ -49,15 +57,19 @@ func GetContract(name string) (*Contract, error) { // NB: These contracts can be built by running // yarn setup:contracts // in the base project directory. -func GetV6Contract(name string) (*Contract, error) { +func GetV6ContractCodec(name string) (ContractCodec, error) { box := packr.NewBox("../../evm-contracts/abi/v0.6") - return getContract(name, box) + return getContractCodec(name, box) +} + +func (contract *contractCodec) ABI() *abi.ABI { + return &contract.abi } // EncodeMessageCall encodes method name and arguments into a byte array // to conform with the contract's ABI -func (contract *Contract) EncodeMessageCall(method string, args ...interface{}) ([]byte, error) { - return contract.ABI.Pack(method, args...) +func (cc *contractCodec) EncodeMessageCall(method string, args ...interface{}) ([]byte, error) { + return cc.abi.Pack(method, args...) } // GetMethodID returns the first 4 bytes of the keccak256 hash of the method @@ -70,8 +82,8 @@ func (contract *Contract) EncodeMessageCall(method string, args ...interface{}) // * foo(uint,uint) // The method name of the first one will be resolved as foo while the second one // will be resolved as foo0. 
-func (contract *Contract) GetMethodID(method string) ([]byte, error) { - mabi, found := contract.ABI.Methods[method] +func (cc *contractCodec) GetMethodID(method string) ([]byte, error) { + mabi, found := cc.abi.Methods[method] if !found { return []byte{}, errors.New("unable to find contract method " + method) } @@ -81,14 +93,18 @@ func (contract *Contract) GetMethodID(method string) ([]byte, error) { // MustGetV6ContractEventID finds the event for the given contract by searching // embedded contract assets from evm/, or panics if not found. func MustGetV6ContractEventID(name, eventName string) common.Hash { - contract, err := GetV6Contract(name) + cc, err := GetV6ContractCodec(name) if err != nil { logger.Panic(fmt.Errorf("unable to find contract %s", name)) } - event, found := contract.ABI.Events[eventName] + event, found := cc.ABI().Events[eventName] if !found { logger.Panic(fmt.Errorf("unable to find event %s for contract %s", eventName, name)) } return event.ID() } + +func (cc *contractCodec) UnpackLog(out interface{}, event string, log Log) error { + return gethUnpackLog(cc, out, event, log) +} diff --git a/core/eth/contracts_test.go b/core/eth/contracts_test.go index 4153e146cae..e44250db386 100644 --- a/core/eth/contracts_test.go +++ b/core/eth/contracts_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestGetContract(t *testing.T) { +func TestGetContractCodec(t *testing.T) { t.Parallel() tests := []struct { @@ -23,7 +23,7 @@ func TestGetContract(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - contract, err := GetContract(test.contract) + contract, err := GetContractCodec(test.contract) if test.expectErr { assert.Error(t, err) assert.Nil(t, contract) @@ -41,11 +41,11 @@ var address common.Address = common.HexToAddress( // NB: This test needs a compiled oracle contract, which can be built with // `yarn workspace chainlink run setup` in the base project directory. 
-func TestContract_EncodeMessageCall(t *testing.T) { +func TestContractCodec_EncodeMessageCall(t *testing.T) { t.Parallel() // Test with the Oracle contract - oracle, err := GetContract("Oracle") + oracle, err := GetContractCodec("Oracle") require.NoError(t, err) require.NotNil(t, oracle) @@ -56,11 +56,11 @@ func TestContract_EncodeMessageCall(t *testing.T) { // NB: This test needs a compiled oracle contract, which can be built with // `yarn workspace chainlink run setup` in the base project directory. -func TestContract_EncodeMessageCall_errors(t *testing.T) { +func TestContractCodec_EncodeMessageCall_errors(t *testing.T) { t.Parallel() // Test with the Oracle contract - oracle, err := GetContract("Oracle") + oracle, err := GetContractCodec("Oracle") require.NoError(t, err) require.NotNil(t, oracle) diff --git a/core/eth/gen_log_json.go b/core/eth/gen_log_json.go index 6e8633ebce5..249e6036f01 100755 --- a/core/eth/gen_log_json.go +++ b/core/eth/gen_log_json.go @@ -28,7 +28,7 @@ func (l Log) MarshalJSON() ([]byte, error) { var enc Log enc.Address = l.Address enc.Topics = l.Topics - enc.Data = l.Data + enc.Data = hexutil.Bytes(l.Data) enc.BlockNumber = hexutil.Uint64(l.BlockNumber) enc.TxHash = l.TxHash enc.TxIndex = hexutil.Uint(l.TxIndex) @@ -66,7 +66,7 @@ func (l *Log) UnmarshalJSON(input []byte) error { if dec.Data == nil { return errors.New("missing required field 'data' for Log") } - l.Data = *dec.Data + l.Data = UntrustedBytes(*dec.Data) if dec.BlockNumber != nil { l.BlockNumber = uint64(*dec.BlockNumber) } diff --git a/core/eth/geth_copied.go b/core/eth/geth_copied.go new file mode 100644 index 00000000000..3f77939b3f8 --- /dev/null +++ b/core/eth/geth_copied.go @@ -0,0 +1,144 @@ +package eth + +import ( + "fmt" + "math/big" + "reflect" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" +) + +// The code in this file is taken from the go-ethereum codebase. 
+ +// UnpackLog is taken from the go-ethereum codebase: +// https://github.com/ethereum/go-ethereum/blob/v1.9.11/accounts/abi/bind/base.go#L328 +func gethUnpackLog(codec *contractCodec, out interface{}, event string, log Log) error { + if len(log.Data) > 0 { + if err := codec.abi.Unpack(out, event, log.Data); err != nil { + return err + } + } + var indexed abi.Arguments + for _, arg := range codec.abi.Events[event].Inputs { + if arg.Indexed { + indexed = append(indexed, arg) + } + } + return parseTopics(out, indexed, log.Topics[1:]) +} + +// parseTopics is taken from the go-ethereum codebase: +// https://github.com/ethereum/go-ethereum/blob/v1.9.11/accounts/abi/bind/topics.go#L106 +var ( + reflectHash = reflect.TypeOf(common.Hash{}) + reflectAddress = reflect.TypeOf(common.Address{}) + reflectBigInt = reflect.TypeOf(new(big.Int)) +) + +func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) error { + // Sanity check that the fields and topics match up + if len(fields) != len(topics) { + return errors.New("topic/field count mismatch") + } + // Iterate over all the fields and reconstruct them from topics + for _, arg := range fields { + if !arg.Indexed { + return errors.New("non-indexed field in topic reconstruction") + } + + // If Go structs aren't kept correctly in sync with log fields defined in Solidity, this error will be returned. 
+ // The name convention is to remove underscores, capitalize all characters following them, and capitalize the + // first letter of the field: + // + // round_id => RoundId + // roundId => RoundId + // _roundId => RoundId + _, exists := reflect.TypeOf(out).Elem().FieldByName(capitalise(arg.Name)) + if !exists { + return errors.Errorf(`can't find matching struct field for log "%T", field "%v" (expected "%v")`, out, arg.Name, capitalise(arg.Name)) + } + + field := reflect.ValueOf(out).Elem().FieldByName(capitalise(arg.Name)) + + // Try to parse the topic back into the fields based on primitive types + switch field.Kind() { + case reflect.Bool: + if topics[0][common.HashLength-1] == 1 { + field.Set(reflect.ValueOf(true)) + } + case reflect.Int8: + num := new(big.Int).SetBytes(topics[0][:]) + field.Set(reflect.ValueOf(int8(num.Int64()))) + + case reflect.Int16: + num := new(big.Int).SetBytes(topics[0][:]) + field.Set(reflect.ValueOf(int16(num.Int64()))) + + case reflect.Int32: + num := new(big.Int).SetBytes(topics[0][:]) + field.Set(reflect.ValueOf(int32(num.Int64()))) + + case reflect.Int64: + num := new(big.Int).SetBytes(topics[0][:]) + field.Set(reflect.ValueOf(num.Int64())) + + case reflect.Uint8: + num := new(big.Int).SetBytes(topics[0][:]) + field.Set(reflect.ValueOf(uint8(num.Uint64()))) + + case reflect.Uint16: + num := new(big.Int).SetBytes(topics[0][:]) + field.Set(reflect.ValueOf(uint16(num.Uint64()))) + + case reflect.Uint32: + num := new(big.Int).SetBytes(topics[0][:]) + field.Set(reflect.ValueOf(uint32(num.Uint64()))) + + case reflect.Uint64: + num := new(big.Int).SetBytes(topics[0][:]) + field.Set(reflect.ValueOf(num.Uint64())) + + default: + // Ran out of plain primitive types, try custom types + + switch field.Type() { + case reflectHash: // Also covers all dynamic types + field.Set(reflect.ValueOf(topics[0])) + + case reflectAddress: + var addr common.Address + copy(addr[:], topics[0][common.HashLength-common.AddressLength:]) + 
field.Set(reflect.ValueOf(addr)) + + case reflectBigInt: + num := new(big.Int).SetBytes(topics[0][:]) + if arg.Type.T == abi.IntTy { + if num.Cmp(abi.MaxInt256) > 0 { + num.Add(abi.MaxUint256, big.NewInt(0).Neg(num)) + num.Add(num, big.NewInt(1)) + num.Neg(num) + } + } + field.Set(reflect.ValueOf(num)) + + default: + // Ran out of custom types, try the crazies + switch { + // static byte array + case arg.Type.T == abi.FixedBytesTy: + reflect.Copy(field, reflect.ValueOf(topics[0][:arg.Type.Size])) + default: + return fmt.Errorf("unsupported indexed type: %v", arg.Type) + } + } + } + topics = topics[1:] + } + return nil +} + +func capitalise(input string) string { + return abi.ToCamelCase(input) +} diff --git a/core/eth/types.go b/core/eth/types.go index 1c9b4e57075..345e054957e 100644 --- a/core/eth/types.go +++ b/core/eth/types.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "math/big" "regexp" "chainlink/core/utils" @@ -14,10 +15,18 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) +// WeiPerEth is amount of Wei currency units in one Eth. +var WeiPerEth = new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil) + +// This data can contain anything and is submitted by user on-chain, so we must +// be extra careful how we interact with it +type UntrustedBytes []byte + //go:generate gencodec -type Log -field-override logMarshaling -out gen_log_json.go // Log represents a contract log event. These events are generated by the LOG opcode and // stored/indexed by the node. +// NOTE: This is almost (but not quite) a copy of go-ethereum/core/types.Log, in log.go type Log struct { // Consensus fields: // address of the contract that generated the event @@ -25,7 +34,7 @@ type Log struct { // list of topics provided by the contract. Topics []common.Hash `json:"topics" gencodec:"required"` // supplied by the contract, usually ABI-encoded - Data []byte `json:"data" gencodec:"required"` + Data UntrustedBytes `json:"data" gencodec:"required"` // Derived fields. 
These fields are filled in by the node // but not secured by consensus. @@ -54,6 +63,31 @@ func (log Log) GetTopic(idx uint) (common.Hash, error) { return log.Topics[idx], nil } +// Copy creates a deep copy of a log. The LogBroadcaster creates a single websocket +// subscription for all log events that we're interested in and distributes them to +// the relevant subscribers elsewhere in the codebase. If a given log needs to be +// distributed to multiple subscribers while avoiding data races, it's necessary +// to make copies. +func (log Log) Copy() Log { + var cpy Log + cpy.Address = log.Address + if log.Topics != nil { + cpy.Topics = make([]common.Hash, len(log.Topics)) + copy(cpy.Topics, log.Topics) + } + if log.Data != nil { + cpy.Data = make([]byte, len(log.Data)) + copy(cpy.Data, log.Data) + } + cpy.BlockNumber = log.BlockNumber + cpy.TxHash = log.TxHash + cpy.TxIndex = log.TxIndex + cpy.BlockHash = log.BlockHash + cpy.Index = log.Index + cpy.Removed = log.Removed + return cpy +} + // logMarshaling represents an ethereum event log. // // NOTE: If this is changed, gen_log_json.go must be changed accordingly. It was @@ -116,8 +150,8 @@ func (txr *TxReceipt) Unconfirmed() bool { } // ChainlinkFulfilledTopic is the signature for the event emitted after calling -// ChainlinkClient.validateChainlinkCallback(requestId). -// https://chainlink/blob/master/evm/contracts/ChainlinkClient.sol +// ChainlinkClient.validateChainlinkCallback(requestId). 
See +// ../../evm-contracts/src/v0.6/ChainlinkClient.sol var ChainlinkFulfilledTopic = utils.MustHash("ChainlinkFulfilled(bytes32)") // FulfilledRunLog returns true if this tx receipt is the result of a @@ -218,3 +252,13 @@ func (f FunctionSelector) Scan(value interface{}) error { copy(f[:], temp) return nil } + +// SafeByteSlice returns an error on out of bounds access to a byte array, where a +// normal slice would panic instead +func (ary UntrustedBytes) SafeByteSlice(start int, end int) ([]byte, error) { + if end > len(ary) || start > end || start < 0 || end < 0 { + var empty []byte + return empty, errors.New("out of bounds slice access") + } + return ary[start:end], nil +} diff --git a/core/eth/types_test.go b/core/eth/types_test.go index 08da55fac4b..21d275109a3 100644 --- a/core/eth/types_test.go +++ b/core/eth/types_test.go @@ -119,3 +119,47 @@ func TestModels_Header_UnmarshalJSON(t *testing.T) { }) } } + +func TestSafeByteSlice_Success(t *testing.T) { + tests := []struct { + ary eth.UntrustedBytes + start int + end int + expected []byte + }{ + {[]byte{1, 2, 3}, 0, 0, []byte{}}, + {[]byte{1, 2, 3}, 0, 1, []byte{1}}, + {[]byte{1, 2, 3}, 1, 3, []byte{2, 3}}, + } + + for i, test := range tests { + t.Run(string(i), func(t *testing.T) { + actual, err := test.ary.SafeByteSlice(test.start, test.end) + assert.NoError(t, err) + assert.Equal(t, test.expected, actual) + }) + } +} + +func TestSafeByteSlice_Error(t *testing.T) { + tests := []struct { + ary eth.UntrustedBytes + start int + end int + }{ + {[]byte{1, 2, 3}, 2, -1}, + {[]byte{1, 2, 3}, 0, 4}, + {[]byte{1, 2, 3}, 3, 4}, + {[]byte{1, 2, 3}, 3, 2}, + {[]byte{1, 2, 3}, -1, 2}, + } + + for i, test := range tests { + t.Run(string(i), func(t *testing.T) { + actual, err := test.ary.SafeByteSlice(test.start, test.end) + assert.EqualError(t, err, "out of bounds slice access") + var expected []byte + assert.Equal(t, expected, actual) + }) + } +} diff --git a/core/gracefulpanic/channel.go 
b/core/gracefulpanic/channel.go index e5d800d708e..c0093900fca 100644 --- a/core/gracefulpanic/channel.go +++ b/core/gracefulpanic/channel.go @@ -2,15 +2,28 @@ package gracefulpanic import "sync" -var ch = make(chan struct{}) -var panicOnce sync.Once +type signal struct { + ch chan struct{} + panicOnce sync.Once +} + +type Signal interface { + Panic() + Wait() <-chan struct{} +} + +func NewSignal() Signal { + return &signal{ + ch: make(chan struct{}), + } +} -func Panic() { - panicOnce.Do(func() { - go close(ch) +func (p *signal) Panic() { + p.panicOnce.Do(func() { + go close(p.ch) }) } -func Wait() <-chan struct{} { - return ch +func (p *signal) Wait() <-chan struct{} { + return p.ch } diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go index f37b9ff3397..328e28bdffa 100644 --- a/core/internal/cltest/cltest.go +++ b/core/internal/cltest/cltest.go @@ -20,6 +20,7 @@ import ( "chainlink/core/assets" "chainlink/core/auth" "chainlink/core/cmd" + "chainlink/core/gracefulpanic" "chainlink/core/logger" "chainlink/core/services/chainlink" strpkg "chainlink/core/store" @@ -70,7 +71,7 @@ func init() { gin.SetMode(gin.TestMode) gomega.SetDefaultEventuallyTimeout(3 * time.Second) lvl := logLevelFromEnv() - logger.SetLogger(logger.CreateTestLogger(lvl)) + logger.SetLogger(CreateTestLogger(lvl)) } func logLevelFromEnv() zapcore.Level { @@ -420,7 +421,7 @@ func (ta *TestApplication) MustCreateJobRun(txHashBytes []byte, blockHashBytes [ // NewStoreWithConfig creates a new store with given config func NewStoreWithConfig(config *TestConfig) (*strpkg.Store, func()) { cleanupDB := PrepareTestDB(config) - s := strpkg.NewInsecureStore(config.Config) + s := strpkg.NewInsecureStore(config.Config, gracefulpanic.NewSignal()) return s, func() { cleanUpStore(config.t, s) cleanupDB() diff --git a/core/internal/cltest/factories.go b/core/internal/cltest/factories.go index c191f51a632..b282c4eea37 100644 --- a/core/internal/cltest/factories.go +++ 
b/core/internal/cltest/factories.go @@ -18,6 +18,7 @@ import ( "chainlink/core/assets" "chainlink/core/eth" "chainlink/core/logger" + "chainlink/core/services/vrf" "chainlink/core/store" strpkg "chainlink/core/store" "chainlink/core/store/models" @@ -233,11 +234,24 @@ func CreateTxWithNonceAndGasPrice( sentAt uint64, nonce uint64, gasPrice int64, +) *models.Tx { + return CreateTxWithNonceGasPriceAndRecipient(t, store, from, common.Address{}, sentAt, nonce, gasPrice) +} + +// CreateTxWithNonceGasPriceAndRecipient creates a Tx from a specified sender, recipient, sentAt, nonce and gas price +func CreateTxWithNonceGasPriceAndRecipient( + t testing.TB, + store *strpkg.Store, + from common.Address, + to common.Address, + sentAt uint64, + nonce uint64, + gasPrice int64, ) *models.Tx { data := make([]byte, 36) binary.LittleEndian.PutUint64(data, sentAt) - transaction := types.NewTransaction(nonce, common.Address{}, big.NewInt(0), 250000, big.NewInt(gasPrice), data) + transaction := types.NewTransaction(nonce, to, big.NewInt(0), 250000, big.NewInt(gasPrice), data) tx := &models.Tx{ From: from, SentAt: sentAt, @@ -378,6 +392,22 @@ func NewRunLog( } } +// NewRandomnessRequestLog(t, r, emitter, blk) is a RandomnessRequest log for +// the randomness request log represented by r. +func NewRandomnessRequestLog(t *testing.T, r vrf.RandomnessRequestLog, + emitter common.Address, blk int) eth.Log { + rawData, err := r.RawData() + require.NoError(t, err) + return eth.Log{ + Address: emitter, + BlockNumber: uint64(blk), + Data: rawData, + TxHash: NewHash(), + BlockHash: NewHash(), + Topics: []common.Hash{models.RandomnessRequestLogTopic, r.JobID}, + } +} + // NewServiceAgreementExecutionLog creates a log event for the given jobid, // address, block, and json, to simulate a request for execution on a service // agreement. 
@@ -409,6 +439,13 @@ func NewLink(t *testing.T, amount string) *assets.Link { return link } +func NewEth(t *testing.T, amount string) *assets.Eth { + eth := assets.NewEth(0) + eth, ok := eth.SetString(amount, 10) + assert.True(t, ok) + return eth +} + func StringToVersionedLogData0(t *testing.T, internalID, str string) []byte { buf := bytes.NewBuffer(hexutil.MustDecode(StringToHash(internalID).Hex())) buf.Write(utils.EVMWordUint64(1)) @@ -523,6 +560,18 @@ func Int(val interface{}) *utils.Big { } } +func MustEVMUintHexFromBase10String(t *testing.T, strings ...string) string { + var allBytes []byte + for _, s := range strings { + i, ok := big.NewInt(0).SetString(s, 10) + require.True(t, ok) + bs, err := utils.EVMWordBigInt(i) + require.NoError(t, err) + allBytes = append(allBytes, bs...) + } + return fmt.Sprintf("0x%0x", allBytes) +} + type MockSigner struct{} func (s MockSigner) SignHash(common.Hash) (models.Signature, error) { diff --git a/core/internal/cltest/logger_test.go b/core/internal/cltest/logger_test.go new file mode 100644 index 00000000000..a878674e52c --- /dev/null +++ b/core/internal/cltest/logger_test.go @@ -0,0 +1,14 @@ +package cltest + +import ( + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" +) + +func TestTestLogger(t *testing.T) { + logger := CreateTestLogger(zapcore.DebugLevel) + logger.Warn("this is a log") + require.Contains(t, MemoryLogTestingOnly().String(), "this is a log") +} diff --git a/core/internal/cltest/memory_sink.go b/core/internal/cltest/memory_sink.go new file mode 100644 index 00000000000..5c21d02a20f --- /dev/null +++ b/core/internal/cltest/memory_sink.go @@ -0,0 +1,75 @@ +package cltest + +// Based on https://stackoverflow.com/a/52737940 + +import ( + "bytes" + "log" + "net/url" + "sync" + + "chainlink/core/logger" + + "github.com/fatih/color" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// MemorySink implements zap.Sink by writing all messages to a buffer. 
+type MemorySink struct { + m sync.Mutex + b bytes.Buffer +} + +var _ zap.Sink = &MemorySink{} + +func (s *MemorySink) Write(p []byte) (n int, err error) { + s.m.Lock() + defer s.m.Unlock() + return s.b.Write(p) +} + +// Close is a dummy method to satisfy the zap.Sink interface +func (s *MemorySink) Close() error { return nil } + +// Sync is a dummy method to satisfy the zap.Sink interface +func (s *MemorySink) Sync() error { return nil } + +// String returns the full log contents, as a string +func (s *MemorySink) String() string { + s.m.Lock() + defer s.m.Unlock() + return s.b.String() +} + +var testMemoryLog *MemorySink +var createSinkOnce sync.Once + +func registerMemorySink() { + testMemoryLog = &MemorySink{m: sync.Mutex{}, b: bytes.Buffer{}} + if err := zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) { + return logger.PrettyConsole{testMemoryLog}, nil + }); err != nil { + panic(err) + } +} + +func MemoryLogTestingOnly() *MemorySink { + createSinkOnce.Do(registerMemorySink) + return testMemoryLog +} + +// CreateTestLogger creates a logger that directs output to PrettyConsole +// configured for test output, and to the buffer testMemoryLog. 
+func CreateTestLogger(lvl zapcore.Level) *zap.Logger { + var _ *MemorySink = MemoryLogTestingOnly() // Make sure memory log is created + color.NoColor = false + config := zap.NewProductionConfig() + config.Level.SetLevel(lvl) + config.OutputPaths = []string{"pretty://console", "memory://"} + zl, err := config.Build(zap.AddCallerSkip(1)) + if err != nil { + log.Fatal(err) + } + return zl +} diff --git a/core/internal/cltest/mocks.go b/core/internal/cltest/mocks.go index ae1527cb620..3fef61ad539 100644 --- a/core/internal/cltest/mocks.go +++ b/core/internal/cltest/mocks.go @@ -27,6 +27,7 @@ import ( "chainlink/core/store/orm" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/onsi/gomega" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -39,6 +40,11 @@ const LenientEthMock = "lenient" // and returns the store.config.ChainID const EthMockRegisterChainID = "eth_mock_register_chain_id" +// NoRegisterGetBlockNumber prevents the EthMock from expecting to have eth_blockNumber +// called by the LogBroadcaster at application startup. Most of our tests should expect +// this, so this flag should not be used frequently. 
+const NoRegisterGetBlockNumber = "no_register_get_block_number" + // MockCallerSubscriberClient create new EthMock Client func (ta *TestApplication) MockCallerSubscriberClient(flags ...string) *EthMock { if ta.ChainlinkApplication.HeadTracker.Connected() { @@ -50,13 +56,19 @@ func (ta *TestApplication) MockCallerSubscriberClient(flags ...string) *EthMock // MockEthOnStore given store return new EthMock Client func MockEthOnStore(t testing.TB, s *store.Store, flags ...string) *EthMock { mock := &EthMock{t: t, strict: true} + registerGetBlockNumber := true for _, flag := range flags { if flag == LenientEthMock { mock.strict = false } else if flag == EthMockRegisterChainID { mock.Register("eth_chainId", s.Config.ChainID()) + } else if flag == NoRegisterGetBlockNumber { + registerGetBlockNumber = false } } + if registerGetBlockNumber { + mock.Register("eth_blockNumber", hexutil.Uint64(1)) + } eth := ð.CallerSubscriberClient{CallerSubscriber: mock} if txm, ok := s.TxManager.(*store.EthTxManager); ok { txm.Client = eth @@ -69,7 +81,7 @@ func MockEthOnStore(t testing.TB, s *store.Store, flags ...string) *EthMock { // EthMock is a mock ethereum client type EthMock struct { Responses []MockResponse - Subscriptions []MockSubscription + Subscriptions []*MockSubscription newHeadsCalled bool logsCalled bool mutex sync.RWMutex @@ -213,16 +225,19 @@ func (mock *EthMock) Call(result interface{}, method string, args ...interface{} // Call, falling back to reflection if the values dont support the required // encoding interfaces func assignResult(result, response interface{}) error { - unmarshaler, uok := result.(encoding.TextUnmarshaler) - marshaler, mok := response.(encoding.TextMarshaler) - - if uok && mok { - bytes, err := marshaler.MarshalText() - if err != nil { - return err + if unmarshaler, ok := result.(encoding.TextUnmarshaler); ok { + switch resp := response.(type) { + case encoding.TextMarshaler: + bytes, err := resp.MarshalText() + if err != nil { + return err + } + 
return unmarshaler.UnmarshalText(bytes) + case string: + return unmarshaler.UnmarshalText([]byte(resp)) + case []byte: + return unmarshaler.UnmarshalText(resp) } - - return unmarshaler.UnmarshalText(bytes) } ref := reflect.ValueOf(result) @@ -231,7 +246,7 @@ func assignResult(result, response interface{}) error { } // RegisterSubscription register a mock subscription to the given name and channels -func (mock *EthMock) RegisterSubscription(name string, channels ...interface{}) MockSubscription { +func (mock *EthMock) RegisterSubscription(name string, channels ...interface{}) *MockSubscription { var channel interface{} if len(channels) > 0 { channel = channels[0] @@ -239,7 +254,7 @@ func (mock *EthMock) RegisterSubscription(name string, channels ...interface{}) channel = channelFromSubscriptionName(name) } - sub := MockSubscription{ + sub := &MockSubscription{ name: name, channel: channel, Errors: make(chan error, 1), @@ -288,7 +303,7 @@ func (mock *EthMock) Subscribe( return EmptyMockSubscription(), nil } else if args[0] == "logs" && !mock.logsCalled { mock.logsCalled = true - return MockSubscription{ + return &MockSubscription{ channel: make(chan eth.Log), Errors: make(chan error), }, nil @@ -334,21 +349,30 @@ func fwdHeaders(actual, mock interface{}) { // MockSubscription a mock subscription type MockSubscription struct { - name string - channel interface{} - Errors chan error + mut sync.Mutex + name string + channel interface{} + unsubscribed bool + Errors chan error } // EmptyMockSubscription return empty MockSubscription -func EmptyMockSubscription() MockSubscription { - return MockSubscription{Errors: make(chan error, 1), channel: make(chan struct{})} +func EmptyMockSubscription() *MockSubscription { + return &MockSubscription{Errors: make(chan error, 1), channel: make(chan struct{})} } // Err returns error channel from mes -func (mes MockSubscription) Err() <-chan error { return mes.Errors } +func (mes *MockSubscription) Err() <-chan error { return 
mes.Errors } // Unsubscribe closes the subscription -func (mes MockSubscription) Unsubscribe() { +func (mes *MockSubscription) Unsubscribe() { + mes.mut.Lock() + defer mes.mut.Unlock() + + if mes.unsubscribed { + return + } + mes.unsubscribed = true switch mes.channel.(type) { case chan struct{}: close(mes.channel.(chan struct{})) @@ -482,6 +506,12 @@ func (a CallbackAuthenticator) Authenticate(store *store.Store, pwd string) (str return a.Callback(store, pwd) } +func (a CallbackAuthenticator) AuthenticateVRFKey(*store.Store, string) error { + return nil +} + +var _ cmd.KeyStoreAuthenticator = CallbackAuthenticator{} + // BlockedRunner is a Runner that blocks until its channel is posted to type BlockedRunner struct { Done chan struct{} diff --git a/core/internal/cltest/postgres.go b/core/internal/cltest/postgres.go index 979bb4821ed..2eff0c7bef2 100644 --- a/core/internal/cltest/postgres.go +++ b/core/internal/cltest/postgres.go @@ -4,6 +4,8 @@ import ( "database/sql" "fmt" "net/url" + "regexp" + "strings" "testing" "chainlink/core/store/dbutil" @@ -46,7 +48,16 @@ func createPostgresChildDB(tc *TestConfig, originalURL string) func() { } func createTestDB(t testing.TB, parsed *url.URL) *url.URL { - dbname := fmt.Sprintf("%s_%s", parsed.Path[1:], models.NewID().String()) + dbname := fmt.Sprintf("%s_%s_%s", parsed.Path[0:], models.NewID().String()[:8], t.Name()) + dbname = strings.Replace(dbname, "/", "_", -1) + dbname = strings.Replace(dbname, "Test", "", 1) + dbname = strings.ToLower(dbname) + var safeCharsOnly = regexp.MustCompile(`[^0-9a-z_]+`) + dbname = safeCharsOnly.ReplaceAllString(dbname, "") + // NOTE: PostgreSQL's Max Identifier Length Is 63 Bytes + if len(dbname) > 63 { + dbname = dbname[:63] + } db, err := sql.Open(string(orm.DialectPostgres), parsed.String()) if err != nil { t.Fatalf("unable to open postgres database for creating test db: %+v", err) diff --git a/core/internal/cltest/vrf.go b/core/internal/cltest/vrf.go new file mode 100644 index 
00000000000..8b31d0a5030 --- /dev/null +++ b/core/internal/cltest/vrf.go @@ -0,0 +1,28 @@ +package cltest + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + strpkg "chainlink/core/store" + "chainlink/core/store/models/vrfkey" +) + +// StoredVRFKey creates a VRFKeyStore on store, imports a known VRF key into it, +// and returns the corresponding public key. +func StoredVRFKey(t *testing.T, store *strpkg.Store) *vrfkey.PublicKey { + store.VRFKeyStore = strpkg.NewVRFKeyStore(store) + keyFile, err := ioutil.ReadFile("../../tools/clroot/vrfkey.json") + require.NoError(t, err) + rawPassword, err := ioutil.ReadFile("../../tools/clroot/password.txt") + require.NoError(t, err) + password := strings.TrimSpace(string(rawPassword)) + require.NoError(t, store.VRFKeyStore.Import(keyFile, password)) + keys, err := store.VRFKeyStore.Unlock(password) // Extracts public key + require.NoError(t, err) + require.Len(t, keys, 1) + return &keys[0] +} diff --git a/core/internal/features_test.go b/core/internal/features_test.go index 8efc4e0ca2a..57bf298f0df 100644 --- a/core/internal/features_test.go +++ b/core/internal/features_test.go @@ -2,24 +2,33 @@ package internal_test import ( "bytes" + "encoding/hex" "encoding/json" "fmt" "io" "io/ioutil" + "math/big" "net/http" "net/http/httptest" "strings" "testing" "time" + "chainlink/core/assets" "chainlink/core/auth" ethpkg "chainlink/core/eth" "chainlink/core/internal/cltest" + "chainlink/core/services/signatures/secp256k1" + "chainlink/core/services/vrf" "chainlink/core/store/models" + "chainlink/core/store/models/vrfkey" "chainlink/core/utils" "chainlink/core/web" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tidwall/gjson" @@ -211,6 +220,7 @@ func TestIntegration_FeeBump(t 
*testing.T) { // received sufficient confirmations, so we wait again... eth.Context("ethTx.Perform()#6", func(eth *cltest.EthMock) { eth.Register("eth_getTransactionReceipt", thirdTxConfirmedReceipt) + eth.Register("eth_getBalance", "0x0000") }) newHeads <- ethpkg.BlockHeader{Number: cltest.BigHexInt(thirdTxConfirmedAt)} eth.EventuallyAllCalled(t) @@ -562,7 +572,7 @@ func TestIntegration_WeiWatchers(t *testing.T) { require.NoError(t, app.Start()) j := cltest.NewJobWithLogInitiator() - post := cltest.NewTask(t, "httppost", fmt.Sprintf(`{"url":"%v"}`, mockServer.URL)) + post := cltest.NewTask(t, "httppostwithunrestrictednetworkaccess", fmt.Sprintf(`{"url":"%v"}`, mockServer.URL)) tasks := []models.TaskSpec{post} j.Tasks = tasks j = cltest.CreateJobSpecViaWeb(t, app, j) @@ -713,6 +723,7 @@ func TestIntegration_SyncJobRuns(t *testing.T) { err := json.Unmarshal([]byte(message), &run) require.NoError(t, err) assert.Equal(t, j.ID, run.JobSpecID) + cltest.WaitForJobRunToComplete(t, app.Store, run) } func TestIntegration_SleepAdapter(t *testing.T) { @@ -792,6 +803,7 @@ func TestIntegration_ExternalInitiator(t *testing.T) { jobRun := cltest.CreateJobRunViaExternalInitiator(t, app, jobSpec, *eia, "") _, err = app.Store.JobRunsFor(jobRun.ID) assert.NoError(t, err) + cltest.WaitForJobRunToComplete(t, app.Store, jobRun) } func TestIntegration_ExternalInitiator_WithoutURL(t *testing.T) { @@ -824,6 +836,7 @@ func TestIntegration_ExternalInitiator_WithoutURL(t *testing.T) { jobRun := cltest.CreateJobRunViaExternalInitiator(t, app, jobSpec, *eia, "") _, err = app.Store.JobRunsFor(jobRun.ID) assert.NoError(t, err) + cltest.WaitForJobRunToComplete(t, app.Store, jobRun) } func TestIntegration_AuthToken(t *testing.T) { @@ -863,8 +876,14 @@ func TestIntegration_FluxMonitor_Deviation(t *testing.T) { // Configure fake Eth Node to return 10,000 cents when FM initiates price. 
eth.Context("Flux Monitor initializes price", func(mock *cltest.EthMock) { - mock.Register("eth_call", "10000") // 10,000 cents - mock.Register("eth_call", "0x1") // aggregator round: 1 + var data []byte + data = append(data, utils.EVMWordUint64(2)...) // RoundID + data = append(data, utils.EVMWordUint64(1)...) // Eligible + data = append(data, utils.EVMWordUint64(10000)...) // LatestAnswer + data = append(data, utils.EVMWordUint64(0)...) // TimesOutAt + data = append(data, utils.EVMWordUint64(app.Store.Config.MinimumContractPayment().ToInt().Uint64())...) // AvailableFunds + data = append(data, utils.EVMWordUint64(app.Store.Config.MinimumContractPayment().ToInt().Uint64())...) // PaymentAmount + mock.Register("eth_call", "0x"+hex.EncodeToString(data)) }) // Have server respond with 102 for price when FM checks external price @@ -891,6 +910,7 @@ func TestIntegration_FluxMonitor_Deviation(t *testing.T) { err := json.Unmarshal(buffer, &job) require.NoError(t, err) job.Initiators[0].InitiatorParams.Feeds = cltest.JSONFromString(t, fmt.Sprintf(`["%s"]`, mockServer.URL)) + job.Initiators[0].InitiatorParams.PollingInterval = models.Duration(15 * time.Second) j := cltest.CreateJobSpecViaWeb(t, app, job) jrs := cltest.WaitForRuns(t, j, app.Store, 1) @@ -901,7 +921,7 @@ func TestIntegration_FluxMonitor_Deviation(t *testing.T) { eth.Context("ethTx.Perform() for safe", func(eth *cltest.EthMock) { eth.Register("eth_getTransactionReceipt", confirmedReceipt) eth.Register("eth_getBalance", "0x100") - eth.Register("eth_call", "0x100") + eth.Register("eth_call", cltest.MustEVMUintHexFromBase10String(t, "256")) }) newHeads <- ethpkg.BlockHeader{Number: cltest.BigHexInt(10)} @@ -935,9 +955,15 @@ func TestIntegration_FluxMonitor_NewRound(t *testing.T) { eth.EventuallyAllCalled(t) // Configure fake Eth Node to return 10,000 cents when FM initiates price. 
- eth.Context("Flux Monitor initializes price", func(mock *cltest.EthMock) { - mock.Register("eth_call", "10000") // 10,000 cents - mock.Register("eth_call", "0x1") // aggregator round: 1 + eth.Context("Flux Monitor queries FluxAggregator.RoundState()", func(mock *cltest.EthMock) { + var data []byte + data = append(data, utils.EVMWordUint64(2)...) // RoundID + data = append(data, utils.EVMWordUint64(1)...) // Eligible + data = append(data, utils.EVMWordUint64(10000)...) // LatestAnswer + data = append(data, utils.EVMWordUint64(0)...) // TimesOutAt + data = append(data, utils.EVMWordUint64(app.Store.Config.MinimumContractPayment().ToInt().Uint64())...) // AvailableFunds + data = append(data, utils.EVMWordUint64(app.Store.Config.MinimumContractPayment().ToInt().Uint64())...) // PaymentAmount + mock.Register("eth_call", "0x"+hex.EncodeToString(data)) }) // Have price adapter server respond with 100 for price on initialization, @@ -956,6 +982,7 @@ func TestIntegration_FluxMonitor_NewRound(t *testing.T) { err := json.Unmarshal(buffer, &job) require.NoError(t, err) job.Initiators[0].InitiatorParams.Feeds = cltest.JSONFromString(t, fmt.Sprintf(`["%s"]`, mockServer.URL)) + job.Initiators[0].InitiatorParams.PollingInterval = models.Duration(15 * time.Second) j := cltest.CreateJobSpecViaWeb(t, app, job) _ = cltest.WaitForRuns(t, j, app.Store, 0) @@ -964,6 +991,8 @@ func TestIntegration_FluxMonitor_NewRound(t *testing.T) { // Send a NewRound log event to trigger a run. 
log := cltest.LogFromFixture(t, "testdata/new_round_log.json") + log.Address = job.Initiators[0].InitiatorParams.Address + attemptHash := cltest.NewHash() confirmedReceipt := ethpkg.TxReceipt{ Hash: attemptHash, @@ -973,8 +1002,116 @@ func TestIntegration_FluxMonitor_NewRound(t *testing.T) { eth.Register("eth_sendRawTransaction", attemptHash) // Initial tx attempt sent eth.Register("eth_getTransactionReceipt", confirmedReceipt) // confirmed for gas bumped txat }) + eth.Context("Flux Monitor queries FluxAggregator.RoundState()", func(mock *cltest.EthMock) { + var data []byte + data = append(data, utils.EVMWordUint64(3)...) // RoundID + data = append(data, utils.EVMWordUint64(1)...) // Eligible + data = append(data, utils.EVMWordUint64(10000)...) // LatestAnswer + data = append(data, utils.EVMWordUint64(0)...) // TimesOutAt + data = append(data, utils.EVMWordUint64(app.Store.Config.MinimumContractPayment().ToInt().Uint64())...) // AvailableFunds + data = append(data, utils.EVMWordUint64(app.Store.Config.MinimumContractPayment().ToInt().Uint64())...) 
// PaymentAmount + mock.Register("eth_call", "0x"+hex.EncodeToString(data)) + }) newRounds <- log jrs := cltest.WaitForRuns(t, j, app.Store, 1) _ = cltest.WaitForJobRunToPendConfirmations(t, app.Store, jrs[0]) eth.EventuallyAllCalled(t) } + +func TestIntegration_RandomnessRequest(t *testing.T) { + app, cleanup := cltest.NewApplicationWithKey(t, cltest.NoRegisterGetBlockNumber) + defer cleanup() + eth := app.MockCallerSubscriberClient() + logs := make(chan ethpkg.Log, 1) + txHash := cltest.NewHash() + eth.Context("app.Start()", func(eth *cltest.EthMock) { + eth.RegisterSubscription("logs", logs) + eth.Register("eth_getTransactionCount", `0x100`) // activate account nonce + eth.Register("eth_sendRawTransaction", txHash) + eth.Register("eth_getTransactionReceipt", ethpkg.TxReceipt{ + Hash: cltest.NewHash(), + BlockNumber: cltest.Int(10), + }) + }) + config, cfgCleanup := cltest.NewConfig(t) + defer cfgCleanup() + eth.Register("eth_chainId", config.ChainID()) + app.Start() + + j := cltest.FixtureCreateJobViaWeb(t, app, "testdata/randomness_job.json") + rawKey := j.Tasks[0].Params.Get("publicKey").String() + pk, err := vrfkey.NewPublicKeyFromHex(rawKey) + require.NoError(t, err) + var sk int64 = 1 + coordinatorAddress := j.Initiators[0].Address + + provingKey := vrfkey.NewPrivateKeyXXXTestingOnly(big.NewInt(sk)) + require.Equal(t, &provingKey.PublicKey, pk, + "public key in fixture %s does not match secret key in test %d (which has public key %s)", + pk, sk, provingKey.PublicKey.String()) + app.Store.VRFKeyStore.StoreInMemoryXXXTestingOnly(provingKey) + rawID := []byte(j.ID.String()) // CL requires ASCII hex encoding of jobID + r := vrf.RandomnessRequestLog{ + KeyHash: provingKey.PublicKey.Hash(), + Seed: big.NewInt(2), + JobID: common.BytesToHash(rawID), + Sender: cltest.NewAddress(), + Fee: assets.NewLink(100), + } + requestlog := cltest.NewRandomnessRequestLog(t, r, coordinatorAddress, 1) + + logs <- requestlog + cltest.WaitForRuns(t, j, app.Store, 1) + runs, err := 
app.Store.JobRunsFor(j.ID) + assert.NoError(t, err) + require.Len(t, runs, 1) + jr := runs[0] + require.Len(t, jr.TaskRuns, 2) + assert.False(t, jr.TaskRuns[0].Confirmations.Valid) + attempts := cltest.WaitForTxAttemptCount(t, app.Store, 1) + require.True(t, eth.AllCalled(), eth.Remaining()) + require.Len(t, attempts, 1) + + rawTx, err := hexutil.Decode(attempts[0].SignedRawTx) + require.NoError(t, err) + var tx *types.Transaction + require.NoError(t, rlp.DecodeBytes(rawTx, &tx)) + fixtureToAddress := j.Tasks[1].Params.Get("address").String() + require.Equal(t, *tx.To(), common.HexToAddress(fixtureToAddress)) + payload := tx.Data() + require.Equal(t, hexutil.Encode(payload[:4]), vrf.FulfillSelector()) + proofContainer := make(map[string]interface{}) + err = vrf.FulfillMethod().Inputs.UnpackIntoMap(proofContainer, payload[4:]) + require.NoError(t, err) + proof, ok := proofContainer["_proof"].([]byte) + require.True(t, ok) + require.Len(t, proof, vrf.ProofLength) + publicPoint, err := provingKey.PublicKey.Point() + require.NoError(t, err) + require.Equal(t, proof[:64], secp256k1.LongMarshal(publicPoint)) + goProof, err := vrf.UnmarshalSolidityProof(proof) + require.NoError(t, err, "problem parsing solidity proof") + proofValid, err := goProof.VerifyVRFProof() + require.NoError(t, err, "problem verifying solidity proof") + require.True(t, proofValid, "vrf proof was invalid: %s", goProof.String()) + + // Check that a log from a different address is rejected. (The node will only + // ever see this situation if the ethereum.FilterQuery for this job breaks, + // but it's hard to test that without a full integration test.) 
+ badAddress := common.HexToAddress("0x0000000000000000000000000000000000000001") + badRequestlog := cltest.NewRandomnessRequestLog(t, r, badAddress, 1) + logs <- badRequestlog + expectedLogTemplate := `log received from address %s, but expect logs from %s` + expectedLog := fmt.Sprintf(expectedLogTemplate, badAddress.String(), + coordinatorAddress.String()) + millisecondsWaited := 0 + expectedLogDeadline := 200 + for !strings.Contains(cltest.MemoryLogTestingOnly().String(), expectedLog) && + millisecondsWaited < expectedLogDeadline { + time.Sleep(time.Millisecond) + millisecondsWaited += 1 + if millisecondsWaited >= expectedLogDeadline { + assert.Fail(t, "message about log with bad source address not found") + } + } +} diff --git a/core/internal/mocks/client.go b/core/internal/mocks/client.go index 004c5593a89..d14928da70d 100644 --- a/core/internal/mocks/client.go +++ b/core/internal/mocks/client.go @@ -8,7 +8,7 @@ import ( common "github.com/ethereum/go-ethereum/common" - decimal "github.com/shopspring/decimal" + context "context" eth "chainlink/core/eth" @@ -22,43 +22,37 @@ type Client struct { mock.Mock } -// GetAggregatorPrice provides a mock function with given fields: address, precision -func (_m *Client) GetAggregatorPrice(address common.Address, precision int32) (decimal.Decimal, error) { - ret := _m.Called(address, precision) +// Call provides a mock function with given fields: result, method, args +func (_m *Client) Call(result interface{}, method string, args ...interface{}) error { + var _ca []interface{} + _ca = append(_ca, result, method) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) - var r0 decimal.Decimal - if rf, ok := ret.Get(0).(func(common.Address, int32) decimal.Decimal); ok { - r0 = rf(address, precision) + var r0 error + if rf, ok := ret.Get(0).(func(interface{}, string, ...interface{}) error); ok { + r0 = rf(result, method, args...) 
} else { - r0 = ret.Get(0).(decimal.Decimal) + r0 = ret.Error(0) } - var r1 error - if rf, ok := ret.Get(1).(func(common.Address, int32) error); ok { - r1 = rf(address, precision) - } else { - r1 = ret.Error(1) - } - - return r0, r1 + return r0 } -// GetAggregatorRound provides a mock function with given fields: address -func (_m *Client) GetAggregatorRound(address common.Address) (*big.Int, error) { - ret := _m.Called(address) +// GetBlockByNumber provides a mock function with given fields: hex +func (_m *Client) GetBlockByNumber(hex string) (eth.BlockHeader, error) { + ret := _m.Called(hex) - var r0 *big.Int - if rf, ok := ret.Get(0).(func(common.Address) *big.Int); ok { - r0 = rf(address) + var r0 eth.BlockHeader + if rf, ok := ret.Get(0).(func(string) eth.BlockHeader); ok { + r0 = rf(hex) } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } + r0 = ret.Get(0).(eth.BlockHeader) } var r1 error - if rf, ok := ret.Get(1).(func(common.Address) error); ok { - r1 = rf(address) + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(hex) } else { r1 = ret.Error(1) } @@ -66,20 +60,20 @@ func (_m *Client) GetAggregatorRound(address common.Address) (*big.Int, error) { return r0, r1 } -// GetBlockByNumber provides a mock function with given fields: hex -func (_m *Client) GetBlockByNumber(hex string) (eth.BlockHeader, error) { - ret := _m.Called(hex) +// GetBlockHeight provides a mock function with given fields: +func (_m *Client) GetBlockHeight() (uint64, error) { + ret := _m.Called() - var r0 eth.BlockHeader - if rf, ok := ret.Get(0).(func(string) eth.BlockHeader); ok { - r0 = rf(hex) + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() } else { - r0 = ret.Get(0).(eth.BlockHeader) + r0 = ret.Get(0).(uint64) } var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(hex) + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() } else { r1 = ret.Error(1) } @@ -156,38 +150,6 @@ func (_m *Client) 
GetEthBalance(address common.Address) (*assets.Eth, error) { return r0, r1 } -// GetLatestSubmission provides a mock function with given fields: aggregatorAddress, oracleAddress -func (_m *Client) GetLatestSubmission(aggregatorAddress common.Address, oracleAddress common.Address) (*big.Int, *big.Int, error) { - ret := _m.Called(aggregatorAddress, oracleAddress) - - var r0 *big.Int - if rf, ok := ret.Get(0).(func(common.Address, common.Address) *big.Int); ok { - r0 = rf(aggregatorAddress, oracleAddress) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - var r1 *big.Int - if rf, ok := ret.Get(1).(func(common.Address, common.Address) *big.Int); ok { - r1 = rf(aggregatorAddress, oracleAddress) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*big.Int) - } - } - - var r2 error - if rf, ok := ret.Get(2).(func(common.Address, common.Address) error); ok { - r2 = rf(aggregatorAddress, oracleAddress) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - // GetLogs provides a mock function with given fields: q func (_m *Client) GetLogs(q ethereum.FilterQuery) ([]eth.Log, error) { ret := _m.Called(q) @@ -278,13 +240,39 @@ func (_m *Client) SendRawTx(hex string) (common.Hash, error) { return r0, r1 } -// SubscribeToLogs provides a mock function with given fields: channel, q -func (_m *Client) SubscribeToLogs(channel chan<- eth.Log, q ethereum.FilterQuery) (eth.Subscription, error) { - ret := _m.Called(channel, q) +// Subscribe provides a mock function with given fields: _a0, _a1, _a2 +func (_m *Client) Subscribe(_a0 context.Context, _a1 interface{}, _a2 ...interface{}) (eth.Subscription, error) { + var _ca []interface{} + _ca = append(_ca, _a0, _a1) + _ca = append(_ca, _a2...) + ret := _m.Called(_ca...) + + var r0 eth.Subscription + if rf, ok := ret.Get(0).(func(context.Context, interface{}, ...interface{}) eth.Subscription); ok { + r0 = rf(_a0, _a1, _a2...) 
+ } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(eth.Subscription) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, interface{}, ...interface{}) error); ok { + r1 = rf(_a0, _a1, _a2...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribeToLogs provides a mock function with given fields: ctx, channel, q +func (_m *Client) SubscribeToLogs(ctx context.Context, channel chan<- eth.Log, q ethereum.FilterQuery) (eth.Subscription, error) { + ret := _m.Called(ctx, channel, q) var r0 eth.Subscription - if rf, ok := ret.Get(0).(func(chan<- eth.Log, ethereum.FilterQuery) eth.Subscription); ok { - r0 = rf(channel, q) + if rf, ok := ret.Get(0).(func(context.Context, chan<- eth.Log, ethereum.FilterQuery) eth.Subscription); ok { + r0 = rf(ctx, channel, q) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(eth.Subscription) @@ -292,8 +280,8 @@ func (_m *Client) SubscribeToLogs(channel chan<- eth.Log, q ethereum.FilterQuery } var r1 error - if rf, ok := ret.Get(1).(func(chan<- eth.Log, ethereum.FilterQuery) error); ok { - r1 = rf(channel, q) + if rf, ok := ret.Get(1).(func(context.Context, chan<- eth.Log, ethereum.FilterQuery) error); ok { + r1 = rf(ctx, channel, q) } else { r1 = ret.Error(1) } @@ -301,13 +289,13 @@ func (_m *Client) SubscribeToLogs(channel chan<- eth.Log, q ethereum.FilterQuery return r0, r1 } -// SubscribeToNewHeads provides a mock function with given fields: channel -func (_m *Client) SubscribeToNewHeads(channel chan<- eth.BlockHeader) (eth.Subscription, error) { - ret := _m.Called(channel) +// SubscribeToNewHeads provides a mock function with given fields: ctx, channel +func (_m *Client) SubscribeToNewHeads(ctx context.Context, channel chan<- eth.BlockHeader) (eth.Subscription, error) { + ret := _m.Called(ctx, channel) var r0 eth.Subscription - if rf, ok := ret.Get(0).(func(chan<- eth.BlockHeader) eth.Subscription); ok { - r0 = rf(channel) + if rf, ok := ret.Get(0).(func(context.Context, chan<- eth.BlockHeader) 
eth.Subscription); ok { + r0 = rf(ctx, channel) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(eth.Subscription) @@ -315,8 +303,8 @@ func (_m *Client) SubscribeToNewHeads(channel chan<- eth.BlockHeader) (eth.Subsc } var r1 error - if rf, ok := ret.Get(1).(func(chan<- eth.BlockHeader) error); ok { - r1 = rf(channel) + if rf, ok := ret.Get(1).(func(context.Context, chan<- eth.BlockHeader) error); ok { + r1 = rf(ctx, channel) } else { r1 = ret.Error(1) } diff --git a/core/internal/mocks/deviation_checker.go b/core/internal/mocks/deviation_checker.go index db885ebb65a..0fc8b89dffe 100644 --- a/core/internal/mocks/deviation_checker.go +++ b/core/internal/mocks/deviation_checker.go @@ -2,30 +2,16 @@ package mocks -import ( - eth "chainlink/core/eth" - context "context" - - mock "github.com/stretchr/testify/mock" -) +import mock "github.com/stretchr/testify/mock" // DeviationChecker is an autogenerated mock type for the DeviationChecker type type DeviationChecker struct { mock.Mock } -// Start provides a mock function with given fields: _a0, _a1 -func (_m *DeviationChecker) Start(_a0 context.Context, _a1 eth.Client) error { - ret := _m.Called(_a0, _a1) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, eth.Client) error); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Error(0) - } - - return r0 +// Start provides a mock function with given fields: +func (_m *DeviationChecker) Start() { + _m.Called() } // Stop provides a mock function with given fields: diff --git a/core/internal/mocks/deviation_checker_factory.go b/core/internal/mocks/deviation_checker_factory.go index a1823994202..a25192b2e2c 100644 --- a/core/internal/mocks/deviation_checker_factory.go +++ b/core/internal/mocks/deviation_checker_factory.go @@ -10,6 +10,8 @@ import ( models "chainlink/core/store/models" orm "chainlink/core/store/orm" + + time "time" ) // DeviationCheckerFactory is an autogenerated mock type for the DeviationCheckerFactory type @@ -17,13 +19,13 @@ type 
DeviationCheckerFactory struct { mock.Mock } -// New provides a mock function with given fields: _a0, _a1, _a2 -func (_m *DeviationCheckerFactory) New(_a0 models.Initiator, _a1 fluxmonitor.RunManager, _a2 *orm.ORM) (fluxmonitor.DeviationChecker, error) { - ret := _m.Called(_a0, _a1, _a2) +// New provides a mock function with given fields: _a0, _a1, _a2, _a3 +func (_m *DeviationCheckerFactory) New(_a0 models.Initiator, _a1 fluxmonitor.RunManager, _a2 *orm.ORM, _a3 time.Duration) (fluxmonitor.DeviationChecker, error) { + ret := _m.Called(_a0, _a1, _a2, _a3) var r0 fluxmonitor.DeviationChecker - if rf, ok := ret.Get(0).(func(models.Initiator, fluxmonitor.RunManager, *orm.ORM) fluxmonitor.DeviationChecker); ok { - r0 = rf(_a0, _a1, _a2) + if rf, ok := ret.Get(0).(func(models.Initiator, fluxmonitor.RunManager, *orm.ORM, time.Duration) fluxmonitor.DeviationChecker); ok { + r0 = rf(_a0, _a1, _a2, _a3) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(fluxmonitor.DeviationChecker) @@ -31,8 +33,8 @@ func (_m *DeviationCheckerFactory) New(_a0 models.Initiator, _a1 fluxmonitor.Run } var r1 error - if rf, ok := ret.Get(1).(func(models.Initiator, fluxmonitor.RunManager, *orm.ORM) error); ok { - r1 = rf(_a0, _a1, _a2) + if rf, ok := ret.Get(1).(func(models.Initiator, fluxmonitor.RunManager, *orm.ORM, time.Duration) error); ok { + r1 = rf(_a0, _a1, _a2, _a3) } else { r1 = ret.Error(1) } diff --git a/core/internal/mocks/flux_aggregator.go b/core/internal/mocks/flux_aggregator.go new file mode 100644 index 00000000000..32cfbfff850 --- /dev/null +++ b/core/internal/mocks/flux_aggregator.go @@ -0,0 +1,161 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
+ +package mocks + +import ( + abi "github.com/ethereum/go-ethereum/accounts/abi" + common "github.com/ethereum/go-ethereum/common" + + contracts "chainlink/core/services/eth/contracts" + + coreeth "chainlink/core/eth" + + eth "chainlink/core/services/eth" + + mock "github.com/stretchr/testify/mock" +) + +// FluxAggregator is an autogenerated mock type for the FluxAggregator type +type FluxAggregator struct { + mock.Mock +} + +// ABI provides a mock function with given fields: +func (_m *FluxAggregator) ABI() *abi.ABI { + ret := _m.Called() + + var r0 *abi.ABI + if rf, ok := ret.Get(0).(func() *abi.ABI); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abi.ABI) + } + } + + return r0 +} + +// Call provides a mock function with given fields: result, methodName, args +func (_m *FluxAggregator) Call(result interface{}, methodName string, args ...interface{}) error { + var _ca []interface{} + _ca = append(_ca, result, methodName) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}, string, ...interface{}) error); ok { + r0 = rf(result, methodName, args...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// EncodeMessageCall provides a mock function with given fields: method, args +func (_m *FluxAggregator) EncodeMessageCall(method string, args ...interface{}) ([]byte, error) { + var _ca []interface{} + _ca = append(_ca, method) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + var r0 []byte + if rf, ok := ret.Get(0).(func(string, ...interface{}) []byte); ok { + r0 = rf(method, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, ...interface{}) error); ok { + r1 = rf(method, args...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetMethodID provides a mock function with given fields: method +func (_m *FluxAggregator) GetMethodID(method string) ([]byte, error) { + ret := _m.Called(method) + + var r0 []byte + if rf, ok := ret.Get(0).(func(string) []byte); ok { + r0 = rf(method) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(method) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RoundState provides a mock function with given fields: oracle +func (_m *FluxAggregator) RoundState(oracle common.Address) (contracts.FluxAggregatorRoundState, error) { + ret := _m.Called(oracle) + + var r0 contracts.FluxAggregatorRoundState + if rf, ok := ret.Get(0).(func(common.Address) contracts.FluxAggregatorRoundState); ok { + r0 = rf(oracle) + } else { + r0 = ret.Get(0).(contracts.FluxAggregatorRoundState) + } + + var r1 error + if rf, ok := ret.Get(1).(func(common.Address) error); ok { + r1 = rf(oracle) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribeToLogs provides a mock function with given fields: listener +func (_m *FluxAggregator) SubscribeToLogs(listener eth.LogListener) (bool, eth.UnsubscribeFunc) { + ret := _m.Called(listener) + + var r0 bool + if rf, ok := ret.Get(0).(func(eth.LogListener) bool); ok { + r0 = rf(listener) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 eth.UnsubscribeFunc + if rf, ok := ret.Get(1).(func(eth.LogListener) eth.UnsubscribeFunc); ok { + r1 = rf(listener) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(eth.UnsubscribeFunc) + } + } + + return r0, r1 +} + +// UnpackLog provides a mock function with given fields: out, event, log +func (_m *FluxAggregator) UnpackLog(out interface{}, event string, log coreeth.Log) error { + ret := _m.Called(out, event, log) + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}, string, coreeth.Log) error); ok { + r0 = rf(out, 
event, log) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/core/internal/mocks/job_subscriber.go b/core/internal/mocks/job_subscriber.go index 16b593c5a41..8bafc871f07 100644 --- a/core/internal/mocks/job_subscriber.go +++ b/core/internal/mocks/job_subscriber.go @@ -81,20 +81,6 @@ func (_m *JobSubscriber) RemoveJob(ID *models.ID) error { return r0 } -// Start provides a mock function with given fields: -func (_m *JobSubscriber) Start() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - // Stop provides a mock function with given fields: func (_m *JobSubscriber) Stop() error { ret := _m.Called() diff --git a/core/internal/mocks/log_broadcaster.go b/core/internal/mocks/log_broadcaster.go new file mode 100644 index 00000000000..76522f1d556 --- /dev/null +++ b/core/internal/mocks/log_broadcaster.go @@ -0,0 +1,45 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. + +package mocks + +import ( + eth "chainlink/core/services/eth" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" +) + +// LogBroadcaster is an autogenerated mock type for the LogBroadcaster type +type LogBroadcaster struct { + mock.Mock +} + +// Register provides a mock function with given fields: address, listener +func (_m *LogBroadcaster) Register(address common.Address, listener eth.LogListener) bool { + ret := _m.Called(address, listener) + + var r0 bool + if rf, ok := ret.Get(0).(func(common.Address, eth.LogListener) bool); ok { + r0 = rf(address, listener) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Start provides a mock function with given fields: +func (_m *LogBroadcaster) Start() { + _m.Called() +} + +// Stop provides a mock function with given fields: +func (_m *LogBroadcaster) Stop() { + _m.Called() +} + +// Unregister provides a mock function with given fields: address, listener +func (_m *LogBroadcaster) 
Unregister(address common.Address, listener eth.LogListener) { + _m.Called(address, listener) +} diff --git a/core/internal/mocks/log_listener.go b/core/internal/mocks/log_listener.go new file mode 100644 index 00000000000..022e308c662 --- /dev/null +++ b/core/internal/mocks/log_listener.go @@ -0,0 +1,25 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// LogListener is an autogenerated mock type for the LogListener type +type LogListener struct { + mock.Mock +} + +// HandleLog provides a mock function with given fields: log, err +func (_m *LogListener) HandleLog(log interface{}, err error) { + _m.Called(log, err) +} + +// OnConnect provides a mock function with given fields: +func (_m *LogListener) OnConnect() { + _m.Called() +} + +// OnDisconnect provides a mock function with given fields: +func (_m *LogListener) OnDisconnect() { + _m.Called() +} diff --git a/core/internal/mocks/service.go b/core/internal/mocks/service.go new file mode 100644 index 00000000000..dfde6f78b82 --- /dev/null +++ b/core/internal/mocks/service.go @@ -0,0 +1,52 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
+ +package mocks + +import ( + models "chainlink/core/store/models" + + mock "github.com/stretchr/testify/mock" +) + +// Service is an autogenerated mock type for the Service type +type Service struct { + mock.Mock +} + +// AddJob provides a mock function with given fields: _a0 +func (_m *Service) AddJob(_a0 models.JobSpec) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(models.JobSpec) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RemoveJob provides a mock function with given fields: _a0 +func (_m *Service) RemoveJob(_a0 *models.ID) { + _m.Called(_a0) +} + +// Start provides a mock function with given fields: +func (_m *Service) Start() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Stop provides a mock function with given fields: +func (_m *Service) Stop() { + _m.Called() +} diff --git a/core/internal/mocks/tx_manager.go b/core/internal/mocks/tx_manager.go index 35fb20849a6..251460e3c81 100644 --- a/core/internal/mocks/tx_manager.go +++ b/core/internal/mocks/tx_manager.go @@ -11,7 +11,7 @@ import ( common "github.com/ethereum/go-ethereum/common" - decimal "github.com/shopspring/decimal" + context "context" eth "chainlink/core/eth" @@ -61,6 +61,23 @@ func (_m *TxManager) BumpGasUntilSafe(hash common.Hash) (*eth.TxReceipt, store.A return r0, r1, r2 } +// Call provides a mock function with given fields: result, method, args +func (_m *TxManager) Call(result interface{}, method string, args ...interface{}) error { + var _ca []interface{} + _ca = append(_ca, result, method) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}, string, ...interface{}) error); ok { + r0 = rf(result, method, args...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + // CheckAttempt provides a mock function with given fields: txAttempt, blockHeight func (_m *TxManager) CheckAttempt(txAttempt *models.TxAttempt, blockHeight uint64) (*eth.TxReceipt, store.AttemptState, error) { ret := _m.Called(txAttempt, blockHeight) @@ -214,43 +231,20 @@ func (_m *TxManager) Disconnect() { _m.Called() } -// GetAggregatorPrice provides a mock function with given fields: address, precision -func (_m *TxManager) GetAggregatorPrice(address common.Address, precision int32) (decimal.Decimal, error) { - ret := _m.Called(address, precision) - - var r0 decimal.Decimal - if rf, ok := ret.Get(0).(func(common.Address, int32) decimal.Decimal); ok { - r0 = rf(address, precision) - } else { - r0 = ret.Get(0).(decimal.Decimal) - } - - var r1 error - if rf, ok := ret.Get(1).(func(common.Address, int32) error); ok { - r1 = rf(address, precision) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetAggregatorRound provides a mock function with given fields: address -func (_m *TxManager) GetAggregatorRound(address common.Address) (*big.Int, error) { - ret := _m.Called(address) +// GetBlockByNumber provides a mock function with given fields: hex +func (_m *TxManager) GetBlockByNumber(hex string) (eth.BlockHeader, error) { + ret := _m.Called(hex) - var r0 *big.Int - if rf, ok := ret.Get(0).(func(common.Address) *big.Int); ok { - r0 = rf(address) + var r0 eth.BlockHeader + if rf, ok := ret.Get(0).(func(string) eth.BlockHeader); ok { + r0 = rf(hex) } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } + r0 = ret.Get(0).(eth.BlockHeader) } var r1 error - if rf, ok := ret.Get(1).(func(common.Address) error); ok { - r1 = rf(address) + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(hex) } else { r1 = ret.Error(1) } @@ -258,20 +252,20 @@ func (_m *TxManager) GetAggregatorRound(address common.Address) (*big.Int, error return r0, r1 } -// GetBlockByNumber provides a mock function with 
given fields: hex -func (_m *TxManager) GetBlockByNumber(hex string) (eth.BlockHeader, error) { - ret := _m.Called(hex) +// GetBlockHeight provides a mock function with given fields: +func (_m *TxManager) GetBlockHeight() (uint64, error) { + ret := _m.Called() - var r0 eth.BlockHeader - if rf, ok := ret.Get(0).(func(string) eth.BlockHeader); ok { - r0 = rf(hex) + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() } else { - r0 = ret.Get(0).(eth.BlockHeader) + r0 = ret.Get(0).(uint64) } var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(hex) + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() } else { r1 = ret.Error(1) } @@ -371,38 +365,6 @@ func (_m *TxManager) GetLINKBalance(address common.Address) (*assets.Link, error return r0, r1 } -// GetLatestSubmission provides a mock function with given fields: aggregatorAddress, oracleAddress -func (_m *TxManager) GetLatestSubmission(aggregatorAddress common.Address, oracleAddress common.Address) (*big.Int, *big.Int, error) { - ret := _m.Called(aggregatorAddress, oracleAddress) - - var r0 *big.Int - if rf, ok := ret.Get(0).(func(common.Address, common.Address) *big.Int); ok { - r0 = rf(aggregatorAddress, oracleAddress) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - var r1 *big.Int - if rf, ok := ret.Get(1).(func(common.Address, common.Address) *big.Int); ok { - r1 = rf(aggregatorAddress, oracleAddress) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*big.Int) - } - } - - var r2 error - if rf, ok := ret.Get(2).(func(common.Address, common.Address) error); ok { - r2 = rf(aggregatorAddress, oracleAddress) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - // GetLogs provides a mock function with given fields: q func (_m *TxManager) GetLogs(q ethereum.FilterQuery) ([]eth.Log, error) { ret := _m.Called(q) @@ -540,13 +502,39 @@ func (_m *TxManager) SignedRawTxWithBumpedGas(originalTx models.Tx, gasLimit uin return r0, r1 } -// 
SubscribeToLogs provides a mock function with given fields: channel, q -func (_m *TxManager) SubscribeToLogs(channel chan<- eth.Log, q ethereum.FilterQuery) (eth.Subscription, error) { - ret := _m.Called(channel, q) +// Subscribe provides a mock function with given fields: _a0, _a1, _a2 +func (_m *TxManager) Subscribe(_a0 context.Context, _a1 interface{}, _a2 ...interface{}) (eth.Subscription, error) { + var _ca []interface{} + _ca = append(_ca, _a0, _a1) + _ca = append(_ca, _a2...) + ret := _m.Called(_ca...) + + var r0 eth.Subscription + if rf, ok := ret.Get(0).(func(context.Context, interface{}, ...interface{}) eth.Subscription); ok { + r0 = rf(_a0, _a1, _a2...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(eth.Subscription) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, interface{}, ...interface{}) error); ok { + r1 = rf(_a0, _a1, _a2...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SubscribeToLogs provides a mock function with given fields: ctx, channel, q +func (_m *TxManager) SubscribeToLogs(ctx context.Context, channel chan<- eth.Log, q ethereum.FilterQuery) (eth.Subscription, error) { + ret := _m.Called(ctx, channel, q) var r0 eth.Subscription - if rf, ok := ret.Get(0).(func(chan<- eth.Log, ethereum.FilterQuery) eth.Subscription); ok { - r0 = rf(channel, q) + if rf, ok := ret.Get(0).(func(context.Context, chan<- eth.Log, ethereum.FilterQuery) eth.Subscription); ok { + r0 = rf(ctx, channel, q) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(eth.Subscription) @@ -554,8 +542,8 @@ func (_m *TxManager) SubscribeToLogs(channel chan<- eth.Log, q ethereum.FilterQu } var r1 error - if rf, ok := ret.Get(1).(func(chan<- eth.Log, ethereum.FilterQuery) error); ok { - r1 = rf(channel, q) + if rf, ok := ret.Get(1).(func(context.Context, chan<- eth.Log, ethereum.FilterQuery) error); ok { + r1 = rf(ctx, channel, q) } else { r1 = ret.Error(1) } @@ -563,13 +551,13 @@ func (_m *TxManager) SubscribeToLogs(channel chan<- 
eth.Log, q ethereum.FilterQu return r0, r1 } -// SubscribeToNewHeads provides a mock function with given fields: channel -func (_m *TxManager) SubscribeToNewHeads(channel chan<- eth.BlockHeader) (eth.Subscription, error) { - ret := _m.Called(channel) +// SubscribeToNewHeads provides a mock function with given fields: ctx, channel +func (_m *TxManager) SubscribeToNewHeads(ctx context.Context, channel chan<- eth.BlockHeader) (eth.Subscription, error) { + ret := _m.Called(ctx, channel) var r0 eth.Subscription - if rf, ok := ret.Get(0).(func(chan<- eth.BlockHeader) eth.Subscription); ok { - r0 = rf(channel) + if rf, ok := ret.Get(0).(func(context.Context, chan<- eth.BlockHeader) eth.Subscription); ok { + r0 = rf(ctx, channel) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(eth.Subscription) @@ -577,8 +565,8 @@ func (_m *TxManager) SubscribeToNewHeads(channel chan<- eth.BlockHeader) (eth.Su } var r1 error - if rf, ok := ret.Get(1).(func(chan<- eth.BlockHeader) error); ok { - r1 = rf(channel) + if rf, ok := ret.Get(1).(func(context.Context, chan<- eth.BlockHeader) error); ok { + r1 = rf(ctx, channel) } else { r1 = ret.Error(1) } diff --git a/core/internal/testdata/hello_world_job.json b/core/internal/testdata/hello_world_job.json index e710d50d06a..f69eeab2f9d 100644 --- a/core/internal/testdata/hello_world_job.json +++ b/core/internal/testdata/hello_world_job.json @@ -1,7 +1,7 @@ { "initiators": [{ "type": "web" }], "tasks": [ - { "type": "HttpGet", "params": { + { "type": "HTTPGetWithUnrestrictedNetworkAccess", "params": { "get": "https://bitstamp.net/api/ticker/", "headers": { "Key1": ["value"], diff --git a/core/internal/testdata/new_round_log.json b/core/internal/testdata/new_round_log.json index fed7951334c..5f5e32de360 100644 --- a/core/internal/testdata/new_round_log.json +++ b/core/internal/testdata/new_round_log.json @@ -10,9 +10,9 @@ "blockHash": "0x5e3bd2cc97a68136cead922330e2ec27201420b3eff182875e388474079fcd9e", "blockNumber": "0xa", "address": 
"0x2fCeA879fDC9FE5e90394faf0CA644a1749d0ad6", - "data": "0x", + "data": "0x000000000000000000000000000000000000000000000000000000000000000f", "topics": [ - "0xc3c45d1924f55369653f407ee9f095309d1e687b2c0011b1f709042d4f457e17", + "0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271", "0x0000000000000000000000000000000000000000000000000000000000000009", "0x000000000000000000000000f17f52151ebef6c7334fad080c5704d77216b732" ], diff --git a/core/internal/testdata/randomness_job.json b/core/internal/testdata/randomness_job.json new file mode 100644 index 00000000000..a2bc2597f4b --- /dev/null +++ b/core/internal/testdata/randomness_job.json @@ -0,0 +1,31 @@ +{ + "initiators": [ + { + "type": "randomnesslog", + "params": {"address": "0xaba5edc1a551e55b1a570c0e1f1055e5be11eca7"} + } + ], + "tasks": [ + { + "type": "random", + "params": { + "_comment": "Note: the following key is ONLY AN EXAMPLE, and not secure.", + "_comment2": "Use the public key reported when you ran `chainlink local vrf create`, instead", + "publicKey": + "0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F8179800", + "_comment3": "Corresponds to a secret key of 1. 
(So not secure at all!)" + } + }, + { + "type": "ethtx", + "params": { + "format": "preformatted", + "comment": "ethereum address of the VRF coordinator contract goes in 'address' field:", + "address": "0x5e1f1e555ca1ab1eb01dfaceca11ab1eba5eba11", + "comment2": "functionSelector from VRFCoordinator.sol, and javascript call:", + "comment3": "web3.eth.abi.encodeFunctionSignature('fulfillRandomnessRequest(bytes)')", + "functionSelector": "0x5e1c1059" + } + } + ] +} diff --git a/core/logger/logger.go b/core/logger/logger.go index d6875c71dbb..57b32296a75 100644 --- a/core/logger/logger.go +++ b/core/logger/logger.go @@ -8,7 +8,6 @@ import ( "net/url" "os" - "github.com/fatih/color" "github.com/pkg/errors" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -85,20 +84,6 @@ func CreateProductionLogger( return zl } -// CreateTestLogger creates a logger that directs output to PrettyConsole -// configured for test output. -func CreateTestLogger(lvl zapcore.Level) *zap.Logger { - color.NoColor = false - config := zap.NewProductionConfig() - config.Level.SetLevel(lvl) - config.OutputPaths = []string{"pretty://console"} - zl, err := config.Build(zap.AddCallerSkip(1)) - if err != nil { - log.Fatal(err) - } - return zl -} - // Infow logs an info message and any additional given information. func Infow(msg string, keysAndValues ...interface{}) { logger.Infow(msg, keysAndValues...) 
diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go index b3610882269..891953ee401 100644 --- a/core/services/chainlink/application.go +++ b/core/services/chainlink/application.go @@ -65,6 +65,7 @@ type ChainlinkApplication struct { SessionReaper services.SleeperTask pendingConnectionResumer *pendingConnectionResumer shutdownOnce sync.Once + shutdownSignal gracefulpanic.Signal } // NewApplication initializes a new store if one is not already @@ -72,7 +73,8 @@ type ChainlinkApplication struct { // the logger at the same directory and returns the Application to // be used by the node. func NewApplication(config *orm.Config, onConnectCallbacks ...func(Application)) Application { - store := store.NewStore(config) + shutdownSignal := gracefulpanic.NewSignal() + store := store.NewStore(config, shutdownSignal) config.SetRuntimeStore(store.ORM) statsPusher := synchronization.NewStatsPusher( @@ -97,13 +99,13 @@ func NewApplication(config *orm.Config, onConnectCallbacks ...func(Application)) SessionReaper: services.NewStoreReaper(store), Exiter: os.Exit, pendingConnectionResumer: pendingConnectionResumer, + shutdownSignal: shutdownSignal, } headTrackables := []strpkg.HeadTrackable{ store.TxManager, jobSubscriber, pendingConnectionResumer, - fluxMonitor, } for _, onConnectCallback := range onConnectCallbacks { headTrackable := &headTrackableCallback{func() { @@ -127,7 +129,7 @@ func (app *ChainlinkApplication) Start() error { go func() { select { case <-sigs: - case <-gracefulpanic.Wait(): + case <-app.shutdownSignal.Wait(): } logger.ErrorIf(app.Stop()) app.Exiter(0) diff --git a/core/services/eth/contract.go b/core/services/eth/contract.go new file mode 100644 index 00000000000..b85bef95c14 --- /dev/null +++ b/core/services/eth/contract.go @@ -0,0 +1,56 @@ +package eth + +import ( + "chainlink/core/eth" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/pkg/errors" +) + +type 
ConnectedContract interface { + eth.ContractCodec + Call(result interface{}, methodName string, args ...interface{}) error + SubscribeToLogs(listener LogListener) (connected bool, _ UnsubscribeFunc) +} + +type connectedContract struct { + eth.ContractCodec + address common.Address + ethClient eth.Client + logBroadcaster LogBroadcaster +} + +type UnsubscribeFunc func() + +func NewConnectedContract( + codec eth.ContractCodec, + address common.Address, + ethClient eth.Client, + logBroadcaster LogBroadcaster, +) ConnectedContract { + return &connectedContract{codec, address, ethClient, logBroadcaster} +} + +func (contract *connectedContract) Call(result interface{}, methodName string, args ...interface{}) error { + data, err := contract.EncodeMessageCall(methodName, args...) + if err != nil { + return errors.Wrap(err, "unable to encode message call") + } + + var rawResult hexutil.Bytes + callArgs := eth.CallArgs{To: contract.address, Data: data} + err = contract.ethClient.Call(&rawResult, "eth_call", callArgs, "latest") + if err != nil { + return errors.Wrap(err, "unable to call client") + } + + err = contract.ABI().Unpack(result, methodName, rawResult) + return errors.Wrap(err, "unable to unpack values") +} + +func (contract *connectedContract) SubscribeToLogs(listener LogListener) (connected bool, _ UnsubscribeFunc) { + connected = contract.logBroadcaster.Register(contract.address, listener) + unsub := func() { contract.logBroadcaster.Unregister(contract.address, listener) } + return connected, unsub +} diff --git a/core/services/eth/contracts/FluxAggregator.go b/core/services/eth/contracts/FluxAggregator.go new file mode 100644 index 00000000000..ff1af44321b --- /dev/null +++ b/core/services/eth/contracts/FluxAggregator.go @@ -0,0 +1,91 @@ +package contracts + +import ( + "math/big" + + "chainlink/core/eth" + ethsvc "chainlink/core/services/eth" + + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" +) + +//go:generate mockery -name FluxAggregator 
-output ../../../internal/mocks/ -case=underscore + +type FluxAggregator interface { + ethsvc.ConnectedContract + RoundState(oracle common.Address) (FluxAggregatorRoundState, error) +} + +const ( + // FluxAggregatorName is the name of Chainlink's Ethereum contract for + // aggregating numerical data such as prices. + FluxAggregatorName = "FluxAggregator" +) + +var ( + // AggregatorNewRoundLogTopic20191220 is the NewRound filter topic for + // the FluxAggregator as of Dec. 20th 2019. Eagerly fails if not found. + AggregatorNewRoundLogTopic20191220 = eth.MustGetV6ContractEventID("FluxAggregator", "NewRound") + // AggregatorAnswerUpdatedLogTopic20191220 is the AnswerUpdated filter topic for + // the FluxAggregator as of Dec. 20th 2019. Eagerly fails if not found. + AggregatorAnswerUpdatedLogTopic20191220 = eth.MustGetV6ContractEventID("FluxAggregator", "AnswerUpdated") +) + +type fluxAggregator struct { + ethsvc.ConnectedContract + ethClient eth.Client + address common.Address +} + +type LogNewRound struct { + eth.Log + RoundId *big.Int + StartedBy common.Address + StartedAt *big.Int +} + +type LogAnswerUpdated struct { + eth.Log + Current *big.Int + RoundId *big.Int + Timestamp *big.Int +} + +var fluxAggregatorLogTypes = map[common.Hash]interface{}{ + AggregatorNewRoundLogTopic20191220: LogNewRound{}, + AggregatorAnswerUpdatedLogTopic20191220: LogAnswerUpdated{}, +} + +func NewFluxAggregator(address common.Address, ethClient eth.Client, logBroadcaster ethsvc.LogBroadcaster) (FluxAggregator, error) { + codec, err := eth.GetV6ContractCodec(FluxAggregatorName) + if err != nil { + return nil, err + } + connectedContract := ethsvc.NewConnectedContract(codec, address, ethClient, logBroadcaster) + return &fluxAggregator{connectedContract, ethClient, address}, nil +} + +func (fa *fluxAggregator) SubscribeToLogs(listener ethsvc.LogListener) (connected bool, _ ethsvc.UnsubscribeFunc) { + return fa.ConnectedContract.SubscribeToLogs( + ethsvc.NewDecodingLogListener(fa, 
fluxAggregatorLogTypes, listener), + ) +} + +type FluxAggregatorRoundState struct { + ReportableRoundID uint32 `abi:"_reportableRoundId"` + EligibleToSubmit bool `abi:"_eligibleToSubmit"` + LatestAnswer *big.Int `abi:"_latestRoundAnswer"` + TimesOutAt uint64 `abi:"_timesOutAt"` + AvailableFunds *big.Int `abi:"_availableFunds"` + PaymentAmount *big.Int `abi:"_paymentAmount"` +} + +func (fa *fluxAggregator) RoundState(oracle common.Address) (FluxAggregatorRoundState, error) { + var result FluxAggregatorRoundState + err := fa.Call(&result, "roundState", oracle) + if err != nil { + return FluxAggregatorRoundState{}, errors.Wrap(err, "unable to encode message call") + } + return result, nil +} diff --git a/core/services/eth/contracts/FluxAggregator_test.go b/core/services/eth/contracts/FluxAggregator_test.go new file mode 100644 index 00000000000..e8c87be8944 --- /dev/null +++ b/core/services/eth/contracts/FluxAggregator_test.go @@ -0,0 +1,139 @@ +package contracts_test + +import ( + "encoding" + "math/big" + "testing" + + "chainlink/core/eth" + "chainlink/core/internal/cltest" + "chainlink/core/internal/mocks" + "chainlink/core/services/eth/contracts" + "chainlink/core/utils" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func mustEVMBigInt(t *testing.T, val *big.Int) []byte { + ret, err := utils.EVMWordBigInt(val) + require.NoError(t, err, "evm BigInt serialization") + return ret +} + +func makeRoundStateReturnData(roundID uint64, eligible bool, answer, timesOutAt, availableFunds, paymentAmount uint64) string { + var data []byte + data = append(data, utils.EVMWordUint64(roundID)...) + if eligible { + data = append(data, utils.EVMWordUint64(1)...) + } else { + data = append(data, utils.EVMWordUint64(0)...) + } + data = append(data, utils.EVMWordUint64(answer)...) 
+ data = append(data, utils.EVMWordUint64(timesOutAt)...) + data = append(data, utils.EVMWordUint64(availableFunds)...) + data = append(data, utils.EVMWordUint64(paymentAmount)...) + return hexutil.Encode(data) +} + +func TestFluxAggregatorClient_RoundState(t *testing.T) { + aggregatorAddress := cltest.NewAddress() + + const aggregatorRoundState = "c410579e" + aggregatorRoundStateSelector := eth.HexToFunctionSelector(aggregatorRoundState) + + selector := make([]byte, 16) + copy(selector, aggregatorRoundStateSelector.Bytes()) + nodeAddr := cltest.NewAddress() + expectedCallArgs := eth.CallArgs{ + To: aggregatorAddress, + Data: append(selector, nodeAddr[:]...), + } + + rawReturnData := `0x00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000100` + + tests := []struct { + name string + response string + expectedRoundID uint32 + expectedEligible bool + expectedAnswer *big.Int + expectedTimesOutAt uint64 + expectedAvailableFunds uint64 + expectedPaymentAmount uint64 + }{ + {"zero, false", makeRoundStateReturnData(0, false, 0, 0, 0, 0), 0, false, big.NewInt(0), 0, 0, 0}, + {"non-zero, false", makeRoundStateReturnData(1, false, 23, 1234, 36, 72), 1, false, big.NewInt(23), 1234, 36, 72}, + {"zero, true", makeRoundStateReturnData(0, true, 0, 0, 0, 0), 0, true, big.NewInt(0), 0, 0, 0}, + {"non-zero true", makeRoundStateReturnData(12, true, 91, 9876, 45, 999), 12, true, big.NewInt(91), 9876, 45, 999}, + {"real call data", rawReturnData, 3, true, big.NewInt(15), 14, 10, 256}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ethClient := new(mocks.Client) + + ethClient.On("Call", mock.Anything, "eth_call", expectedCallArgs, 
"latest").Return(nil). + Run(func(args mock.Arguments) { + res := args.Get(0) + err := res.(encoding.TextUnmarshaler).UnmarshalText([]byte(test.response)) + require.NoError(t, err) + }) + + fa, err := contracts.NewFluxAggregator(aggregatorAddress, ethClient, nil) + require.NoError(t, err) + + roundState, err := fa.RoundState(nodeAddr) + require.NoError(t, err) + assert.Equal(t, test.expectedRoundID, roundState.ReportableRoundID) + assert.Equal(t, test.expectedEligible, roundState.EligibleToSubmit) + assert.True(t, test.expectedAnswer.Cmp(roundState.LatestAnswer) == 0) + assert.Equal(t, test.expectedTimesOutAt, roundState.TimesOutAt) + assert.Equal(t, test.expectedAvailableFunds, roundState.AvailableFunds.Uint64()) + assert.Equal(t, test.expectedPaymentAmount, roundState.PaymentAmount.Uint64()) + ethClient.AssertExpectations(t) + }) + } +} + +func TestFluxAggregatorClient_DecodesLogs(t *testing.T) { + fa, err := contracts.NewFluxAggregator(common.Address{}, nil, nil) + require.NoError(t, err) + + newRoundLogRaw := cltest.LogFromFixture(t, "../../testdata/new_round_log.json") + var newRoundLog contracts.LogNewRound + err = fa.UnpackLog(&newRoundLog, "NewRound", newRoundLogRaw) + require.NoError(t, err) + require.Equal(t, int64(1), newRoundLog.RoundId.Int64()) + require.Equal(t, common.HexToAddress("f17f52151ebef6c7334fad080c5704d77216b732"), newRoundLog.StartedBy) + require.Equal(t, int64(15), newRoundLog.StartedAt.Int64()) + + type BadLogNewRound struct { + RoundID *big.Int + StartedBy common.Address + StartedAt *big.Int + } + var badNewRoundLog BadLogNewRound + err = fa.UnpackLog(&badNewRoundLog, "NewRound", newRoundLogRaw) + require.Error(t, err) + + answerUpdatedLogRaw := cltest.LogFromFixture(t, "../../testdata/answer_updated_log.json") + var answerUpdatedLog contracts.LogAnswerUpdated + err = fa.UnpackLog(&answerUpdatedLog, "AnswerUpdated", answerUpdatedLogRaw) + require.NoError(t, err) + require.Equal(t, int64(1), answerUpdatedLog.Current.Int64()) + 
require.Equal(t, int64(2), answerUpdatedLog.RoundId.Int64()) + require.Equal(t, int64(3), answerUpdatedLog.Timestamp.Int64()) + + type BadLogAnswerUpdated struct { + Current *big.Int + RoundID *big.Int + Timestamp *big.Int + } + var badAnswerUpdatedLog BadLogAnswerUpdated + err = fa.UnpackLog(&badAnswerUpdatedLog, "AnswerUpdated", answerUpdatedLogRaw) + require.Error(t, err) +} diff --git a/core/services/eth/log_broadcaster.go b/core/services/eth/log_broadcaster.go new file mode 100644 index 00000000000..c5e5e8357c4 --- /dev/null +++ b/core/services/eth/log_broadcaster.go @@ -0,0 +1,360 @@ +package eth + +import ( + "context" + "math/big" + "reflect" + "time" + + "chainlink/core/eth" + "chainlink/core/logger" + "chainlink/core/store/models" + "chainlink/core/store/orm" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/jinzhu/gorm" +) + +//go:generate mockery -name LogBroadcaster -output ../../internal/mocks/ -case=underscore +//go:generate mockery -name LogListener -output ../../internal/mocks/ -case=underscore + +// The LogBroadcaster manages log subscription requests for the Chainlink node. Instead +// of creating a new websocket subscription for each request, it multiplexes all subscriptions +// to all of the relevant contracts over a single connection and forwards the logs to the +// relevant subscribers. 
+type LogBroadcaster interface { + Start() + Register(address common.Address, listener LogListener) (connected bool) + Unregister(address common.Address, listener LogListener) + Stop() +} + +type LogListener interface { + OnConnect() + OnDisconnect() + HandleLog(log interface{}, err error) +} + +type logBroadcaster struct { + ethClient eth.Client + orm *orm.ORM + cursor models.LogCursor + connected bool + + listeners map[common.Address]map[LogListener]struct{} + chAddListener chan registration + chRemoveListener chan registration + + chStop chan struct{} + chDone chan struct{} +} + +type registration struct { + address common.Address + listener LogListener +} + +func NewLogBroadcaster(ethClient eth.Client, orm *orm.ORM) LogBroadcaster { + return &logBroadcaster{ + ethClient: ethClient, + orm: orm, + listeners: make(map[common.Address]map[LogListener]struct{}), + chAddListener: make(chan registration), + chRemoveListener: make(chan registration), + chStop: make(chan struct{}), + chDone: make(chan struct{}), + } +} + +const logBroadcasterCursorName = "logBroadcaster" + +func (b *logBroadcaster) Start() { + // Grab the current on-chain block height + var currentHeight uint64 + for { + var err error + currentHeight, err = b.ethClient.GetBlockHeight() + if err == nil { + break + } + + logger.Errorf("error fetching current block height: %v", err) + select { + case <-b.chStop: + return + case <-time.After(10 * time.Second): + } + continue + } + + // Grab the cursor from the DB + cursor, err := b.orm.FindLogCursor(logBroadcasterCursorName) + if err != nil && !gorm.IsRecordNotFoundError(err) { + logger.Errorf("error fetching log cursor: %v", err) + } + b.cursor = cursor + + // If the latest block is newer than the one in the cursor (or if we have + // no cursor), start from that block height. 
+ if currentHeight > cursor.BlockIndex { + b.updateLogCursor(currentHeight, 0) + } + + go b.startResubscribeLoop() +} + +func (b *logBroadcaster) Stop() { + close(b.chStop) + <-b.chDone +} + +func (b *logBroadcaster) Register(address common.Address, listener LogListener) (connected bool) { + select { + case b.chAddListener <- registration{address, listener}: + case <-b.chStop: + } + return b.connected +} + +func (b *logBroadcaster) Unregister(address common.Address, listener LogListener) { + select { + case b.chRemoveListener <- registration{address, listener}: + case <-b.chStop: + } +} + +// The subscription is closed in two cases: +// - intentionally, when the set of contracts we're listening to changes +// - on a connection error +// +// This method recreates the subscription in both cases. In the event of a connection +// error, it attempts to reconnect. Any time there's a change in connection state, it +// notifies its subscribers. +func (b *logBroadcaster) startResubscribeLoop() { + defer close(b.chDone) +ResubscribeLoop: + for { + subscription, chRawLogs, err := b.createSubscription() + if err != nil { + logger.Errorf("error creating subscription to Ethereum node: %v", err) + + select { + case <-b.chStop: + return + case <-time.After(10 * time.Second): + // Don't hammer the Ethereum node with subscription requests in case of an error. + // A configurable timeout might be useful here. 
+ continue ResubscribeLoop + } + } + + b.notifyConnect() + + shouldResubscribe, err := b.process(subscription, chRawLogs) + if err != nil { + logger.Error(err) + b.notifyDisconnect() + continue ResubscribeLoop + } else if !shouldResubscribe { + b.notifyDisconnect() + return + } + } +} + +func (b *logBroadcaster) notifyConnect() { + b.connected = true + for _, listeners := range b.listeners { + for listener := range listeners { + listener.OnConnect() + } + } +} + +func (b *logBroadcaster) notifyDisconnect() { + b.connected = false + for _, listeners := range b.listeners { + for listener := range listeners { + listener.OnDisconnect() + } + } +} + +func (b *logBroadcaster) updateLogCursor(blockIdx, logIdx uint64) { + b.cursor.Initialized = true + b.cursor.Name = logBroadcasterCursorName + b.cursor.BlockIndex = blockIdx + b.cursor.LogIndex = logIdx + + err := b.orm.SaveLogCursor(&b.cursor) + if err != nil { + logger.Error("can't save log cursor to DB:", err) + } +} + +func (b *logBroadcaster) process(subscription eth.Subscription, chRawLogs <-chan eth.Log) (shouldResubscribe bool, _ error) { + defer subscription.Unsubscribe() + + // We debounce requests to subscribe and unsubscribe to avoid making too many + // RPC calls to the Ethereum node, particularly on startup. 
+ var needsResubscribe bool + debounceResubscribe := time.NewTicker(1 * time.Second) + defer debounceResubscribe.Stop() + +ProcessLoop: + for { + select { + case rawLog := <-chRawLogs: + // Skip logs that we've already seen + if b.cursor.Initialized && + (rawLog.BlockNumber < b.cursor.BlockIndex || + (rawLog.BlockNumber == b.cursor.BlockIndex && uint64(rawLog.Index) <= b.cursor.LogIndex)) { + continue ProcessLoop + } + + for listener := range b.listeners[rawLog.Address] { + // Make a copy of the log for each listener to avoid data races + listener.HandleLog(rawLog.Copy(), nil) + } + + b.updateLogCursor(rawLog.BlockNumber, uint64(rawLog.Index)) + + case r := <-b.chAddListener: + _, knownAddress := b.listeners[r.address] + if !knownAddress { + b.listeners[r.address] = make(map[LogListener]struct{}) + } + if _, exists := b.listeners[r.address][r.listener]; exists { + panic("registration already exists") + } + b.listeners[r.address][r.listener] = struct{}{} + + if !knownAddress { + // Recreate the subscription with the new contract address + needsResubscribe = true + } + + case r := <-b.chRemoveListener: + r.listener.OnDisconnect() + delete(b.listeners[r.address], r.listener) + if len(b.listeners[r.address]) == 0 { + delete(b.listeners, r.address) + // Recreate the subscription without this contract address + needsResubscribe = true + } + + case <-debounceResubscribe.C: + if needsResubscribe { + return true, nil + } + + case err := <-subscription.Err(): + return true, err + + case <-b.chStop: + return false, nil + } + } +} + +func (b *logBroadcaster) createSubscription() (eth.Subscription, chan eth.Log, error) { + if len(b.listeners) == 0 { + return noopSubscription{}, nil, nil + } + + var addresses []common.Address + for address := range b.listeners { + addresses = append(addresses, address) + } + + filterQuery := ethereum.FilterQuery{ + FromBlock: big.NewInt(int64(b.cursor.BlockIndex)), + Addresses: addresses, + } + chRawLogs := make(chan eth.Log) + + subscription, 
err := b.ethClient.SubscribeToLogs(context.Background(), chRawLogs, filterQuery) + if err != nil { + return nil, nil, err + } + return subscription, chRawLogs, nil +} + +type noopSubscription struct{} + +func (s noopSubscription) Err() <-chan error { return nil } +func (s noopSubscription) Unsubscribe() {} + +// DecodingLogListener receives raw logs from the LogBroadcaster and decodes them into +// Go structs using the provided ContractCodec (a simple wrapper around a go-ethereum +// ABI type). +type decodingLogListener struct { + logTypes map[common.Hash]reflect.Type + codec eth.ContractCodec + LogListener +} + +// Ensure that DecodingLogListener conforms to the LogListener interface +var _ LogListener = (*decodingLogListener)(nil) + +func NewDecodingLogListener(codec eth.ContractCodec, nativeLogTypes map[common.Hash]interface{}, innerListener LogListener) LogListener { + logTypes := make(map[common.Hash]reflect.Type) + for eventID, logStruct := range nativeLogTypes { + logTypes[eventID] = reflect.TypeOf(logStruct) + } + + return &decodingLogListener{ + logTypes: logTypes, + codec: codec, + LogListener: innerListener, + } +} + +func (l *decodingLogListener) HandleLog(log interface{}, err error) { + if err != nil { + l.LogListener.HandleLog(nil, err) + return + } + + rawLog, is := log.(eth.Log) + if !is { + panic("DecodingLogListener expects to receive an eth.Log") + } + if len(rawLog.Topics) == 0 { + return + } + + eventID := rawLog.Topics[0] + + logType, exists := l.logTypes[eventID] + if !exists { + // If a particular log type hasn't been registered with the decoder, we simply ignore it. 
+ return + } + + var decodedLog interface{} + if logType.Kind() == reflect.Ptr { + decodedLog = reflect.New(logType.Elem()).Interface() + } else { + decodedLog = reflect.New(logType).Interface() + } + + // Insert the raw log into the ".Log" field + logStructV := reflect.ValueOf(decodedLog).Elem() + logStructV.FieldByName("Log").Set(reflect.ValueOf(rawLog)) + + // Decode the raw log into the struct + event, err := l.codec.ABI().EventByID(eventID) + if err != nil { + l.LogListener.HandleLog(nil, err) + return + } + err = l.codec.UnpackLog(decodedLog, event.RawName, rawLog) + if err != nil { + l.LogListener.HandleLog(nil, err) + return + } + + l.LogListener.HandleLog(decodedLog, nil) +} diff --git a/core/services/eth/log_broadcaster_test.go b/core/services/eth/log_broadcaster_test.go new file mode 100644 index 00000000000..4c01a523b11 --- /dev/null +++ b/core/services/eth/log_broadcaster_test.go @@ -0,0 +1,320 @@ +package eth_test + +import ( + "math/big" + "testing" + "time" + + "chainlink/core/eth" + "chainlink/core/internal/cltest" + "chainlink/core/internal/mocks" + ethsvc "chainlink/core/services/eth" + "chainlink/core/services/eth/contracts" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/onsi/gomega" + "github.com/pkg/errors" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestLogBroadcaster_ResubscribesOnAddOrRemoveContract(t *testing.T) { + store, cleanup := cltest.NewStore(t) + defer cleanup() + + const ( + numContracts = 3 + blockHeight uint64 = 123 + ) + + ethClient := new(mocks.Client) + sub := new(mocks.Subscription) + + var subscribeCalls int + var unsubscribeCalls int + ethClient.On("SubscribeToLogs", mock.Anything, mock.Anything, mock.Anything). + Return(sub, nil). + Run(func(args mock.Arguments) { + subscribeCalls++ + q := args.Get(2).(ethereum.FilterQuery) + require.Equal(t, int64(blockHeight), q.FromBlock.Int64()) + }) + ethClient.On("GetBlockHeight"). 
+ Return(blockHeight, nil) + sub.On("Unsubscribe"). + Return(). + Run(func(mock.Arguments) { unsubscribeCalls++ }) + sub.On("Err").Return(nil) + + lb := ethsvc.NewLogBroadcaster(ethClient, store.ORM) + lb.Start() + + type registration struct { + common.Address + ethsvc.LogListener + } + registrations := make([]registration, numContracts) + for i := 0; i < numContracts; i++ { + listener := new(mocks.LogListener) + listener.On("OnConnect").Return() + listener.On("OnDisconnect").Return() + registrations[i] = registration{cltest.NewAddress(), listener} + lb.Register(registrations[i].Address, registrations[i].LogListener) + } + + require.Eventually(t, func() bool { return subscribeCalls == 1 }, 5*time.Second, 10*time.Millisecond) + gomega.NewGomegaWithT(t).Consistently(subscribeCalls).Should(gomega.Equal(1)) + gomega.NewGomegaWithT(t).Consistently(unsubscribeCalls).Should(gomega.Equal(0)) + + for _, r := range registrations { + lb.Unregister(r.Address, r.LogListener) + } + require.Eventually(t, func() bool { return unsubscribeCalls == 1 }, 5*time.Second, 10*time.Millisecond) + gomega.NewGomegaWithT(t).Consistently(subscribeCalls).Should(gomega.Equal(1)) + + lb.Stop() + gomega.NewGomegaWithT(t).Consistently(unsubscribeCalls).Should(gomega.Equal(1)) + + ethClient.AssertExpectations(t) + sub.AssertExpectations(t) +} + +type funcLogListener struct { + fn func(log interface{}, err error) +} + +func (fn funcLogListener) HandleLog(log interface{}, err error) { + fn.fn(log, err) +} +func (fn funcLogListener) OnConnect() {} +func (fn funcLogListener) OnDisconnect() {} + +func TestLogBroadcaster_BroadcastsToCorrectRecipients(t *testing.T) { + store, cleanup := cltest.NewStore(t) + defer cleanup() + + const blockHeight uint64 = 0 + + ethClient := new(mocks.Client) + sub := new(mocks.Subscription) + + chchRawLogs := make(chan chan<- eth.Log, 1) + ethClient.On("SubscribeToLogs", mock.Anything, mock.Anything, mock.Anything). 
+ Run(func(args mock.Arguments) { + q := args.Get(2).(ethereum.FilterQuery) + require.Equal(t, int64(blockHeight), q.FromBlock.Int64()) + + chchRawLogs <- args.Get(1).(chan<- eth.Log) + }). + Return(sub, nil). + Once() + ethClient.On("GetBlockHeight").Return(blockHeight, nil) + sub.On("Err").Return(nil) + sub.On("Unsubscribe").Return() + + lb := ethsvc.NewLogBroadcaster(ethClient, store.ORM) + lb.Start() + + addr1 := cltest.NewAddress() + addr2 := cltest.NewAddress() + addr1SentLogs := []eth.Log{ + {Address: addr1, BlockNumber: 0}, + {Address: addr1, BlockNumber: 1}, + {Address: addr1, BlockNumber: 2}, + } + addr2SentLogs := []eth.Log{ + {Address: addr2, BlockNumber: 4}, + {Address: addr2, BlockNumber: 5}, + {Address: addr2, BlockNumber: 6}, + } + + var addr1Logs1, addr1Logs2, addr2Logs1, addr2Logs2 []interface{} + lb.Register(addr1, &funcLogListener{func(log interface{}, err error) { + require.NoError(t, err) + addr1Logs1 = append(addr1Logs1, log) + }}) + lb.Register(addr1, &funcLogListener{func(log interface{}, err error) { + require.NoError(t, err) + addr1Logs2 = append(addr1Logs2, log) + }}) + lb.Register(addr2, &funcLogListener{func(log interface{}, err error) { + require.NoError(t, err) + addr2Logs1 = append(addr2Logs1, log) + }}) + lb.Register(addr2, &funcLogListener{func(log interface{}, err error) { + require.NoError(t, err) + addr2Logs2 = append(addr2Logs2, log) + }}) + chRawLogs := <-chchRawLogs + + for _, log := range addr1SentLogs { + chRawLogs <- log + } + for _, log := range addr2SentLogs { + chRawLogs <- log + } + + require.Eventually(t, func() bool { return len(addr1Logs1) == len(addr1SentLogs) }, time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { return len(addr1Logs2) == len(addr1SentLogs) }, time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { return len(addr2Logs1) == len(addr2SentLogs) }, time.Second, 10*time.Millisecond) + require.Eventually(t, func() bool { return len(addr2Logs2) == len(addr2SentLogs) 
}, time.Second, 10*time.Millisecond) + + lb.Stop() + + for i := range addr1SentLogs { + require.Equal(t, addr1SentLogs[i], addr1Logs1[i]) + require.Equal(t, addr1SentLogs[i], addr1Logs2[i]) + } + for i := range addr2SentLogs { + require.Equal(t, addr2SentLogs[i], addr2Logs1[i]) + require.Equal(t, addr2SentLogs[i], addr2Logs2[i]) + } + + ethClient.AssertExpectations(t) + sub.AssertExpectations(t) +} + +func TestLogBroadcaster_SkipsOldLogs(t *testing.T) { + store, cleanup := cltest.NewStore(t) + defer cleanup() + + ethClient := new(mocks.Client) + sub := new(mocks.Subscription) + + ethClient.On("GetBlockHeight"). + Return(uint64(0), nil) + chchRawLogs := make(chan chan<- eth.Log, 1) + ethClient.On("SubscribeToLogs", mock.Anything, mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { chchRawLogs <- args.Get(1).(chan<- eth.Log) }). + Return(sub, nil). + Once() + + sub.On("Unsubscribe").Return() + sub.On("Err").Return(nil) + + lb := ethsvc.NewLogBroadcaster(ethClient, store.ORM) + lb.Start() + + addr := cltest.NewAddress() + logs := []eth.Log{ + {Address: addr, BlockNumber: 0, Index: 0}, + {Address: addr, BlockNumber: 0, Index: 1}, + {Address: addr, BlockNumber: 0, Index: 2}, + {Address: addr, BlockNumber: 1, Index: 0}, + {Address: addr, BlockNumber: 1, Index: 1}, + {Address: addr, BlockNumber: 1, Index: 2}, + {Address: addr, BlockNumber: 2, Index: 0}, + {Address: addr, BlockNumber: 2, Index: 1}, + {Address: addr, BlockNumber: 2, Index: 2}, + } + + var recvd []interface{} + lb.Register(addr, &funcLogListener{func(log interface{}, err error) { + require.NoError(t, err) + recvd = append(recvd, log) + }}) + + chRawLogs := <-chchRawLogs + + // Simulates resuming the subscription repeatedly as new blocks are coming in + for i := 0; i < len(logs); i++ { + for _, log := range logs[0 : i+1] { + chRawLogs <- log + } + } + + lb.Stop() // This should ensure that all sending is complete + + require.Len(t, recvd, len(logs)) + for i := range recvd { + require.Equal(t, 
recvd[i], logs[i]) + } + + ethClient.AssertExpectations(t) +} + +func TestLogBroadcaster_Register_ResubscribesToMostRecentlySeenBlock(t *testing.T) { + store, cleanup := cltest.NewStore(t) + defer cleanup() + + const expectedBlock = 3 + + ethClient := new(mocks.Client) + sub := new(mocks.Subscription) + + addr1 := cltest.NewAddress() + addr2 := cltest.NewAddress() + + ethClient.On("GetBlockHeight").Return(uint64(0), nil) + chchRawLogs := make(chan chan<- eth.Log, 1) + ethClient.On("SubscribeToLogs", mock.Anything, mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { + chchRawLogs <- args.Get(1).(chan<- eth.Log) + }). + Return(sub, nil). + Once() + ethClient.On("SubscribeToLogs", mock.Anything, mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { + query := args.Get(2).(ethereum.FilterQuery) + require.Equal(t, big.NewInt(expectedBlock), query.FromBlock) + require.Contains(t, query.Addresses, addr1) + require.Contains(t, query.Addresses, addr2) + require.Len(t, query.Addresses, 2) + chchRawLogs <- args.Get(1).(chan<- eth.Log) + }). + Return(sub, nil). 
+ Once() + + sub.On("Unsubscribe").Return() + sub.On("Err").Return(nil) + + listener1 := new(mocks.LogListener) + listener2 := new(mocks.LogListener) + listener1.On("OnConnect").Return() + listener2.On("OnConnect").Return() + listener1.On("OnDisconnect").Return() + listener2.On("OnDisconnect").Return() + + lb := ethsvc.NewLogBroadcaster(ethClient, store.ORM) + lb.Start() // Subscribe #1 + lb.Register(addr1, listener1) // Subscribe #2 + chRawLogs := <-chchRawLogs + chRawLogs <- eth.Log{BlockNumber: expectedBlock} + lb.Register(addr2, listener2) // Subscribe #3 + <-chchRawLogs + + lb.Stop() + + ethClient.AssertExpectations(t) + listener1.AssertExpectations(t) + listener2.AssertExpectations(t) + sub.AssertExpectations(t) +} + +func TestDecodingLogListener(t *testing.T) { + contract, err := eth.GetV6ContractCodec("FluxAggregator") + require.NoError(t, err) + + logTypes := map[common.Hash]interface{}{ + eth.MustGetV6ContractEventID("FluxAggregator", "NewRound"): contracts.LogNewRound{}, + } + + var decodedLog interface{} + listener := ethsvc.NewDecodingLogListener(contract, logTypes, &funcLogListener{func(decoded interface{}, innerErr error) { + err = innerErr + decodedLog = decoded + }}) + rawLog := cltest.LogFromFixture(t, "../testdata/new_round_log.json") + listener.HandleLog(rawLog, nil) + require.NoError(t, err) + newRoundLog := decodedLog.(*contracts.LogNewRound) + require.Equal(t, newRoundLog.Log, rawLog) + require.True(t, newRoundLog.RoundId.Cmp(big.NewInt(1)) == 0) + require.Equal(t, newRoundLog.StartedBy, common.HexToAddress("f17f52151ebef6c7334fad080c5704d77216b732")) + require.True(t, newRoundLog.StartedAt.Cmp(big.NewInt(15)) == 0) + + expectedErr := errors.New("oh no!") + listener.HandleLog(nil, expectedErr) + require.Equal(t, err, expectedErr) +} diff --git a/core/services/fluxmonitor/fetchers.go b/core/services/fluxmonitor/fetchers.go index 72d1aea06bf..0e22f1c758a 100644 --- a/core/services/fluxmonitor/fetchers.go +++ 
b/core/services/fluxmonitor/fetchers.go @@ -70,6 +70,9 @@ func (p *httpFetcher) Fetch() (decimal.Decimal, error) { if result == nil { return decimal.Decimal{}, errors.Wrap(errors.New("no result returned"), fmt.Sprintf("unable to fetch price from %s", p.url.String())) } + + resultFloat, _ := result.Float64() + promFMIndividualReportedValue.WithLabelValues(p.url.String()).Set(resultFloat) logger.Debugw( fmt.Sprintf("fetched price %v from %s", *result, p.url.String()), "price", result, diff --git a/core/services/fluxmonitor/fetchers_test.go b/core/services/fluxmonitor/fetchers_test.go index da9cd8f8386..c9016aae65e 100644 --- a/core/services/fluxmonitor/fetchers_test.go +++ b/core/services/fluxmonitor/fetchers_test.go @@ -6,6 +6,7 @@ import ( "net/http/httptest" "net/url" "testing" + "time" "github.com/guregu/null" "github.com/shopspring/decimal" @@ -17,6 +18,7 @@ import ( // external adapters. // https://github.com/smartcontractkit/price-adapters const ethUSDPairing = `{"data":{"coin":"ETH","market":"USD"}}` +const defaultHTTPTimeout = 15 * time.Second func TestNewMedianFetcherFromURLs_Happy(t *testing.T) { tests := []struct { diff --git a/core/services/fluxmonitor/flux_monitor.go b/core/services/fluxmonitor/flux_monitor.go index fe6f4885dba..5c8237e5547 100644 --- a/core/services/fluxmonitor/flux_monitor.go +++ b/core/services/fluxmonitor/flux_monitor.go @@ -1,35 +1,30 @@ package fluxmonitor import ( - "chainlink/core/eth" - "chainlink/core/logger" - "chainlink/core/store" - "chainlink/core/store/models" - "chainlink/core/store/orm" - "chainlink/core/utils" - "context" "encoding/json" "fmt" - "math" "math/big" "net/url" + "sync" "time" - "github.com/ethereum/go-ethereum/common" + "chainlink/core/logger" + "chainlink/core/services/eth" + "chainlink/core/services/eth/contracts" + "chainlink/core/store" + "chainlink/core/store/models" + "chainlink/core/store/orm" + "chainlink/core/utils" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/jinzhu/gorm" 
"github.com/pkg/errors" "github.com/shopspring/decimal" - "go.uber.org/multierr" ) -//go:generate mockery -name FluxMonitor -output ../../internal/mocks/ -case=underscore - -// defaultHTTPTimeout is the timeout used by the price adapter fetcher for outgoing HTTP requests. -const defaultHTTPTimeout = 5 * time.Second - -// MinimumPollingInterval is the smallest possible polling interval the Flux -// Monitor supports. -const MinimumPollingInterval = models.Duration(defaultHTTPTimeout) +//go:generate mockery -name Service -output ../../internal/mocks/ -case=underscore +//go:generate mockery -name DeviationCheckerFactory -output ../../internal/mocks/ -case=underscore +//go:generate mockery -name DeviationChecker -output ../../internal/mocks/ -case=underscore type RunManager interface { Create( @@ -43,7 +38,6 @@ type RunManager interface { // Service is the interface encapsulating all functionality // needed to listen to price deviations and new round requests. type Service interface { - store.HeadTrackable // (Dis)Connect methods handle initial boot and intermittent connectivity. 
AddJob(models.JobSpec) error RemoveJob(*models.ID) Start() error @@ -53,19 +47,19 @@ type Service interface { type concreteFluxMonitor struct { store *store.Store runManager RunManager + logBroadcaster eth.LogBroadcaster checkerFactory DeviationCheckerFactory - adds chan addEntry - removes chan *models.ID - connect chan *models.Head - disconnect chan struct{} - ctx context.Context - cancel context.CancelFunc + chAdd chan addEntry + chRemove chan models.ID + chConnect chan *models.Head + chDisconnect chan struct{} + chStop chan struct{} + chDone chan struct{} } type addEntry struct { - jobID string + jobID models.ID checkers []DeviationChecker - errChan chan error } // New creates a service that manages a collection of DeviationCheckers, @@ -74,191 +68,164 @@ func New( store *store.Store, runManager RunManager, ) Service { + logBroadcaster := eth.NewLogBroadcaster(store.TxManager, store.ORM) return &concreteFluxMonitor{ store: store, runManager: runManager, - checkerFactory: pollingDeviationCheckerFactory{store: store}, + logBroadcaster: logBroadcaster, + checkerFactory: pollingDeviationCheckerFactory{ + store: store, + logBroadcaster: logBroadcaster, + }, + chAdd: make(chan addEntry), + chRemove: make(chan models.ID), + chConnect: make(chan *models.Head), + chDisconnect: make(chan struct{}), + chStop: make(chan struct{}), + chDone: make(chan struct{}), } } func (fm *concreteFluxMonitor) Start() error { - fm.ctx, fm.cancel = context.WithCancel(context.Background()) - fm.adds = make(chan addEntry) - fm.removes = make(chan *models.ID) - fm.connect = make(chan *models.Head) - fm.disconnect = make(chan struct{}) + fm.logBroadcaster.Start() - go fm.actionConsumer(fm.ctx) + go fm.serveInternalRequests() - count := 0 - errChan := make(chan error) + var wg sync.WaitGroup err := fm.store.Jobs(func(j *models.JobSpec) bool { - go func(j *models.JobSpec) { - errChan <- fm.AddJob(*j) - }(j) - count++ + if j == nil { + err := errors.New("received nil job") + logger.Error(err) + 
return true + } + job := *j + + wg.Add(1) + go func() { + defer wg.Done() + err := fm.AddJob(job) + if err != nil { + logger.Errorf("error adding FluxMonitor job: %v", err) + } + }() return true }, models.InitiatorFluxMonitor) - var merr error - for i := 0; i < count; i++ { - err := <-errChan - merr = multierr.Combine(merr, err) - } - return multierr.Append(err, merr) + wg.Wait() + + return err } -// Connect initializes all DeviationCheckers and starts their listening. -func (fm *concreteFluxMonitor) Connect(head *models.Head) error { - fm.connect <- head - return nil +// Disconnect cleans up running deviation checkers. +func (fm *concreteFluxMonitor) Stop() { + fm.logBroadcaster.Stop() + close(fm.chStop) + <-fm.chDone } -// actionConsumer is the CSP consumer. It's run on a single goroutine to -// coordinate the collection of DeviationCheckers in a thread-safe fashion. -func (fm *concreteFluxMonitor) actionConsumer(ctx context.Context) { - jobMap := map[string][]DeviationChecker{} +// serveInternalRequests handles internal requests for state change via +// channels. Inspired by the ideas of Communicating Sequential Processes, or +// CSP. +func (fm *concreteFluxMonitor) serveInternalRequests() { + defer close(fm.chDone) - // init w a noop cancel, so we never have to deal with nils - connectionCtx, cancelConnection := context.WithCancel(ctx) - var connected bool + jobMap := map[models.ID][]DeviationChecker{} for { select { - case <-ctx.Done(): - cancelConnection() - return - case <-fm.connect: - // every connection, create a new ctx for canceling on disconnect. 
- connectionCtx, cancelConnection = context.WithCancel(ctx) - connectCheckers(connectionCtx, jobMap, fm.store.TxManager) - connected = true - case <-fm.disconnect: - cancelConnection() - connected = false - case entry := <-fm.adds: - entry.errChan <- fm.addAction( - ctx, - connected, - jobMap, - fm.store, - entry.jobID, - entry.checkers, - ) - case jobID := <-fm.removes: - for _, checker := range jobMap[jobID.String()] { - checker.Stop() + case entry := <-fm.chAdd: + if _, ok := jobMap[entry.jobID]; ok { + logger.Errorf("job '%s' has already been added to flux monitor", entry.jobID) + return } - delete(jobMap, jobID.String()) - } - } -} + for _, checker := range entry.checkers { + checker.Start() + } + jobMap[entry.jobID] = entry.checkers -// Disconnect cleans up running deviation checkers. -func (fm *concreteFluxMonitor) Disconnect() { - fm.disconnect <- struct{}{} -} + case jobID := <-fm.chRemove: + checkers, ok := jobMap[jobID] + if !ok { + logger.Errorf("job '%s' is missing from the flux monitor", jobID) + return + } + for _, checker := range checkers { + checker.Stop() + } + delete(jobMap, jobID) -// Disconnect cleans up running deviation checkers. -func (fm *concreteFluxMonitor) Stop() { - if fm.cancel != nil { - fm.cancel() + case <-fm.chStop: + for _, checkers := range jobMap { + for _, checker := range checkers { + checker.Stop() + } + } + return + } } } -// OnNewHead is a noop. -func (fm *concreteFluxMonitor) OnNewHead(*models.Head) {} - // AddJob created a DeviationChecker for any job initiators of type // InitiatorFluxMonitor. 
func (fm *concreteFluxMonitor) AddJob(job models.JobSpec) error { - validCheckers := []DeviationChecker{} + if job.ID == nil { + err := errors.New("received job with nil ID") + logger.Error(err) + return err + } + + var validCheckers []DeviationChecker for _, initr := range job.InitiatorsFor(models.InitiatorFluxMonitor) { logger.Debugw("Adding job to flux monitor", "job", job.ID.String(), "initr", initr.ID, ) - checker, err := fm.checkerFactory.New(initr, fm.runManager, fm.store.ORM) + timeout := fm.store.Config.DefaultHTTPTimeout() + checker, err := fm.checkerFactory.New(initr, fm.runManager, fm.store.ORM, timeout) if err != nil { return errors.Wrap(err, "factory unable to create checker") } validCheckers = append(validCheckers, checker) } - - errChan := make(chan error) - fm.adds <- addEntry{job.ID.String(), validCheckers, errChan} - return <-errChan -} - -func connectCheckers(ctx context.Context, jobMap map[string][]DeviationChecker, client eth.Client) { - for _, checkers := range jobMap { - for _, checker := range checkers { - // XXX: Add mechanism to asynchronously communicate when a job spec has - // an ethereum interaction error. 
- // https://www.pivotaltracker.com/story/show/170349568 - logger.ErrorIf(connectSingleChecker(ctx, checker, client)) - } - } -} - -func (fm *concreteFluxMonitor) addAction( - ctx context.Context, - connected bool, - jobMap map[string][]DeviationChecker, - store *store.Store, - jobSpecID string, - checkers []DeviationChecker, -) error { - if _, ok := jobMap[jobSpecID]; ok { - return fmt.Errorf( - "job %s has already been added to flux monitor", - jobSpecID, - ) - } - - if connected { - for _, checker := range checkers { - err := connectSingleChecker(ctx, checker, fm.store.TxManager) - if err != nil { - return errors.Wrap(err, "unable to connect checker") - } - } + if len(validCheckers) == 0 { + return nil } - if len(checkers) > 0 { - jobMap[jobSpecID] = checkers - } + fm.chAdd <- addEntry{*job.ID, validCheckers} return nil } -func connectSingleChecker(ctx context.Context, checker DeviationChecker, client eth.Client) error { - return checker.Start(ctx, client) -} - // RemoveJob stops and removes the checker for all Flux Monitor initiators belonging // to the passed job ID. -func (fm *concreteFluxMonitor) RemoveJob(ID *models.ID) { - fm.removes <- ID +func (fm *concreteFluxMonitor) RemoveJob(id *models.ID) { + if id == nil { + logger.Warn("nil job ID passed to FluxMonitor#RemoveJob") + return + } + fm.chRemove <- *id } -//go:generate mockery -name DeviationCheckerFactory -output ../../internal/mocks/ -case=underscore - // DeviationCheckerFactory holds the New method needed to create a new instance // of a DeviationChecker. 
type DeviationCheckerFactory interface { - New(models.Initiator, RunManager, *orm.ORM) (DeviationChecker, error) + New(models.Initiator, RunManager, *orm.ORM, time.Duration) (DeviationChecker, error) } type pollingDeviationCheckerFactory struct { - store *store.Store + store *store.Store + logBroadcaster eth.LogBroadcaster } -func (f pollingDeviationCheckerFactory) New(initr models.Initiator, runManager RunManager, orm *orm.ORM) (DeviationChecker, error) { - if initr.InitiatorParams.PollingInterval < MinimumPollingInterval { - return nil, fmt.Errorf( - "pollingInterval must be equal or greater than %s", - MinimumPollingInterval, - ) +func (f pollingDeviationCheckerFactory) New( + initr models.Initiator, + runManager RunManager, + orm *orm.ORM, + timeout time.Duration, +) (DeviationChecker, error) { + minimumPollingInterval := models.Duration(f.store.Config.DefaultHTTPTimeout()) + + if initr.InitiatorParams.PollingInterval < minimumPollingInterval { + return nil, fmt.Errorf("pollingInterval must be equal or greater than %s", minimumPollingInterval) } urls, err := ExtractFeedURLs(initr.InitiatorParams.Feeds, orm) @@ -267,15 +234,21 @@ func (f pollingDeviationCheckerFactory) New(initr models.Initiator, runManager R } fetcher, err := newMedianFetcherFromURLs( - defaultHTTPTimeout, + timeout, initr.InitiatorParams.RequestData.String(), urls) if err != nil { return nil, err } + fluxAggregator, err := contracts.NewFluxAggregator(initr.InitiatorParams.Address, f.store.TxManager, f.logBroadcaster) + if err != nil { + return nil, err + } + return NewPollingDeviationChecker( f.store, + fluxAggregator, initr, runManager, fetcher, @@ -327,356 +300,504 @@ func GetBridgeURLFromName(name string, orm *orm.ORM) (*url.URL, error) { return &bridgeURL, nil } -//go:generate mockery -name DeviationChecker -output ../../internal/mocks/ -case=underscore - // DeviationChecker encapsulate methods needed to initialize and check prices // for price deviations. 
type DeviationChecker interface { - Start(context.Context, eth.Client) error + Start() Stop() } // PollingDeviationChecker polls external price adapters via HTTP to check for price swings. type PollingDeviationChecker struct { - store *store.Store + store *store.Store + fluxAggregator contracts.FluxAggregator + runManager RunManager + fetcher Fetcher + initr models.Initiator - address common.Address requestData models.JSON - idleThreshold time.Duration threshold float64 precision int32 - runManager RunManager - currentPrice decimal.Decimal - currentRound *big.Int - fetcher Fetcher - delay time.Duration - cancel context.CancelFunc - newRounds chan eth.Log + idleThreshold time.Duration + + connected utils.AtomicBool + chMaybeLogs chan maybeLog + reportableRoundID *big.Int + mostRecentSubmittedRoundID uint64 + pollTicker *ResettableTicker + idleTicker <-chan time.Time + roundTimeoutTicker <-chan time.Time + chStop chan struct{} waitOnStop chan struct{} } +// maybeLog is just a tuple that allows us to send either an error or a log over the +// logs channel. This is preferable to using two separate channels, as it ensures +// that we don't drop valid (but unprocessed) logs if we receive an error. +type maybeLog struct { + Log interface{} + Err error +} + // NewPollingDeviationChecker returns a new instance of PollingDeviationChecker. 
func NewPollingDeviationChecker( store *store.Store, + fluxAggregator contracts.FluxAggregator, initr models.Initiator, runManager RunManager, fetcher Fetcher, - delay time.Duration, + pollDelay time.Duration, ) (*PollingDeviationChecker, error) { return &PollingDeviationChecker{ - store: store, - initr: initr, - address: initr.InitiatorParams.Address, - requestData: initr.InitiatorParams.RequestData, - idleThreshold: initr.InitiatorParams.IdleThreshold.Duration(), - threshold: float64(initr.InitiatorParams.Threshold), - precision: initr.InitiatorParams.Precision, - runManager: runManager, - currentPrice: decimal.NewFromInt(0), - currentRound: big.NewInt(0), - fetcher: fetcher, - delay: delay, - newRounds: make(chan eth.Log), - - waitOnStop: make(chan struct{}), + store: store, + fluxAggregator: fluxAggregator, + initr: initr, + requestData: initr.InitiatorParams.RequestData, + idleThreshold: initr.InitiatorParams.IdleThreshold.Duration(), + threshold: float64(initr.InitiatorParams.Threshold), + precision: initr.InitiatorParams.Precision, + runManager: runManager, + fetcher: fetcher, + pollTicker: NewResettableTicker(pollDelay), + idleTicker: nil, + roundTimeoutTicker: nil, + chMaybeLogs: make(chan maybeLog, 100), + chStop: make(chan struct{}), + waitOnStop: make(chan struct{}), }, nil } // Start begins the CSP consumer in a single goroutine to // poll the price adapters and listen to NewRound events. -func (p *PollingDeviationChecker) Start(ctx context.Context, client eth.Client) error { +func (p *PollingDeviationChecker) Start() { logger.Debugw("Starting checker for job", "job", p.initr.JobSpecID.String(), "initr", p.initr.ID) - err := p.fetchAggregatorData(client) - if err != nil { - return err - } - roundSubscription, err := p.subscribeToNewRounds(client) - if err != nil { - return err + go p.consume() +} + +// Stop stops this instance from polling, cleaning up resources. 
+func (p *PollingDeviationChecker) Stop() { + close(p.chStop) + <-p.waitOnStop +} + +func (p *PollingDeviationChecker) OnConnect() { + logger.Debugw("PollingDeviationChecker connected to Ethereum node", + "address", p.initr.InitiatorParams.Address.Hex(), + ) + p.connected.Set(true) +} + +func (p *PollingDeviationChecker) OnDisconnect() { + logger.Debugw("PollingDeviationChecker disconnected from Ethereum node", + "address", p.initr.InitiatorParams.Address.Hex(), + ) + p.connected.Set(false) +} + +type ResettableTicker struct { + *time.Ticker + d time.Duration +} + +func NewResettableTicker(d time.Duration) *ResettableTicker { + return &ResettableTicker{nil, d} +} + +func (t *ResettableTicker) Tick() <-chan time.Time { + if t.Ticker == nil { + return nil } + return t.Ticker.C +} - _, err = p.poll(p.threshold) - if err != nil { - return err +func (t *ResettableTicker) Stop() { + if t.Ticker != nil { + t.Ticker.Stop() + t.Ticker = nil } +} - ctx, p.cancel = context.WithCancel(ctx) - go p.consume(ctx, roundSubscription, client) - return nil +func (t *ResettableTicker) Reset() { + t.Stop() + t.Ticker = time.NewTicker(t.d) } -// stopTimer stops and clears the timer as suggested by the documentation. -func stopTimer(arg *time.Timer) { - if !arg.Stop() && len(arg.C) > 0 { - // Residual events are the timer's channel and need to be cleared. 
- // - // Refer to timer.Stop's documentation or - // https://developpaper.com/detailed-explanation-of-the-trap-of-timer-in-golang/ - <-arg.C +func (p *PollingDeviationChecker) HandleLog(log interface{}, err error) { + select { + case p.chMaybeLogs <- maybeLog{log, err}: + case <-p.chStop: } } -func (p *PollingDeviationChecker) consume(ctx context.Context, roundSubscription eth.Subscription, client eth.Client) { - defer roundSubscription.Unsubscribe() +func (p *PollingDeviationChecker) consume() { + defer close(p.waitOnStop) - idleThreshold := p.idleThreshold - if idleThreshold == 0 { - idleThreshold = math.MaxInt64 - } + p.determineMostRecentSubmittedRoundID() - idleThresholdTimer := time.NewTimer(idleThreshold) - defer stopTimer(idleThresholdTimer) + connected, unsubscribeLogs := p.fluxAggregator.SubscribeToLogs(p) + defer unsubscribeLogs() - for { - jobRunTriggered := false + p.connected.Set(connected) + // Try to do an initial poll + p.pollIfEligible(p.threshold) + p.pollTicker.Reset() + defer p.pollTicker.Stop() + + if p.idleThreshold > 0 { + p.idleTicker = time.After(p.idleThreshold) + } + + for { select { - case <-ctx.Done(): - close(p.waitOnStop) + case <-p.chStop: return - case err := <-roundSubscription.Err(): - logger.Error(errors.Wrap(err, "checker lost subscription to NewRound log events")) - case log := <-p.newRounds: - err := p.respondToNewRound(log) - logger.ErrorIf(err, "checker unable to respond to new round") - case <-time.After(p.delay): - jobRunTriggered = p.pollIfRoundOpen(client) - case <-idleThresholdTimer.C: - ok, err := p.poll(0) - logger.ErrorIf(err, "checker unable to poll") - jobRunTriggered = ok - } - if jobRunTriggered { - // Reset expects stopped or expired timer. 
- stopTimer(idleThresholdTimer) - idleThresholdTimer.Reset(idleThreshold) + case maybeLog := <-p.chMaybeLogs: + if maybeLog.Err != nil { + logger.Errorf("error received from log broadcaster: %v", maybeLog.Err) + continue + } + p.respondToLog(maybeLog.Log) + + case <-p.pollTicker.Tick(): + p.pollIfEligible(p.threshold) + + case <-p.idleTicker: + p.pollIfEligible(0) + + case <-p.roundTimeoutTicker: + p.pollIfEligible(p.threshold) } } } -func (p *PollingDeviationChecker) pollIfRoundOpen(client eth.Client) bool { - open, err := p.isRoundOpen(client) - logger.ErrorIf(err, "Unable to determine if round is open:") - if !open { - logger.Info("Round is currently not open to new submissions - polling paused") - return false +func (p *PollingDeviationChecker) determineMostRecentSubmittedRoundID() { + myAccount, err := p.store.KeyStore.GetFirstAccount() + if err != nil { + logger.Error("error determining most recent submitted round ID: ", err) + return } - ok, err := p.poll(p.threshold) - logger.ErrorIf(err, "checker unable to poll") - return ok -} -func (p *PollingDeviationChecker) isRoundOpen(client eth.Client) (bool, error) { - latestRound, err := client.GetAggregatorRound(p.address) - if err != nil { - return false, err + // Just to be particularly defensive against issues with the DB or TxManager, we + // fetch the most recent 5 transactions we've submitted to this aggregator from our + // Chainlink node address. Take the highest round ID among them and store it so + // that we avoid re-polling for a given round when our tx takes a while to confirm. 
+ txs, err := p.store.ORM.FindTxsBySenderAndRecipient(myAccount.Address, p.initr.InitiatorParams.Address, 0, 5) + if err != nil && !gorm.IsRecordNotFoundError(err) { + logger.Error("error determining most recent submitted round ID: ", err) + return } - nodeAddress := p.store.KeyStore.Accounts()[0].Address - _, lastRoundAnswered, err := client.GetLatestSubmission(p.address, nodeAddress) - if err != nil { - return false, err + + // Parse the round IDs from the transaction data + for _, tx := range txs { + if len(tx.Data) != 68 { + logger.Warnw("found Flux Monitor tx with bad data payload", + "txID", tx.ID, + ) + continue + } + + roundIDBytes := tx.Data[4:36] + roundID := big.NewInt(0).SetBytes(roundIDBytes).Uint64() + if roundID > p.mostRecentSubmittedRoundID { + p.mostRecentSubmittedRoundID = roundID + } } - return lastRoundAnswered.Cmp(latestRound) <= 0, nil + logger.Infow(fmt.Sprintf("roundID of most recent submission is %v", p.mostRecentSubmittedRoundID), + "jobID", p.initr.JobSpecID, + "aggregator", p.initr.InitiatorParams.Address.Hex(), + ) } -// Stop stops this instance from polling, cleaning up resources. -func (p *PollingDeviationChecker) Stop() { - if p.cancel != nil { - p.cancel() - <-p.waitOnStop +func (p *PollingDeviationChecker) respondToLog(log interface{}) { + switch log := log.(type) { + case *contracts.LogNewRound: + logger.Debugw("NewRound log", p.loggerFieldsForNewRound(log)...) + p.respondToNewRoundLog(log) + + case *contracts.LogAnswerUpdated: + logger.Debugw("AnswerUpdated log", p.loggerFieldsForAnswerUpdated(log)...) + p.respondToAnswerUpdatedLog(log) + + default: } } -// fetchAggregatorData retrieves the price that's on-chain, with which we check -// the deviation against. -func (p *PollingDeviationChecker) fetchAggregatorData(client eth.Client) error { - price, err := client.GetAggregatorPrice(p.address, p.precision) - if err != nil { - return err +// The AnswerUpdated log tells us that round has successfully close with a new +// answer. 
This tells us that we need to reset our poll ticker. +// +// Only invoked by the CSP consumer on the single goroutine for thread safety. +func (p *PollingDeviationChecker) respondToAnswerUpdatedLog(log *contracts.LogAnswerUpdated) { + if p.reportableRoundID != nil && log.RoundId.Cmp(p.reportableRoundID) < 0 { + // Ignore old rounds + logger.Debugw("Ignoring stale AnswerUpdated log", p.loggerFieldsForAnswerUpdated(log)...) + return } - p.currentPrice = price + p.pollTicker.Reset() +} + +// The NewRound log tells us that an oracle has initiated a new round. This tells us that we +// need to poll and submit an answer to the contract regardless of the deviation. +// +// Only invoked by the CSP consumer on the single goroutine for thread safety. +func (p *PollingDeviationChecker) respondToNewRoundLog(log *contracts.LogNewRound) { + // The idleThreshold resets when a new round starts + if p.idleThreshold > 0 { + p.idleTicker = time.After(p.idleThreshold) + } + + jobSpecID := p.initr.JobSpecID.String() + promSetBigInt(promFMSeenRound.WithLabelValues(jobSpecID), log.RoundId) - round, err := client.GetAggregatorRound(p.address) + // Ignore rounds we started + acct, err := p.store.KeyStore.GetFirstAccount() if err != nil { - return err + logger.Errorw(fmt.Sprintf("error fetching account from keystore: %v", err), p.loggerFieldsForNewRound(log)...) + return + } else if log.StartedBy == acct.Address { + logger.Infow("Ignoring new round request: we started this round", p.loggerFieldsForNewRound(log)...) + return } - p.currentRound = round - return nil -} -func (p *PollingDeviationChecker) subscribeToNewRounds(client eth.Client) (eth.Subscription, error) { - filterQuery, err := models.FilterQueryFactory(p.initr, nil) + // It's possible for RoundState() to return a higher round ID than the one in the NewRound log + // (for example, if a large set of logs are delayed and arrive all at once). 
We trust the value + // from RoundState() over the one in the log, and record it as the current ReportableRoundID. + roundState, err := p.roundState() if err != nil { - return nil, err + logger.Errorw(fmt.Sprintf("Ignoring new round request: error fetching eligibility from contract: %v", err), p.loggerFieldsForNewRound(log)...) + return } + p.reportableRoundID = big.NewInt(int64(roundState.ReportableRoundID)) - subscription, err := client.SubscribeToLogs(p.newRounds, filterQuery) + err = p.checkEligibilityAndAggregatorFunding(roundState) if err != nil { - return nil, err + logger.Infow(fmt.Sprintf("Ignoring new round request: %v", err), p.loggerFieldsForNewRound(log)...) + return } - logger.Infow( - "Flux Monitor Initiator subscribing to new rounds", - "address", p.initr.Address.Hex()) - return subscription, nil -} + // Ignore old rounds + if log.RoundId.Cmp(p.reportableRoundID) < 0 { + logger.Infow("Ignoring new round request: new < current", p.loggerFieldsForNewRound(log)...) + return + } else if log.RoundId.Uint64() <= p.mostRecentSubmittedRoundID { + logger.Infow("Ignoring new round request: already submitted for this round", p.loggerFieldsForNewRound(log)...) + return + } else if p.reportableRoundID.Uint64() <= p.mostRecentSubmittedRoundID { + logger.Infow("Ignoring new round request: possible chain reorg", p.loggerFieldsForNewRound(log)...) + return + } -// respondToNewRound takes the round broadcasted in the log event, and responds -// on-chain with an updated price. -// Only invoked by the CSP consumer on the single goroutine for thread safety. -func (p *PollingDeviationChecker) respondToNewRound(log eth.Log) error { - requestedRound, err := models.ParseNewRoundLog(log) + logger.Infow("Responding to new round request: new > current", p.loggerFieldsForNewRound(log)...) + + polledAnswer, err := p.fetcher.Fetch() if err != nil { - return err + logger.Errorw(fmt.Sprintf("unable to fetch median price: %v", err), p.loggerFieldsForNewRound(log)...) 
+ return } - jobSpecID := p.initr.JobSpecID.String() - promSetBigInt(promFMSeenRound.WithLabelValues(jobSpecID), requestedRound) - - // skip if requested is not greater than current. - if requestedRound.Cmp(p.currentRound) < 1 { - logger.Infow( - fmt.Sprintf("Ignoring new round request: requested %s <= current %s", requestedRound, p.currentRound), - "requestedRound", requestedRound, - "currentRound", p.currentRound, - "address", log.Address.Hex(), - "jobID", p.initr.JobSpecID, - ) - return nil + p.createJobRun(polledAnswer, p.reportableRoundID) +} + +func (p *PollingDeviationChecker) checkEligibilityAndAggregatorFunding(roundState contracts.FluxAggregatorRoundState) error { + if !roundState.EligibleToSubmit { + return errors.New("not eligible to submit") + } else if roundState.AvailableFunds.Cmp(roundState.PaymentAmount) < 0 { + return errors.New("aggregator is underfunded") + } else if roundState.PaymentAmount.Cmp(p.store.Config.MinimumContractPayment().ToInt()) < 0 { + return errors.New("round payment amount < minimum contract payment") } + return nil +} - logger.Infow( - fmt.Sprintf("Responding to new round request: requested %s > current %s", requestedRound, p.currentRound), - "requestedRound", requestedRound, - "currentRound", p.currentRound, - "address", log.Address.Hex(), +func (p *PollingDeviationChecker) pollIfEligible(threshold float64) (createdJobRun bool) { + loggerFields := []interface{}{ "jobID", p.initr.JobSpecID, - ) - p.currentRound = requestedRound + "address", p.initr.InitiatorParams.Address, + "threshold", threshold, + } - nextPrice, err := p.fetchPrices() - if err != nil { - return err + if p.connected.Get() == false { + logger.Warnw("not connected to Ethereum node, skipping poll", loggerFields...) 
+ return false } - err = p.createJobRun(nextPrice, requestedRound) + roundState, err := p.roundState() if err != nil { - return err + logger.Errorw(fmt.Sprintf("unable to determine eligibility to submit from FluxAggregator contract: %v", err), loggerFields...) + return false } + loggerFields = append(loggerFields, "reportableRound", roundState.ReportableRoundID) - p.currentPrice = nextPrice - return nil -} + // It's pointless to listen to logs from before the current reporting round + p.reportableRoundID = big.NewInt(int64(roundState.ReportableRoundID)) -// poll walks through the steps to check for a deviation, early exiting if deviation -// is not met, or triggering a new job run if deviation is met. -// Only invoked by the CSP consumer on the single goroutine for thread safety. -// -// True is returned when a Job Run was triggered. -func (p *PollingDeviationChecker) poll(threshold float64) (bool, error) { - jobSpecID := p.initr.JobSpecID.String() + // If we've already submitted an answer for this round, but the tx is still pending, don't resubmit + if p.mostRecentSubmittedRoundID >= uint64(roundState.ReportableRoundID) { + logger.Infow(fmt.Sprintf("already submitted for round %v, tx is still pending", roundState.ReportableRoundID), loggerFields...) + return false + } - nextPrice, err := p.fetchPrices() + err = p.checkEligibilityAndAggregatorFunding(roundState) if err != nil { - return false, err + logger.Infow(fmt.Sprintf("skipping poll: %v", err), loggerFields...) + return false } - promSetDecimal(promFMSeenValue.WithLabelValues(jobSpecID), nextPrice) - if !OutsideDeviation(p.currentPrice, nextPrice, threshold) { - return false, nil // early exit since deviation criteria not met. + polledAnswer, err := p.fetcher.Fetch() + if err != nil { + logger.Errorw(fmt.Sprintf("can't fetch answer: %v", err), loggerFields...) 
+ return false } - nextRound := new(big.Int).Add(p.currentRound, big.NewInt(1)) // start new round - logger.Infow("Detected change outside threshold, starting new round", - "round", nextRound, - "address", p.initr.Address.Hex(), - "jobID", p.initr.JobSpecID, + jobSpecID := p.initr.JobSpecID.String() + promSetDecimal(promFMSeenValue.WithLabelValues(jobSpecID), polledAnswer) + + latestAnswer := decimal.NewFromBigInt(roundState.LatestAnswer, -p.precision) + + loggerFields = append(loggerFields, + "latestAnswer", latestAnswer, + "polledAnswer", polledAnswer, ) - err = p.createJobRun(nextPrice, nextRound) + if roundState.ReportableRoundID > 1 && !OutsideDeviation(latestAnswer, polledAnswer, threshold) { + logger.Debugw("deviation < threshold, not submitting", loggerFields...) + return false + } + + if roundState.ReportableRoundID > 1 { + logger.Infow("deviation > threshold, starting new round", loggerFields...) + } else { + logger.Infow("starting first round", loggerFields...) + } + + err = p.createJobRun(polledAnswer, p.reportableRoundID) if err != nil { - return false, err + logger.Errorw(fmt.Sprintf("can't create job run: %v", err), loggerFields...) 
+ return false } - p.currentPrice = nextPrice - p.currentRound = nextRound + promSetDecimal(promFMReportedValue.WithLabelValues(jobSpecID), polledAnswer) + promSetBigInt(promFMReportedRound.WithLabelValues(jobSpecID), p.reportableRoundID) + return true +} - promSetDecimal(promFMReportedValue.WithLabelValues(jobSpecID), p.currentPrice) - promSetBigInt(promFMReportedRound.WithLabelValues(jobSpecID), p.currentRound) +func (p *PollingDeviationChecker) roundState() (contracts.FluxAggregatorRoundState, error) { + acct, err := p.store.KeyStore.GetFirstAccount() + if err != nil { + return contracts.FluxAggregatorRoundState{}, err + } + roundState, err := p.fluxAggregator.RoundState(acct.Address) + if err != nil { + return contracts.FluxAggregatorRoundState{}, err + } - return true, nil + // Update the roundTimeTicker using the .TimesOutAt field describing the current round + if roundState.TimesOutAt == 0 { + p.roundTimeoutTicker = nil + } else { + timeUntilTimeout := time.Unix(int64(roundState.TimesOutAt), 0).Sub(time.Now()) + p.roundTimeoutTicker = time.After(timeUntilTimeout) + } + + return roundState, nil } -func (p *PollingDeviationChecker) fetchPrices() (decimal.Decimal, error) { - median, err := p.fetcher.Fetch() - return median, errors.Wrap(err, "unable to fetch median price") +// jobRunRequest is the request used to trigger a Job Run by the Flux Monitor. 
+type jobRunRequest struct { + Result decimal.Decimal `json:"result"` + Address string `json:"address"` + FunctionSelector string `json:"functionSelector"` + DataPrefix string `json:"dataPrefix"` } -func (p *PollingDeviationChecker) createJobRun(nextPrice decimal.Decimal, nextRound *big.Int) error { - aggregatorContract, err := eth.GetV6Contract(eth.FluxAggregatorName) +func (p *PollingDeviationChecker) createJobRun(polledAnswer decimal.Decimal, nextRound *big.Int) error { + methodID, err := p.fluxAggregator.GetMethodID("updateAnswer") if err != nil { return err } - methodID, err := aggregatorContract.GetMethodID("updateAnswer") + + nextRoundData, err := utils.EVMWordBigInt(nextRound) if err != nil { return err } - nextRoundData, err := utils.EVMWordBigInt(nextRound) + payload, err := json.Marshal(jobRunRequest{ + Result: polledAnswer, + Address: p.initr.InitiatorParams.Address.Hex(), + FunctionSelector: hexutil.Encode(methodID), + DataPrefix: hexutil.Encode(nextRoundData), + }) if err != nil { - return err + return errors.Wrapf(err, "unable to encode Job Run request in JSON") } - payload := fmt.Sprintf(`{ - "result": "%s", - "address": "%s", - "functionSelector": "%s", - "dataPrefix": "%s" - }`, - nextPrice, - p.address.Hex(), - hexutil.Encode(methodID), - hexutil.Encode(nextRoundData)) - - runData, err := models.ParseJSON([]byte(payload)) + runData, err := models.ParseJSON(payload) if err != nil { return errors.Wrap(err, fmt.Sprintf("unable to start chainlink run with payload %s", payload)) } runRequest := models.NewRunRequest(runData) _, err = p.runManager.Create(p.initr.JobSpecID, &p.initr, nil, runRequest) - return err + if err != nil { + return err + } + + p.mostRecentSubmittedRoundID = nextRound.Uint64() + + return nil +} + +func (p *PollingDeviationChecker) loggerFieldsForNewRound(log *contracts.LogNewRound) []interface{} { + return []interface{}{ + "reportableRound", p.reportableRoundID, + "round", log.RoundId, + "startedBy", log.StartedBy.Hex(), + 
"startedAt", log.StartedAt.String(), + "contract", log.Address.Hex(), + "jobID", p.initr.JobSpecID, + } } -var dec0 = decimal.NewFromInt(0) +func (p *PollingDeviationChecker) loggerFieldsForAnswerUpdated(log *contracts.LogAnswerUpdated) []interface{} { + return []interface{}{ + "round", log.RoundId, + "answer", log.Current.String(), + "timestamp", log.Timestamp.String(), + "contract", log.Address.Hex(), + "job", p.initr.JobSpecID, + } +} // OutsideDeviation checks whether the next price is outside the threshold. -func OutsideDeviation(curPrice, nextPrice decimal.Decimal, threshold float64) bool { - if curPrice.Equal(dec0) { - logger.Infow("Current price is 0, deviation automatically met", "currentPrice", dec0) +func OutsideDeviation(curAnswer, nextAnswer decimal.Decimal, threshold float64) bool { + loggerFields := []interface{}{ + "threshold", threshold, + "currentAnswer", curAnswer, + "nextAnswer", nextAnswer, + } + + if curAnswer.IsZero() { + if nextAnswer.IsZero() { + logger.Debugw("Deviation threshold not met", loggerFields...) + return false + } + + logger.Infow("Deviation threshold met", loggerFields...) return true } - diff := curPrice.Sub(nextPrice).Abs() - percentage := diff.Div(curPrice).Mul(decimal.NewFromInt(100)) + diff := curAnswer.Sub(nextAnswer).Abs() + percentage := diff.Div(curAnswer.Abs()).Mul(decimal.NewFromInt(100)) + + loggerFields = append(loggerFields, "percentage", percentage) + if percentage.LessThan(decimal.NewFromFloat(threshold)) { - logger.Debugw( - "Deviation threshold not met", - "difference", percentage, - "threshold", threshold, - "currentPrice", curPrice, - "nextPrice", nextPrice) + logger.Debugw("Deviation threshold not met", loggerFields...) return false } - logger.Infow( - "Deviation threshold met", - "difference", percentage, - "threshold", threshold, - "currentPrice", curPrice, - "nextPrice", nextPrice, - ) + logger.Infow("Deviation threshold met", loggerFields...) 
return true } diff --git a/core/services/fluxmonitor/flux_monitor_test.go b/core/services/fluxmonitor/flux_monitor_test.go index d7be08a1871..d90331c96a9 100644 --- a/core/services/fluxmonitor/flux_monitor_test.go +++ b/core/services/fluxmonitor/flux_monitor_test.go @@ -1,517 +1,732 @@ package fluxmonitor_test import ( + "fmt" + "math" + "math/big" + "net/url" + "reflect" + "testing" + "time" + "chainlink/core/cmd" "chainlink/core/internal/cltest" "chainlink/core/internal/mocks" + "chainlink/core/services/eth" + "chainlink/core/services/eth/contracts" "chainlink/core/services/fluxmonitor" + "chainlink/core/store" "chainlink/core/store/models" "chainlink/core/utils" - "context" - "fmt" - "math/big" - "net/url" - "testing" - "time" "github.com/ethereum/go-ethereum/common" - "github.com/pkg/errors" "github.com/shopspring/decimal" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) -type successFetcher decimal.Decimal - -func (f *successFetcher) Fetch() (decimal.Decimal, error) { - return decimal.Decimal(*f), nil -} +var ( + updateAnswerHash = utils.MustHash("updateAnswer(uint256,int256)") + updateAnswerSelector = updateAnswerHash[:4] +) -func fakeSubscription() *mocks.Subscription { - sub := new(mocks.Subscription) - sub.On("Unsubscribe").Return() - sub.On("Err").Return(nil) - return sub +func ensureAccount(t *testing.T, store *store.Store) common.Address { + t.Helper() + auth := cmd.TerminalKeyStoreAuthenticator{Prompter: &cltest.MockCountingPrompter{T: t}} + _, err := auth.Authenticate(store, "somepassword") + assert.NoError(t, err) + assert.True(t, store.KeyStore.HasAccounts()) + acct, err := store.KeyStore.GetFirstAccount() + assert.NoError(t, err) + return acct.Address } -func TestConcreteFluxMonitor_AddJobRemoveJobHappy(t *testing.T) { +func TestConcreteFluxMonitor_AddJobRemoveJob(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() - job := cltest.NewJobWithFluxMonitorInitiator() - 
runManager := new(mocks.RunManager) - started := make(chan struct{}, 1) + txm := new(mocks.TxManager) + store.TxManager = txm + txm.On("GetBlockHeight").Return(uint64(123), nil) - dc := new(mocks.DeviationChecker) - dc.On("Start", mock.Anything, mock.Anything).Return(nil).Run(func(mock.Arguments) { - started <- struct{}{} - }) + t.Run("starts and stops DeviationCheckers when jobs are added and removed", func(t *testing.T) { + job := cltest.NewJobWithFluxMonitorInitiator() + runManager := new(mocks.RunManager) + started := make(chan struct{}, 1) - checkerFactory := new(mocks.DeviationCheckerFactory) - checkerFactory.On("New", job.Initiators[0], runManager, store.ORM).Return(dc, nil) - fm := fluxmonitor.New(store, runManager) - fluxmonitor.ExportedSetCheckerFactory(fm, checkerFactory) - require.NoError(t, fm.Start()) - defer fm.Stop() - require.NoError(t, fm.Connect(nil)) - defer fm.Disconnect() + dc := new(mocks.DeviationChecker) + dc.On("Start", mock.Anything, mock.Anything).Return(nil).Run(func(mock.Arguments) { + started <- struct{}{} + }) - // Add Job - require.NoError(t, fm.AddJob(job)) + checkerFactory := new(mocks.DeviationCheckerFactory) + checkerFactory.On("New", job.Initiators[0], runManager, store.ORM, store.Config.DefaultHTTPTimeout()).Return(dc, nil) + fm := fluxmonitor.New(store, runManager) + fluxmonitor.ExportedSetCheckerFactory(fm, checkerFactory) + require.NoError(t, fm.Start()) - cltest.CallbackOrTimeout(t, "deviation checker started", func() { - <-started - }) - checkerFactory.AssertExpectations(t) - dc.AssertExpectations(t) + // Add Job + require.NoError(t, fm.AddJob(job)) - // Remove Job - removed := make(chan struct{}) - dc.On("Stop").Return().Run(func(mock.Arguments) { - removed <- struct{}{} - }) - fm.RemoveJob(job.ID) - cltest.CallbackOrTimeout(t, "deviation checker stopped", func() { - <-removed - }) - dc.AssertExpectations(t) -} + cltest.CallbackOrTimeout(t, "deviation checker started", func() { + <-started + }) + 
checkerFactory.AssertExpectations(t) + dc.AssertExpectations(t) -func TestConcreteFluxMonitor_AddJobError(t *testing.T) { - store, cleanup := cltest.NewStore(t) - defer cleanup() + // Remove Job + removed := make(chan struct{}) + dc.On("Stop").Return().Run(func(mock.Arguments) { + removed <- struct{}{} + }) + fm.RemoveJob(job.ID) + cltest.CallbackOrTimeout(t, "deviation checker stopped", func() { + <-removed + }) - job := cltest.NewJobWithFluxMonitorInitiator() - runManager := new(mocks.RunManager) - dc := new(mocks.DeviationChecker) - dc.On("Start", mock.Anything, mock.Anything).Return(errors.New("deliberate test error")) - checkerFactory := new(mocks.DeviationCheckerFactory) - checkerFactory.On("New", job.Initiators[0], runManager, store.ORM).Return(dc, nil) - fm := fluxmonitor.New(store, runManager) - fluxmonitor.ExportedSetCheckerFactory(fm, checkerFactory) - require.NoError(t, fm.Start()) - defer fm.Stop() - require.NoError(t, fm.Connect(nil)) - defer fm.Disconnect() - - require.Error(t, fm.AddJob(job)) - checkerFactory.AssertExpectations(t) - dc.AssertExpectations(t) -} + fm.Stop() -func TestConcreteFluxMonitor_AddJobDisconnected(t *testing.T) { - store, cleanup := cltest.NewStore(t) - defer cleanup() + dc.AssertExpectations(t) + }) - job := cltest.NewJobWithFluxMonitorInitiator() - runManager := new(mocks.RunManager) - checkerFactory := new(mocks.DeviationCheckerFactory) - dc := new(mocks.DeviationChecker) - checkerFactory.On("New", job.Initiators[0], runManager, store.ORM).Return(dc, nil) - fm := fluxmonitor.New(store, runManager) - fluxmonitor.ExportedSetCheckerFactory(fm, checkerFactory) - require.NoError(t, fm.Start()) - defer fm.Stop() - - require.NoError(t, fm.AddJob(job)) -} + t.Run("does not error or attempt to start a DeviationChecker when receiving a non-Flux Monitor job", func(t *testing.T) { + job := cltest.NewJobWithRunLogInitiator() + runManager := new(mocks.RunManager) + checkerFactory := new(mocks.DeviationCheckerFactory) + fm := 
fluxmonitor.New(store, runManager) + fluxmonitor.ExportedSetCheckerFactory(fm, checkerFactory) -func TestConcreteFluxMonitor_AddJobNonFluxMonitor(t *testing.T) { - store, cleanup := cltest.NewStore(t) - defer cleanup() + err := fm.Start() + require.NoError(t, err) + defer fm.Stop() - job := cltest.NewJobWithRunLogInitiator() - runManager := new(mocks.RunManager) - checkerFactory := new(mocks.DeviationCheckerFactory) - fm := fluxmonitor.New(store, runManager) - fluxmonitor.ExportedSetCheckerFactory(fm, checkerFactory) - require.NoError(t, fm.Start()) - defer fm.Stop() + err = fm.AddJob(job) + require.NoError(t, err) - require.NoError(t, fm.AddJob(job)) + checkerFactory.AssertNotCalled(t, "New", mock.Anything, mock.Anything, mock.Anything) + }) } -func TestConcreteFluxMonitor_ConnectStartsExistingJobs(t *testing.T) { +func TestPollingDeviationChecker_PollIfEligible(t *testing.T) { + tests := []struct { + name string + eligible bool + connected bool + funded bool + threshold float64 + latestAnswer int64 + polledAnswer int64 + expectedToPoll bool + expectedToSubmit bool + }{ + {"eligible, connected, funded, threshold > 0, answers deviate", true, true, true, 0.1, 1, 100, true, true}, + {"eligible, connected, funded, threshold > 0, answers do not deviate", true, true, true, 0.1, 100, 100, true, false}, + {"eligible, connected, funded, threshold == 0, answers deviate", true, true, true, 0, 1, 100, true, true}, + {"eligible, connected, funded, threshold == 0, answers do not deviate", true, true, true, 0, 1, 100, true, true}, + + {"eligible, disconnected, funded, threshold > 0, answers deviate", true, false, true, 0.1, 1, 100, false, false}, + {"eligible, disconnected, funded, threshold > 0, answers do not deviate", true, false, true, 0.1, 100, 100, false, false}, + {"eligible, disconnected, funded, threshold == 0, answers deviate", true, false, true, 0, 1, 100, false, false}, + {"eligible, disconnected, funded, threshold == 0, answers do not deviate", true, false, true, 0, 
1, 100, false, false}, + + {"ineligible, connected, funded, threshold > 0, answers deviate", false, true, true, 0.1, 1, 100, false, false}, + {"ineligible, connected, funded, threshold > 0, answers do not deviate", false, true, true, 0.1, 100, 100, false, false}, + {"ineligible, connected, funded, threshold == 0, answers deviate", false, true, true, 0, 1, 100, false, false}, + {"ineligible, connected, funded, threshold == 0, answers do not deviate", false, true, true, 0, 1, 100, false, false}, + + {"ineligible, disconnected, funded, threshold > 0, answers deviate", false, false, true, 0.1, 1, 100, false, false}, + {"ineligible, disconnected, funded, threshold > 0, answers do not deviate", false, false, true, 0.1, 100, 100, false, false}, + {"ineligible, disconnected, funded, threshold == 0, answers deviate", false, false, true, 0, 1, 100, false, false}, + {"ineligible, disconnected, funded, threshold == 0, answers do not deviate", false, false, true, 0, 1, 100, false, false}, + + {"eligible, connected, underfunded, threshold > 0, answers deviate", true, true, false, 0.1, 1, 100, false, false}, + {"eligible, connected, underfunded, threshold > 0, answers do not deviate", true, true, false, 0.1, 100, 100, false, false}, + {"eligible, connected, underfunded, threshold == 0, answers deviate", true, true, false, 0, 1, 100, false, false}, + {"eligible, connected, underfunded, threshold == 0, answers do not deviate", true, true, false, 0, 1, 100, false, false}, + + {"eligible, disconnected, underfunded, threshold > 0, answers deviate", true, false, false, 0.1, 1, 100, false, false}, + {"eligible, disconnected, underfunded, threshold > 0, answers do not deviate", true, false, false, 0.1, 100, 100, false, false}, + {"eligible, disconnected, underfunded, threshold == 0, answers deviate", true, false, false, 0, 1, 100, false, false}, + {"eligible, disconnected, underfunded, threshold == 0, answers do not deviate", true, false, false, 0, 1, 100, false, false}, + + 
{"ineligible, connected, underfunded, threshold > 0, answers deviate", false, true, false, 0.1, 1, 100, false, false}, + {"ineligible, connected, underfunded, threshold > 0, answers do not deviate", false, true, false, 0.1, 100, 100, false, false}, + {"ineligible, connected, underfunded, threshold == 0, answers deviate", false, true, false, 0, 1, 100, false, false}, + {"ineligible, connected, underfunded, threshold == 0, answers do not deviate", false, true, false, 0, 1, 100, false, false}, + + {"ineligible, disconnected, underfunded, threshold > 0, answers deviate", false, false, false, 0.1, 1, 100, false, false}, + {"ineligible, disconnected, underfunded, threshold > 0, answers do not deviate", false, false, false, 0.1, 100, 100, false, false}, + {"ineligible, disconnected, underfunded, threshold == 0, answers deviate", false, false, false, 0, 1, 100, false, false}, + {"ineligible, disconnected, underfunded, threshold == 0, answers do not deviate", false, false, false, 0, 1, 100, false, false}, + } + store, cleanup := cltest.NewStore(t) defer cleanup() - runManager := new(mocks.RunManager) - started := make(chan struct{}) + nodeAddr := ensureAccount(t, store) - dc := new(mocks.DeviationChecker) - dc.On("Start", mock.Anything, mock.Anything).Return(nil).Run(func(mock.Arguments) { - started <- struct{}{} - }) - - checkerFactory := new(mocks.DeviationCheckerFactory) - - for i := 0; i < 3; i++ { - job := cltest.NewJobWithFluxMonitorInitiator() - require.NoError(t, store.CreateJob(&job)) - job, err := store.FindJob(job.ID) - require.NoError(t, err) - checkerFactory.On("New", job.Initiators[0], runManager, store.ORM).Return(dc, nil) - } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + rm := new(mocks.RunManager) + fetcher := new(mocks.Fetcher) + fluxAggregator := new(mocks.FluxAggregator) + + job := cltest.NewJobWithFluxMonitorInitiator() + initr := job.Initiators[0] + initr.ID = 1 + + const reportableRoundID = 2 + latestAnswerNoPrecision := 
test.latestAnswer * int64(math.Pow10(int(initr.InitiatorParams.Precision))) + + var availableFunds *big.Int + var paymentAmount *big.Int + minPayment := store.Config.MinimumContractPayment().ToInt() + if test.funded { + availableFunds = minPayment + paymentAmount = minPayment + } else { + availableFunds = big.NewInt(1) + paymentAmount = minPayment + } - fm := fluxmonitor.New(store, runManager) - fluxmonitor.ExportedSetCheckerFactory(fm, checkerFactory) - err := fm.Start() - require.NoError(t, err) - defer fm.Stop() + roundState := contracts.FluxAggregatorRoundState{ + ReportableRoundID: reportableRoundID, + EligibleToSubmit: test.eligible, + LatestAnswer: big.NewInt(latestAnswerNoPrecision), + AvailableFunds: availableFunds, + PaymentAmount: paymentAmount, + } + fluxAggregator.On("RoundState", nodeAddr).Return(roundState, nil).Maybe() - require.NoError(t, fm.Connect(nil)) - cltest.CallbackOrTimeout(t, "deviation checker started", func() { - <-started - }) - checkerFactory.AssertExpectations(t) - dc.AssertExpectations(t) -} + if test.expectedToPoll { + fetcher.On("Fetch").Return(decimal.NewFromInt(test.polledAnswer), nil) + } -func TestConcreteFluxMonitor_StopWithoutStart(t *testing.T) { - store, cleanup := cltest.NewStore(t) - defer cleanup() + if test.expectedToSubmit { + run := cltest.NewJobRun(job) + data, err := models.ParseJSON([]byte(fmt.Sprintf(`{ + "result": "%d", + "address": "%s", + "functionSelector": "0x%x", + "dataPrefix": "0x000000000000000000000000000000000000000000000000000000000000000%d" + }`, test.polledAnswer, initr.InitiatorParams.Address.Hex(), updateAnswerSelector, reportableRoundID))) + require.NoError(t, err) + + rm.On("Create", job.ID, &initr, mock.Anything, mock.MatchedBy(func(runRequest *models.RunRequest) bool { + return reflect.DeepEqual(runRequest.RequestParams.Result.Value(), data.Result.Value()) + })).Return(&run, nil) + + fluxAggregator.On("GetMethodID", "updateAnswer").Return(updateAnswerSelector, nil) + } - runManager := 
new(mocks.RunManager) + checker, err := fluxmonitor.NewPollingDeviationChecker(store, fluxAggregator, initr, rm, fetcher, time.Second) + require.NoError(t, err) - fm := fluxmonitor.New(store, runManager) - fm.Stop() -} + if test.connected { + checker.OnConnect() + } -func TestPollingDeviationChecker_PollHappy(t *testing.T) { - store, cleanup := cltest.NewStore(t) - defer cleanup() + checker.ExportedPollIfEligible(test.threshold) - fetcher := new(mocks.Fetcher) - fetcher.On("Fetch").Return(decimal.NewFromInt(102), nil) - - job := cltest.NewJobWithFluxMonitorInitiator() - initr := job.Initiators[0] - initr.ID = 1 - - rm := new(mocks.RunManager) - run := cltest.NewJobRun(job) - data, err := models.ParseJSON([]byte(fmt.Sprintf(`{ - "result": "102", - "address": "%s", - "functionSelector": "0xe6330cf7", - "dataPrefix": "0x0000000000000000000000000000000000000000000000000000000000000002" - }`, initr.InitiatorParams.Address.Hex()))) - require.NoError(t, err) - rm.On("Create", job.ID, &initr, mock.Anything, mock.MatchedBy(func(runRequest *models.RunRequest) bool { - return runRequest.RequestParams == data - })).Return(&run, nil) - - checker, err := fluxmonitor.NewPollingDeviationChecker(store, initr, rm, fetcher, time.Second) - require.NoError(t, err) - - ethClient := new(mocks.Client) - ethClient.On("GetAggregatorPrice", initr.InitiatorParams.Address, initr.InitiatorParams.Precision). - Return(decimal.NewFromInt(100), nil) - ethClient.On("GetAggregatorRound", initr.InitiatorParams.Address). 
- Return(big.NewInt(1), nil) - - require.NoError(t, checker.ExportedFetchAggregatorData(ethClient)) // setup - ethClient.AssertExpectations(t) - assert.Equal(t, decimal.NewFromInt(100), checker.ExportedCurrentPrice()) - assert.Equal(t, big.NewInt(1), checker.ExportedCurrentRound()) - - _, err = checker.ExportedPoll() - require.NoError(t, err) // main entry point - - fetcher.AssertExpectations(t) - rm.AssertExpectations(t) - assert.Equal(t, decimal.NewFromInt(102), checker.ExportedCurrentPrice()) - assert.Equal(t, big.NewInt(2), checker.ExportedCurrentRound()) + fluxAggregator.AssertExpectations(t) + fetcher.AssertExpectations(t) + rm.AssertExpectations(t) + }) + } } func TestPollingDeviationChecker_TriggerIdleTimeThreshold(t *testing.T) { - store, cleanup := cltest.NewStore(t) - defer cleanup() - - auth := cmd.TerminalKeyStoreAuthenticator{Prompter: &cltest.MockCountingPrompter{T: t}} - _, err := auth.Authenticate(store, "somepassword") - assert.NoError(t, err) - assert.True(t, store.KeyStore.HasAccounts()) - job := cltest.NewJobWithFluxMonitorInitiator() - initr := job.Initiators[0] - initr.ID = 1 - initr.PollingInterval = models.Duration(5 * time.Millisecond) - initr.IdleThreshold = models.Duration(10 * time.Millisecond) + tests := []struct { + name string + idleThreshold time.Duration + expectedToSubmit bool + }{ + {"no idleThreshold", 0, false}, + {"idleThreshold > 0", 10 * time.Millisecond, true}, + } - jobRun := cltest.NewJobRun(job) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + store, cleanup := cltest.NewStore(t) + defer cleanup() - runManager := new(mocks.RunManager) + nodeAddr := ensureAccount(t, store) - randomLargeNumber := 100 - jobRunCreated := make(chan struct{}, randomLargeNumber) - runManager.On("Create", job.ID, mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(&jobRun, nil). 
- Run(func(args mock.Arguments) { - jobRunCreated <- struct{}{} - }) + fetcher := new(mocks.Fetcher) + runManager := new(mocks.RunManager) + fluxAggregator := new(mocks.FluxAggregator) + + job := cltest.NewJobWithFluxMonitorInitiator() + initr := job.Initiators[0] + initr.ID = 1 + initr.PollingInterval = models.Duration(math.MaxInt64) + initr.IdleThreshold = models.Duration(test.idleThreshold) + + const fetchedAnswer = 100 + answerBigInt := big.NewInt(fetchedAnswer * int64(math.Pow10(int(initr.InitiatorParams.Precision)))) + + fluxAggregator.On("SubscribeToLogs", mock.Anything).Return(true, eth.UnsubscribeFunc(func() {}), nil) + + roundState1 := contracts.FluxAggregatorRoundState{ReportableRoundID: 1, EligibleToSubmit: false, LatestAnswer: answerBigInt} // Initial poll + roundState2 := contracts.FluxAggregatorRoundState{ReportableRoundID: 2, EligibleToSubmit: false, LatestAnswer: answerBigInt} // idleThreshold 1 + roundState3 := contracts.FluxAggregatorRoundState{ReportableRoundID: 3, EligibleToSubmit: false, LatestAnswer: answerBigInt} // NewRound + roundState4 := contracts.FluxAggregatorRoundState{ReportableRoundID: 4, EligibleToSubmit: false, LatestAnswer: answerBigInt} // idleThreshold 2 + + idleThresholdOccured := make(chan struct{}, 3) + + fluxAggregator.On("RoundState", nodeAddr).Return(roundState1, nil).Once() // Initial poll + if test.expectedToSubmit { + // idleThreshold 1 + fluxAggregator.On("RoundState", nodeAddr).Return(roundState2, nil).Once().Run(func(args mock.Arguments) { idleThresholdOccured <- struct{}{} }) + // NewRound + fluxAggregator.On("RoundState", nodeAddr).Return(roundState3, nil).Once() + // idleThreshold 2 + fluxAggregator.On("RoundState", nodeAddr).Return(roundState4, nil).Once().Run(func(args mock.Arguments) { idleThresholdOccured <- struct{}{} }) + } - fetcher := successFetcher(decimal.NewFromInt(100)) - deviationChecker, err := fluxmonitor.NewPollingDeviationChecker( - store, - initr, - runManager, - &fetcher, - time.Second, - ) - 
require.NoError(t, err) + deviationChecker, err := fluxmonitor.NewPollingDeviationChecker( + store, + fluxAggregator, + initr, + runManager, + fetcher, + time.Duration(math.MaxInt64), + ) + require.NoError(t, err) - ethClient := new(mocks.Client) - ethClient.On("GetAggregatorPrice", initr.InitiatorParams.Address, initr.InitiatorParams.Precision). - Return(decimal.NewFromInt(100), nil) - ethClient.On("GetAggregatorRound", initr.InitiatorParams.Address). - Return(big.NewInt(1), nil) - ethClient.On("SubscribeToLogs", mock.Anything, mock.Anything). - Return(fakeSubscription(), nil) - ethClient.On("GetLatestSubmission", mock.Anything, mock.Anything). - Return(big.NewInt(0), big.NewInt(0), nil) + deviationChecker.OnConnect() + deviationChecker.Start() + require.Len(t, idleThresholdOccured, 0, "no Job Runs created") - err = deviationChecker.Start(context.Background(), ethClient) - require.NoError(t, err) - require.Len(t, jobRunCreated, 0, "no Job Runs created") + if test.expectedToSubmit { + require.Eventually(t, func() bool { return len(idleThresholdOccured) == 1 }, 3*time.Second, 10*time.Millisecond) + deviationChecker.HandleLog(&contracts.LogNewRound{RoundId: big.NewInt(int64(roundState1.ReportableRoundID))}, nil) + require.Eventually(t, func() bool { return len(idleThresholdOccured) == 2 }, 3*time.Second, 10*time.Millisecond) + } - require.Eventually(t, func() bool { return len(jobRunCreated) >= 1 }, time.Second, time.Millisecond, "idleThreshold triggers Job Run") - require.Eventually(t, func() bool { return len(jobRunCreated) >= 5 }, time.Second, time.Millisecond, "idleThreshold triggers succeeding Job Runs") + deviationChecker.Stop() - deviationChecker.Stop() + if !test.expectedToSubmit { + require.Len(t, idleThresholdOccured, 0) + } - assert.Equal(t, decimal.NewFromInt(100).String(), deviationChecker.ExportedCurrentPrice().String()) + fetcher.AssertExpectations(t) + runManager.AssertExpectations(t) + fluxAggregator.AssertExpectations(t) + }) + } } -func 
TestPollingDeviationChecker_StartError(t *testing.T) { +func TestPollingDeviationChecker_RoundTimeoutCausesPoll(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() - rm := new(mocks.RunManager) - job := cltest.NewJobWithFluxMonitorInitiator() - initr := job.Initiators[0] - initr.ID = 1 - - ethClient := new(mocks.Client) - ethClient.On("GetAggregatorPrice", initr.InitiatorParams.Address, initr.InitiatorParams.Precision). - Return(decimal.NewFromInt(0), errors.New("deliberate test error")) - - checker, err := fluxmonitor.NewPollingDeviationChecker(store, initr, rm, nil, time.Second) - require.NoError(t, err) - require.Error(t, checker.Start(context.Background(), ethClient)) -} + nodeAddr := ensureAccount(t, store) -func TestPollingDeviationChecker_StartStop(t *testing.T) { - store, cleanup := cltest.NewStore(t) - defer cleanup() + tests := []struct { + name string + timesOutAt int64 + expectedToTrigger bool + }{ + {"timesOutAt == 0", 0, false}, + {"timesOutAt != 0", time.Now().Add(1 * time.Second).Unix(), true}, + } - // Prepare initialization to 100, which matches external adapter, so no deviation - job := cltest.NewJobWithFluxMonitorInitiator() - initr := job.Initiators[0] - initr.ID = 1 - - ethClient := new(mocks.Client) - ethClient.On("GetAggregatorPrice", initr.InitiatorParams.Address, initr.InitiatorParams.Precision). - Return(decimal.NewFromInt(100), nil) - ethClient.On("GetAggregatorRound", initr.InitiatorParams.Address). - Return(big.NewInt(1), nil) - ethClient.On("SubscribeToLogs", mock.Anything, mock.Anything). 
- Return(fakeSubscription(), nil) - - rm := new(mocks.RunManager) - fetcher := new(mocks.Fetcher) - checker, err := fluxmonitor.NewPollingDeviationChecker(store, initr, rm, fetcher, time.Millisecond) - require.NoError(t, err) - - // Set up fetcher to mark when polled - started := make(chan struct{}) - fetcher.On("Fetch").Return(decimal.NewFromFloat(100.0), nil).Maybe().Run(func(mock.Arguments) { - started <- struct{}{} - }) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fetcher := new(mocks.Fetcher) + runManager := new(mocks.RunManager) + fluxAggregator := new(mocks.FluxAggregator) + + job := cltest.NewJobWithFluxMonitorInitiator() + initr := job.Initiators[0] + initr.ID = 1 + initr.PollingInterval = models.Duration(math.MaxInt64) + initr.IdleThreshold = models.Duration(0) + + const fetchedAnswer = 100 + answerBigInt := big.NewInt(fetchedAnswer * int64(math.Pow10(int(initr.InitiatorParams.Precision)))) + + fluxAggregator.On("SubscribeToLogs", mock.Anything).Return(true, eth.UnsubscribeFunc(func() {}), nil) + + if test.expectedToTrigger { + fluxAggregator.On("RoundState", nodeAddr).Return(contracts.FluxAggregatorRoundState{ + ReportableRoundID: 1, + EligibleToSubmit: false, + LatestAnswer: answerBigInt, + TimesOutAt: uint64(test.timesOutAt), + }, nil).Once() + fluxAggregator.On("RoundState", nodeAddr).Return(contracts.FluxAggregatorRoundState{ + ReportableRoundID: 1, + EligibleToSubmit: false, + LatestAnswer: answerBigInt, + TimesOutAt: 0, + }, nil).Once() + } else { + fluxAggregator.On("RoundState", nodeAddr).Return(contracts.FluxAggregatorRoundState{ + ReportableRoundID: 1, + EligibleToSubmit: false, + LatestAnswer: answerBigInt, + TimesOutAt: uint64(test.timesOutAt), + }, nil).Once() + } - // Start() with no delay to speed up test and polling. 
- done := make(chan struct{}) - go func() { - checker.Start(context.Background(), ethClient) // Start() polling - done <- struct{}{} - }() + deviationChecker, err := fluxmonitor.NewPollingDeviationChecker( + store, + fluxAggregator, + initr, + runManager, + fetcher, + time.Duration(math.MaxInt64), + ) + require.NoError(t, err) - cltest.CallbackOrTimeout(t, "Start() starts", func() { - <-started - }) - fetcher.AssertExpectations(t) + deviationChecker.Start() + deviationChecker.OnConnect() + time.Sleep(5 * time.Second) + deviationChecker.Stop() - checker.Stop() - cltest.CallbackOrTimeout(t, "Stop() unblocks Start()", func() { - <-done - }) + fetcher.AssertExpectations(t) + runManager.AssertExpectations(t) + fluxAggregator.AssertExpectations(t) + }) + } } -func TestPollingDeviationChecker_NoDeviation_CanBeCanceled(t *testing.T) { - store, cleanup := cltest.NewStore(t) - defer cleanup() +func TestPollingDeviationChecker_RespondToNewRound(t *testing.T) { - auth := cmd.TerminalKeyStoreAuthenticator{Prompter: &cltest.MockCountingPrompter{T: t}} - _, err := auth.Authenticate(store, "somepassword") - assert.NoError(t, err) - assert.True(t, store.KeyStore.HasAccounts()) + type roundIDCase struct { + name string + storedReportableRoundID *big.Int + fetchedReportableRoundID uint32 + logRoundID int64 + } + var ( + stored_lt_fetched_lt_log = roundIDCase{"stored < fetched < log", big.NewInt(5), 10, 15} + stored_lt_log_lt_fetched = roundIDCase{"stored < log < fetched", big.NewInt(5), 15, 10} + fetched_lt_stored_lt_log = roundIDCase{"fetched < stored < log", big.NewInt(10), 5, 15} + fetched_lt_log_lt_stored = roundIDCase{"fetched < log < stored", big.NewInt(15), 5, 10} + log_lt_fetched_lt_stored = roundIDCase{"log < fetched < stored", big.NewInt(15), 10, 5} + log_lt_stored_lt_fetched = roundIDCase{"log < stored < fetched", big.NewInt(10), 15, 5} + stored_lt_fetched_eq_log = roundIDCase{"stored < fetched = log", big.NewInt(5), 10, 10} + stored_eq_fetched_lt_log = roundIDCase{"stored 
= fetched < log", big.NewInt(5), 5, 10} + stored_eq_log_lt_fetched = roundIDCase{"stored = log < fetched", big.NewInt(5), 10, 5} + fetched_lt_stored_eq_log = roundIDCase{"fetched < stored = log", big.NewInt(10), 5, 10} + fetched_eq_log_lt_stored = roundIDCase{"fetched = log < stored", big.NewInt(10), 5, 5} + log_lt_fetched_eq_stored = roundIDCase{"log < fetched = stored", big.NewInt(10), 10, 5} + ) - // Set up fetcher to mark when polled - fetcher := new(mocks.Fetcher) - polled := make(chan struct{}) - fetcher.On("Fetch").Return(decimal.NewFromFloat(100.0), nil).Run(func(mock.Arguments) { - polled <- struct{}{} - }) + type answerCase struct { + name string + latestAnswer int64 + polledAnswer int64 + } + var ( + deviationThresholdExceeded = answerCase{"deviation", 10, 100} + deviationThresholdNotExceeded = answerCase{"no deviation", 10, 10} + ) - // Prepare initialization to 100, which matches external adapter, so no deviation - job := cltest.NewJobWithFluxMonitorInitiator() - initr := job.Initiators[0] - initr.ID = 1 - - ethClient := new(mocks.Client) - ethClient.On("GetAggregatorPrice", initr.InitiatorParams.Address, initr.InitiatorParams.Precision). - Return(decimal.NewFromInt(100), nil) - ethClient.On("GetAggregatorRound", initr.InitiatorParams.Address). - Return(big.NewInt(1), nil) - ethClient.On("SubscribeToLogs", mock.Anything, mock.Anything). - Return(fakeSubscription(), nil) - ethClient.On("GetLatestSubmission", mock.Anything, mock.Anything). - Return(big.NewInt(0), big.NewInt(0), nil) - - // Start() with no delay to speed up test and polling. 
- rm := new(mocks.RunManager) // No mocks assert no runs are created - checker, err := fluxmonitor.NewPollingDeviationChecker(store, initr, rm, fetcher, time.Millisecond) - require.NoError(t, err) - - ctx, cancel := context.WithCancel(context.Background()) - done := make(chan struct{}) - go func() { - checker.Start(ctx, ethClient) // Start() polling until cancel() - done <- struct{}{} - }() - - // Check if Polled - cltest.CallbackOrTimeout(t, "start repeatedly polls external adapter", func() { - <-polled // launched at the beginning of Start - <-polled // launched after time.After - }) - fetcher.AssertExpectations(t) + tests := []struct { + funded bool + eligible bool + startedBySelf bool + roundIDCase + answerCase + }{ + {true, true, true, stored_lt_fetched_lt_log, deviationThresholdExceeded}, + {true, true, true, stored_lt_log_lt_fetched, deviationThresholdExceeded}, + {true, true, true, fetched_lt_stored_lt_log, deviationThresholdExceeded}, + {true, true, true, fetched_lt_log_lt_stored, deviationThresholdExceeded}, + {true, true, true, log_lt_fetched_lt_stored, deviationThresholdExceeded}, + {true, true, true, log_lt_stored_lt_fetched, deviationThresholdExceeded}, + {true, true, true, stored_lt_fetched_eq_log, deviationThresholdExceeded}, + {true, true, true, stored_eq_fetched_lt_log, deviationThresholdExceeded}, + {true, true, true, stored_eq_log_lt_fetched, deviationThresholdExceeded}, + {true, true, true, fetched_lt_stored_eq_log, deviationThresholdExceeded}, + {true, true, true, fetched_eq_log_lt_stored, deviationThresholdExceeded}, + {true, true, true, log_lt_fetched_eq_stored, deviationThresholdExceeded}, + {true, true, true, stored_lt_fetched_lt_log, deviationThresholdNotExceeded}, + {true, true, true, stored_lt_log_lt_fetched, deviationThresholdNotExceeded}, + {true, true, true, fetched_lt_stored_lt_log, deviationThresholdNotExceeded}, + {true, true, true, fetched_lt_log_lt_stored, deviationThresholdNotExceeded}, + {true, true, true, 
log_lt_fetched_lt_stored, deviationThresholdNotExceeded}, + {true, true, true, log_lt_stored_lt_fetched, deviationThresholdNotExceeded}, + {true, true, true, stored_lt_fetched_eq_log, deviationThresholdNotExceeded}, + {true, true, true, stored_eq_fetched_lt_log, deviationThresholdNotExceeded}, + {true, true, true, stored_eq_log_lt_fetched, deviationThresholdNotExceeded}, + {true, true, true, fetched_lt_stored_eq_log, deviationThresholdNotExceeded}, + {true, true, true, fetched_eq_log_lt_stored, deviationThresholdNotExceeded}, + {true, true, true, log_lt_fetched_eq_stored, deviationThresholdNotExceeded}, + {true, true, false, stored_lt_fetched_lt_log, deviationThresholdExceeded}, + {true, true, false, stored_lt_log_lt_fetched, deviationThresholdExceeded}, + {true, true, false, fetched_lt_stored_lt_log, deviationThresholdExceeded}, + {true, true, false, fetched_lt_log_lt_stored, deviationThresholdExceeded}, + {true, true, false, log_lt_fetched_lt_stored, deviationThresholdExceeded}, + {true, true, false, log_lt_stored_lt_fetched, deviationThresholdExceeded}, + {true, true, false, stored_lt_fetched_eq_log, deviationThresholdExceeded}, + {true, true, false, stored_eq_fetched_lt_log, deviationThresholdExceeded}, + {true, true, false, stored_eq_log_lt_fetched, deviationThresholdExceeded}, + {true, true, false, fetched_lt_stored_eq_log, deviationThresholdExceeded}, + {true, true, false, fetched_eq_log_lt_stored, deviationThresholdExceeded}, + {true, true, false, log_lt_fetched_eq_stored, deviationThresholdExceeded}, + {true, true, false, stored_lt_fetched_lt_log, deviationThresholdNotExceeded}, + {true, true, false, stored_lt_log_lt_fetched, deviationThresholdNotExceeded}, + {true, true, false, fetched_lt_stored_lt_log, deviationThresholdNotExceeded}, + {true, true, false, fetched_lt_log_lt_stored, deviationThresholdNotExceeded}, + {true, true, false, log_lt_fetched_lt_stored, deviationThresholdNotExceeded}, + {true, true, false, log_lt_stored_lt_fetched, 
deviationThresholdNotExceeded}, + {true, true, false, stored_lt_fetched_eq_log, deviationThresholdNotExceeded}, + {true, true, false, stored_eq_fetched_lt_log, deviationThresholdNotExceeded}, + {true, true, false, stored_eq_log_lt_fetched, deviationThresholdNotExceeded}, + {true, true, false, fetched_lt_stored_eq_log, deviationThresholdNotExceeded}, + {true, true, false, fetched_eq_log_lt_stored, deviationThresholdNotExceeded}, + {true, true, false, log_lt_fetched_eq_stored, deviationThresholdNotExceeded}, + {true, false, true, stored_lt_fetched_lt_log, deviationThresholdExceeded}, + {true, false, true, stored_lt_log_lt_fetched, deviationThresholdExceeded}, + {true, false, true, fetched_lt_stored_lt_log, deviationThresholdExceeded}, + {true, false, true, fetched_lt_log_lt_stored, deviationThresholdExceeded}, + {true, false, true, log_lt_fetched_lt_stored, deviationThresholdExceeded}, + {true, false, true, log_lt_stored_lt_fetched, deviationThresholdExceeded}, + {true, false, true, stored_lt_fetched_eq_log, deviationThresholdExceeded}, + {true, false, true, stored_eq_fetched_lt_log, deviationThresholdExceeded}, + {true, false, true, stored_eq_log_lt_fetched, deviationThresholdExceeded}, + {true, false, true, fetched_lt_stored_eq_log, deviationThresholdExceeded}, + {true, false, true, fetched_eq_log_lt_stored, deviationThresholdExceeded}, + {true, false, true, log_lt_fetched_eq_stored, deviationThresholdExceeded}, + {true, false, true, stored_lt_fetched_lt_log, deviationThresholdNotExceeded}, + {true, false, true, stored_lt_log_lt_fetched, deviationThresholdNotExceeded}, + {true, false, true, fetched_lt_stored_lt_log, deviationThresholdNotExceeded}, + {true, false, true, fetched_lt_log_lt_stored, deviationThresholdNotExceeded}, + {true, false, true, log_lt_fetched_lt_stored, deviationThresholdNotExceeded}, + {true, false, true, log_lt_stored_lt_fetched, deviationThresholdNotExceeded}, + {true, false, true, stored_lt_fetched_eq_log, deviationThresholdNotExceeded}, + 
{true, false, true, stored_eq_fetched_lt_log, deviationThresholdNotExceeded}, + {true, false, true, stored_eq_log_lt_fetched, deviationThresholdNotExceeded}, + {true, false, true, fetched_lt_stored_eq_log, deviationThresholdNotExceeded}, + {true, false, true, fetched_eq_log_lt_stored, deviationThresholdNotExceeded}, + {true, false, true, log_lt_fetched_eq_stored, deviationThresholdNotExceeded}, + {true, false, false, stored_lt_fetched_lt_log, deviationThresholdExceeded}, + {true, false, false, stored_lt_log_lt_fetched, deviationThresholdExceeded}, + {true, false, false, fetched_lt_stored_lt_log, deviationThresholdExceeded}, + {true, false, false, fetched_lt_log_lt_stored, deviationThresholdExceeded}, + {true, false, false, log_lt_fetched_lt_stored, deviationThresholdExceeded}, + {true, false, false, log_lt_stored_lt_fetched, deviationThresholdExceeded}, + {true, false, false, stored_lt_fetched_eq_log, deviationThresholdExceeded}, + {true, false, false, stored_eq_fetched_lt_log, deviationThresholdExceeded}, + {true, false, false, stored_eq_log_lt_fetched, deviationThresholdExceeded}, + {true, false, false, fetched_lt_stored_eq_log, deviationThresholdExceeded}, + {true, false, false, fetched_eq_log_lt_stored, deviationThresholdExceeded}, + {true, false, false, log_lt_fetched_eq_stored, deviationThresholdExceeded}, + {true, false, false, stored_lt_fetched_lt_log, deviationThresholdNotExceeded}, + {true, false, false, stored_lt_log_lt_fetched, deviationThresholdNotExceeded}, + {true, false, false, fetched_lt_stored_lt_log, deviationThresholdNotExceeded}, + {true, false, false, fetched_lt_log_lt_stored, deviationThresholdNotExceeded}, + {true, false, false, log_lt_fetched_lt_stored, deviationThresholdNotExceeded}, + {true, false, false, log_lt_stored_lt_fetched, deviationThresholdNotExceeded}, + {true, false, false, stored_lt_fetched_eq_log, deviationThresholdNotExceeded}, + {true, false, false, stored_eq_fetched_lt_log, deviationThresholdNotExceeded}, + {true, false, 
false, stored_eq_log_lt_fetched, deviationThresholdNotExceeded}, + {true, false, false, fetched_lt_stored_eq_log, deviationThresholdNotExceeded}, + {true, false, false, fetched_eq_log_lt_stored, deviationThresholdNotExceeded}, + {true, false, false, log_lt_fetched_eq_stored, deviationThresholdNotExceeded}, + {false, true, true, stored_lt_fetched_lt_log, deviationThresholdExceeded}, + {false, true, true, stored_lt_log_lt_fetched, deviationThresholdExceeded}, + {false, true, true, fetched_lt_stored_lt_log, deviationThresholdExceeded}, + {false, true, true, fetched_lt_log_lt_stored, deviationThresholdExceeded}, + {false, true, true, log_lt_fetched_lt_stored, deviationThresholdExceeded}, + {false, true, true, log_lt_stored_lt_fetched, deviationThresholdExceeded}, + {false, true, true, stored_lt_fetched_eq_log, deviationThresholdExceeded}, + {false, true, true, stored_eq_fetched_lt_log, deviationThresholdExceeded}, + {false, true, true, stored_eq_log_lt_fetched, deviationThresholdExceeded}, + {false, true, true, fetched_lt_stored_eq_log, deviationThresholdExceeded}, + {false, true, true, fetched_eq_log_lt_stored, deviationThresholdExceeded}, + {false, true, true, log_lt_fetched_eq_stored, deviationThresholdExceeded}, + {false, true, true, stored_lt_fetched_lt_log, deviationThresholdNotExceeded}, + {false, true, true, stored_lt_log_lt_fetched, deviationThresholdNotExceeded}, + {false, true, true, fetched_lt_stored_lt_log, deviationThresholdNotExceeded}, + {false, true, true, fetched_lt_log_lt_stored, deviationThresholdNotExceeded}, + {false, true, true, log_lt_fetched_lt_stored, deviationThresholdNotExceeded}, + {false, true, true, log_lt_stored_lt_fetched, deviationThresholdNotExceeded}, + {false, true, true, stored_lt_fetched_eq_log, deviationThresholdNotExceeded}, + {false, true, true, stored_eq_fetched_lt_log, deviationThresholdNotExceeded}, + {false, true, true, stored_eq_log_lt_fetched, deviationThresholdNotExceeded}, + {false, true, true, fetched_lt_stored_eq_log, 
deviationThresholdNotExceeded}, + {false, true, true, fetched_eq_log_lt_stored, deviationThresholdNotExceeded}, + {false, true, true, log_lt_fetched_eq_stored, deviationThresholdNotExceeded}, + {false, true, false, stored_lt_fetched_lt_log, deviationThresholdExceeded}, + {false, true, false, stored_lt_log_lt_fetched, deviationThresholdExceeded}, + {false, true, false, fetched_lt_stored_lt_log, deviationThresholdExceeded}, + {false, true, false, fetched_lt_log_lt_stored, deviationThresholdExceeded}, + {false, true, false, log_lt_fetched_lt_stored, deviationThresholdExceeded}, + {false, true, false, log_lt_stored_lt_fetched, deviationThresholdExceeded}, + {false, true, false, stored_lt_fetched_eq_log, deviationThresholdExceeded}, + {false, true, false, stored_eq_fetched_lt_log, deviationThresholdExceeded}, + {false, true, false, stored_eq_log_lt_fetched, deviationThresholdExceeded}, + {false, true, false, fetched_lt_stored_eq_log, deviationThresholdExceeded}, + {false, true, false, fetched_eq_log_lt_stored, deviationThresholdExceeded}, + {false, true, false, log_lt_fetched_eq_stored, deviationThresholdExceeded}, + {false, true, false, stored_lt_fetched_lt_log, deviationThresholdNotExceeded}, + {false, true, false, stored_lt_log_lt_fetched, deviationThresholdNotExceeded}, + {false, true, false, fetched_lt_stored_lt_log, deviationThresholdNotExceeded}, + {false, true, false, fetched_lt_log_lt_stored, deviationThresholdNotExceeded}, + {false, true, false, log_lt_fetched_lt_stored, deviationThresholdNotExceeded}, + {false, true, false, log_lt_stored_lt_fetched, deviationThresholdNotExceeded}, + {false, true, false, stored_lt_fetched_eq_log, deviationThresholdNotExceeded}, + {false, true, false, stored_eq_fetched_lt_log, deviationThresholdNotExceeded}, + {false, true, false, stored_eq_log_lt_fetched, deviationThresholdNotExceeded}, + {false, true, false, fetched_lt_stored_eq_log, deviationThresholdNotExceeded}, + {false, true, false, fetched_eq_log_lt_stored, 
deviationThresholdNotExceeded}, + {false, true, false, log_lt_fetched_eq_stored, deviationThresholdNotExceeded}, + {false, false, true, stored_lt_fetched_lt_log, deviationThresholdExceeded}, + {false, false, true, stored_lt_log_lt_fetched, deviationThresholdExceeded}, + {false, false, true, fetched_lt_stored_lt_log, deviationThresholdExceeded}, + {false, false, true, fetched_lt_log_lt_stored, deviationThresholdExceeded}, + {false, false, true, log_lt_fetched_lt_stored, deviationThresholdExceeded}, + {false, false, true, log_lt_stored_lt_fetched, deviationThresholdExceeded}, + {false, false, true, stored_lt_fetched_eq_log, deviationThresholdExceeded}, + {false, false, true, stored_eq_fetched_lt_log, deviationThresholdExceeded}, + {false, false, true, stored_eq_log_lt_fetched, deviationThresholdExceeded}, + {false, false, true, fetched_lt_stored_eq_log, deviationThresholdExceeded}, + {false, false, true, fetched_eq_log_lt_stored, deviationThresholdExceeded}, + {false, false, true, log_lt_fetched_eq_stored, deviationThresholdExceeded}, + {false, false, true, stored_lt_fetched_lt_log, deviationThresholdNotExceeded}, + {false, false, true, stored_lt_log_lt_fetched, deviationThresholdNotExceeded}, + {false, false, true, fetched_lt_stored_lt_log, deviationThresholdNotExceeded}, + {false, false, true, fetched_lt_log_lt_stored, deviationThresholdNotExceeded}, + {false, false, true, log_lt_fetched_lt_stored, deviationThresholdNotExceeded}, + {false, false, true, log_lt_stored_lt_fetched, deviationThresholdNotExceeded}, + {false, false, true, stored_lt_fetched_eq_log, deviationThresholdNotExceeded}, + {false, false, true, stored_eq_fetched_lt_log, deviationThresholdNotExceeded}, + {false, false, true, stored_eq_log_lt_fetched, deviationThresholdNotExceeded}, + {false, false, true, fetched_lt_stored_eq_log, deviationThresholdNotExceeded}, + {false, false, true, fetched_eq_log_lt_stored, deviationThresholdNotExceeded}, + {false, false, true, log_lt_fetched_eq_stored, 
deviationThresholdNotExceeded}, + {false, false, false, stored_lt_fetched_lt_log, deviationThresholdExceeded}, + {false, false, false, stored_lt_log_lt_fetched, deviationThresholdExceeded}, + {false, false, false, fetched_lt_stored_lt_log, deviationThresholdExceeded}, + {false, false, false, fetched_lt_log_lt_stored, deviationThresholdExceeded}, + {false, false, false, log_lt_fetched_lt_stored, deviationThresholdExceeded}, + {false, false, false, log_lt_stored_lt_fetched, deviationThresholdExceeded}, + {false, false, false, stored_lt_fetched_eq_log, deviationThresholdExceeded}, + {false, false, false, stored_eq_fetched_lt_log, deviationThresholdExceeded}, + {false, false, false, stored_eq_log_lt_fetched, deviationThresholdExceeded}, + {false, false, false, fetched_lt_stored_eq_log, deviationThresholdExceeded}, + {false, false, false, fetched_eq_log_lt_stored, deviationThresholdExceeded}, + {false, false, false, log_lt_fetched_eq_stored, deviationThresholdExceeded}, + {false, false, false, stored_lt_fetched_lt_log, deviationThresholdNotExceeded}, + {false, false, false, stored_lt_log_lt_fetched, deviationThresholdNotExceeded}, + {false, false, false, fetched_lt_stored_lt_log, deviationThresholdNotExceeded}, + {false, false, false, fetched_lt_log_lt_stored, deviationThresholdNotExceeded}, + {false, false, false, log_lt_fetched_lt_stored, deviationThresholdNotExceeded}, + {false, false, false, log_lt_stored_lt_fetched, deviationThresholdNotExceeded}, + {false, false, false, stored_lt_fetched_eq_log, deviationThresholdNotExceeded}, + {false, false, false, stored_eq_fetched_lt_log, deviationThresholdNotExceeded}, + {false, false, false, stored_eq_log_lt_fetched, deviationThresholdNotExceeded}, + {false, false, false, fetched_lt_stored_eq_log, deviationThresholdNotExceeded}, + {false, false, false, fetched_eq_log_lt_stored, deviationThresholdNotExceeded}, + {false, false, false, log_lt_fetched_eq_stored, deviationThresholdNotExceeded}, + } - // Cancel parent context and 
ensure Start() stops. - cancel() - cltest.CallbackOrTimeout(t, "Start() unblocks and is done", func() { - <-done - }) -} + for _, test := range tests { + name := test.answerCase.name + ", " + test.roundIDCase.name + if test.eligible { + name += ", eligible" + } else { + name += ", ineligible" + } + if test.startedBySelf { + name += ", started by self" + } else { + name += ", started by other" + } + if test.funded { + name += ", funded" + } else { + name += ", underfunded" + } + + t.Run(name, func(t *testing.T) { + store, cleanup := cltest.NewStore(t) + defer cleanup() + + nodeAddr := ensureAccount(t, store) + + expectedToFetchRoundState := !test.startedBySelf + expectedToPoll := expectedToFetchRoundState && test.eligible && test.funded && test.logRoundID >= int64(test.fetchedReportableRoundID) + expectedToSubmit := expectedToPoll + + job := cltest.NewJobWithFluxMonitorInitiator() + initr := job.Initiators[0] + initr.ID = 1 + initr.InitiatorParams.PollingInterval = models.Duration(1 * time.Hour) + + rm := new(mocks.RunManager) + fetcher := new(mocks.Fetcher) + fluxAggregator := new(mocks.FluxAggregator) + + var availableFunds *big.Int + var paymentAmount *big.Int + minPayment := store.Config.MinimumContractPayment().ToInt() + if test.funded { + availableFunds = minPayment + paymentAmount = minPayment + } else { + availableFunds = big.NewInt(1) + paymentAmount = minPayment + } -func TestPollingDeviationChecker_StopWithoutStart(t *testing.T) { - store, cleanup := cltest.NewStore(t) - defer cleanup() + if expectedToFetchRoundState { + fluxAggregator.On("RoundState", nodeAddr).Return(contracts.FluxAggregatorRoundState{ + ReportableRoundID: test.fetchedReportableRoundID, + LatestAnswer: big.NewInt(test.latestAnswer * int64(math.Pow10(int(initr.InitiatorParams.Precision)))), + EligibleToSubmit: test.eligible, + AvailableFunds: availableFunds, + PaymentAmount: paymentAmount, + }, nil).Once() + } - rm := new(mocks.RunManager) - job := cltest.NewJobWithFluxMonitorInitiator() 
- initr := job.Initiators[0] - initr.ID = 1 + if expectedToPoll { + fetcher.On("Fetch").Return(decimal.NewFromInt(test.polledAnswer), nil).Once() + } - checker, err := fluxmonitor.NewPollingDeviationChecker(store, initr, rm, nil, time.Second) - require.NoError(t, err) - checker.Stop() -} + if expectedToSubmit { + fluxAggregator.On("GetMethodID", "updateAnswer").Return(updateAnswerSelector, nil) -func TestPollingDeviationChecker_RespondToNewRound_Ignore(t *testing.T) { - store, cleanup := cltest.NewStore(t) - defer cleanup() + data, err := models.ParseJSON([]byte(fmt.Sprintf(`{ + "result": "%d", + "address": "%s", + "functionSelector": "0xe6330cf7", + "dataPrefix": "0x%0x" + }`, test.polledAnswer, initr.InitiatorParams.Address.Hex(), utils.EVMWordUint64(uint64(test.fetchedReportableRoundID))))) + require.NoError(t, err) - currentRound := int64(5) + rm.On("Create", mock.Anything, mock.Anything, mock.Anything, mock.MatchedBy(func(runRequest *models.RunRequest) bool { + return reflect.DeepEqual(runRequest.RequestParams.Result.Value(), data.Result.Value()) + })).Return(nil, nil) + } - // Prepare on-chain initialization to 100 - job := cltest.NewJobWithFluxMonitorInitiator() - initr := job.Initiators[0] - initr.ID = 1 + checker, err := fluxmonitor.NewPollingDeviationChecker(store, fluxAggregator, initr, rm, fetcher, time.Hour) + require.NoError(t, err) - ethClient := new(mocks.Client) - ethClient.On("GetAggregatorPrice", initr.InitiatorParams.Address, initr.InitiatorParams.Precision). - Return(decimal.NewFromInt(100), nil) - ethClient.On("GetAggregatorRound", initr.InitiatorParams.Address). 
- Return(big.NewInt(currentRound), nil) + checker.ExportedSetStoredReportableRoundID(test.storedReportableRoundID) - // Initialize - rm := new(mocks.RunManager) - fetcher := new(mocks.Fetcher) - checker, err := fluxmonitor.NewPollingDeviationChecker(store, initr, rm, fetcher, time.Minute) - require.NoError(t, err) - require.NoError(t, checker.ExportedFetchAggregatorData(ethClient)) - ethClient.AssertExpectations(t) + checker.OnConnect() - // Send rounds less than or equal to current, sequentially - tests := []struct { - name string - round uint64 - }{ - {"less than", 4}, - {"equal", 5}, - } + var startedBy common.Address + if test.startedBySelf { + startedBy = nodeAddr + } + checker.ExportedRespondToLog(&contracts.LogNewRound{RoundId: big.NewInt(test.logRoundID), StartedBy: startedBy}) - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - log := cltest.LogFromFixture(t, "../testdata/new_round_log.json") - log.Topics[models.NewRoundTopicRoundID] = common.BytesToHash(utils.EVMWordUint64(test.round)) - require.NoError(t, checker.ExportedRespondToNewRound(log)) - rm.AssertNotCalled(t, "Create", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + fluxAggregator.AssertExpectations(t) + fetcher.AssertExpectations(t) + rm.AssertExpectations(t) }) } } -func TestPollingDeviationChecker_RespondToNewRound_Respond(t *testing.T) { - store, cleanup := cltest.NewStore(t) - defer cleanup() - - currentRound := int64(5) - - // Prepare on-chain initialization to 100, which matches external adapter, - // so no deviation - job := cltest.NewJobWithFluxMonitorInitiator() - initr := job.Initiators[0] - initr.ID = 1 - - ethClient := new(mocks.Client) - ethClient.On("GetAggregatorPrice", initr.InitiatorParams.Address, initr.InitiatorParams.Precision). - Return(decimal.NewFromInt(100), nil) - ethClient.On("GetAggregatorRound", initr.InitiatorParams.Address). 
- Return(big.NewInt(currentRound), nil) - - // Initialize - rm := new(mocks.RunManager) - fetcher := new(mocks.Fetcher) - checker, err := fluxmonitor.NewPollingDeviationChecker(store, initr, rm, fetcher, time.Minute) - require.NoError(t, err) - require.NoError(t, checker.ExportedFetchAggregatorData(ethClient)) - ethClient.AssertExpectations(t) - - // Send log greater than current - data, err := models.ParseJSON([]byte(fmt.Sprintf(`{ - "result": "100", - "address": "%s", - "functionSelector": "0xe6330cf7", - "dataPrefix": "0x0000000000000000000000000000000000000000000000000000000000000006" - }`, initr.InitiatorParams.Address.Hex()))) // dataPrefix has currentRound + 1 - require.NoError(t, err) - - // Set up fetcher for 100; even if within deviation, forces the creation of run. - fetcher.On("Fetch").Return(decimal.NewFromFloat(100.0), nil).Maybe() - - rm.On("Create", mock.Anything, mock.Anything, mock.Anything, mock.MatchedBy(func(runRequest *models.RunRequest) bool { - return runRequest.RequestParams == data - })).Return(nil, nil) // only round 6 triggers run. 
- - log := cltest.LogFromFixture(t, "../testdata/new_round_log.json") - log.Topics[models.NewRoundTopicRoundID] = common.BytesToHash(utils.EVMWordUint64(6)) - require.NoError(t, checker.ExportedRespondToNewRound(log)) - fetcher.AssertExpectations(t) - rm.AssertExpectations(t) -} - func TestOutsideDeviation(t *testing.T) { tests := []struct { name string @@ -519,11 +734,22 @@ func TestOutsideDeviation(t *testing.T) { threshold float64 // in percentage expectation bool }{ - {"0 current price", decimal.NewFromInt(0), decimal.NewFromInt(100), 2, true}, + {"0 current price, outside deviation", decimal.NewFromInt(0), decimal.NewFromInt(100), 2, true}, + {"0 current price, inside deviation", decimal.NewFromInt(0), decimal.NewFromInt(1), 2, true}, + {"0 current and next price", decimal.NewFromInt(0), decimal.NewFromInt(0), 2, false}, + {"inside deviation", decimal.NewFromInt(100), decimal.NewFromInt(101), 2, false}, {"equal to deviation", decimal.NewFromInt(100), decimal.NewFromInt(102), 2, true}, {"outside deviation", decimal.NewFromInt(100), decimal.NewFromInt(103), 2, true}, {"outside deviation zero", decimal.NewFromInt(100), decimal.NewFromInt(0), 2, true}, + + {"inside deviation, crosses 0 backwards", decimal.NewFromFloat(0.1), decimal.NewFromFloat(-0.1), 201, false}, + {"equal to deviation, crosses 0 backwards", decimal.NewFromFloat(0.1), decimal.NewFromFloat(-0.1), 200, true}, + {"outside deviation, crosses 0 backwards", decimal.NewFromFloat(0.1), decimal.NewFromFloat(-0.1), 199, true}, + + {"inside deviation, crosses 0 forwards", decimal.NewFromFloat(-0.1), decimal.NewFromFloat(0.1), 201, false}, + {"equal to deviation, crosses 0 forwards", decimal.NewFromFloat(-0.1), decimal.NewFromFloat(0.1), 200, true}, + {"outside deviation, crosses 0 forwards", decimal.NewFromFloat(-0.1), decimal.NewFromFloat(0.1), 199, true}, } for _, test := range tests { @@ -591,82 +817,3 @@ func TestExtractFeedURLs(t *testing.T) { }) } } - -func 
TestPollingDeviationChecker_PollIfRoundOpen(t *testing.T) { - store, cleanup := cltest.NewStore(t) - defer cleanup() - - auth := cmd.TerminalKeyStoreAuthenticator{Prompter: &cltest.MockCountingPrompter{T: t}} - _, err := auth.Authenticate(store, "somepassword") - assert.NoError(t, err) - assert.True(t, store.KeyStore.HasAccounts()) - - job := cltest.NewJobWithFluxMonitorInitiator() - initr := job.Initiators[0] - initr.ID = 1 - initr.PollingInterval = models.Duration(5 * time.Millisecond) - initr.IdleThreshold = models.Duration(time.Hour) // long enough to prevent running during test - - jobRun := cltest.NewJobRun(job) - - tests := []struct { - name string - aggregatorRound int64 - latestRoundAnswered int64 - shouldUpdate bool - }{ - {"much less than", 1, 10, false}, - {"less than", 1, 2, false}, - {"equal", 1, 1, true}, - {"greater than", 2, 1, true}, - {"much greater than", 10, 1, true}, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - - runManager := new(mocks.RunManager) - - jobRunCreated := make(chan struct{}, 100) - runManager.On("Create", job.ID, mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(&jobRun, nil). - Run(func(args mock.Arguments) { - jobRunCreated <- struct{}{} - }) - - fetcher := successFetcher(decimal.NewFromInt(200)) - deviationChecker, err := fluxmonitor.NewPollingDeviationChecker( - store, - initr, - runManager, - &fetcher, - time.Second, - ) - require.NoError(t, err) - - ethClient := new(mocks.Client) - ethClient.On("GetAggregatorPrice", initr.InitiatorParams.Address, initr.InitiatorParams.Precision). - Return(decimal.NewFromInt(100), nil) - ethClient.On("GetAggregatorRound", initr.InitiatorParams.Address). - Return(big.NewInt(test.aggregatorRound), nil) - ethClient.On("SubscribeToLogs", mock.Anything, mock.Anything). - Return(fakeSubscription(), nil) - ethClient.On("GetLatestSubmission", mock.Anything, mock.Anything). 
- Return(big.NewInt(0), big.NewInt(test.latestRoundAnswered), nil) - - err = deviationChecker.Start(context.Background(), ethClient) - require.NoError(t, err) - require.Len(t, jobRunCreated, 1, "initial job run") - fetcher = successFetcher(decimal.NewFromInt(300)) - - if test.shouldUpdate { - require.Eventually(t, func() bool { return len(jobRunCreated) == 2 }, 2*time.Second, time.Millisecond, "pollIfRoundOpen triggers Job Run") - } else { - time.Sleep(2 * time.Second) - require.Len(t, jobRunCreated, 1, "no Job Runs created") - } - - deviationChecker.Stop() - }) - } -} diff --git a/core/services/fluxmonitor/helpers_test.go b/core/services/fluxmonitor/helpers_test.go index e8e0d2015ff..dbe15888dd1 100644 --- a/core/services/fluxmonitor/helpers_test.go +++ b/core/services/fluxmonitor/helpers_test.go @@ -1,7 +1,6 @@ package fluxmonitor import ( - "chainlink/core/eth" "encoding/json" "fmt" "io/ioutil" @@ -20,30 +19,16 @@ func ExportedSetCheckerFactory(fm Service, fac DeviationCheckerFactory) { impl.checkerFactory = fac } -func (p *PollingDeviationChecker) ExportedFetchAggregatorData(client eth.Client) error { - return p.fetchAggregatorData(client) +func (p *PollingDeviationChecker) ExportedPollIfEligible(threshold float64) bool { + return p.pollIfEligible(threshold) } -func (p *PollingDeviationChecker) ExportedRespondToNewRound(log eth.Log) error { - return p.respondToNewRound(log) +func (p *PollingDeviationChecker) ExportedSetStoredReportableRoundID(roundID *big.Int) { + p.reportableRoundID = roundID } -func (p *PollingDeviationChecker) ExportedPoll() (bool, error) { - return p.poll(p.threshold) -} - -// ExportedCurrentPrice returns the private current price for assertions; -// technically thread unsafe because it can be set in parallel from -// the CSP consumer. 
-func (p *PollingDeviationChecker) ExportedCurrentPrice() decimal.Decimal { - return p.currentPrice -} - -// ExportedCurrentRound returns the private current round for assertions; -// technically thread unsafe because it can be set in parallel from -// the CSP consumer. -func (p *PollingDeviationChecker) ExportedCurrentRound() *big.Int { - return new(big.Int).Set(p.currentRound) +func (p *PollingDeviationChecker) ExportedRespondToLog(log interface{}) { + p.respondToLog(log) } func mustReadFile(t testing.TB, file string) string { diff --git a/core/services/fluxmonitor/prometheus.go b/core/services/fluxmonitor/prometheus.go index 5895895f398..5ea2ac78ca3 100644 --- a/core/services/fluxmonitor/prometheus.go +++ b/core/services/fluxmonitor/prometheus.go @@ -18,6 +18,13 @@ var ( }, []string{"job_spec_id"}, ) + promFMIndividualReportedValue = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "flux_monitor_individual_reported_value", + Help: "Flux monitor's last reported price for each individual endpoint", + }, + []string{"url"}, + ) promFMSeenValue = promauto.NewGaugeVec( prometheus.GaugeOpts{ Name: "flux_monitor_seen_value", diff --git a/core/services/head_tracker.go b/core/services/head_tracker.go index be85a0464b7..d4ed002cabb 100644 --- a/core/services/head_tracker.go +++ b/core/services/head_tracker.go @@ -1,6 +1,7 @@ package services import ( + "context" "fmt" "sync" "time" @@ -246,8 +247,9 @@ func (ht *HeadTracker) subscribeToHead() error { ht.headMutex.Lock() defer ht.headMutex.Unlock() + ctx := context.Background() ht.headers = make(chan eth.BlockHeader) - sub, err := ht.store.TxManager.SubscribeToNewHeads(ht.headers) + sub, err := ht.store.TxManager.SubscribeToNewHeads(ctx, ht.headers) if err != nil { return errors.Wrap(err, "TxManager#SubscribeToNewHeads") } diff --git a/core/services/head_tracker_test.go b/core/services/head_tracker_test.go index 6b3e5e95b2e..b398b65fc36 100644 --- a/core/services/head_tracker_test.go +++ 
b/core/services/head_tracker_test.go @@ -106,7 +106,7 @@ func TestHeadTracker_Start_NewHeads(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() - eth := cltest.MockEthOnStore(t, store, cltest.EthMockRegisterChainID) + eth := cltest.MockEthOnStore(t, store, cltest.EthMockRegisterChainID, cltest.NoRegisterGetBlockNumber) ht := services.NewHeadTracker(store, []strpkg.HeadTrackable{}) defer ht.Stop() @@ -157,9 +157,9 @@ func TestHeadTracker_ReconnectOnError(t *testing.T) { txManager := new(mocks.TxManager) subscription := cltest.EmptyMockSubscription() txManager.On("GetChainID").Maybe().Return(store.Config.ChainID(), nil) - txManager.On("SubscribeToNewHeads", mock.Anything).Return(subscription, nil) - txManager.On("SubscribeToNewHeads", mock.Anything).Return(nil, errors.New("cannot reconnect")) - txManager.On("SubscribeToNewHeads", mock.Anything).Return(subscription, nil) + txManager.On("SubscribeToNewHeads", mock.Anything, mock.Anything, mock.Anything).Return(subscription, nil) + txManager.On("SubscribeToNewHeads", mock.Anything, mock.Anything).Return(nil, errors.New("cannot reconnect")) + txManager.On("SubscribeToNewHeads", mock.Anything, mock.Anything).Return(subscription, nil) store.TxManager = txManager checker := &cltest.MockHeadTrackable{} diff --git a/core/services/internal/mocks/application.go b/core/services/internal/mocks/application.go new file mode 100644 index 00000000000..e7ffe0dc41d --- /dev/null +++ b/core/services/internal/mocks/application.go @@ -0,0 +1,268 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
+ +package mocks + +import ( + big "math/big" + + mock "github.com/stretchr/testify/mock" + + models "chainlink/core/store/models" + + packr "github.com/gobuffalo/packr" + + store "chainlink/core/store" + + synchronization "chainlink/core/services/synchronization" +) + +// Application is an autogenerated mock type for the Application type +type Application struct { + mock.Mock +} + +// AddJob provides a mock function with given fields: job +func (_m *Application) AddJob(job models.JobSpec) error { + ret := _m.Called(job) + + var r0 error + if rf, ok := ret.Get(0).(func(models.JobSpec) error); ok { + r0 = rf(job) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddServiceAgreement provides a mock function with given fields: _a0 +func (_m *Application) AddServiceAgreement(_a0 *models.ServiceAgreement) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(*models.ServiceAgreement) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ArchiveJob provides a mock function with given fields: _a0 +func (_m *Application) ArchiveJob(_a0 *models.ID) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(*models.ID) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Cancel provides a mock function with given fields: runID +func (_m *Application) Cancel(runID *models.ID) (*models.JobRun, error) { + ret := _m.Called(runID) + + var r0 *models.JobRun + if rf, ok := ret.Get(0).(func(*models.ID) *models.JobRun); ok { + r0 = rf(runID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.JobRun) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(*models.ID) error); ok { + r1 = rf(runID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Create provides a mock function with given fields: jobSpecID, initiator, creationHeight, runRequest +func (_m *Application) Create(jobSpecID *models.ID, initiator *models.Initiator, creationHeight 
*big.Int, runRequest *models.RunRequest) (*models.JobRun, error) { + ret := _m.Called(jobSpecID, initiator, creationHeight, runRequest) + + var r0 *models.JobRun + if rf, ok := ret.Get(0).(func(*models.ID, *models.Initiator, *big.Int, *models.RunRequest) *models.JobRun); ok { + r0 = rf(jobSpecID, initiator, creationHeight, runRequest) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.JobRun) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(*models.ID, *models.Initiator, *big.Int, *models.RunRequest) error); ok { + r1 = rf(jobSpecID, initiator, creationHeight, runRequest) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CreateErrored provides a mock function with given fields: jobSpecID, initiator, err +func (_m *Application) CreateErrored(jobSpecID *models.ID, initiator models.Initiator, err error) (*models.JobRun, error) { + ret := _m.Called(jobSpecID, initiator, err) + + var r0 *models.JobRun + if rf, ok := ret.Get(0).(func(*models.ID, models.Initiator, error) *models.JobRun); ok { + r0 = rf(jobSpecID, initiator, err) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*models.JobRun) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(*models.ID, models.Initiator, error) error); ok { + r1 = rf(jobSpecID, initiator, err) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetStatsPusher provides a mock function with given fields: +func (_m *Application) GetStatsPusher() synchronization.StatsPusher { + ret := _m.Called() + + var r0 synchronization.StatsPusher + if rf, ok := ret.Get(0).(func() synchronization.StatsPusher); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(synchronization.StatsPusher) + } + } + + return r0 +} + +// GetStore provides a mock function with given fields: +func (_m *Application) GetStore() *store.Store { + ret := _m.Called() + + var r0 *store.Store + if rf, ok := ret.Get(0).(func() *store.Store); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*store.Store) + } + } + + return r0 +} + +// NewBox provides a mock function with given fields: +func (_m *Application) NewBox() packr.Box { + ret := _m.Called() + + var r0 packr.Box + if rf, ok := ret.Get(0).(func() packr.Box); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(packr.Box) + } + + return r0 +} + +// ResumeAllConfirming provides a mock function with given fields: currentBlockHeight +func (_m *Application) ResumeAllConfirming(currentBlockHeight *big.Int) error { + ret := _m.Called(currentBlockHeight) + + var r0 error + if rf, ok := ret.Get(0).(func(*big.Int) error); ok { + r0 = rf(currentBlockHeight) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ResumeAllConnecting provides a mock function with given fields: +func (_m *Application) ResumeAllConnecting() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ResumeAllInProgress provides a mock function with given fields: +func (_m *Application) ResumeAllInProgress() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ResumePending provides a mock function with given fields: runID, input +func (_m *Application) ResumePending(runID *models.ID, input models.BridgeRunResult) error { + ret := _m.Called(runID, input) + + var r0 error + if rf, ok := ret.Get(0).(func(*models.ID, models.BridgeRunResult) error); ok { + r0 = rf(runID, input) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Start provides a mock function with given fields: +func (_m *Application) Start() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Stop provides a mock function with given fields: +func (_m *Application) Stop() error { + ret := _m.Called() + + var r0 error + if rf, ok := 
ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// WakeSessionReaper provides a mock function with given fields: +func (_m *Application) WakeSessionReaper() { + _m.Called() +} diff --git a/core/services/job_subscriber_test.go b/core/services/job_subscriber_test.go index 3b9472eb5f4..7716addf2ca 100644 --- a/core/services/job_subscriber_test.go +++ b/core/services/job_subscriber_test.go @@ -140,7 +140,7 @@ func TestJobSubscriber_Connect_Disconnect(t *testing.T) { jobSubscriber := services.NewJobSubscriber(store, runManager) defer jobSubscriber.Stop() - eth := cltest.MockEthOnStore(t, store) + eth := cltest.MockEthOnStore(t, store, cltest.NoRegisterGetBlockNumber) eth.Register("eth_getLogs", []ethpkg.Log{}) eth.Register("eth_getLogs", []ethpkg.Log{}) diff --git a/core/services/run_executor.go b/core/services/run_executor.go index d375a66d342..e6a784b6f70 100644 --- a/core/services/run_executor.go +++ b/core/services/run_executor.go @@ -12,6 +12,17 @@ import ( "chainlink/core/store/orm" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var ( + promAdapterCallsVec = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "adapter_perform_complete_total", + Help: "The total number of adapters which have completed", + }, + []string{"job_spec_id", "task_type", "status"}, + ) ) //go:generate mockery -name RunExecutor -output ../internal/mocks/ -case=underscore @@ -121,5 +132,7 @@ func (re *runExecutor) executeTask(run *models.JobRun, taskRun *models.TaskRun) input := *models.NewRunInput(run.ID, data, taskRun.Status) result := adapter.Perform(input, re.store) + promAdapterCallsVec.WithLabelValues(run.JobSpecID.String(), string(adapter.TaskType()), string(result.Status())).Inc() + return result } diff --git a/core/services/run_manager_test.go b/core/services/run_manager_test.go index ceefb3c3545..ef9f77bd4fd 100644 --- 
a/core/services/run_manager_test.go +++ b/core/services/run_manager_test.go @@ -18,6 +18,7 @@ import ( "chainlink/core/utils" "github.com/ethereum/go-ethereum/common" + "github.com/onsi/gomega" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -607,11 +608,11 @@ func TestRunManager_Create_fromRunLogPayments(t *testing.T) { t.Run(test.name, func(t *testing.T) { config, configCleanup := cltest.NewConfig(t) defer configCleanup() + config.Set("DATABASE_TIMEOUT", "10s") // Lots of parallelized tests config.Set("MINIMUM_CONTRACT_PAYMENT", test.configMinimumPayment) - app, cleanup := cltest.NewApplicationWithConfig(t, config, cltest.EthMockRegisterChainID) - defer cleanup() - app.StartAndConnect() + store, storeCleanup := cltest.NewStoreWithConfig(config) + defer storeCleanup() bt := &models.BridgeType{ Name: models.MustNewTaskType("expensiveBridge"), @@ -619,7 +620,7 @@ func TestRunManager_Create_fromRunLogPayments(t *testing.T) { Confirmations: 0, MinimumContractPayment: test.bridgePayment, } - require.NoError(t, app.Store.CreateBridgeType(bt)) + require.NoError(t, store.CreateBridgeType(bt)) job := cltest.NewJobWithRunLogInitiator() job.MinPayment = test.jobMinimumPayment @@ -627,7 +628,7 @@ func TestRunManager_Create_fromRunLogPayments(t *testing.T) { cltest.NewTask(t, "NoOp"), cltest.NewTask(t, bt.Name.String()), } - require.NoError(t, app.Store.CreateJob(&job)) + require.NoError(t, store.CreateJob(&job)) initiator := job.Initiators[0] creationHeight := big.NewInt(1) @@ -636,10 +637,21 @@ func TestRunManager_Create_fromRunLogPayments(t *testing.T) { runRequest.Payment = test.inputPayment runRequest.RequestParams = cltest.JSONFromString(t, `{"random": "input"}`) - run, err := app.RunManager.Create(job.ID, &initiator, creationHeight, runRequest) + pusher := new(mocks.StatsPusher) + pusher.On("PushNow").Return(nil) + + runQueue := new(mocks.RunQueue) + runQueue.On("Run", mock.Anything).Return(nil) + + 
runManager := services.NewRunManager(runQueue, store.Config, store.ORM, pusher, store.TxManager, store.Clock) + run, err := runManager.Create(job.ID, &initiator, creationHeight, runRequest) require.NoError(t, err) - assert.Equal(t, test.jobStatus, run.Status) + runManager.ResumeAllConfirming(big.NewInt(3821)) + + gomega.NewGomegaWithT(t).Eventually(func() models.RunStatus { + return run.Status + }).Should(gomega.Equal(test.jobStatus)) }) } } diff --git a/core/services/run_queue.go b/core/services/run_queue.go index d0d8e6c8b1d..56d09bc99cd 100644 --- a/core/services/run_queue.go +++ b/core/services/run_queue.go @@ -63,6 +63,31 @@ func (rq *runQueue) Stop() { rq.workersWg.Wait() } +func (rq *runQueue) incrementQueue(runID string) bool { + defer rq.workersMutex.Unlock() + rq.workersMutex.Lock() + numberRunsQueued.Inc() + + wasEmpty := rq.workers[runID] == 0 + rq.workers[runID]++ + numberRunQueueWorkers.Set(float64(len(rq.workers))) + return wasEmpty +} + +func (rq *runQueue) decrementQueue(runID string) bool { + defer rq.workersMutex.Unlock() + rq.workersMutex.Lock() + + rq.workers[runID]-- + isEmpty := rq.workers[runID] <= 0 + if isEmpty { + delete(rq.workers, runID) + } + + numberRunQueueWorkers.Set(float64(len(rq.workers))) + return isEmpty +} + // Run tells the job runner to start executing a job func (rq *runQueue) Run(run *models.JobRun) { rq.workersMutex.Lock() @@ -70,38 +95,26 @@ func (rq *runQueue) Run(run *models.JobRun) { rq.workersMutex.Unlock() return } + rq.workersMutex.Unlock() runID := run.ID.String() - defer numberRunsQueued.Inc() - if queueCount, present := rq.workers[runID]; present { - rq.workers[runID] = queueCount + 1 - rq.workersMutex.Unlock() + if !rq.incrementQueue(runID) { return } - rq.workers[runID] = 1 - numberRunQueueWorkers.Set(float64(len(rq.workers))) - rq.workersMutex.Unlock() rq.workersWg.Add(1) go func() { - for { - rq.workersMutex.Lock() - queueCount := rq.workers[runID] - if queueCount <= 0 { - delete(rq.workers, runID) - 
numberRunQueueWorkers.Set(float64(len(rq.workers))) - rq.workersMutex.Unlock() - break - } - rq.workers[runID] = queueCount - 1 - rq.workersMutex.Unlock() + defer rq.workersWg.Done() + for { if err := rq.runExecutor.Execute(run.ID); err != nil { logger.Errorw(fmt.Sprint("Error executing run ", runID), "error", err) } - } - rq.workersWg.Done() + if rq.decrementQueue(runID) { + return + } + } }() } diff --git a/core/services/signatures/secp256k1/scalar.go b/core/services/signatures/secp256k1/scalar.go index 21eda0808ef..599ba10aa39 100644 --- a/core/services/signatures/secp256k1/scalar.go +++ b/core/services/signatures/secp256k1/scalar.go @@ -18,6 +18,7 @@ import ( "math/big" secp256k1BTCD "github.com/btcsuite/btcd/btcec" + "github.com/ethereum/go-ethereum/common" "go.dedis.ch/kyber/v3" "go.dedis.ch/kyber/v3/util/random" @@ -217,6 +218,10 @@ func IntToScalar(i *big.Int) kyber.Scalar { return ((*secp256k1Scalar)(i)).modG() } +func ScalarToHash(s kyber.Scalar) common.Hash { + return common.BigToHash(ToInt(s.(*secp256k1Scalar))) +} + // RepresentsScalar returns true iff i is in the right range to be a scalar func RepresentsScalar(i *big.Int) bool { return i.Cmp(GroupOrder) == -1 diff --git a/core/services/subscription.go b/core/services/subscription.go index dab13116dd4..cd611d96573 100644 --- a/core/services/subscription.go +++ b/core/services/subscription.go @@ -1,6 +1,7 @@ package services import ( + "context" "fmt" "math/big" "time" @@ -35,11 +36,7 @@ func StartJobSubscription(job models.JobSpec, head *models.Head, store *strpkg.S var merr error var unsubscribers []Unsubscriber - initrs := job.InitiatorsFor( - models.InitiatorEthLog, - models.InitiatorRunLog, - models.InitiatorServiceAgreementExecutionLog, - ) + initrs := job.InitiatorsFor(models.LogBasedChainlinkJobInitiators...) 
nextHead := head.NextInt() // Exclude current block from subscription if replayFromBlock := store.Config.ReplayFromBlock(); replayFromBlock >= 0 { @@ -128,10 +125,11 @@ func loggerLogListening(initr models.Initiator, blockNumber *big.Int) { logger.Infow(msg, "address", utils.LogListeningAddress(initr.Address), "jobID", initr.JobSpecID.String()) } -// ReceiveLogRequest parses the log and runs the job indicated by a RunLog or -// ServiceAgreementExecutionLog. (Both log events have the same format.) +// ReceiveLogRequest parses the log and runs the job it indicated by its +// GetJobSpecID method func ReceiveLogRequest(runManager RunManager, le models.LogRequest) { if !le.Validate() { + logger.Debugw("discarding INVALID EVENT LOG", "log", le.GetLog()) return } @@ -189,8 +187,9 @@ func NewManagedSubscription( filter ethereum.FilterQuery, callback func(eth.Log), ) (*ManagedSubscription, error) { + ctx := context.Background() logs := make(chan eth.Log) - es, err := logSubscriber.SubscribeToLogs(logs, filter) + es, err := logSubscriber.SubscribeToLogs(ctx, logs, filter) if err != nil { return nil, err } diff --git a/core/services/subscription_test.go b/core/services/subscription_test.go index d94a8c55b4b..f3ee33e85eb 100644 --- a/core/services/subscription_test.go +++ b/core/services/subscription_test.go @@ -29,7 +29,7 @@ func TestServices_NewInitiatorSubscription_BackfillLogs(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() - eth := cltest.MockEthOnStore(t, store) + eth := cltest.MockEthOnStore(t, store, cltest.NoRegisterGetBlockNumber) job := cltest.NewJobWithLogInitiator() initr := job.Initiators[0] @@ -57,7 +57,7 @@ func TestServices_NewInitiatorSubscription_BackfillLogs_WithNoHead(t *testing.T) store, cleanup := cltest.NewStore(t) defer cleanup() - eth := cltest.MockEthOnStore(t, store) + eth := cltest.MockEthOnStore(t, store, cltest.NoRegisterGetBlockNumber) job := cltest.NewJobWithLogInitiator() initr := job.Initiators[0] @@ -79,7 +79,7 @@ func 
TestServices_NewInitiatorSubscription_PreventsDoubleDispatch(t *testing.T) store, cleanup := cltest.NewStore(t) defer cleanup() - eth := cltest.MockEthOnStore(t, store) + eth := cltest.MockEthOnStore(t, store, cltest.NoRegisterGetBlockNumber) job := cltest.NewJobWithLogInitiator() initr := job.Initiators[0] @@ -212,7 +212,7 @@ func TestServices_StartJobSubscription(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() - eth := cltest.MockEthOnStore(t, store) + eth := cltest.MockEthOnStore(t, store, cltest.NoRegisterGetBlockNumber) eth.Register("eth_getLogs", []ethpkg.Log{}) logChan := make(chan ethpkg.Log, 1) eth.RegisterSubscription("logs", logChan) @@ -238,7 +238,7 @@ func TestServices_StartJobSubscription(t *testing.T) { logChan <- ethpkg.Log{ Address: test.logAddr, - Data: test.data, + Data: ethpkg.UntrustedBytes(test.data), Topics: []common.Hash{ test.topic0, models.IDToTopic(job.ID), @@ -280,7 +280,7 @@ func TestServices_StartJobSubscription_RunlogNoTopicMatch(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() - eth := cltest.MockEthOnStore(t, store) + eth := cltest.MockEthOnStore(t, store, cltest.NoRegisterGetBlockNumber) eth.Register("eth_getLogs", []ethpkg.Log{}) logChan := make(chan ethpkg.Log, 1) eth.RegisterSubscription("logs", logChan) @@ -301,7 +301,7 @@ func TestServices_StartJobSubscription_RunlogNoTopicMatch(t *testing.T) { logChan <- ethpkg.Log{ Address: sharedAddr, - Data: test.data, + Data: ethpkg.UntrustedBytes(test.data), Topics: []common.Hash{ common.Hash{}, models.IDToTopic(job.ID), @@ -356,7 +356,7 @@ func TestServices_NewInitiatorSubscription_EthLog_ReplayFromBlock(t *testing.T) log := cltest.LogFromFixture(t, "testdata/subscription_logs.json") - txManager.On("SubscribeToLogs", mock.Anything, expectedQuery).Return(cltest.EmptyMockSubscription(), nil) + txManager.On("SubscribeToLogs", mock.Anything, mock.Anything, expectedQuery).Return(cltest.EmptyMockSubscription(), nil) txManager.On("GetLogs", 
expectedQuery).Return([]ethpkg.Log{log}, nil) executeJobChannel := make(chan struct{}) @@ -412,14 +412,16 @@ func TestServices_NewInitiatorSubscription_RunLog_ReplayFromBlock(t *testing.T) expectedQuery := ethereum.FilterQuery{ FromBlock: test.wantFromBlock, Addresses: []common.Address{initr.InitiatorParams.Address}, - Topics: models.TopicFiltersForRunLog([]common.Hash{models.RunLogTopic20190207withoutIndexes, models.RunLogTopic20190123withFullfillmentParams, models.RunLogTopic0original}, initr.JobSpecID), + Topics: [][]common.Hash{ + models.TopicsForInitiatorsWhichRequireJobSpecIDTopic[models.InitiatorRunLog], + {models.IDToTopic(initr.JobSpecID), models.IDToHexTopic(initr.JobSpecID)}}, } receipt := cltest.TxReceiptFromFixture(t, "../eth/testdata/runlogReceipt.json") log := receipt.Logs[3] log.Topics[1] = models.IDToTopic(job.ID) - txmMock.On("SubscribeToLogs", mock.Anything, expectedQuery).Return(cltest.EmptyMockSubscription(), nil) + txmMock.On("SubscribeToLogs", mock.Anything, mock.Anything, expectedQuery).Return(cltest.EmptyMockSubscription(), nil) txmMock.On("GetLogs", expectedQuery).Return([]ethpkg.Log{log}, nil) executeJobChannel := make(chan struct{}) diff --git a/core/services/testdata/answer_updated_log.json b/core/services/testdata/answer_updated_log.json new file mode 100644 index 00000000000..abb25800c8a --- /dev/null +++ b/core/services/testdata/answer_updated_log.json @@ -0,0 +1,22 @@ +{ + "jsonrpc": "2.0", + "method": "eth_subscription", + "params": { + "subscription": "0x4a8a4c0517381924f9838102c5a4dcb7", + "result": { + "logIndex": "0x0", + "transactionIndex": "0x0", + "transactionHash": "0x420de56323893bced814b83f16a94c8ef7f7b6f1e3920a11ec62733fcf82c730", + "blockHash": "0x5e3bd2cc97a68136cead922330e2ec27201420b3eff182875e388474079fcd9e", + "blockNumber": "0xa", + "address": "0x2fCeA879fDC9FE5e90394faf0CA644a1749d0ad6", + "data": "0x0000000000000000000000000000000000000000000000000000000000000003", + "topics": [ + 
"0x0559884fd3a460db3073b7fc896cc77986f16e378210ded43186175bf646fc5f", + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002" + ], + "type": "mined" + } + } +} diff --git a/core/services/testdata/new_round_log.json b/core/services/testdata/new_round_log.json index 652ca5d7969..5d622fc3c45 100644 --- a/core/services/testdata/new_round_log.json +++ b/core/services/testdata/new_round_log.json @@ -10,9 +10,9 @@ "blockHash": "0x5e3bd2cc97a68136cead922330e2ec27201420b3eff182875e388474079fcd9e", "blockNumber": "0xa", "address": "0x2fCeA879fDC9FE5e90394faf0CA644a1749d0ad6", - "data": "0x", + "data": "0x000000000000000000000000000000000000000000000000000000000000000f", "topics": [ - "0xc3c45d1924f55369653f407ee9f095309d1e687b2c0011b1f709042d4f457e17", + "0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271", "0x0000000000000000000000000000000000000000000000000000000000000001", "0x000000000000000000000000f17f52151ebef6c7334fad080c5704d77216b732" ], diff --git a/core/services/testdata/round_details_updated_log.json b/core/services/testdata/round_details_updated_log.json new file mode 100644 index 00000000000..00b4236b065 --- /dev/null +++ b/core/services/testdata/round_details_updated_log.json @@ -0,0 +1,23 @@ +{ + "jsonrpc": "2.0", + "method": "eth_subscription", + "params": { + "subscription": "0x4a8a4c0517381924f9838102c5a4dcb7", + "result": { + "logIndex": "0x0", + "transactionIndex": "0x0", + "transactionHash": "0x420de56323893bced814b83f16a94c8ef7f7b6f1e3920a11ec62733fcf82c730", + "blockHash": "0x5e3bd2cc97a68136cead922330e2ec27201420b3eff182875e388474079fcd9e", + "blockNumber": "0xa", + "address": "0x2fCeA879fDC9FE5e90394faf0CA644a1749d0ad6", + "data": "0x00000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005", + "topics": [ + "0x56800c9d1ed723511246614d15e58cfcde15b6a33c245b5c961b689c1890fd8f", + 
"0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002", + "0x0000000000000000000000000000000000000000000000000000000000000003" + ], + "type": "mined" + } + } +} diff --git a/core/services/validators.go b/core/services/validators.go index 5592a7fe257..ec4316b3e66 100644 --- a/core/services/validators.go +++ b/core/services/validators.go @@ -9,7 +9,6 @@ import ( "chainlink/core/adapters" "chainlink/core/assets" - "chainlink/core/services/fluxmonitor" "chainlink/core/store" "chainlink/core/store/models" "chainlink/core/store/orm" @@ -112,6 +111,8 @@ func ValidateInitiator(i models.Initiator, j models.JobSpec, store *store.Store) return nil case models.InitiatorEthLog: return nil + case models.InitiatorRandomnessLog: + return validateRandomnessLogInitiator(i, j) default: return models.NewJSONAPIErrorsWith(fmt.Sprintf("type %v does not exist", i.Type)) } @@ -119,6 +120,7 @@ func ValidateInitiator(i models.Initiator, j models.JobSpec, store *store.Store) func validateFluxMonitor(i models.Initiator, j models.JobSpec, store *store.Store) error { fe := models.NewJSONAPIErrors() + minimumPollingInterval := models.Duration(store.Config.DefaultHTTPTimeout()) if i.Address == utils.ZeroAddress { fe.Add("no address") @@ -134,8 +136,8 @@ func validateFluxMonitor(i models.Initiator, j models.JobSpec, store *store.Stor } if i.PollingInterval == 0 { fe.Add("no pollingInterval") - } else if i.PollingInterval < fluxmonitor.MinimumPollingInterval { - fe.Add("pollingInterval must be equal or greater than " + fluxmonitor.MinimumPollingInterval.String()) + } else if i.PollingInterval < minimumPollingInterval { + fe.Add("pollingInterval must be equal or greater than " + minimumPollingInterval.String()) } if err := validateFeeds(i.Feeds, store); err != nil { fe.Add(err.Error()) @@ -237,6 +239,17 @@ func validateServiceAgreementInitiator(i models.Initiator, j models.JobSpec) err return fe.CoerceEmptyToNil() 
} +func validateRandomnessLogInitiator(i models.Initiator, j models.JobSpec) error { + fe := models.NewJSONAPIErrors() + if len(j.Initiators) != 1 { + fe.Add("randomness log must have exactly one initiator") + } + if i.Address == utils.ZeroAddress { + fe.Add("randomness log must specify address of expected emmitter") + } + return fe.CoerceEmptyToNil() +} + func validateTask(task models.TaskSpec, store *store.Store) error { adapter, err := adapters.For(task, store.Config, store.ORM) if !store.Config.Dev() { diff --git a/core/services/vrf/generated/link_token_interface/link_token_interface.go b/core/services/vrf/generated/link_token_interface/link_token_interface.go new file mode 100644 index 00000000000..9604f170ab8 --- /dev/null +++ b/core/services/vrf/generated/link_token_interface/link_token_interface.go @@ -0,0 +1,779 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package link_token_interface + +import ( + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = abi.U256 + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +// LinkTokenABI is the input ABI used to generate the binding from. 
+const LinkTokenABI = "[{\"constant\":true,\"inputs\":[],\"name\":\"name\",\"outputs\":[{\"name\":\"\",\"type\":\"string\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_spender\",\"type\":\"address\"},{\"name\":\"_value\",\"type\":\"uint256\"}],\"name\":\"approve\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"totalSupply\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_from\",\"type\":\"address\"},{\"name\":\"_to\",\"type\":\"address\"},{\"name\":\"_value\",\"type\":\"uint256\"}],\"name\":\"transferFrom\",\"outputs\":[{\"name\":\"\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"decimals\",\"outputs\":[{\"name\":\"\",\"type\":\"uint8\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_to\",\"type\":\"address\"},{\"name\":\"_value\",\"type\":\"uint256\"},{\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"transferAndCall\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_spender\",\"type\":\"address\"},{\"name\":\"_subtractedValue\",\"type\":\"uint256\"}],\"name\":\"decreaseApproval\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_owner\",\"type\":\"address\"}],\"name\":\"balanceOf\",\"outputs\":[{\"name\":\"balance\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"
symbol\",\"outputs\":[{\"name\":\"\",\"type\":\"string\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_to\",\"type\":\"address\"},{\"name\":\"_value\",\"type\":\"uint256\"}],\"name\":\"transfer\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"_spender\",\"type\":\"address\"},{\"name\":\"_addedValue\",\"type\":\"uint256\"}],\"name\":\"increaseApproval\",\"outputs\":[{\"name\":\"success\",\"type\":\"bool\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"_owner\",\"type\":\"address\"},{\"name\":\"_spender\",\"type\":\"address\"}],\"name\":\"allowance\",\"outputs\":[{\"name\":\"remaining\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"from\",\"type\":\"address\"},{\"indexed\":true,\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"Transfer\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":true,\"name\":\"spender\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"Approval\",\"type\":\"event\"}]" + +// LinkTokenBin is the compiled bytecode used for deploying new contracts. 
+var LinkTokenBin = "0x6060604052341561000f57600080fd5b5b600160a060020a03331660009081526001602052604090206b033b2e3c9fd0803ce800000090555b5b610c51806100486000396000f300606060405236156100b75763ffffffff7c010000000000000000000000000000000000000000000000000000000060003504166306fdde0381146100bc578063095ea7b31461014757806318160ddd1461017d57806323b872dd146101a2578063313ce567146101de5780634000aea014610207578063661884631461028057806370a08231146102b657806395d89b41146102e7578063a9059cbb14610372578063d73dd623146103a8578063dd62ed3e146103de575b600080fd5b34156100c757600080fd5b6100cf610415565b60405160208082528190810183818151815260200191508051906020019080838360005b8381101561010c5780820151818401525b6020016100f3565b50505050905090810190601f1680156101395780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015257600080fd5b610169600160a060020a036004351660243561044c565b604051901515815260200160405180910390f35b341561018857600080fd5b610190610499565b60405190815260200160405180910390f35b34156101ad57600080fd5b610169600160a060020a03600435811690602435166044356104a9565b604051901515815260200160405180910390f35b34156101e957600080fd5b6101f16104f8565b60405160ff909116815260200160405180910390f35b341561021257600080fd5b61016960048035600160a060020a03169060248035919060649060443590810190830135806020601f820181900481020160405190810160405281815292919060208401838380828437509496506104fd95505050505050565b604051901515815260200160405180910390f35b341561028b57600080fd5b610169600160a060020a036004351660243561054c565b604051901515815260200160405180910390f35b34156102c157600080fd5b610190600160a060020a0360043516610648565b60405190815260200160405180910390f35b34156102f257600080fd5b6100cf610667565b60405160208082528190810183818151815260200191508051906020019080838360005b8381101561010c5780820151818401525b6020016100f3565b50505050905090810190601f1680156101395780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561037d57600080fd5b610169600160a060020a036004351660243561069e565
b604051901515815260200160405180910390f35b34156103b357600080fd5b610169600160a060020a03600435166024356106eb565b604051901515815260200160405180910390f35b34156103e957600080fd5b610190600160a060020a0360043581169060243516610790565b60405190815260200160405180910390f35b60408051908101604052600f81527f436861696e4c696e6b20546f6b656e0000000000000000000000000000000000602082015281565b600082600160a060020a03811615801590610479575030600160a060020a031681600160a060020a031614155b151561048457600080fd5b61048e84846107bd565b91505b5b5092915050565b6b033b2e3c9fd0803ce800000081565b600082600160a060020a038116158015906104d6575030600160a060020a031681600160a060020a031614155b15156104e157600080fd5b6104ec85858561082a565b91505b5b509392505050565b601281565b600083600160a060020a0381161580159061052a575030600160a060020a031681600160a060020a031614155b151561053557600080fd5b6104ec85858561093c565b91505b5b509392505050565b600160a060020a033381166000908152600260209081526040808320938616835292905290812054808311156105a957600160a060020a0333811660009081526002602090815260408083209388168352929052908120556105e0565b6105b9818463ffffffff610a2316565b600160a060020a033381166000908152600260209081526040808320938916835292905220555b600160a060020a0333811660008181526002602090815260408083209489168084529490915290819020547f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925915190815260200160405180910390a3600191505b5092915050565b600160a060020a0381166000908152600160205260409020545b919050565b60408051908101604052600481527f4c494e4b00000000000000000000000000000000000000000000000000000000602082015281565b600082600160a060020a038116158015906106cb575030600160a060020a031681600160a060020a031614155b15156106d657600080fd5b61048e8484610a3a565b91505b5b5092915050565b600160a060020a033381166000908152600260209081526040808320938616835292905290812054610723908363ffffffff610afa16565b600160a060020a0333811660008181526002602090815260408083209489168084529490915290819020849055919290917f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b9259190519081526
0200160405180910390a35060015b92915050565b600160a060020a038083166000908152600260209081526040808320938516835292905220545b92915050565b600160a060020a03338116600081815260026020908152604080832094871680845294909152808220859055909291907f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b9259085905190815260200160405180910390a35060015b92915050565b600160a060020a03808416600081815260026020908152604080832033909516835293815283822054928252600190529182205461086e908463ffffffff610a2316565b600160a060020a0380871660009081526001602052604080822093909355908616815220546108a3908463ffffffff610afa16565b600160a060020a0385166000908152600160205260409020556108cc818463ffffffff610a2316565b600160a060020a03808716600081815260026020908152604080832033861684529091529081902093909355908616917fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9086905190815260200160405180910390a3600191505b509392505050565b60006109488484610a3a565b5083600160a060020a031633600160a060020a03167fe19260aff97b920c7df27010903aeb9c8d2be5d310a2c67824cf3f15396e4c16858560405182815260406020820181815290820183818151815260200191508051906020019080838360005b838110156109c35780820151818401525b6020016109aa565b50505050905090810190601f1680156109f05780820380516001836020036101000a031916815260200191505b50935050505060405180910390a3610a0784610b14565b15610a1757610a17848484610b23565b5b5060015b9392505050565b600082821115610a2f57fe5b508082035b92915050565b600160a060020a033316600090815260016020526040812054610a63908363ffffffff610a2316565b600160a060020a033381166000908152600160205260408082209390935590851681522054610a98908363ffffffff610afa16565b600160a060020a0380851660008181526001602052604090819020939093559133909116907fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9085905190815260200160405180910390a35060015b92915050565b600082820183811015610b0957fe5b8091505b5092915050565b6000813b908111905b50919050565b82600160a060020a03811663a4c0ed363385856040518463ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040
18084600160a060020a0316600160a060020a0316815260200183815260200180602001828103825283818151815260200191508051906020019080838360005b83811015610bbd5780820151818401525b602001610ba4565b50505050905090810190601f168015610bea5780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b1515610c0a57600080fd5b6102c65a03f11515610c1b57600080fd5b5050505b505050505600a165627a7a72305820c5f438ff94e5ddaf2058efa0019e246c636c37a622e04bb67827c7374acad8d60029" + +// DeployLinkToken deploys a new Ethereum contract, binding an instance of LinkToken to it. +func DeployLinkToken(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *LinkToken, error) { + parsed, err := abi.JSON(strings.NewReader(LinkTokenABI)) + if err != nil { + return common.Address{}, nil, nil, err + } + + address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(LinkTokenBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &LinkToken{LinkTokenCaller: LinkTokenCaller{contract: contract}, LinkTokenTransactor: LinkTokenTransactor{contract: contract}, LinkTokenFilterer: LinkTokenFilterer{contract: contract}}, nil +} + +// LinkToken is an auto generated Go binding around an Ethereum contract. +type LinkToken struct { + LinkTokenCaller // Read-only binding to the contract + LinkTokenTransactor // Write-only binding to the contract + LinkTokenFilterer // Log filterer for contract events +} + +// LinkTokenCaller is an auto generated read-only Go binding around an Ethereum contract. +type LinkTokenCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// LinkTokenTransactor is an auto generated write-only Go binding around an Ethereum contract. 
+type LinkTokenTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// LinkTokenFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type LinkTokenFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// LinkTokenSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type LinkTokenSession struct { + Contract *LinkToken // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// LinkTokenCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type LinkTokenCallerSession struct { + Contract *LinkTokenCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// LinkTokenTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type LinkTokenTransactorSession struct { + Contract *LinkTokenTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// LinkTokenRaw is an auto generated low-level Go binding around an Ethereum contract. +type LinkTokenRaw struct { + Contract *LinkToken // Generic contract binding to access the raw methods on +} + +// LinkTokenCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type LinkTokenCallerRaw struct { + Contract *LinkTokenCaller // Generic read-only contract binding to access the raw methods on +} + +// LinkTokenTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. 
+type LinkTokenTransactorRaw struct { + Contract *LinkTokenTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewLinkToken creates a new instance of LinkToken, bound to a specific deployed contract. +func NewLinkToken(address common.Address, backend bind.ContractBackend) (*LinkToken, error) { + contract, err := bindLinkToken(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &LinkToken{LinkTokenCaller: LinkTokenCaller{contract: contract}, LinkTokenTransactor: LinkTokenTransactor{contract: contract}, LinkTokenFilterer: LinkTokenFilterer{contract: contract}}, nil +} + +// NewLinkTokenCaller creates a new read-only instance of LinkToken, bound to a specific deployed contract. +func NewLinkTokenCaller(address common.Address, caller bind.ContractCaller) (*LinkTokenCaller, error) { + contract, err := bindLinkToken(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &LinkTokenCaller{contract: contract}, nil +} + +// NewLinkTokenTransactor creates a new write-only instance of LinkToken, bound to a specific deployed contract. +func NewLinkTokenTransactor(address common.Address, transactor bind.ContractTransactor) (*LinkTokenTransactor, error) { + contract, err := bindLinkToken(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &LinkTokenTransactor{contract: contract}, nil +} + +// NewLinkTokenFilterer creates a new log filterer instance of LinkToken, bound to a specific deployed contract. +func NewLinkTokenFilterer(address common.Address, filterer bind.ContractFilterer) (*LinkTokenFilterer, error) { + contract, err := bindLinkToken(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &LinkTokenFilterer{contract: contract}, nil +} + +// bindLinkToken binds a generic wrapper to an already deployed contract. 
+func bindLinkToken(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(LinkTokenABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_LinkToken *LinkTokenRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { + return _LinkToken.Contract.LinkTokenCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_LinkToken *LinkTokenRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LinkToken.Contract.LinkTokenTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_LinkToken *LinkTokenRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LinkToken.Contract.LinkTokenTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_LinkToken *LinkTokenCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { + return _LinkToken.Contract.contract.Call(opts, result, method, params...) 
+} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_LinkToken *LinkTokenTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _LinkToken.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_LinkToken *LinkTokenTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _LinkToken.Contract.contract.Transact(opts, method, params...) +} + +// Allowance is a free data retrieval call binding the contract method 0xdd62ed3e. +// +// Solidity: function allowance(address _owner, address _spender) constant returns(uint256 remaining) +func (_LinkToken *LinkTokenCaller) Allowance(opts *bind.CallOpts, _owner common.Address, _spender common.Address) (*big.Int, error) { + var ( + ret0 = new(*big.Int) + ) + out := ret0 + err := _LinkToken.contract.Call(opts, out, "allowance", _owner, _spender) + return *ret0, err +} + +// Allowance is a free data retrieval call binding the contract method 0xdd62ed3e. +// +// Solidity: function allowance(address _owner, address _spender) constant returns(uint256 remaining) +func (_LinkToken *LinkTokenSession) Allowance(_owner common.Address, _spender common.Address) (*big.Int, error) { + return _LinkToken.Contract.Allowance(&_LinkToken.CallOpts, _owner, _spender) +} + +// Allowance is a free data retrieval call binding the contract method 0xdd62ed3e. +// +// Solidity: function allowance(address _owner, address _spender) constant returns(uint256 remaining) +func (_LinkToken *LinkTokenCallerSession) Allowance(_owner common.Address, _spender common.Address) (*big.Int, error) { + return _LinkToken.Contract.Allowance(&_LinkToken.CallOpts, _owner, _spender) +} + +// BalanceOf is a free data retrieval call binding the contract method 0x70a08231. 
+// +// Solidity: function balanceOf(address _owner) constant returns(uint256 balance) +func (_LinkToken *LinkTokenCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) { + var ( + ret0 = new(*big.Int) + ) + out := ret0 + err := _LinkToken.contract.Call(opts, out, "balanceOf", _owner) + return *ret0, err +} + +// BalanceOf is a free data retrieval call binding the contract method 0x70a08231. +// +// Solidity: function balanceOf(address _owner) constant returns(uint256 balance) +func (_LinkToken *LinkTokenSession) BalanceOf(_owner common.Address) (*big.Int, error) { + return _LinkToken.Contract.BalanceOf(&_LinkToken.CallOpts, _owner) +} + +// BalanceOf is a free data retrieval call binding the contract method 0x70a08231. +// +// Solidity: function balanceOf(address _owner) constant returns(uint256 balance) +func (_LinkToken *LinkTokenCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) { + return _LinkToken.Contract.BalanceOf(&_LinkToken.CallOpts, _owner) +} + +// Decimals is a free data retrieval call binding the contract method 0x313ce567. +// +// Solidity: function decimals() constant returns(uint8) +func (_LinkToken *LinkTokenCaller) Decimals(opts *bind.CallOpts) (uint8, error) { + var ( + ret0 = new(uint8) + ) + out := ret0 + err := _LinkToken.contract.Call(opts, out, "decimals") + return *ret0, err +} + +// Decimals is a free data retrieval call binding the contract method 0x313ce567. +// +// Solidity: function decimals() constant returns(uint8) +func (_LinkToken *LinkTokenSession) Decimals() (uint8, error) { + return _LinkToken.Contract.Decimals(&_LinkToken.CallOpts) +} + +// Decimals is a free data retrieval call binding the contract method 0x313ce567. 
+// +// Solidity: function decimals() constant returns(uint8) +func (_LinkToken *LinkTokenCallerSession) Decimals() (uint8, error) { + return _LinkToken.Contract.Decimals(&_LinkToken.CallOpts) +} + +// Name is a free data retrieval call binding the contract method 0x06fdde03. +// +// Solidity: function name() constant returns(string) +func (_LinkToken *LinkTokenCaller) Name(opts *bind.CallOpts) (string, error) { + var ( + ret0 = new(string) + ) + out := ret0 + err := _LinkToken.contract.Call(opts, out, "name") + return *ret0, err +} + +// Name is a free data retrieval call binding the contract method 0x06fdde03. +// +// Solidity: function name() constant returns(string) +func (_LinkToken *LinkTokenSession) Name() (string, error) { + return _LinkToken.Contract.Name(&_LinkToken.CallOpts) +} + +// Name is a free data retrieval call binding the contract method 0x06fdde03. +// +// Solidity: function name() constant returns(string) +func (_LinkToken *LinkTokenCallerSession) Name() (string, error) { + return _LinkToken.Contract.Name(&_LinkToken.CallOpts) +} + +// Symbol is a free data retrieval call binding the contract method 0x95d89b41. +// +// Solidity: function symbol() constant returns(string) +func (_LinkToken *LinkTokenCaller) Symbol(opts *bind.CallOpts) (string, error) { + var ( + ret0 = new(string) + ) + out := ret0 + err := _LinkToken.contract.Call(opts, out, "symbol") + return *ret0, err +} + +// Symbol is a free data retrieval call binding the contract method 0x95d89b41. +// +// Solidity: function symbol() constant returns(string) +func (_LinkToken *LinkTokenSession) Symbol() (string, error) { + return _LinkToken.Contract.Symbol(&_LinkToken.CallOpts) +} + +// Symbol is a free data retrieval call binding the contract method 0x95d89b41. 
+// +// Solidity: function symbol() constant returns(string) +func (_LinkToken *LinkTokenCallerSession) Symbol() (string, error) { + return _LinkToken.Contract.Symbol(&_LinkToken.CallOpts) +} + +// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd. +// +// Solidity: function totalSupply() constant returns(uint256) +func (_LinkToken *LinkTokenCaller) TotalSupply(opts *bind.CallOpts) (*big.Int, error) { + var ( + ret0 = new(*big.Int) + ) + out := ret0 + err := _LinkToken.contract.Call(opts, out, "totalSupply") + return *ret0, err +} + +// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd. +// +// Solidity: function totalSupply() constant returns(uint256) +func (_LinkToken *LinkTokenSession) TotalSupply() (*big.Int, error) { + return _LinkToken.Contract.TotalSupply(&_LinkToken.CallOpts) +} + +// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd. +// +// Solidity: function totalSupply() constant returns(uint256) +func (_LinkToken *LinkTokenCallerSession) TotalSupply() (*big.Int, error) { + return _LinkToken.Contract.TotalSupply(&_LinkToken.CallOpts) +} + +// Approve is a paid mutator transaction binding the contract method 0x095ea7b3. +// +// Solidity: function approve(address _spender, uint256 _value) returns(bool) +func (_LinkToken *LinkTokenTransactor) Approve(opts *bind.TransactOpts, _spender common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "approve", _spender, _value) +} + +// Approve is a paid mutator transaction binding the contract method 0x095ea7b3. +// +// Solidity: function approve(address _spender, uint256 _value) returns(bool) +func (_LinkToken *LinkTokenSession) Approve(_spender common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Approve(&_LinkToken.TransactOpts, _spender, _value) +} + +// Approve is a paid mutator transaction binding the contract method 0x095ea7b3. 
+// +// Solidity: function approve(address _spender, uint256 _value) returns(bool) +func (_LinkToken *LinkTokenTransactorSession) Approve(_spender common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Approve(&_LinkToken.TransactOpts, _spender, _value) +} + +// DecreaseApproval is a paid mutator transaction binding the contract method 0x66188463. +// +// Solidity: function decreaseApproval(address _spender, uint256 _subtractedValue) returns(bool success) +func (_LinkToken *LinkTokenTransactor) DecreaseApproval(opts *bind.TransactOpts, _spender common.Address, _subtractedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "decreaseApproval", _spender, _subtractedValue) +} + +// DecreaseApproval is a paid mutator transaction binding the contract method 0x66188463. +// +// Solidity: function decreaseApproval(address _spender, uint256 _subtractedValue) returns(bool success) +func (_LinkToken *LinkTokenSession) DecreaseApproval(_spender common.Address, _subtractedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.DecreaseApproval(&_LinkToken.TransactOpts, _spender, _subtractedValue) +} + +// DecreaseApproval is a paid mutator transaction binding the contract method 0x66188463. +// +// Solidity: function decreaseApproval(address _spender, uint256 _subtractedValue) returns(bool success) +func (_LinkToken *LinkTokenTransactorSession) DecreaseApproval(_spender common.Address, _subtractedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.DecreaseApproval(&_LinkToken.TransactOpts, _spender, _subtractedValue) +} + +// IncreaseApproval is a paid mutator transaction binding the contract method 0xd73dd623. 
+// +// Solidity: function increaseApproval(address _spender, uint256 _addedValue) returns(bool success) +func (_LinkToken *LinkTokenTransactor) IncreaseApproval(opts *bind.TransactOpts, _spender common.Address, _addedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "increaseApproval", _spender, _addedValue) +} + +// IncreaseApproval is a paid mutator transaction binding the contract method 0xd73dd623. +// +// Solidity: function increaseApproval(address _spender, uint256 _addedValue) returns(bool success) +func (_LinkToken *LinkTokenSession) IncreaseApproval(_spender common.Address, _addedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.IncreaseApproval(&_LinkToken.TransactOpts, _spender, _addedValue) +} + +// IncreaseApproval is a paid mutator transaction binding the contract method 0xd73dd623. +// +// Solidity: function increaseApproval(address _spender, uint256 _addedValue) returns(bool success) +func (_LinkToken *LinkTokenTransactorSession) IncreaseApproval(_spender common.Address, _addedValue *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.IncreaseApproval(&_LinkToken.TransactOpts, _spender, _addedValue) +} + +// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. +// +// Solidity: function transfer(address _to, uint256 _value) returns(bool success) +func (_LinkToken *LinkTokenTransactor) Transfer(opts *bind.TransactOpts, _to common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "transfer", _to, _value) +} + +// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. 
+// +// Solidity: function transfer(address _to, uint256 _value) returns(bool success) +func (_LinkToken *LinkTokenSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Transfer(&_LinkToken.TransactOpts, _to, _value) +} + +// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. +// +// Solidity: function transfer(address _to, uint256 _value) returns(bool success) +func (_LinkToken *LinkTokenTransactorSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.Transfer(&_LinkToken.TransactOpts, _to, _value) +} + +// TransferAndCall is a paid mutator transaction binding the contract method 0x4000aea0. +// +// Solidity: function transferAndCall(address _to, uint256 _value, bytes _data) returns(bool success) +func (_LinkToken *LinkTokenTransactor) TransferAndCall(opts *bind.TransactOpts, _to common.Address, _value *big.Int, _data []byte) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "transferAndCall", _to, _value, _data) +} + +// TransferAndCall is a paid mutator transaction binding the contract method 0x4000aea0. +// +// Solidity: function transferAndCall(address _to, uint256 _value, bytes _data) returns(bool success) +func (_LinkToken *LinkTokenSession) TransferAndCall(_to common.Address, _value *big.Int, _data []byte) (*types.Transaction, error) { + return _LinkToken.Contract.TransferAndCall(&_LinkToken.TransactOpts, _to, _value, _data) +} + +// TransferAndCall is a paid mutator transaction binding the contract method 0x4000aea0. 
+// +// Solidity: function transferAndCall(address _to, uint256 _value, bytes _data) returns(bool success) +func (_LinkToken *LinkTokenTransactorSession) TransferAndCall(_to common.Address, _value *big.Int, _data []byte) (*types.Transaction, error) { + return _LinkToken.Contract.TransferAndCall(&_LinkToken.TransactOpts, _to, _value, _data) +} + +// TransferFrom is a paid mutator transaction binding the contract method 0x23b872dd. +// +// Solidity: function transferFrom(address _from, address _to, uint256 _value) returns(bool) +func (_LinkToken *LinkTokenTransactor) TransferFrom(opts *bind.TransactOpts, _from common.Address, _to common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.contract.Transact(opts, "transferFrom", _from, _to, _value) +} + +// TransferFrom is a paid mutator transaction binding the contract method 0x23b872dd. +// +// Solidity: function transferFrom(address _from, address _to, uint256 _value) returns(bool) +func (_LinkToken *LinkTokenSession) TransferFrom(_from common.Address, _to common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.TransferFrom(&_LinkToken.TransactOpts, _from, _to, _value) +} + +// TransferFrom is a paid mutator transaction binding the contract method 0x23b872dd. +// +// Solidity: function transferFrom(address _from, address _to, uint256 _value) returns(bool) +func (_LinkToken *LinkTokenTransactorSession) TransferFrom(_from common.Address, _to common.Address, _value *big.Int) (*types.Transaction, error) { + return _LinkToken.Contract.TransferFrom(&_LinkToken.TransactOpts, _from, _to, _value) +} + +// LinkTokenApprovalIterator is returned from FilterApproval and is used to iterate over the raw logs and unpacked data for Approval events raised by the LinkToken contract. 
+type LinkTokenApprovalIterator struct { + Event *LinkTokenApproval // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *LinkTokenApprovalIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(LinkTokenApproval) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(LinkTokenApproval) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *LinkTokenApprovalIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. 
+func (it *LinkTokenApprovalIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// LinkTokenApproval represents a Approval event raised by the LinkToken contract. +type LinkTokenApproval struct { + Owner common.Address + Spender common.Address + Value *big.Int + Raw types.Log // Blockchain specific contextual infos +} + +// FilterApproval is a free log retrieval operation binding the contract event 0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925. +// +// Solidity: event Approval(address indexed owner, address indexed spender, uint256 value) +func (_LinkToken *LinkTokenFilterer) FilterApproval(opts *bind.FilterOpts, owner []common.Address, spender []common.Address) (*LinkTokenApprovalIterator, error) { + + var ownerRule []interface{} + for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + var spenderRule []interface{} + for _, spenderItem := range spender { + spenderRule = append(spenderRule, spenderItem) + } + + logs, sub, err := _LinkToken.contract.FilterLogs(opts, "Approval", ownerRule, spenderRule) + if err != nil { + return nil, err + } + return &LinkTokenApprovalIterator{contract: _LinkToken.contract, event: "Approval", logs: logs, sub: sub}, nil +} + +// WatchApproval is a free log subscription operation binding the contract event 0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925. 
+// +// Solidity: event Approval(address indexed owner, address indexed spender, uint256 value) +func (_LinkToken *LinkTokenFilterer) WatchApproval(opts *bind.WatchOpts, sink chan<- *LinkTokenApproval, owner []common.Address, spender []common.Address) (event.Subscription, error) { + + var ownerRule []interface{} + for _, ownerItem := range owner { + ownerRule = append(ownerRule, ownerItem) + } + var spenderRule []interface{} + for _, spenderItem := range spender { + spenderRule = append(spenderRule, spenderItem) + } + + logs, sub, err := _LinkToken.contract.WatchLogs(opts, "Approval", ownerRule, spenderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(LinkTokenApproval) + if err := _LinkToken.contract.UnpackLog(event, "Approval", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseApproval is a log parse operation binding the contract event 0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925. +// +// Solidity: event Approval(address indexed owner, address indexed spender, uint256 value) +func (_LinkToken *LinkTokenFilterer) ParseApproval(log types.Log) (*LinkTokenApproval, error) { + event := new(LinkTokenApproval) + if err := _LinkToken.contract.UnpackLog(event, "Approval", log); err != nil { + return nil, err + } + return event, nil +} + +// LinkTokenTransferIterator is returned from FilterTransfer and is used to iterate over the raw logs and unpacked data for Transfer events raised by the LinkToken contract. 
+type LinkTokenTransferIterator struct { + Event *LinkTokenTransfer // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *LinkTokenTransferIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(LinkTokenTransfer) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(LinkTokenTransfer) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *LinkTokenTransferIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. 
+func (it *LinkTokenTransferIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// LinkTokenTransfer represents a Transfer event raised by the LinkToken contract. +type LinkTokenTransfer struct { + From common.Address + To common.Address + Value *big.Int + Data []byte + Raw types.Log // Blockchain specific contextual infos +} + +// FilterTransfer is a free log retrieval operation binding the contract event 0xe19260aff97b920c7df27010903aeb9c8d2be5d310a2c67824cf3f15396e4c16. +// +// Solidity: event Transfer(address indexed from, address indexed to, uint256 value, bytes data) +func (_LinkToken *LinkTokenFilterer) FilterTransfer(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*LinkTokenTransferIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LinkToken.contract.FilterLogs(opts, "Transfer", fromRule, toRule) + if err != nil { + return nil, err + } + return &LinkTokenTransferIterator{contract: _LinkToken.contract, event: "Transfer", logs: logs, sub: sub}, nil +} + +// WatchTransfer is a free log subscription operation binding the contract event 0xe19260aff97b920c7df27010903aeb9c8d2be5d310a2c67824cf3f15396e4c16. 
+// +// Solidity: event Transfer(address indexed from, address indexed to, uint256 value, bytes data) +func (_LinkToken *LinkTokenFilterer) WatchTransfer(opts *bind.WatchOpts, sink chan<- *LinkTokenTransfer, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _LinkToken.contract.WatchLogs(opts, "Transfer", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(LinkTokenTransfer) + if err := _LinkToken.contract.UnpackLog(event, "Transfer", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseTransfer is a log parse operation binding the contract event 0xe19260aff97b920c7df27010903aeb9c8d2be5d310a2c67824cf3f15396e4c16. 
+// +// Solidity: event Transfer(address indexed from, address indexed to, uint256 value, bytes data) +func (_LinkToken *LinkTokenFilterer) ParseTransfer(log types.Log) (*LinkTokenTransfer, error) { + event := new(LinkTokenTransfer) + if err := _LinkToken.contract.UnpackLog(event, "Transfer", log); err != nil { + return nil, err + } + return event, nil +} diff --git a/core/services/vrf/generated/solidity_request_id/solidity_request_id.go b/core/services/vrf/generated/solidity_request_id/solidity_request_id.go new file mode 100644 index 00000000000..ff1cbdb4b8f --- /dev/null +++ b/core/services/vrf/generated/solidity_request_id/solidity_request_id.go @@ -0,0 +1,242 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package solidity_request_id + +import ( + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = abi.U256 + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +// VRFRequestIDBaseTestHelperABI is the input ABI used to generate the binding from. 
+const VRFRequestIDBaseTestHelperABI = "[{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_vRFInputSeed\",\"type\":\"uint256\"}],\"name\":\"makeRequestId_\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_userSeed\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_requester\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_nonce\",\"type\":\"uint256\"}],\"name\":\"makeVRFInputSeed_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]" + +// VRFRequestIDBaseTestHelperBin is the compiled bytecode used for deploying new contracts. +var VRFRequestIDBaseTestHelperBin = "0x608060405234801561001057600080fd5b50610212806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806337ab429a1461003b578063bda087ae146100b1575b600080fd5b61009b6004803603608081101561005157600080fd5b810190808035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291905050506100fd565b6040518082815260200191505060405180910390f35b6100e7600480360360408110156100c757600080fd5b810190808035906020019092919080359060200190929190505050610115565b6040518082815260200191505060405180910390f35b600061010b85858585610129565b9050949350505050565b600061012183836101a3565b905092915050565b600084848484604051602001808581526020018481526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019450505050506040516020818303038152906040528051906020012060001c9050949350505050565b6000828260405160200180838152602001828152602001925050506040516020818303038152906040528051906020012090509291505056fea26
46970667358220000000000000000000000000000000000000000000000000000000000000000000064736f6c63430000000033" + +// DeployVRFRequestIDBaseTestHelper deploys a new Ethereum contract, binding an instance of VRFRequestIDBaseTestHelper to it. +func DeployVRFRequestIDBaseTestHelper(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *VRFRequestIDBaseTestHelper, error) { + parsed, err := abi.JSON(strings.NewReader(VRFRequestIDBaseTestHelperABI)) + if err != nil { + return common.Address{}, nil, nil, err + } + + address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(VRFRequestIDBaseTestHelperBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFRequestIDBaseTestHelper{VRFRequestIDBaseTestHelperCaller: VRFRequestIDBaseTestHelperCaller{contract: contract}, VRFRequestIDBaseTestHelperTransactor: VRFRequestIDBaseTestHelperTransactor{contract: contract}, VRFRequestIDBaseTestHelperFilterer: VRFRequestIDBaseTestHelperFilterer{contract: contract}}, nil +} + +// VRFRequestIDBaseTestHelper is an auto generated Go binding around an Ethereum contract. +type VRFRequestIDBaseTestHelper struct { + VRFRequestIDBaseTestHelperCaller // Read-only binding to the contract + VRFRequestIDBaseTestHelperTransactor // Write-only binding to the contract + VRFRequestIDBaseTestHelperFilterer // Log filterer for contract events +} + +// VRFRequestIDBaseTestHelperCaller is an auto generated read-only Go binding around an Ethereum contract. +type VRFRequestIDBaseTestHelperCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// VRFRequestIDBaseTestHelperTransactor is an auto generated write-only Go binding around an Ethereum contract. 
+type VRFRequestIDBaseTestHelperTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// VRFRequestIDBaseTestHelperFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type VRFRequestIDBaseTestHelperFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// VRFRequestIDBaseTestHelperSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type VRFRequestIDBaseTestHelperSession struct { + Contract *VRFRequestIDBaseTestHelper // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// VRFRequestIDBaseTestHelperCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type VRFRequestIDBaseTestHelperCallerSession struct { + Contract *VRFRequestIDBaseTestHelperCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// VRFRequestIDBaseTestHelperTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type VRFRequestIDBaseTestHelperTransactorSession struct { + Contract *VRFRequestIDBaseTestHelperTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// VRFRequestIDBaseTestHelperRaw is an auto generated low-level Go binding around an Ethereum contract. 
+type VRFRequestIDBaseTestHelperRaw struct { + Contract *VRFRequestIDBaseTestHelper // Generic contract binding to access the raw methods on +} + +// VRFRequestIDBaseTestHelperCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type VRFRequestIDBaseTestHelperCallerRaw struct { + Contract *VRFRequestIDBaseTestHelperCaller // Generic read-only contract binding to access the raw methods on +} + +// VRFRequestIDBaseTestHelperTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type VRFRequestIDBaseTestHelperTransactorRaw struct { + Contract *VRFRequestIDBaseTestHelperTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewVRFRequestIDBaseTestHelper creates a new instance of VRFRequestIDBaseTestHelper, bound to a specific deployed contract. +func NewVRFRequestIDBaseTestHelper(address common.Address, backend bind.ContractBackend) (*VRFRequestIDBaseTestHelper, error) { + contract, err := bindVRFRequestIDBaseTestHelper(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFRequestIDBaseTestHelper{VRFRequestIDBaseTestHelperCaller: VRFRequestIDBaseTestHelperCaller{contract: contract}, VRFRequestIDBaseTestHelperTransactor: VRFRequestIDBaseTestHelperTransactor{contract: contract}, VRFRequestIDBaseTestHelperFilterer: VRFRequestIDBaseTestHelperFilterer{contract: contract}}, nil +} + +// NewVRFRequestIDBaseTestHelperCaller creates a new read-only instance of VRFRequestIDBaseTestHelper, bound to a specific deployed contract. 
+func NewVRFRequestIDBaseTestHelperCaller(address common.Address, caller bind.ContractCaller) (*VRFRequestIDBaseTestHelperCaller, error) { + contract, err := bindVRFRequestIDBaseTestHelper(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFRequestIDBaseTestHelperCaller{contract: contract}, nil +} + +// NewVRFRequestIDBaseTestHelperTransactor creates a new write-only instance of VRFRequestIDBaseTestHelper, bound to a specific deployed contract. +func NewVRFRequestIDBaseTestHelperTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFRequestIDBaseTestHelperTransactor, error) { + contract, err := bindVRFRequestIDBaseTestHelper(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFRequestIDBaseTestHelperTransactor{contract: contract}, nil +} + +// NewVRFRequestIDBaseTestHelperFilterer creates a new log filterer instance of VRFRequestIDBaseTestHelper, bound to a specific deployed contract. +func NewVRFRequestIDBaseTestHelperFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFRequestIDBaseTestHelperFilterer, error) { + contract, err := bindVRFRequestIDBaseTestHelper(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFRequestIDBaseTestHelperFilterer{contract: contract}, nil +} + +// bindVRFRequestIDBaseTestHelper binds a generic wrapper to an already deployed contract. +func bindVRFRequestIDBaseTestHelper(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(VRFRequestIDBaseTestHelperABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. 
The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { + return _VRFRequestIDBaseTestHelper.Contract.VRFRequestIDBaseTestHelperCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFRequestIDBaseTestHelper.Contract.VRFRequestIDBaseTestHelperTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFRequestIDBaseTestHelper.Contract.VRFRequestIDBaseTestHelperTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { + return _VRFRequestIDBaseTestHelper.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. 
+func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFRequestIDBaseTestHelper.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFRequestIDBaseTestHelper.Contract.contract.Transact(opts, method, params...) +} + +// MakeRequestId is a free data retrieval call binding the contract method 0xbda087ae. +// +// Solidity: function makeRequestId_(bytes32 _keyHash, uint256 _vRFInputSeed) constant returns(bytes32) +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperCaller) MakeRequestId(opts *bind.CallOpts, _keyHash [32]byte, _vRFInputSeed *big.Int) ([32]byte, error) { + var ( + ret0 = new([32]byte) + ) + out := ret0 + err := _VRFRequestIDBaseTestHelper.contract.Call(opts, out, "makeRequestId_", _keyHash, _vRFInputSeed) + return *ret0, err +} + +// MakeRequestId is a free data retrieval call binding the contract method 0xbda087ae. +// +// Solidity: function makeRequestId_(bytes32 _keyHash, uint256 _vRFInputSeed) constant returns(bytes32) +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperSession) MakeRequestId(_keyHash [32]byte, _vRFInputSeed *big.Int) ([32]byte, error) { + return _VRFRequestIDBaseTestHelper.Contract.MakeRequestId(&_VRFRequestIDBaseTestHelper.CallOpts, _keyHash, _vRFInputSeed) +} + +// MakeRequestId is a free data retrieval call binding the contract method 0xbda087ae. 
+// +// Solidity: function makeRequestId_(bytes32 _keyHash, uint256 _vRFInputSeed) constant returns(bytes32) +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperCallerSession) MakeRequestId(_keyHash [32]byte, _vRFInputSeed *big.Int) ([32]byte, error) { + return _VRFRequestIDBaseTestHelper.Contract.MakeRequestId(&_VRFRequestIDBaseTestHelper.CallOpts, _keyHash, _vRFInputSeed) +} + +// MakeVRFInputSeed is a free data retrieval call binding the contract method 0x37ab429a. +// +// Solidity: function makeVRFInputSeed_(bytes32 _keyHash, uint256 _userSeed, address _requester, uint256 _nonce) constant returns(uint256) +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperCaller) MakeVRFInputSeed(opts *bind.CallOpts, _keyHash [32]byte, _userSeed *big.Int, _requester common.Address, _nonce *big.Int) (*big.Int, error) { + var ( + ret0 = new(*big.Int) + ) + out := ret0 + err := _VRFRequestIDBaseTestHelper.contract.Call(opts, out, "makeVRFInputSeed_", _keyHash, _userSeed, _requester, _nonce) + return *ret0, err +} + +// MakeVRFInputSeed is a free data retrieval call binding the contract method 0x37ab429a. +// +// Solidity: function makeVRFInputSeed_(bytes32 _keyHash, uint256 _userSeed, address _requester, uint256 _nonce) constant returns(uint256) +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperSession) MakeVRFInputSeed(_keyHash [32]byte, _userSeed *big.Int, _requester common.Address, _nonce *big.Int) (*big.Int, error) { + return _VRFRequestIDBaseTestHelper.Contract.MakeVRFInputSeed(&_VRFRequestIDBaseTestHelper.CallOpts, _keyHash, _userSeed, _requester, _nonce) +} + +// MakeVRFInputSeed is a free data retrieval call binding the contract method 0x37ab429a. 
+// +// Solidity: function makeVRFInputSeed_(bytes32 _keyHash, uint256 _userSeed, address _requester, uint256 _nonce) constant returns(uint256) +func (_VRFRequestIDBaseTestHelper *VRFRequestIDBaseTestHelperCallerSession) MakeVRFInputSeed(_keyHash [32]byte, _userSeed *big.Int, _requester common.Address, _nonce *big.Int) (*big.Int, error) { + return _VRFRequestIDBaseTestHelper.Contract.MakeVRFInputSeed(&_VRFRequestIDBaseTestHelper.CallOpts, _keyHash, _userSeed, _requester, _nonce) +} diff --git a/core/services/vrf/generated/solidity_verifier_wrapper/solidity_verifier_wrapper.go b/core/services/vrf/generated/solidity_verifier_wrapper/solidity_verifier_wrapper.go new file mode 100644 index 00000000000..896a9fbf16c --- /dev/null +++ b/core/services/vrf/generated/solidity_verifier_wrapper/solidity_verifier_wrapper.go @@ -0,0 +1,532 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package solidity_verifier_wrapper + +import ( + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = abi.U256 + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +// VRFTestHelperABI is the input ABI used to generate the binding from. 
+const VRFTestHelperABI = "[{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"p1\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"p2\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"invZ\",\"type\":\"uint256\"}],\"name\":\"affineECAdd_\",\"outputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"\",\"type\":\"uint256[2]\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"base\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"exponent\",\"type\":\"uint256\"}],\"name\":\"bigModExp_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"x\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"},{\"internalType\":\"uint256[2]\",\"name\":\"q\",\"type\":\"uint256[2]\"}],\"name\":\"ecmulVerify_\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"b\",\"type\":\"bytes\"}],\"name\":\"fieldHash_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"x\",\"type\":\"uint256\"}],\"name\":\"hashToCurve_\",\"outputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"\",\"type\":\"uint256[2]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256[2]\",\"name\":\"p1\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"cp1Witness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"uin
t256[2]\",\"name\":\"p2\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"sp2Witness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"zInv\",\"type\":\"uint256\"}],\"name\":\"linearCombination_\",\"outputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"\",\"type\":\"uint256[2]\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"px\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"py\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"qx\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"qy\",\"type\":\"uint256\"}],\"name\":\"projectiveECAdd_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"proof\",\"type\":\"bytes\"}],\"name\":\"randomValueFromVRFProof_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"output\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"hash\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"gamma\",\"type\":\"uint256[2]\"},{\"internalType\":\"address\",\"name\":\"uWitness\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"v\",\"type\":\"uint256[2]\"}],\"name\":\"scalarFromCurvePoints_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"x\",\"type\":\"uint256\"}],\"name\":\"squareRoot_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"functio
n\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256[2]\",\"name\":\"p\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"lcWitness\",\"type\":\"address\"}],\"name\":\"verifyLinearCombinationWithGenerator_\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"pk\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"gamma\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"s\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"uWitness\",\"type\":\"address\"},{\"internalType\":\"uint256[2]\",\"name\":\"cGammaWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256[2]\",\"name\":\"sHashWitness\",\"type\":\"uint256[2]\"},{\"internalType\":\"uint256\",\"name\":\"zInv\",\"type\":\"uint256\"}],\"name\":\"verifyVRFProof_\",\"outputs\":[],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"x\",\"type\":\"uint256\"}],\"name\":\"ySquared_\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}]" + +// VRFTestHelperBin is the compiled bytecode used for deploying new contracts. 
+var VRFTestHelperBin = "0x608060405234801561001057600080fd5b50612220806100206000396000f3fe608060405234801561001057600080fd5b50600436106100cf5760003560e01c806395e6ee921161008c578063b481e26011610066578063b481e2601461067f578063cefda0c51461074e578063ef3b10ec1461081d578063fe54f2a21461098e576100cf565b806395e6ee92146105075780639d6f033714610575578063aa7b2fbb146105b7576100cf565b8063244f896d146100d457806335452450146101c05780635de600421461026b5780637f8f50a8146102b75780638af046ea1461041457806391d5f69114610456575b600080fd5b610182600480360360a08110156100ea57600080fd5b8101908080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f820116905080830192505050505050919291929080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f820116905080830192505050505050919291929080359060200190929190505050610b11565b6040518082600260200280838360005b838110156101ad578082015181840152602081019050610192565b5050505090500191505060405180910390f35b61022d600480360360608110156101d657600080fd5b8101908080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f820116905080830192505050505050919291929080359060200190929190505050610b2d565b6040518082600260200280838360005b8381101561025857808201518184015260208101905061023d565b5050505090500191505060405180910390f35b6102a16004803603604081101561028157600080fd5b810190808035906020019092919080359060200190929190505050610b47565b6040518082815260200191505060405180910390f35b6103fe60048036036101208110156102ce57600080fd5b8101908080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f820116905080830192505050505050919291929080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f820116905080830192505050505050919291929080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f8201169050808301925050505050509192919290803573ffffffffffffffffffffffffffffffffffffffff169060200190929190806040019
06002806020026040519081016040528092919082600260200280828437600081840152601f19601f8201169050808301925050505050509192919290505050610b5b565b6040518082815260200191505060405180910390f35b6104406004803603602081101561042a57600080fd5b8101908080359060200190929190505050610b75565b6040518082815260200191505060405180910390f35b6104ed600480360360a081101561046c57600080fd5b810190808035906020019092919080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f820116905080830192505050505050919291929080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610b87565b604051808215151515815260200191505060405180910390f35b6105516004803603608081101561051d57600080fd5b8101908080359060200190929190803590602001909291908035906020019092919080359060200190929190505050610b9f565b60405180848152602001838152602001828152602001935050505060405180910390f35b6105a16004803603602081101561058b57600080fd5b8101908080359060200190929190505050610bc0565b6040518082815260200191505060405180910390f35b610665600480360360a08110156105cd57600080fd5b8101908080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f82011690508083019250505050505091929192908035906020019092919080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f8201169050808301925050505050509192919290505050610bd2565b604051808215151515815260200191505060405180910390f35b6107386004803603602081101561069557600080fd5b81019080803590602001906401000000008111156106b257600080fd5b8201836020820111156106c457600080fd5b803590602001918460018302840111640100000000831117156106e657600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050509192919290505050610be8565b6040518082815260200191505060405180910390f35b6108076004803603602081101561076457600080fd5b810190808035906020019064010000000081111561078157600080fd5b82018360208201111561079357600080fd5b80359060200191846001830284011
1640100000000831117156107b557600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050509192919290505050610bfa565b6040518082815260200191505060405180910390f35b61098c60048036036101a081101561083457600080fd5b8101908080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f820116905080830192505050505050919291929080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f8201169050808301925050505050509192919290803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f820116905080830192505050505050919291929080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f820116905080830192505050505050919291929080359060200190929190505050610c0c565b005b610ad360048036036101608110156109a557600080fd5b810190808035906020019092919080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f820116905080830192505050505050919291929080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f82011690508083019250505050505091929192908035906020019092919080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f820116905080830192505050505050919291929080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f820116905080830192505050505050919291929080359060200190929190505050610c28565b6040518082600260200280838360005b83811015610afe578082015181840152602081019050610ae3565b5050505090500191505060405180910390f35b610b1961211f565b610b24848484610c4c565b90509392505050565b610b3561211f565b610b3f8383610dca565b905092915050565b6000610b538383610e89565b905092915050565b6000610b6a8686868686610feb565b905095945050505050565b6000610b808261111a565b9050919050565b60006
10b9585858585611154565b9050949350505050565b6000806000610bb08787878761138c565b9250925092509450945094915050565b6000610bcb82611560565b9050919050565b6000610bdf8484846115ee565b90509392505050565b6000610bf382611779565b9050919050565b6000610c05826117e6565b9050919050565b610c1d8989898989898989896119b6565b505050505050505050565b610c3061211f565b610c3f88888888888888611ce1565b9050979650505050505050565b610c5461211f565b6000806000610ca987600060028110610c6957fe5b602002015188600160028110610c7b57fe5b602002015188600060028110610c8d57fe5b602002015189600160028110610c9f57fe5b602002015161138c565b80935081945082955050505060017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f80610cdf57fe5b86830914610d55576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260198152602001807f696e765a206d75737420626520696e7665727365206f66207a0000000000000081525060200191505060405180910390fd5b60405180604001604052807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f80610d8857fe5b87860981526020017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f80610db857fe5b87850981525093505050509392505050565b610dd261211f565b610e33600184846040516020018084815260200183600260200280838360005b83811015610e0d578082015181840152602081019050610df2565b505050509050018281526020019350505050604051602081830303815290604052611e85565b90505b610e3f81611f58565b610e8357610e7c81600060028110610e5357fe5b602002015160405160200180828152602001915050604051602081830303815290604052611e85565b9050610e36565b92915050565b600080610e94612141565b602081600060068110610ea357fe5b602002018181525050602081600160068110610ebb57fe5b602002018181525050602081600260068110610ed357fe5b6020020181815250508481600360068110610eea57fe5b6020020181815250508381600460068110610f0157fe5b6020020181815250507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f81600560068110610f3857fe5b602002018181525050610f49612163565b60208160c0846005600019fa92506000831415610fce576040517f08c379a00000000000000000000
000000000000000000000000000000000000081526004018080602001828103825260128152602001807f6269674d6f64457870206661696c75726521000000000000000000000000000081525060200191505060405180910390fd5b80600060018110610fdb57fe5b6020020151935050505092915050565b6000600286868685876040516020018087815260200186600260200280838360005b8381101561102857808201518184015260208101905061100d565b5050505090500185600260200280838360005b8381101561105657808201518184015260208101905061103b565b5050505090500184600260200280838360005b83811015611084578082015181840152602081019050611069565b5050505090500183600260200280838360005b838110156110b2578082015181840152602081019050611097565b505050509050018273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1660601b815260140196505050505050506040516020818303038152906040528051906020012060001c905095945050505050565b600061114d82600260017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f01901c610e89565b9050919050565b60008073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614156111f8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600b8152602001807f626164207769746e65737300000000000000000000000000000000000000000081525060200191505060405180910390fd5b60008060028660016002811061120a57fe5b60200201518161121657fe5b061461122357601c611226565b601b5b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641418061125257fe5b858760006002811061126057fe5b6020020151097ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410360001b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141806112b757fe5b876000600281106112c457fe5b6020020151890960001b90506000600183858a6000600281106112e357fe5b602002015160001b8560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa158015611343573d6000803e3d6000fd5b5050506020604051035190508573fffffffffffffffff
fffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614945050505050949350505050565b60008060008060006001809150915060007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f806113c557fe5b897ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f038808905060007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061141657fe5b8b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f038a089050600061144b83838585611fc9565b809250819950505061145f88828e88612033565b809250819950505061147388828c87612033565b809250819950505060006114898d878b85612033565b809250819950505061149d88828686611fc9565b80925081995050506114b188828e89612033565b809250819950505080821461154c577ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f806114e857fe5b818a0998507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061151557fe5b82890997507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061154257fe5b8183099650611550565b8196505b5050505050509450945094915050565b6000807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061158b57fe5b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f806115b357fe5b848509840990507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f806115e257fe5b60078208915050919050565b6000808314156115fd57600080fd5b60008460006002811061160c57fe5b6020020151905060008060028760016002811061162557fe5b60200201518161163157fe5b061461163e57601c611641565b601b5b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641418061166d57fe5b83870960001b9050600060016000801b848660001b8560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa1580156116da573d6000803e3d6000fd5b5050506020604051035190506000866040516020018082600260200280838360005b838110156117175780820151818401526020810190506116fc565b505050509050019150506040516020818303038152906040528051906020012060001c90508073fffffff
fffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614955050505050509392505050565b6000818051906020012060001c90505b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f81106117e15780604051602001808281526020019150506040516020818303038152906040528051906020012060001c9050611789565b919050565b60006101a0825114611860576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f77726f6e672070726f6f66206c656e677468000000000000000000000000000081525060200191505060405180910390fd5b61186861211f565b61187061211f565b611878612185565b600061188261211f565b61188a61211f565b6000888060200190516101a08110156118a257600080fd5b810190809190826040019190826040019190826060018051906020019092919091908260400191908260400180519060200190929190505050869650859550849450839350829250819150809750819850829950839a50849b50859c50869d505050505050505061194d87878760006003811061191b57fe5b60200201518860016003811061192d57fe5b60200201518960026003811061193f57fe5b6020020151898989896119b6565b6003866040516020018083815260200182600260200280838360005b83811015611984578082015181840152602081019050611969565b50505050905001925050506040516020818303038152906040528051906020012060001c975050505050505050919050565b6119bf89611f58565b611a31576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601a8152602001807f7075626c6963206b6579206973206e6f74206f6e20637572766500000000000081525060200191505060405180910390fd5b611a3a88611f58565b611aac576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260158152602001807f67616d6d61206973206e6f74206f6e206375727665000000000000000000000081525060200191505060405180910390fd5b611ab583611f58565b611b27576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601d8152602001807f6347616d6d615769746e657373206973206e6f74206f6e20637572766500000081525060200191505060405180910390fd5b611b3
082611f58565b611ba2576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601c8152602001807f73486173685769746e657373206973206e6f74206f6e2063757276650000000081525060200191505060405180910390fd5b611bae878a8887611154565b611c20576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601a8152602001807f6164647228632a706b2b732a6729e289a05f755769746e65737300000000000081525060200191505060405180910390fd5b611c2861211f565b611c328a87610dca565b9050611c3c61211f565b611c4b898b878b868989611ce1565b90506000611c5c838d8d8a86610feb565b9050808a14611cd3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600d8152602001807f696e76616c69642070726f6f660000000000000000000000000000000000000081525060200191505060405180910390fd5b505050505050505050505050565b611ce961211f565b60007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f84600060028110611d1957fe5b602002015188600060028110611d2b57fe5b60200201510381611d3857fe5b061415611dad576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601e8152602001807f706f696e747320696e2073756d206d7573742062652064697374696e6374000081525060200191505060405180910390fd5b611db88789886115ee565b611e0d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806121a86021913960400191505060405180910390fd5b611e188486856115ee565b611e6d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260228152602001806121c96022913960400191505060405180910390fd5b611e78868484610c4c565b9050979650505050505050565b611e8d61211f565b611e9682611779565b81600060028110611ea357fe5b602002018181525050611ece611ec982600060028110611ebf57fe5b6020020151611560565b61111a565b81600160028110611edb57fe5b6020020181815250506001600282600160028110611ef557fe5b602002015181611f0157fe5b061415611f535780600160028110611f1
557fe5b60200201517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f0381600160028110611f4957fe5b6020020181815250505b919050565b60007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f80611f8257fe5b82600160028110611f8f57fe5b602002015183600160028110611fa157fe5b602002015109611fc183600060028110611fb757fe5b6020020151611560565b149050919050565b6000807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f80611ff457fe5b8487097ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061201f57fe5b848709809250819350505094509492505050565b60008060007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061206057fe5b878509905060007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061208f57fe5b87877ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f030990507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f806120de57fe5b8183087ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061210957fe5b8689098094508195505050505094509492505050565b6040518060400160405280600290602082028038833980820191505090505090565b6040518060c00160405280600690602082028038833980820191505090505090565b6040518060200160405280600190602082028038833980820191505090505090565b604051806060016040528060039060208202803883398082019150509050509056fe4669727374206d756c7469706c69636174696f6e20636865636b206661696c65645365636f6e64206d756c7469706c69636174696f6e20636865636b206661696c6564a2646970667358220000000000000000000000000000000000000000000000000000000000000000000064736f6c63430000000033" + +// DeployVRFTestHelper deploys a new Ethereum contract, binding an instance of VRFTestHelper to it. 
+func DeployVRFTestHelper(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *VRFTestHelper, error) { + parsed, err := abi.JSON(strings.NewReader(VRFTestHelperABI)) + if err != nil { + return common.Address{}, nil, nil, err + } + + address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(VRFTestHelperBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFTestHelper{VRFTestHelperCaller: VRFTestHelperCaller{contract: contract}, VRFTestHelperTransactor: VRFTestHelperTransactor{contract: contract}, VRFTestHelperFilterer: VRFTestHelperFilterer{contract: contract}}, nil +} + +// VRFTestHelper is an auto generated Go binding around an Ethereum contract. +type VRFTestHelper struct { + VRFTestHelperCaller // Read-only binding to the contract + VRFTestHelperTransactor // Write-only binding to the contract + VRFTestHelperFilterer // Log filterer for contract events +} + +// VRFTestHelperCaller is an auto generated read-only Go binding around an Ethereum contract. +type VRFTestHelperCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// VRFTestHelperTransactor is an auto generated write-only Go binding around an Ethereum contract. +type VRFTestHelperTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// VRFTestHelperFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type VRFTestHelperFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// VRFTestHelperSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. 
+type VRFTestHelperSession struct { + Contract *VRFTestHelper // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// VRFTestHelperCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type VRFTestHelperCallerSession struct { + Contract *VRFTestHelperCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// VRFTestHelperTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type VRFTestHelperTransactorSession struct { + Contract *VRFTestHelperTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// VRFTestHelperRaw is an auto generated low-level Go binding around an Ethereum contract. +type VRFTestHelperRaw struct { + Contract *VRFTestHelper // Generic contract binding to access the raw methods on +} + +// VRFTestHelperCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type VRFTestHelperCallerRaw struct { + Contract *VRFTestHelperCaller // Generic read-only contract binding to access the raw methods on +} + +// VRFTestHelperTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type VRFTestHelperTransactorRaw struct { + Contract *VRFTestHelperTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewVRFTestHelper creates a new instance of VRFTestHelper, bound to a specific deployed contract. 
+func NewVRFTestHelper(address common.Address, backend bind.ContractBackend) (*VRFTestHelper, error) { + contract, err := bindVRFTestHelper(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFTestHelper{VRFTestHelperCaller: VRFTestHelperCaller{contract: contract}, VRFTestHelperTransactor: VRFTestHelperTransactor{contract: contract}, VRFTestHelperFilterer: VRFTestHelperFilterer{contract: contract}}, nil +} + +// NewVRFTestHelperCaller creates a new read-only instance of VRFTestHelper, bound to a specific deployed contract. +func NewVRFTestHelperCaller(address common.Address, caller bind.ContractCaller) (*VRFTestHelperCaller, error) { + contract, err := bindVRFTestHelper(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFTestHelperCaller{contract: contract}, nil +} + +// NewVRFTestHelperTransactor creates a new write-only instance of VRFTestHelper, bound to a specific deployed contract. +func NewVRFTestHelperTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFTestHelperTransactor, error) { + contract, err := bindVRFTestHelper(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFTestHelperTransactor{contract: contract}, nil +} + +// NewVRFTestHelperFilterer creates a new log filterer instance of VRFTestHelper, bound to a specific deployed contract. +func NewVRFTestHelperFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFTestHelperFilterer, error) { + contract, err := bindVRFTestHelper(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFTestHelperFilterer{contract: contract}, nil +} + +// bindVRFTestHelper binds a generic wrapper to an already deployed contract. 
+func bindVRFTestHelper(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(VRFTestHelperABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_VRFTestHelper *VRFTestHelperRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { + return _VRFTestHelper.Contract.VRFTestHelperCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_VRFTestHelper *VRFTestHelperRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFTestHelper.Contract.VRFTestHelperTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_VRFTestHelper *VRFTestHelperRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFTestHelper.Contract.VRFTestHelperTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_VRFTestHelper *VRFTestHelperCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { + return _VRFTestHelper.Contract.contract.Call(opts, result, method, params...) 
+} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_VRFTestHelper *VRFTestHelperTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFTestHelper.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_VRFTestHelper *VRFTestHelperTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFTestHelper.Contract.contract.Transact(opts, method, params...) +} + +// AffineECAdd is a free data retrieval call binding the contract method 0x244f896d. +// +// Solidity: function affineECAdd_(uint256[2] p1, uint256[2] p2, uint256 invZ) constant returns(uint256[2]) +func (_VRFTestHelper *VRFTestHelperCaller) AffineECAdd(opts *bind.CallOpts, p1 [2]*big.Int, p2 [2]*big.Int, invZ *big.Int) ([2]*big.Int, error) { + var ( + ret0 = new([2]*big.Int) + ) + out := ret0 + err := _VRFTestHelper.contract.Call(opts, out, "affineECAdd_", p1, p2, invZ) + return *ret0, err +} + +// AffineECAdd is a free data retrieval call binding the contract method 0x244f896d. +// +// Solidity: function affineECAdd_(uint256[2] p1, uint256[2] p2, uint256 invZ) constant returns(uint256[2]) +func (_VRFTestHelper *VRFTestHelperSession) AffineECAdd(p1 [2]*big.Int, p2 [2]*big.Int, invZ *big.Int) ([2]*big.Int, error) { + return _VRFTestHelper.Contract.AffineECAdd(&_VRFTestHelper.CallOpts, p1, p2, invZ) +} + +// AffineECAdd is a free data retrieval call binding the contract method 0x244f896d. 
+// +// Solidity: function affineECAdd_(uint256[2] p1, uint256[2] p2, uint256 invZ) constant returns(uint256[2]) +func (_VRFTestHelper *VRFTestHelperCallerSession) AffineECAdd(p1 [2]*big.Int, p2 [2]*big.Int, invZ *big.Int) ([2]*big.Int, error) { + return _VRFTestHelper.Contract.AffineECAdd(&_VRFTestHelper.CallOpts, p1, p2, invZ) +} + +// BigModExp is a free data retrieval call binding the contract method 0x5de60042. +// +// Solidity: function bigModExp_(uint256 base, uint256 exponent) constant returns(uint256) +func (_VRFTestHelper *VRFTestHelperCaller) BigModExp(opts *bind.CallOpts, base *big.Int, exponent *big.Int) (*big.Int, error) { + var ( + ret0 = new(*big.Int) + ) + out := ret0 + err := _VRFTestHelper.contract.Call(opts, out, "bigModExp_", base, exponent) + return *ret0, err +} + +// BigModExp is a free data retrieval call binding the contract method 0x5de60042. +// +// Solidity: function bigModExp_(uint256 base, uint256 exponent) constant returns(uint256) +func (_VRFTestHelper *VRFTestHelperSession) BigModExp(base *big.Int, exponent *big.Int) (*big.Int, error) { + return _VRFTestHelper.Contract.BigModExp(&_VRFTestHelper.CallOpts, base, exponent) +} + +// BigModExp is a free data retrieval call binding the contract method 0x5de60042. +// +// Solidity: function bigModExp_(uint256 base, uint256 exponent) constant returns(uint256) +func (_VRFTestHelper *VRFTestHelperCallerSession) BigModExp(base *big.Int, exponent *big.Int) (*big.Int, error) { + return _VRFTestHelper.Contract.BigModExp(&_VRFTestHelper.CallOpts, base, exponent) +} + +// EcmulVerify is a free data retrieval call binding the contract method 0xaa7b2fbb. 
+// +// Solidity: function ecmulVerify_(uint256[2] x, uint256 scalar, uint256[2] q) constant returns(bool) +func (_VRFTestHelper *VRFTestHelperCaller) EcmulVerify(opts *bind.CallOpts, x [2]*big.Int, scalar *big.Int, q [2]*big.Int) (bool, error) { + var ( + ret0 = new(bool) + ) + out := ret0 + err := _VRFTestHelper.contract.Call(opts, out, "ecmulVerify_", x, scalar, q) + return *ret0, err +} + +// EcmulVerify is a free data retrieval call binding the contract method 0xaa7b2fbb. +// +// Solidity: function ecmulVerify_(uint256[2] x, uint256 scalar, uint256[2] q) constant returns(bool) +func (_VRFTestHelper *VRFTestHelperSession) EcmulVerify(x [2]*big.Int, scalar *big.Int, q [2]*big.Int) (bool, error) { + return _VRFTestHelper.Contract.EcmulVerify(&_VRFTestHelper.CallOpts, x, scalar, q) +} + +// EcmulVerify is a free data retrieval call binding the contract method 0xaa7b2fbb. +// +// Solidity: function ecmulVerify_(uint256[2] x, uint256 scalar, uint256[2] q) constant returns(bool) +func (_VRFTestHelper *VRFTestHelperCallerSession) EcmulVerify(x [2]*big.Int, scalar *big.Int, q [2]*big.Int) (bool, error) { + return _VRFTestHelper.Contract.EcmulVerify(&_VRFTestHelper.CallOpts, x, scalar, q) +} + +// FieldHash is a free data retrieval call binding the contract method 0xb481e260. +// +// Solidity: function fieldHash_(bytes b) constant returns(uint256) +func (_VRFTestHelper *VRFTestHelperCaller) FieldHash(opts *bind.CallOpts, b []byte) (*big.Int, error) { + var ( + ret0 = new(*big.Int) + ) + out := ret0 + err := _VRFTestHelper.contract.Call(opts, out, "fieldHash_", b) + return *ret0, err +} + +// FieldHash is a free data retrieval call binding the contract method 0xb481e260. 
+// +// Solidity: function fieldHash_(bytes b) constant returns(uint256) +func (_VRFTestHelper *VRFTestHelperSession) FieldHash(b []byte) (*big.Int, error) { + return _VRFTestHelper.Contract.FieldHash(&_VRFTestHelper.CallOpts, b) +} + +// FieldHash is a free data retrieval call binding the contract method 0xb481e260. +// +// Solidity: function fieldHash_(bytes b) constant returns(uint256) +func (_VRFTestHelper *VRFTestHelperCallerSession) FieldHash(b []byte) (*big.Int, error) { + return _VRFTestHelper.Contract.FieldHash(&_VRFTestHelper.CallOpts, b) +} + +// HashToCurve is a free data retrieval call binding the contract method 0x35452450. +// +// Solidity: function hashToCurve_(uint256[2] pk, uint256 x) constant returns(uint256[2]) +func (_VRFTestHelper *VRFTestHelperCaller) HashToCurve(opts *bind.CallOpts, pk [2]*big.Int, x *big.Int) ([2]*big.Int, error) { + var ( + ret0 = new([2]*big.Int) + ) + out := ret0 + err := _VRFTestHelper.contract.Call(opts, out, "hashToCurve_", pk, x) + return *ret0, err +} + +// HashToCurve is a free data retrieval call binding the contract method 0x35452450. +// +// Solidity: function hashToCurve_(uint256[2] pk, uint256 x) constant returns(uint256[2]) +func (_VRFTestHelper *VRFTestHelperSession) HashToCurve(pk [2]*big.Int, x *big.Int) ([2]*big.Int, error) { + return _VRFTestHelper.Contract.HashToCurve(&_VRFTestHelper.CallOpts, pk, x) +} + +// HashToCurve is a free data retrieval call binding the contract method 0x35452450. +// +// Solidity: function hashToCurve_(uint256[2] pk, uint256 x) constant returns(uint256[2]) +func (_VRFTestHelper *VRFTestHelperCallerSession) HashToCurve(pk [2]*big.Int, x *big.Int) ([2]*big.Int, error) { + return _VRFTestHelper.Contract.HashToCurve(&_VRFTestHelper.CallOpts, pk, x) +} + +// LinearCombination is a free data retrieval call binding the contract method 0xfe54f2a2. 
+// +// Solidity: function linearCombination_(uint256 c, uint256[2] p1, uint256[2] cp1Witness, uint256 s, uint256[2] p2, uint256[2] sp2Witness, uint256 zInv) constant returns(uint256[2]) +func (_VRFTestHelper *VRFTestHelperCaller) LinearCombination(opts *bind.CallOpts, c *big.Int, p1 [2]*big.Int, cp1Witness [2]*big.Int, s *big.Int, p2 [2]*big.Int, sp2Witness [2]*big.Int, zInv *big.Int) ([2]*big.Int, error) { + var ( + ret0 = new([2]*big.Int) + ) + out := ret0 + err := _VRFTestHelper.contract.Call(opts, out, "linearCombination_", c, p1, cp1Witness, s, p2, sp2Witness, zInv) + return *ret0, err +} + +// LinearCombination is a free data retrieval call binding the contract method 0xfe54f2a2. +// +// Solidity: function linearCombination_(uint256 c, uint256[2] p1, uint256[2] cp1Witness, uint256 s, uint256[2] p2, uint256[2] sp2Witness, uint256 zInv) constant returns(uint256[2]) +func (_VRFTestHelper *VRFTestHelperSession) LinearCombination(c *big.Int, p1 [2]*big.Int, cp1Witness [2]*big.Int, s *big.Int, p2 [2]*big.Int, sp2Witness [2]*big.Int, zInv *big.Int) ([2]*big.Int, error) { + return _VRFTestHelper.Contract.LinearCombination(&_VRFTestHelper.CallOpts, c, p1, cp1Witness, s, p2, sp2Witness, zInv) +} + +// LinearCombination is a free data retrieval call binding the contract method 0xfe54f2a2. +// +// Solidity: function linearCombination_(uint256 c, uint256[2] p1, uint256[2] cp1Witness, uint256 s, uint256[2] p2, uint256[2] sp2Witness, uint256 zInv) constant returns(uint256[2]) +func (_VRFTestHelper *VRFTestHelperCallerSession) LinearCombination(c *big.Int, p1 [2]*big.Int, cp1Witness [2]*big.Int, s *big.Int, p2 [2]*big.Int, sp2Witness [2]*big.Int, zInv *big.Int) ([2]*big.Int, error) { + return _VRFTestHelper.Contract.LinearCombination(&_VRFTestHelper.CallOpts, c, p1, cp1Witness, s, p2, sp2Witness, zInv) +} + +// ProjectiveECAdd is a free data retrieval call binding the contract method 0x95e6ee92. 
+// +// Solidity: function projectiveECAdd_(uint256 px, uint256 py, uint256 qx, uint256 qy) constant returns(uint256, uint256, uint256) +func (_VRFTestHelper *VRFTestHelperCaller) ProjectiveECAdd(opts *bind.CallOpts, px *big.Int, py *big.Int, qx *big.Int, qy *big.Int) (*big.Int, *big.Int, *big.Int, error) { + var ( + ret0 = new(*big.Int) + ret1 = new(*big.Int) + ret2 = new(*big.Int) + ) + out := &[]interface{}{ + ret0, + ret1, + ret2, + } + err := _VRFTestHelper.contract.Call(opts, out, "projectiveECAdd_", px, py, qx, qy) + return *ret0, *ret1, *ret2, err +} + +// ProjectiveECAdd is a free data retrieval call binding the contract method 0x95e6ee92. +// +// Solidity: function projectiveECAdd_(uint256 px, uint256 py, uint256 qx, uint256 qy) constant returns(uint256, uint256, uint256) +func (_VRFTestHelper *VRFTestHelperSession) ProjectiveECAdd(px *big.Int, py *big.Int, qx *big.Int, qy *big.Int) (*big.Int, *big.Int, *big.Int, error) { + return _VRFTestHelper.Contract.ProjectiveECAdd(&_VRFTestHelper.CallOpts, px, py, qx, qy) +} + +// ProjectiveECAdd is a free data retrieval call binding the contract method 0x95e6ee92. +// +// Solidity: function projectiveECAdd_(uint256 px, uint256 py, uint256 qx, uint256 qy) constant returns(uint256, uint256, uint256) +func (_VRFTestHelper *VRFTestHelperCallerSession) ProjectiveECAdd(px *big.Int, py *big.Int, qx *big.Int, qy *big.Int) (*big.Int, *big.Int, *big.Int, error) { + return _VRFTestHelper.Contract.ProjectiveECAdd(&_VRFTestHelper.CallOpts, px, py, qx, qy) +} + +// RandomValueFromVRFProof is a free data retrieval call binding the contract method 0xcefda0c5. 
+// +// Solidity: function randomValueFromVRFProof_(bytes proof) constant returns(uint256 output) +func (_VRFTestHelper *VRFTestHelperCaller) RandomValueFromVRFProof(opts *bind.CallOpts, proof []byte) (*big.Int, error) { + var ( + ret0 = new(*big.Int) + ) + out := ret0 + err := _VRFTestHelper.contract.Call(opts, out, "randomValueFromVRFProof_", proof) + return *ret0, err +} + +// RandomValueFromVRFProof is a free data retrieval call binding the contract method 0xcefda0c5. +// +// Solidity: function randomValueFromVRFProof_(bytes proof) constant returns(uint256 output) +func (_VRFTestHelper *VRFTestHelperSession) RandomValueFromVRFProof(proof []byte) (*big.Int, error) { + return _VRFTestHelper.Contract.RandomValueFromVRFProof(&_VRFTestHelper.CallOpts, proof) +} + +// RandomValueFromVRFProof is a free data retrieval call binding the contract method 0xcefda0c5. +// +// Solidity: function randomValueFromVRFProof_(bytes proof) constant returns(uint256 output) +func (_VRFTestHelper *VRFTestHelperCallerSession) RandomValueFromVRFProof(proof []byte) (*big.Int, error) { + return _VRFTestHelper.Contract.RandomValueFromVRFProof(&_VRFTestHelper.CallOpts, proof) +} + +// ScalarFromCurvePoints is a free data retrieval call binding the contract method 0x7f8f50a8. +// +// Solidity: function scalarFromCurvePoints_(uint256[2] hash, uint256[2] pk, uint256[2] gamma, address uWitness, uint256[2] v) constant returns(uint256) +func (_VRFTestHelper *VRFTestHelperCaller) ScalarFromCurvePoints(opts *bind.CallOpts, hash [2]*big.Int, pk [2]*big.Int, gamma [2]*big.Int, uWitness common.Address, v [2]*big.Int) (*big.Int, error) { + var ( + ret0 = new(*big.Int) + ) + out := ret0 + err := _VRFTestHelper.contract.Call(opts, out, "scalarFromCurvePoints_", hash, pk, gamma, uWitness, v) + return *ret0, err +} + +// ScalarFromCurvePoints is a free data retrieval call binding the contract method 0x7f8f50a8. 
+// +// Solidity: function scalarFromCurvePoints_(uint256[2] hash, uint256[2] pk, uint256[2] gamma, address uWitness, uint256[2] v) constant returns(uint256) +func (_VRFTestHelper *VRFTestHelperSession) ScalarFromCurvePoints(hash [2]*big.Int, pk [2]*big.Int, gamma [2]*big.Int, uWitness common.Address, v [2]*big.Int) (*big.Int, error) { + return _VRFTestHelper.Contract.ScalarFromCurvePoints(&_VRFTestHelper.CallOpts, hash, pk, gamma, uWitness, v) +} + +// ScalarFromCurvePoints is a free data retrieval call binding the contract method 0x7f8f50a8. +// +// Solidity: function scalarFromCurvePoints_(uint256[2] hash, uint256[2] pk, uint256[2] gamma, address uWitness, uint256[2] v) constant returns(uint256) +func (_VRFTestHelper *VRFTestHelperCallerSession) ScalarFromCurvePoints(hash [2]*big.Int, pk [2]*big.Int, gamma [2]*big.Int, uWitness common.Address, v [2]*big.Int) (*big.Int, error) { + return _VRFTestHelper.Contract.ScalarFromCurvePoints(&_VRFTestHelper.CallOpts, hash, pk, gamma, uWitness, v) +} + +// SquareRoot is a free data retrieval call binding the contract method 0x8af046ea. +// +// Solidity: function squareRoot_(uint256 x) constant returns(uint256) +func (_VRFTestHelper *VRFTestHelperCaller) SquareRoot(opts *bind.CallOpts, x *big.Int) (*big.Int, error) { + var ( + ret0 = new(*big.Int) + ) + out := ret0 + err := _VRFTestHelper.contract.Call(opts, out, "squareRoot_", x) + return *ret0, err +} + +// SquareRoot is a free data retrieval call binding the contract method 0x8af046ea. +// +// Solidity: function squareRoot_(uint256 x) constant returns(uint256) +func (_VRFTestHelper *VRFTestHelperSession) SquareRoot(x *big.Int) (*big.Int, error) { + return _VRFTestHelper.Contract.SquareRoot(&_VRFTestHelper.CallOpts, x) +} + +// SquareRoot is a free data retrieval call binding the contract method 0x8af046ea. 
+// +// Solidity: function squareRoot_(uint256 x) constant returns(uint256) +func (_VRFTestHelper *VRFTestHelperCallerSession) SquareRoot(x *big.Int) (*big.Int, error) { + return _VRFTestHelper.Contract.SquareRoot(&_VRFTestHelper.CallOpts, x) +} + +// VerifyLinearCombinationWithGenerator is a free data retrieval call binding the contract method 0x91d5f691. +// +// Solidity: function verifyLinearCombinationWithGenerator_(uint256 c, uint256[2] p, uint256 s, address lcWitness) constant returns(bool) +func (_VRFTestHelper *VRFTestHelperCaller) VerifyLinearCombinationWithGenerator(opts *bind.CallOpts, c *big.Int, p [2]*big.Int, s *big.Int, lcWitness common.Address) (bool, error) { + var ( + ret0 = new(bool) + ) + out := ret0 + err := _VRFTestHelper.contract.Call(opts, out, "verifyLinearCombinationWithGenerator_", c, p, s, lcWitness) + return *ret0, err +} + +// VerifyLinearCombinationWithGenerator is a free data retrieval call binding the contract method 0x91d5f691. +// +// Solidity: function verifyLinearCombinationWithGenerator_(uint256 c, uint256[2] p, uint256 s, address lcWitness) constant returns(bool) +func (_VRFTestHelper *VRFTestHelperSession) VerifyLinearCombinationWithGenerator(c *big.Int, p [2]*big.Int, s *big.Int, lcWitness common.Address) (bool, error) { + return _VRFTestHelper.Contract.VerifyLinearCombinationWithGenerator(&_VRFTestHelper.CallOpts, c, p, s, lcWitness) +} + +// VerifyLinearCombinationWithGenerator is a free data retrieval call binding the contract method 0x91d5f691. 
+// +// Solidity: function verifyLinearCombinationWithGenerator_(uint256 c, uint256[2] p, uint256 s, address lcWitness) constant returns(bool) +func (_VRFTestHelper *VRFTestHelperCallerSession) VerifyLinearCombinationWithGenerator(c *big.Int, p [2]*big.Int, s *big.Int, lcWitness common.Address) (bool, error) { + return _VRFTestHelper.Contract.VerifyLinearCombinationWithGenerator(&_VRFTestHelper.CallOpts, c, p, s, lcWitness) +} + +// VerifyVRFProof is a free data retrieval call binding the contract method 0xef3b10ec. +// +// Solidity: function verifyVRFProof_(uint256[2] pk, uint256[2] gamma, uint256 c, uint256 s, uint256 seed, address uWitness, uint256[2] cGammaWitness, uint256[2] sHashWitness, uint256 zInv) constant returns() +func (_VRFTestHelper *VRFTestHelperCaller) VerifyVRFProof(opts *bind.CallOpts, pk [2]*big.Int, gamma [2]*big.Int, c *big.Int, s *big.Int, seed *big.Int, uWitness common.Address, cGammaWitness [2]*big.Int, sHashWitness [2]*big.Int, zInv *big.Int) error { + var () + out := &[]interface{}{} + err := _VRFTestHelper.contract.Call(opts, out, "verifyVRFProof_", pk, gamma, c, s, seed, uWitness, cGammaWitness, sHashWitness, zInv) + return err +} + +// VerifyVRFProof is a free data retrieval call binding the contract method 0xef3b10ec. +// +// Solidity: function verifyVRFProof_(uint256[2] pk, uint256[2] gamma, uint256 c, uint256 s, uint256 seed, address uWitness, uint256[2] cGammaWitness, uint256[2] sHashWitness, uint256 zInv) constant returns() +func (_VRFTestHelper *VRFTestHelperSession) VerifyVRFProof(pk [2]*big.Int, gamma [2]*big.Int, c *big.Int, s *big.Int, seed *big.Int, uWitness common.Address, cGammaWitness [2]*big.Int, sHashWitness [2]*big.Int, zInv *big.Int) error { + return _VRFTestHelper.Contract.VerifyVRFProof(&_VRFTestHelper.CallOpts, pk, gamma, c, s, seed, uWitness, cGammaWitness, sHashWitness, zInv) +} + +// VerifyVRFProof is a free data retrieval call binding the contract method 0xef3b10ec. 
+// +// Solidity: function verifyVRFProof_(uint256[2] pk, uint256[2] gamma, uint256 c, uint256 s, uint256 seed, address uWitness, uint256[2] cGammaWitness, uint256[2] sHashWitness, uint256 zInv) constant returns() +func (_VRFTestHelper *VRFTestHelperCallerSession) VerifyVRFProof(pk [2]*big.Int, gamma [2]*big.Int, c *big.Int, s *big.Int, seed *big.Int, uWitness common.Address, cGammaWitness [2]*big.Int, sHashWitness [2]*big.Int, zInv *big.Int) error { + return _VRFTestHelper.Contract.VerifyVRFProof(&_VRFTestHelper.CallOpts, pk, gamma, c, s, seed, uWitness, cGammaWitness, sHashWitness, zInv) +} + +// YSquared is a free data retrieval call binding the contract method 0x9d6f0337. +// +// Solidity: function ySquared_(uint256 x) constant returns(uint256) +func (_VRFTestHelper *VRFTestHelperCaller) YSquared(opts *bind.CallOpts, x *big.Int) (*big.Int, error) { + var ( + ret0 = new(*big.Int) + ) + out := ret0 + err := _VRFTestHelper.contract.Call(opts, out, "ySquared_", x) + return *ret0, err +} + +// YSquared is a free data retrieval call binding the contract method 0x9d6f0337. +// +// Solidity: function ySquared_(uint256 x) constant returns(uint256) +func (_VRFTestHelper *VRFTestHelperSession) YSquared(x *big.Int) (*big.Int, error) { + return _VRFTestHelper.Contract.YSquared(&_VRFTestHelper.CallOpts, x) +} + +// YSquared is a free data retrieval call binding the contract method 0x9d6f0337. 
+// +// Solidity: function ySquared_(uint256 x) constant returns(uint256) +func (_VRFTestHelper *VRFTestHelperCallerSession) YSquared(x *big.Int) (*big.Int, error) { + return _VRFTestHelper.Contract.YSquared(&_VRFTestHelper.CallOpts, x) +} diff --git a/core/services/vrf/generated/solidity_vrf_consumer_interface/solidity_vrf_consumer_interface.go b/core/services/vrf/generated/solidity_vrf_consumer_interface/solidity_vrf_consumer_interface.go new file mode 100644 index 00000000000..a3bcd60da38 --- /dev/null +++ b/core/services/vrf/generated/solidity_vrf_consumer_interface/solidity_vrf_consumer_interface.go @@ -0,0 +1,310 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package solidity_vrf_consumer_interface + +import ( + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = abi.U256 + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +// VRFConsumerABI is the input ABI used to generate the binding from. 
+const VRFConsumerABI = "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_vrfCoordinator\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_requestId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_randomness\",\"type\":\"uint256\"}],\"name\":\"fulfillRandomness\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"nonces\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"randomnessOutput\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"requestId\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_keyHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"_fee\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_seed\",\"type\":\"uint256\"}],\"name\":\"requestRandomness\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"requestId\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" + +// VRFConsumerBin is the compiled bytecode used for deploying new contracts. 
+var VRFConsumerBin = "0x608060405234801561001057600080fd5b506040516106033803806106038339818101604052604081101561003357600080fd5b810190808051906020019092919080519060200190929190505050818181600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505050505061051f806100e46000396000f3fe608060405234801561001057600080fd5b50600436106100565760003560e01c80626d6cae1461005b5780631f1f897f146100795780632f47fd86146100b15780639e317f12146100cf578063dc6cfe1014610111575b600080fd5b610063610167565b6040518082815260200191505060405180910390f35b6100af6004803603604081101561008f57600080fd5b81019080803590602001909291908035906020019092919050505061016d565b005b6100b961017f565b6040518082815260200191505060405180910390f35b6100fb600480360360208110156100e557600080fd5b8101908080359060200190929190505050610185565b6040518082815260200191505060405180910390f35b6101516004803603606081101561012757600080fd5b8101908080359060200190929190803590602001909291908035906020019092919050505061019d565b6040518082815260200191505060405180910390f35b60045481565b80600381905550816004819055505050565b60035481565b60026020528060005260406000206000915090505481565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16634000aea0600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1685878660405160200180838152602001828152602001925050506040516020818303038152906040526040518463ffffffff1660e01b8152600401808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200183815260200180602001828103825283818151815260200191508051906020019080838360005b838110156102af578082015181840152602081019050610294565b50505050905090810190601f1680156102dc5780820380516001836020036101000a031916815260200191505b50945050505050602060405180830381600087803b1580156102f
d57600080fd5b505af1158015610311573d6000803e3d6000fd5b505050506040513d602081101561032757600080fd5b810190808051906020019092919050505050600061035a858430600260008a8152602001908152602001600020546103ae565b90506103836001600260008881526020019081526020016000205461042890919063ffffffff16565b60026000878152602001908152602001600020819055506103a485826104b0565b9150509392505050565b600084848484604051602001808581526020018481526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019450505050506040516020818303038152906040528051906020012060001c9050949350505050565b6000808284019050838110156104a6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601b8152602001807f536166654d6174683a206164646974696f6e206f766572666c6f77000000000081525060200191505060405180910390fd5b8091505092915050565b6000828260405160200180838152602001828152602001925050506040516020818303038152906040528051906020012090509291505056fea2646970667358220000000000000000000000000000000000000000000000000000000000000000000064736f6c63430000000033" + +// DeployVRFConsumer deploys a new Ethereum contract, binding an instance of VRFConsumer to it. 
+func DeployVRFConsumer(auth *bind.TransactOpts, backend bind.ContractBackend, _vrfCoordinator common.Address, _link common.Address) (common.Address, *types.Transaction, *VRFConsumer, error) { + parsed, err := abi.JSON(strings.NewReader(VRFConsumerABI)) + if err != nil { + return common.Address{}, nil, nil, err + } + + address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(VRFConsumerBin), backend, _vrfCoordinator, _link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFConsumer{VRFConsumerCaller: VRFConsumerCaller{contract: contract}, VRFConsumerTransactor: VRFConsumerTransactor{contract: contract}, VRFConsumerFilterer: VRFConsumerFilterer{contract: contract}}, nil +} + +// VRFConsumer is an auto generated Go binding around an Ethereum contract. +type VRFConsumer struct { + VRFConsumerCaller // Read-only binding to the contract + VRFConsumerTransactor // Write-only binding to the contract + VRFConsumerFilterer // Log filterer for contract events +} + +// VRFConsumerCaller is an auto generated read-only Go binding around an Ethereum contract. +type VRFConsumerCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// VRFConsumerTransactor is an auto generated write-only Go binding around an Ethereum contract. +type VRFConsumerTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// VRFConsumerFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type VRFConsumerFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// VRFConsumerSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. 
+type VRFConsumerSession struct { + Contract *VRFConsumer // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// VRFConsumerCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type VRFConsumerCallerSession struct { + Contract *VRFConsumerCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// VRFConsumerTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type VRFConsumerTransactorSession struct { + Contract *VRFConsumerTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// VRFConsumerRaw is an auto generated low-level Go binding around an Ethereum contract. +type VRFConsumerRaw struct { + Contract *VRFConsumer // Generic contract binding to access the raw methods on +} + +// VRFConsumerCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type VRFConsumerCallerRaw struct { + Contract *VRFConsumerCaller // Generic read-only contract binding to access the raw methods on +} + +// VRFConsumerTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type VRFConsumerTransactorRaw struct { + Contract *VRFConsumerTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewVRFConsumer creates a new instance of VRFConsumer, bound to a specific deployed contract. 
+func NewVRFConsumer(address common.Address, backend bind.ContractBackend) (*VRFConsumer, error) { + contract, err := bindVRFConsumer(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFConsumer{VRFConsumerCaller: VRFConsumerCaller{contract: contract}, VRFConsumerTransactor: VRFConsumerTransactor{contract: contract}, VRFConsumerFilterer: VRFConsumerFilterer{contract: contract}}, nil +} + +// NewVRFConsumerCaller creates a new read-only instance of VRFConsumer, bound to a specific deployed contract. +func NewVRFConsumerCaller(address common.Address, caller bind.ContractCaller) (*VRFConsumerCaller, error) { + contract, err := bindVRFConsumer(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFConsumerCaller{contract: contract}, nil +} + +// NewVRFConsumerTransactor creates a new write-only instance of VRFConsumer, bound to a specific deployed contract. +func NewVRFConsumerTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFConsumerTransactor, error) { + contract, err := bindVRFConsumer(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFConsumerTransactor{contract: contract}, nil +} + +// NewVRFConsumerFilterer creates a new log filterer instance of VRFConsumer, bound to a specific deployed contract. +func NewVRFConsumerFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFConsumerFilterer, error) { + contract, err := bindVRFConsumer(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFConsumerFilterer{contract: contract}, nil +} + +// bindVRFConsumer binds a generic wrapper to an already deployed contract. 
+func bindVRFConsumer(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(VRFConsumerABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_VRFConsumer *VRFConsumerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { + return _VRFConsumer.Contract.VRFConsumerCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_VRFConsumer *VRFConsumerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFConsumer.Contract.VRFConsumerTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_VRFConsumer *VRFConsumerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFConsumer.Contract.VRFConsumerTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_VRFConsumer *VRFConsumerCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { + return _VRFConsumer.Contract.contract.Call(opts, result, method, params...) 
+} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_VRFConsumer *VRFConsumerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFConsumer.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_VRFConsumer *VRFConsumerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFConsumer.Contract.contract.Transact(opts, method, params...) +} + +// Nonces is a free data retrieval call binding the contract method 0x9e317f12. +// +// Solidity: function nonces(bytes32 ) constant returns(uint256) +func (_VRFConsumer *VRFConsumerCaller) Nonces(opts *bind.CallOpts, arg0 [32]byte) (*big.Int, error) { + var ( + ret0 = new(*big.Int) + ) + out := ret0 + err := _VRFConsumer.contract.Call(opts, out, "nonces", arg0) + return *ret0, err +} + +// Nonces is a free data retrieval call binding the contract method 0x9e317f12. +// +// Solidity: function nonces(bytes32 ) constant returns(uint256) +func (_VRFConsumer *VRFConsumerSession) Nonces(arg0 [32]byte) (*big.Int, error) { + return _VRFConsumer.Contract.Nonces(&_VRFConsumer.CallOpts, arg0) +} + +// Nonces is a free data retrieval call binding the contract method 0x9e317f12. +// +// Solidity: function nonces(bytes32 ) constant returns(uint256) +func (_VRFConsumer *VRFConsumerCallerSession) Nonces(arg0 [32]byte) (*big.Int, error) { + return _VRFConsumer.Contract.Nonces(&_VRFConsumer.CallOpts, arg0) +} + +// RandomnessOutput is a free data retrieval call binding the contract method 0x2f47fd86. 
+// +// Solidity: function randomnessOutput() constant returns(uint256) +func (_VRFConsumer *VRFConsumerCaller) RandomnessOutput(opts *bind.CallOpts) (*big.Int, error) { + var ( + ret0 = new(*big.Int) + ) + out := ret0 + err := _VRFConsumer.contract.Call(opts, out, "randomnessOutput") + return *ret0, err +} + +// RandomnessOutput is a free data retrieval call binding the contract method 0x2f47fd86. +// +// Solidity: function randomnessOutput() constant returns(uint256) +func (_VRFConsumer *VRFConsumerSession) RandomnessOutput() (*big.Int, error) { + return _VRFConsumer.Contract.RandomnessOutput(&_VRFConsumer.CallOpts) +} + +// RandomnessOutput is a free data retrieval call binding the contract method 0x2f47fd86. +// +// Solidity: function randomnessOutput() constant returns(uint256) +func (_VRFConsumer *VRFConsumerCallerSession) RandomnessOutput() (*big.Int, error) { + return _VRFConsumer.Contract.RandomnessOutput(&_VRFConsumer.CallOpts) +} + +// RequestId is a free data retrieval call binding the contract method 0x006d6cae. +// +// Solidity: function requestId() constant returns(bytes32) +func (_VRFConsumer *VRFConsumerCaller) RequestId(opts *bind.CallOpts) ([32]byte, error) { + var ( + ret0 = new([32]byte) + ) + out := ret0 + err := _VRFConsumer.contract.Call(opts, out, "requestId") + return *ret0, err +} + +// RequestId is a free data retrieval call binding the contract method 0x006d6cae. +// +// Solidity: function requestId() constant returns(bytes32) +func (_VRFConsumer *VRFConsumerSession) RequestId() ([32]byte, error) { + return _VRFConsumer.Contract.RequestId(&_VRFConsumer.CallOpts) +} + +// RequestId is a free data retrieval call binding the contract method 0x006d6cae. 
+// +// Solidity: function requestId() constant returns(bytes32) +func (_VRFConsumer *VRFConsumerCallerSession) RequestId() ([32]byte, error) { + return _VRFConsumer.Contract.RequestId(&_VRFConsumer.CallOpts) +} + +// FulfillRandomness is a paid mutator transaction binding the contract method 0x1f1f897f. +// +// Solidity: function fulfillRandomness(bytes32 _requestId, uint256 _randomness) returns() +func (_VRFConsumer *VRFConsumerTransactor) FulfillRandomness(opts *bind.TransactOpts, _requestId [32]byte, _randomness *big.Int) (*types.Transaction, error) { + return _VRFConsumer.contract.Transact(opts, "fulfillRandomness", _requestId, _randomness) +} + +// FulfillRandomness is a paid mutator transaction binding the contract method 0x1f1f897f. +// +// Solidity: function fulfillRandomness(bytes32 _requestId, uint256 _randomness) returns() +func (_VRFConsumer *VRFConsumerSession) FulfillRandomness(_requestId [32]byte, _randomness *big.Int) (*types.Transaction, error) { + return _VRFConsumer.Contract.FulfillRandomness(&_VRFConsumer.TransactOpts, _requestId, _randomness) +} + +// FulfillRandomness is a paid mutator transaction binding the contract method 0x1f1f897f. +// +// Solidity: function fulfillRandomness(bytes32 _requestId, uint256 _randomness) returns() +func (_VRFConsumer *VRFConsumerTransactorSession) FulfillRandomness(_requestId [32]byte, _randomness *big.Int) (*types.Transaction, error) { + return _VRFConsumer.Contract.FulfillRandomness(&_VRFConsumer.TransactOpts, _requestId, _randomness) +} + +// RequestRandomness is a paid mutator transaction binding the contract method 0xdc6cfe10. 
+// +// Solidity: function requestRandomness(bytes32 _keyHash, uint256 _fee, uint256 _seed) returns(bytes32 requestId) +func (_VRFConsumer *VRFConsumerTransactor) RequestRandomness(opts *bind.TransactOpts, _keyHash [32]byte, _fee *big.Int, _seed *big.Int) (*types.Transaction, error) { + return _VRFConsumer.contract.Transact(opts, "requestRandomness", _keyHash, _fee, _seed) +} + +// RequestRandomness is a paid mutator transaction binding the contract method 0xdc6cfe10. +// +// Solidity: function requestRandomness(bytes32 _keyHash, uint256 _fee, uint256 _seed) returns(bytes32 requestId) +func (_VRFConsumer *VRFConsumerSession) RequestRandomness(_keyHash [32]byte, _fee *big.Int, _seed *big.Int) (*types.Transaction, error) { + return _VRFConsumer.Contract.RequestRandomness(&_VRFConsumer.TransactOpts, _keyHash, _fee, _seed) +} + +// RequestRandomness is a paid mutator transaction binding the contract method 0xdc6cfe10. +// +// Solidity: function requestRandomness(bytes32 _keyHash, uint256 _fee, uint256 _seed) returns(bytes32 requestId) +func (_VRFConsumer *VRFConsumerTransactorSession) RequestRandomness(_keyHash [32]byte, _fee *big.Int, _seed *big.Int) (*types.Transaction, error) { + return _VRFConsumer.Contract.RequestRandomness(&_VRFConsumer.TransactOpts, _keyHash, _fee, _seed) +} diff --git a/core/services/vrf/generated/solidity_vrf_coordinator_interface/.solidity_vrf_coordinator_interface.go.swp b/core/services/vrf/generated/solidity_vrf_coordinator_interface/.solidity_vrf_coordinator_interface.go.swp new file mode 100644 index 00000000000..d6390aaee85 Binary files /dev/null and b/core/services/vrf/generated/solidity_vrf_coordinator_interface/.solidity_vrf_coordinator_interface.go.swp differ diff --git a/core/services/vrf/generated/solidity_vrf_coordinator_interface/solidity_vrf_coordinator_interface.go b/core/services/vrf/generated/solidity_vrf_coordinator_interface/solidity_vrf_coordinator_interface.go new file mode 100644 index 00000000000..2d0c7770471 --- 
/dev/null +++ b/core/services/vrf/generated/solidity_vrf_coordinator_interface/solidity_vrf_coordinator_interface.go @@ -0,0 +1,687 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package solidity_vrf_coordinator_interface + +import ( + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = abi.U256 + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +// VRFCoordinatorABI is the input ABI used to generate the binding from. +const VRFCoordinatorABI = "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_link\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"}],\"name\":\"NewServiceAgreement\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"keyHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"jobID\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"}],\"name\":\"RandomnessRequest\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"ca
llbacks\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"callbackContract\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"randomnessFee\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"seed\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_proof\",\"type\":\"bytes\"}],\"name\":\"fulfillRandomnessRequest\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256[2]\",\"name\":\"_publicKey\",\"type\":\"uint256[2]\"}],\"name\":\"hashOfKey\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_fee\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"onTokenTransfer\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_fee\",\"type\":\"uint256\"},{\"internalType\":\"uint256[2]\",\"name\":\"_publicProvingKey\",\"type\":\"uint256[2]\"},{\"internalType\":\"bytes32\",\"name\":\"_jobID\",\"type\":\"bytes32\"}],\"name\":\"registerProvingKey\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"serviceAgreements\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"vRFOracle\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"jobID\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_recipient\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"n
ame\":\"_amount\",\"type\":\"uint256\"}],\"name\":\"withdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"withdrawableTokens\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]" + +// VRFCoordinatorBin is the compiled bytecode used for deploying new contracts. +var VRFCoordinatorBin = "0x608060405234801561001057600080fd5b50604051612a75380380612a758339818101604052602081101561003357600080fd5b8101908080519060200190929190505050806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550506129e1806100946000396000f3fe608060405234801561001057600080fd5b50600436106100875760003560e01c806375d350701161005b57806375d3507014610275578063a4c0ed36146102f1578063caf70c4a146103d6578063f3fef3a31461044f57610087565b80626f6ad01461008c57806321f36509146100e45780635e1c1059146101605780636815851e14610233575b600080fd5b6100ce600480360360208110156100a257600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919050505061049d565b6040518082815260200191505060405180910390f35b610110600480360360208110156100fa57600080fd5b81019080803590602001909291905050506104b5565b604051808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001838152602001828152602001935050505060405180910390f35b6102196004803603602081101561017657600080fd5b810190808035906020019064010000000081111561019357600080fd5b8201836020820111156101a557600080fd5b803590602001918460018302840111640100000000831117156101c757600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050505050505091929192905050506104ff565b604051808215151515815260200191505060405180910390f35b6102736004803603608081101561024957600080fd5b810190808035906020019092919080604001909
192919290803590602001909291905050506108ec565b005b6102a16004803603602081101561028b57600080fd5b8101908080359060200190929190505050610ade565b604051808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001838152602001828152602001935050505060405180910390f35b6103d46004803603606081101561030757600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291908035906020019064010000000081111561034e57600080fd5b82018360208201111561036057600080fd5b8035906020019184600183028401116401000000008311171561038257600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050509192919290505050610b28565b005b610439600480360360408110156103ec57600080fd5b8101908080604001906002806020026040519081016040528092919082600260200280828437600081840152601f19601f8201169050808301925050505050509192919290505050610c34565b6040518082815260200191505060405180910390f35b61049b6004803603604081101561046557600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610c8c565b005b60036020528060005260406000206000915090505481565b60016020528060005260406000206000915090508060000160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16908060010154908060020154905083565b60006105096128a9565b600060208401915060e08401519050600061052383610c34565b905060006105318284610ec5565b905061053b6128cb565b600160008381526020019081526020016000206040518060600160405290816000820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001600182015481526020016002820154815250509050600073ffffffffffffffffffffffffffffffffffffffff16816000015173ffffffffffffffffffffffffffffffffffffffff16141561066c576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260188152602001807f6e6f20636f72726573706
f6e64696e672072657175657374000000000000000081525060200191505060405180910390fd5b600061067788610efe565b905060006002600086815260200190815260200160002060000160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905061070a8360200151600360008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020546110ce90919063ffffffff16565b600360008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000208190555060006060631f1f897f60e01b86856040516024018083815260200182815260200192505050604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090506000856000015173ffffffffffffffffffffffffffffffffffffffff16826040518082805190602001908083835b6020831061082257805182526020820191506020810190506020830392506107ff565b6001836020036101000a0380198251168184511680821785525050505050509050019150506000604051808303816000865af19150503d8060008114610884576040519150601f19603f3d011682016040523d82523d6000602084013e610889565b606091505b5050905060016000888152602001908152602001600020600080820160006101000a81549073ffffffffffffffffffffffffffffffffffffffff0219169055600182016000905560028201600090555050809a5050505050505050505050919050565b600061092e836002806020026040519081016040528092919082600260200280828437600081840152601f19601f820116905080830192505050505050610c34565b905060006002600083815260200190815260200160002060000160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff169050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614610a0d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260198152602001807f706c656173652072656769737465722061206e6577206b65790000000000000081525060200191505060405180910390fd5b336002600084815260200190815260200160002060000160006101000a81548
173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055508260026000848152602001908152602001600020600101819055508460026000848152602001908152602001600020600201819055507fae189157e0628c1e62315e9179156e1ea10e90e9c15060002f7021e907dc2cfe8286604051808381526020018281526020019250505060405180910390a15050505050565b60026020528060005260406000206000915090508060000160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16908060010154908060020154905083565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610bea576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f4d75737420757365204c494e4b20746f6b656e0000000000000000000000000081525060200191505060405180910390fd5b600080828060200190516040811015610c0257600080fd5b81019080805190602001909291908051906020019092919050505091509150610c2d82828688611156565b5050505050565b6000816040516020018082600260200280838360005b83811015610c65578082015181840152602081019050610c4a565b50505050905001915050604051602081830303815290604052805190602001209050919050565b8080600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015610d42576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260208152602001807f63616e2774207769746864726177206d6f7265207468616e2062616c616e636581525060200191505060405180910390fd5b610d9482600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020546114a390919063ffffffff16565b600360003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a9059cbb8
4846040518363ffffffff1660e01b8152600401808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b158015610e7f57600080fd5b505af1158015610e93573d6000803e3d6000fd5b505050506040513d6020811015610ea957600080fd5b8101908080519060200190929190505050610ec057fe5b505050565b60008282604051602001808381526020018281526020019250505060405160208183030381529060405280519060200120905092915050565b60006101a0825114610f78576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f77726f6e672070726f6f66206c656e677468000000000000000000000000000081525060200191505060405180910390fd5b610f806128a9565b610f886128a9565b610f90612902565b6000610f9a6128a9565b610fa26128a9565b6000888060200190516101a0811015610fba57600080fd5b810190809190826040019190826040019190826060018051906020019092919091908260400191908260400180519060200190929190505050869650859550849450839350829250819150809750819850829950839a50849b50859c50869d505050505050505061106587878760006003811061103357fe5b60200201518860016003811061104557fe5b60200201518960026003811061105757fe5b60200201518989898961152c565b6003866040516020018083815260200182600260200280838360005b8381101561109c578082015181840152602081019050611081565b50505050905001925050506040516020818303038152906040528051906020012060001c975050505050505050919050565b60008082840190508381101561114c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601b8152602001807f536166654d6174683a206164646974696f6e206f766572666c6f77000000000081525060200191505060405180910390fd5b8091505092915050565b818460026000828152602001908152602001600020600201548210156111e4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f42656c6f7720616772656564207061796d656e7400000000000000000000000081525060200191505060405180910390fd5b6000600460008881526020019081526020016000206000857
3ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020549050600061124788888785611857565b905060006112558983610ec5565b9050600073ffffffffffffffffffffffffffffffffffffffff166001600083815260200190815260200160002060000160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16146112c357fe5b856001600083815260200190815260200160002060000160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550866001600083815260200190815260200160002060010181905550816001600083815260200190815260200160002060020181905550600260008a8152602001908152602001600020600101547fd241d78a52145a5d1d1ff002e32ec15cdc395631bcee66246650c2429dfaccaa8a84898b604051808581526020018481526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200194505050505060405180910390a26114446001600460008c815260200190815260200160002060008973ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020546110ce90919063ffffffff16565b600460008b815260200190815260200160002060008873ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550505050505050505050565b60008282111561151b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601e8152602001807f536166654d6174683a207375627472616374696f6e206f766572666c6f77000081525060200191505060405180910390fd5b600082840390508091505092915050565b611535896118d1565b6115a7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601a8152602001807f7075626c6963206b6579206973206e6f74206f6e20637572766500000000000081525060200191505060405180910390fd5b6115b0886118d1565b611622576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810
3825260158152602001807f67616d6d61206973206e6f74206f6e206375727665000000000000000000000081525060200191505060405180910390fd5b61162b836118d1565b61169d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601d8152602001807f6347616d6d615769746e657373206973206e6f74206f6e20637572766500000081525060200191505060405180910390fd5b6116a6826118d1565b611718576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601c8152602001807f73486173685769746e657373206973206e6f74206f6e2063757276650000000081525060200191505060405180910390fd5b611724878a8887611942565b611796576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601a8152602001807f6164647228632a706b2b732a6729e289a05f755769746e65737300000000000081525060200191505060405180910390fd5b61179e6128a9565b6117a88a87611b7a565b90506117b26128a9565b6117c1898b878b868989611c39565b905060006117d2838d8d8a86611ddd565b9050808a14611849576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600d8152602001807f696e76616c69642070726f6f660000000000000000000000000000000000000081525060200191505060405180910390fd5b505050505050505050505050565b600084848484604051602001808581526020018481526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019450505050506040516020818303038152906040528051906020012060001c9050949350505050565b60007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f806118fb57fe5b8260016002811061190857fe5b60200201518360016002811061191a57fe5b60200201510961193a8360006002811061193057fe5b6020020151611f0c565b149050919050565b60008073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614156119e6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600b8152602001807f626164207769746e657373000000000000000000000000000
00000000000000081525060200191505060405180910390fd5b6000806002866001600281106119f857fe5b602002015181611a0457fe5b0614611a1157601c611a14565b601b5b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd036414180611a4057fe5b8587600060028110611a4e57fe5b6020020151097ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd03641410360001b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd036414180611aa557fe5b87600060028110611ab257fe5b6020020151890960001b90506000600183858a600060028110611ad157fe5b602002015160001b8560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa158015611b31573d6000803e3d6000fd5b5050506020604051035190508573ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614945050505050949350505050565b611b826128a9565b611be3600184846040516020018084815260200183600260200280838360005b83811015611bbd578082015181840152602081019050611ba2565b505050509050018281526020019350505050604051602081830303815290604052611f9a565b90505b611bef816118d1565b611c3357611c2c81600060028110611c0357fe5b602002015160405160200180828152602001915050604051602081830303815290604052611f9a565b9050611be6565b92915050565b611c416128a9565b60007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f84600060028110611c7157fe5b602002015188600060028110611c8357fe5b60200201510381611c9057fe5b061415611d05576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601e8152602001807f706f696e747320696e2073756d206d7573742062652064697374696e6374000081525060200191505060405180910390fd5b611d1087898861206d565b611d65576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806129696021913960400191505060405180910390fd5b611d7084868561206d565b611dc5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602281526020018061298a60229
13960400191505060405180910390fd5b611dd08684846121f8565b9050979650505050505050565b6000600286868685876040516020018087815260200186600260200280838360005b83811015611e1a578082015181840152602081019050611dff565b5050505090500185600260200280838360005b83811015611e48578082015181840152602081019050611e2d565b5050505090500184600260200280838360005b83811015611e76578082015181840152602081019050611e5b565b5050505090500183600260200280838360005b83811015611ea4578082015181840152602081019050611e89565b505050509050018273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1660601b815260140196505050505050506040516020818303038152906040528051906020012060001c905095945050505050565b6000807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f80611f3757fe5b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f80611f5f57fe5b848509840990507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f80611f8e57fe5b60078208915050919050565b611fa26128a9565b611fab82612376565b81600060028110611fb857fe5b602002018181525050611fe3611fde82600060028110611fd457fe5b6020020151611f0c565b6123e3565b81600160028110611ff057fe5b602002018181525050600160028260016002811061200a57fe5b60200201518161201657fe5b061415612068578060016002811061202a57fe5b60200201517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f038160016002811061205e57fe5b6020020181815250505b919050565b60008083141561207c57600080fd5b60008460006002811061208b57fe5b602002015190506000806002876001600281106120a457fe5b6020020151816120b057fe5b06146120bd57601c6120c0565b601b5b905060007ffffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141806120ec57fe5b83870960001b9050600060016000801b848660001b8560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa158015612159573d6000803e3d6000fd5b5050506020604051035190506000866040516020018082600260200280838360005b8381101561219657808201518184015260208101905061217b565b5050505090500
19150506040516020818303038152906040528051906020012060001c90508073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614955050505050509392505050565b6122006128a9565b60008060006122558760006002811061221557fe5b60200201518860016002811061222757fe5b60200201518860006002811061223957fe5b60200201518960016002811061224b57fe5b602002015161241d565b80935081945082955050505060017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061228b57fe5b86830914612301576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260198152602001807f696e765a206d75737420626520696e7665727365206f66207a0000000000000081525060200191505060405180910390fd5b60405180604001604052807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061233457fe5b87860981526020017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061236457fe5b87850981525093505050509392505050565b6000818051906020012060001c90505b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f81106123de5780604051602001808281526020019150506040516020818303038152906040528051906020012060001c9050612386565b919050565b600061241682600260017ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f01901c6125f1565b9050919050565b60008060008060006001809150915060007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061245657fe5b897ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f038808905060007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f806124a757fe5b8b7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f038a08905060006124dc83838585612753565b80925081995050506124f088828e886127bd565b809250819950505061250488828c876127bd565b8092508199505050600061251a8d878b856127bd565b809250819950505061252e88828686612753565b809250819950505061254288828e896127bd565b80925081995050508082146125dd577ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061257957fe5b818a0998507ffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffefffffc2f806125a657fe5b82890997507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f806125d357fe5b81830996506125e1565b8196505b5050505050509450945094915050565b6000806125fc612924565b60208160006006811061260b57fe5b60200201818152505060208160016006811061262357fe5b60200201818152505060208160026006811061263b57fe5b602002018181525050848160036006811061265257fe5b602002018181525050838160046006811061266957fe5b6020020181815250507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f816005600681106126a057fe5b6020020181815250506126b1612946565b60208160c0846005600019fa92506000831415612736576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f6269674d6f64457870206661696c75726521000000000000000000000000000081525060200191505060405180910390fd5b8060006001811061274357fe5b6020020151935050505092915050565b6000807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061277e57fe5b8487097ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f806127a957fe5b848709809250819350505094509492505050565b60008060007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f806127ea57fe5b878509905060007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061281957fe5b87877ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f030990507ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061286857fe5b8183087ffffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f8061289357fe5b8689098094508195505050505094509492505050565b6040518060400160405280600290602082028038833980820191505090505090565b6040518060600160405280600073ffffffffffffffffffffffffffffffffffffffff16815260200160008152602001600081525090565b6040518060600160405280600390602082028038833980820191505090505090565b6040518060c00160405280600690602082028038833980820191505090505090565b604051806020016040528060019060208202803883398082019150509050509056fe466972737
4206d756c7469706c69636174696f6e20636865636b206661696c65645365636f6e64206d756c7469706c69636174696f6e20636865636b206661696c6564a2646970667358220000000000000000000000000000000000000000000000000000000000000000000064736f6c63430000000033" + +// DeployVRFCoordinator deploys a new Ethereum contract, binding an instance of VRFCoordinator to it. +func DeployVRFCoordinator(auth *bind.TransactOpts, backend bind.ContractBackend, _link common.Address) (common.Address, *types.Transaction, *VRFCoordinator, error) { + parsed, err := abi.JSON(strings.NewReader(VRFCoordinatorABI)) + if err != nil { + return common.Address{}, nil, nil, err + } + + address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(VRFCoordinatorBin), backend, _link) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &VRFCoordinator{VRFCoordinatorCaller: VRFCoordinatorCaller{contract: contract}, VRFCoordinatorTransactor: VRFCoordinatorTransactor{contract: contract}, VRFCoordinatorFilterer: VRFCoordinatorFilterer{contract: contract}}, nil +} + +// VRFCoordinator is an auto generated Go binding around an Ethereum contract. +type VRFCoordinator struct { + VRFCoordinatorCaller // Read-only binding to the contract + VRFCoordinatorTransactor // Write-only binding to the contract + VRFCoordinatorFilterer // Log filterer for contract events +} + +// VRFCoordinatorCaller is an auto generated read-only Go binding around an Ethereum contract. +type VRFCoordinatorCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// VRFCoordinatorTransactor is an auto generated write-only Go binding around an Ethereum contract. +type VRFCoordinatorTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// VRFCoordinatorFilterer is an auto generated log filtering Go binding around an Ethereum contract events. 
+type VRFCoordinatorFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// VRFCoordinatorSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type VRFCoordinatorSession struct { + Contract *VRFCoordinator // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// VRFCoordinatorCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type VRFCoordinatorCallerSession struct { + Contract *VRFCoordinatorCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// VRFCoordinatorTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type VRFCoordinatorTransactorSession struct { + Contract *VRFCoordinatorTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// VRFCoordinatorRaw is an auto generated low-level Go binding around an Ethereum contract. +type VRFCoordinatorRaw struct { + Contract *VRFCoordinator // Generic contract binding to access the raw methods on +} + +// VRFCoordinatorCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type VRFCoordinatorCallerRaw struct { + Contract *VRFCoordinatorCaller // Generic read-only contract binding to access the raw methods on +} + +// VRFCoordinatorTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. 
+type VRFCoordinatorTransactorRaw struct { + Contract *VRFCoordinatorTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewVRFCoordinator creates a new instance of VRFCoordinator, bound to a specific deployed contract. +func NewVRFCoordinator(address common.Address, backend bind.ContractBackend) (*VRFCoordinator, error) { + contract, err := bindVRFCoordinator(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &VRFCoordinator{VRFCoordinatorCaller: VRFCoordinatorCaller{contract: contract}, VRFCoordinatorTransactor: VRFCoordinatorTransactor{contract: contract}, VRFCoordinatorFilterer: VRFCoordinatorFilterer{contract: contract}}, nil +} + +// NewVRFCoordinatorCaller creates a new read-only instance of VRFCoordinator, bound to a specific deployed contract. +func NewVRFCoordinatorCaller(address common.Address, caller bind.ContractCaller) (*VRFCoordinatorCaller, error) { + contract, err := bindVRFCoordinator(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorCaller{contract: contract}, nil +} + +// NewVRFCoordinatorTransactor creates a new write-only instance of VRFCoordinator, bound to a specific deployed contract. +func NewVRFCoordinatorTransactor(address common.Address, transactor bind.ContractTransactor) (*VRFCoordinatorTransactor, error) { + contract, err := bindVRFCoordinator(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &VRFCoordinatorTransactor{contract: contract}, nil +} + +// NewVRFCoordinatorFilterer creates a new log filterer instance of VRFCoordinator, bound to a specific deployed contract. 
+func NewVRFCoordinatorFilterer(address common.Address, filterer bind.ContractFilterer) (*VRFCoordinatorFilterer, error) { + contract, err := bindVRFCoordinator(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &VRFCoordinatorFilterer{contract: contract}, nil +} + +// bindVRFCoordinator binds a generic wrapper to an already deployed contract. +func bindVRFCoordinator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(VRFCoordinatorABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_VRFCoordinator *VRFCoordinatorRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { + return _VRFCoordinator.Contract.VRFCoordinatorCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_VRFCoordinator *VRFCoordinatorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinator.Contract.VRFCoordinatorTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_VRFCoordinator *VRFCoordinatorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFCoordinator.Contract.VRFCoordinatorTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. 
The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_VRFCoordinator *VRFCoordinatorCallerRaw) Call(opts *bind.CallOpts, result interface{}, method string, params ...interface{}) error { + return _VRFCoordinator.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_VRFCoordinator *VRFCoordinatorTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _VRFCoordinator.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_VRFCoordinator *VRFCoordinatorTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _VRFCoordinator.Contract.contract.Transact(opts, method, params...) +} + +// Callbacks is a free data retrieval call binding the contract method 0x21f36509. +// +// Solidity: function callbacks(bytes32 ) constant returns(address callbackContract, uint256 randomnessFee, uint256 seed) +func (_VRFCoordinator *VRFCoordinatorCaller) Callbacks(opts *bind.CallOpts, arg0 [32]byte) (struct { + CallbackContract common.Address + RandomnessFee *big.Int + Seed *big.Int +}, error) { + ret := new(struct { + CallbackContract common.Address + RandomnessFee *big.Int + Seed *big.Int + }) + out := ret + err := _VRFCoordinator.contract.Call(opts, out, "callbacks", arg0) + return *ret, err +} + +// Callbacks is a free data retrieval call binding the contract method 0x21f36509. 
+// +// Solidity: function callbacks(bytes32 ) constant returns(address callbackContract, uint256 randomnessFee, uint256 seed) +func (_VRFCoordinator *VRFCoordinatorSession) Callbacks(arg0 [32]byte) (struct { + CallbackContract common.Address + RandomnessFee *big.Int + Seed *big.Int +}, error) { + return _VRFCoordinator.Contract.Callbacks(&_VRFCoordinator.CallOpts, arg0) +} + +// Callbacks is a free data retrieval call binding the contract method 0x21f36509. +// +// Solidity: function callbacks(bytes32 ) constant returns(address callbackContract, uint256 randomnessFee, uint256 seed) +func (_VRFCoordinator *VRFCoordinatorCallerSession) Callbacks(arg0 [32]byte) (struct { + CallbackContract common.Address + RandomnessFee *big.Int + Seed *big.Int +}, error) { + return _VRFCoordinator.Contract.Callbacks(&_VRFCoordinator.CallOpts, arg0) +} + +// HashOfKey is a free data retrieval call binding the contract method 0xcaf70c4a. +// +// Solidity: function hashOfKey(uint256[2] _publicKey) constant returns(bytes32) +func (_VRFCoordinator *VRFCoordinatorCaller) HashOfKey(opts *bind.CallOpts, _publicKey [2]*big.Int) ([32]byte, error) { + var ( + ret0 = new([32]byte) + ) + out := ret0 + err := _VRFCoordinator.contract.Call(opts, out, "hashOfKey", _publicKey) + return *ret0, err +} + +// HashOfKey is a free data retrieval call binding the contract method 0xcaf70c4a. +// +// Solidity: function hashOfKey(uint256[2] _publicKey) constant returns(bytes32) +func (_VRFCoordinator *VRFCoordinatorSession) HashOfKey(_publicKey [2]*big.Int) ([32]byte, error) { + return _VRFCoordinator.Contract.HashOfKey(&_VRFCoordinator.CallOpts, _publicKey) +} + +// HashOfKey is a free data retrieval call binding the contract method 0xcaf70c4a. 
+// +// Solidity: function hashOfKey(uint256[2] _publicKey) constant returns(bytes32) +func (_VRFCoordinator *VRFCoordinatorCallerSession) HashOfKey(_publicKey [2]*big.Int) ([32]byte, error) { + return _VRFCoordinator.Contract.HashOfKey(&_VRFCoordinator.CallOpts, _publicKey) +} + +// ServiceAgreements is a free data retrieval call binding the contract method 0x75d35070. +// +// Solidity: function serviceAgreements(bytes32 ) constant returns(address vRFOracle, bytes32 jobID, uint256 fee) +func (_VRFCoordinator *VRFCoordinatorCaller) ServiceAgreements(opts *bind.CallOpts, arg0 [32]byte) (struct { + VRFOracle common.Address + JobID [32]byte + Fee *big.Int +}, error) { + ret := new(struct { + VRFOracle common.Address + JobID [32]byte + Fee *big.Int + }) + out := ret + err := _VRFCoordinator.contract.Call(opts, out, "serviceAgreements", arg0) + return *ret, err +} + +// ServiceAgreements is a free data retrieval call binding the contract method 0x75d35070. +// +// Solidity: function serviceAgreements(bytes32 ) constant returns(address vRFOracle, bytes32 jobID, uint256 fee) +func (_VRFCoordinator *VRFCoordinatorSession) ServiceAgreements(arg0 [32]byte) (struct { + VRFOracle common.Address + JobID [32]byte + Fee *big.Int +}, error) { + return _VRFCoordinator.Contract.ServiceAgreements(&_VRFCoordinator.CallOpts, arg0) +} + +// ServiceAgreements is a free data retrieval call binding the contract method 0x75d35070. +// +// Solidity: function serviceAgreements(bytes32 ) constant returns(address vRFOracle, bytes32 jobID, uint256 fee) +func (_VRFCoordinator *VRFCoordinatorCallerSession) ServiceAgreements(arg0 [32]byte) (struct { + VRFOracle common.Address + JobID [32]byte + Fee *big.Int +}, error) { + return _VRFCoordinator.Contract.ServiceAgreements(&_VRFCoordinator.CallOpts, arg0) +} + +// WithdrawableTokens is a free data retrieval call binding the contract method 0x006f6ad0. 
+// +// Solidity: function withdrawableTokens(address ) constant returns(uint256) +func (_VRFCoordinator *VRFCoordinatorCaller) WithdrawableTokens(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) { + var ( + ret0 = new(*big.Int) + ) + out := ret0 + err := _VRFCoordinator.contract.Call(opts, out, "withdrawableTokens", arg0) + return *ret0, err +} + +// WithdrawableTokens is a free data retrieval call binding the contract method 0x006f6ad0. +// +// Solidity: function withdrawableTokens(address ) constant returns(uint256) +func (_VRFCoordinator *VRFCoordinatorSession) WithdrawableTokens(arg0 common.Address) (*big.Int, error) { + return _VRFCoordinator.Contract.WithdrawableTokens(&_VRFCoordinator.CallOpts, arg0) +} + +// WithdrawableTokens is a free data retrieval call binding the contract method 0x006f6ad0. +// +// Solidity: function withdrawableTokens(address ) constant returns(uint256) +func (_VRFCoordinator *VRFCoordinatorCallerSession) WithdrawableTokens(arg0 common.Address) (*big.Int, error) { + return _VRFCoordinator.Contract.WithdrawableTokens(&_VRFCoordinator.CallOpts, arg0) +} + +// FulfillRandomnessRequest is a paid mutator transaction binding the contract method 0x5e1c1059. +// +// Solidity: function fulfillRandomnessRequest(bytes _proof) returns(bool) +func (_VRFCoordinator *VRFCoordinatorTransactor) FulfillRandomnessRequest(opts *bind.TransactOpts, _proof []byte) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "fulfillRandomnessRequest", _proof) +} + +// FulfillRandomnessRequest is a paid mutator transaction binding the contract method 0x5e1c1059. 
+// +// Solidity: function fulfillRandomnessRequest(bytes _proof) returns(bool) +func (_VRFCoordinator *VRFCoordinatorSession) FulfillRandomnessRequest(_proof []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.FulfillRandomnessRequest(&_VRFCoordinator.TransactOpts, _proof) +} + +// FulfillRandomnessRequest is a paid mutator transaction binding the contract method 0x5e1c1059. +// +// Solidity: function fulfillRandomnessRequest(bytes _proof) returns(bool) +func (_VRFCoordinator *VRFCoordinatorTransactorSession) FulfillRandomnessRequest(_proof []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.FulfillRandomnessRequest(&_VRFCoordinator.TransactOpts, _proof) +} + +// OnTokenTransfer is a paid mutator transaction binding the contract method 0xa4c0ed36. +// +// Solidity: function onTokenTransfer(address _sender, uint256 _fee, bytes _data) returns() +func (_VRFCoordinator *VRFCoordinatorTransactor) OnTokenTransfer(opts *bind.TransactOpts, _sender common.Address, _fee *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "onTokenTransfer", _sender, _fee, _data) +} + +// OnTokenTransfer is a paid mutator transaction binding the contract method 0xa4c0ed36. +// +// Solidity: function onTokenTransfer(address _sender, uint256 _fee, bytes _data) returns() +func (_VRFCoordinator *VRFCoordinatorSession) OnTokenTransfer(_sender common.Address, _fee *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.OnTokenTransfer(&_VRFCoordinator.TransactOpts, _sender, _fee, _data) +} + +// OnTokenTransfer is a paid mutator transaction binding the contract method 0xa4c0ed36. 
+// +// Solidity: function onTokenTransfer(address _sender, uint256 _fee, bytes _data) returns() +func (_VRFCoordinator *VRFCoordinatorTransactorSession) OnTokenTransfer(_sender common.Address, _fee *big.Int, _data []byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.OnTokenTransfer(&_VRFCoordinator.TransactOpts, _sender, _fee, _data) +} + +// RegisterProvingKey is a paid mutator transaction binding the contract method 0x6815851e. +// +// Solidity: function registerProvingKey(uint256 _fee, uint256[2] _publicProvingKey, bytes32 _jobID) returns() +func (_VRFCoordinator *VRFCoordinatorTransactor) RegisterProvingKey(opts *bind.TransactOpts, _fee *big.Int, _publicProvingKey [2]*big.Int, _jobID [32]byte) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "registerProvingKey", _fee, _publicProvingKey, _jobID) +} + +// RegisterProvingKey is a paid mutator transaction binding the contract method 0x6815851e. +// +// Solidity: function registerProvingKey(uint256 _fee, uint256[2] _publicProvingKey, bytes32 _jobID) returns() +func (_VRFCoordinator *VRFCoordinatorSession) RegisterProvingKey(_fee *big.Int, _publicProvingKey [2]*big.Int, _jobID [32]byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.RegisterProvingKey(&_VRFCoordinator.TransactOpts, _fee, _publicProvingKey, _jobID) +} + +// RegisterProvingKey is a paid mutator transaction binding the contract method 0x6815851e. +// +// Solidity: function registerProvingKey(uint256 _fee, uint256[2] _publicProvingKey, bytes32 _jobID) returns() +func (_VRFCoordinator *VRFCoordinatorTransactorSession) RegisterProvingKey(_fee *big.Int, _publicProvingKey [2]*big.Int, _jobID [32]byte) (*types.Transaction, error) { + return _VRFCoordinator.Contract.RegisterProvingKey(&_VRFCoordinator.TransactOpts, _fee, _publicProvingKey, _jobID) +} + +// Withdraw is a paid mutator transaction binding the contract method 0xf3fef3a3. 
+// +// Solidity: function withdraw(address _recipient, uint256 _amount) returns() +func (_VRFCoordinator *VRFCoordinatorTransactor) Withdraw(opts *bind.TransactOpts, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _VRFCoordinator.contract.Transact(opts, "withdraw", _recipient, _amount) +} + +// Withdraw is a paid mutator transaction binding the contract method 0xf3fef3a3. +// +// Solidity: function withdraw(address _recipient, uint256 _amount) returns() +func (_VRFCoordinator *VRFCoordinatorSession) Withdraw(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _VRFCoordinator.Contract.Withdraw(&_VRFCoordinator.TransactOpts, _recipient, _amount) +} + +// Withdraw is a paid mutator transaction binding the contract method 0xf3fef3a3. +// +// Solidity: function withdraw(address _recipient, uint256 _amount) returns() +func (_VRFCoordinator *VRFCoordinatorTransactorSession) Withdraw(_recipient common.Address, _amount *big.Int) (*types.Transaction, error) { + return _VRFCoordinator.Contract.Withdraw(&_VRFCoordinator.TransactOpts, _recipient, _amount) +} + +// VRFCoordinatorNewServiceAgreementIterator is returned from FilterNewServiceAgreement and is used to iterate over the raw logs and unpacked data for NewServiceAgreement events raised by the VRFCoordinator contract. 
+type VRFCoordinatorNewServiceAgreementIterator struct { + Event *VRFCoordinatorNewServiceAgreement // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *VRFCoordinatorNewServiceAgreementIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorNewServiceAgreement) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorNewServiceAgreement) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *VRFCoordinatorNewServiceAgreementIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. 
+func (it *VRFCoordinatorNewServiceAgreementIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// VRFCoordinatorNewServiceAgreement represents a NewServiceAgreement event raised by the VRFCoordinator contract. +type VRFCoordinatorNewServiceAgreement struct { + KeyHash [32]byte + Fee *big.Int + Raw types.Log // Blockchain specific contextual infos +} + +// FilterNewServiceAgreement is a free log retrieval operation binding the contract event 0xae189157e0628c1e62315e9179156e1ea10e90e9c15060002f7021e907dc2cfe. +// +// Solidity: event NewServiceAgreement(bytes32 keyHash, uint256 fee) +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterNewServiceAgreement(opts *bind.FilterOpts) (*VRFCoordinatorNewServiceAgreementIterator, error) { + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "NewServiceAgreement") + if err != nil { + return nil, err + } + return &VRFCoordinatorNewServiceAgreementIterator{contract: _VRFCoordinator.contract, event: "NewServiceAgreement", logs: logs, sub: sub}, nil +} + +// WatchNewServiceAgreement is a free log subscription operation binding the contract event 0xae189157e0628c1e62315e9179156e1ea10e90e9c15060002f7021e907dc2cfe. 
+// +// Solidity: event NewServiceAgreement(bytes32 keyHash, uint256 fee) +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchNewServiceAgreement(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorNewServiceAgreement) (event.Subscription, error) { + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "NewServiceAgreement") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(VRFCoordinatorNewServiceAgreement) + if err := _VRFCoordinator.contract.UnpackLog(event, "NewServiceAgreement", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseNewServiceAgreement is a log parse operation binding the contract event 0xae189157e0628c1e62315e9179156e1ea10e90e9c15060002f7021e907dc2cfe. +// +// Solidity: event NewServiceAgreement(bytes32 keyHash, uint256 fee) +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseNewServiceAgreement(log types.Log) (*VRFCoordinatorNewServiceAgreement, error) { + event := new(VRFCoordinatorNewServiceAgreement) + if err := _VRFCoordinator.contract.UnpackLog(event, "NewServiceAgreement", log); err != nil { + return nil, err + } + return event, nil +} + +// VRFCoordinatorRandomnessRequestIterator is returned from FilterRandomnessRequest and is used to iterate over the raw logs and unpacked data for RandomnessRequest events raised by the VRFCoordinator contract. 
+type VRFCoordinatorRandomnessRequestIterator struct { + Event *VRFCoordinatorRandomnessRequest // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *VRFCoordinatorRandomnessRequestIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorRandomnessRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(VRFCoordinatorRandomnessRequest) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *VRFCoordinatorRandomnessRequestIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. 
+func (it *VRFCoordinatorRandomnessRequestIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// VRFCoordinatorRandomnessRequest represents a RandomnessRequest event raised by the VRFCoordinator contract. +type VRFCoordinatorRandomnessRequest struct { + KeyHash [32]byte + Seed *big.Int + JobID [32]byte + Sender common.Address + Fee *big.Int + Raw types.Log // Blockchain specific contextual infos +} + +// FilterRandomnessRequest is a free log retrieval operation binding the contract event 0xd241d78a52145a5d1d1ff002e32ec15cdc395631bcee66246650c2429dfaccaa. +// +// Solidity: event RandomnessRequest(bytes32 keyHash, uint256 seed, bytes32 indexed jobID, address sender, uint256 fee) +func (_VRFCoordinator *VRFCoordinatorFilterer) FilterRandomnessRequest(opts *bind.FilterOpts, jobID [][32]byte) (*VRFCoordinatorRandomnessRequestIterator, error) { + + var jobIDRule []interface{} + for _, jobIDItem := range jobID { + jobIDRule = append(jobIDRule, jobIDItem) + } + + logs, sub, err := _VRFCoordinator.contract.FilterLogs(opts, "RandomnessRequest", jobIDRule) + if err != nil { + return nil, err + } + return &VRFCoordinatorRandomnessRequestIterator{contract: _VRFCoordinator.contract, event: "RandomnessRequest", logs: logs, sub: sub}, nil +} + +// WatchRandomnessRequest is a free log subscription operation binding the contract event 0xd241d78a52145a5d1d1ff002e32ec15cdc395631bcee66246650c2429dfaccaa. 
+// +// Solidity: event RandomnessRequest(bytes32 keyHash, uint256 seed, bytes32 indexed jobID, address sender, uint256 fee) +func (_VRFCoordinator *VRFCoordinatorFilterer) WatchRandomnessRequest(opts *bind.WatchOpts, sink chan<- *VRFCoordinatorRandomnessRequest, jobID [][32]byte) (event.Subscription, error) { + + var jobIDRule []interface{} + for _, jobIDItem := range jobID { + jobIDRule = append(jobIDRule, jobIDItem) + } + + logs, sub, err := _VRFCoordinator.contract.WatchLogs(opts, "RandomnessRequest", jobIDRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(VRFCoordinatorRandomnessRequest) + if err := _VRFCoordinator.contract.UnpackLog(event, "RandomnessRequest", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseRandomnessRequest is a log parse operation binding the contract event 0xd241d78a52145a5d1d1ff002e32ec15cdc395631bcee66246650c2429dfaccaa. +// +// Solidity: event RandomnessRequest(bytes32 keyHash, uint256 seed, bytes32 indexed jobID, address sender, uint256 fee) +func (_VRFCoordinator *VRFCoordinatorFilterer) ParseRandomnessRequest(log types.Log) (*VRFCoordinatorRandomnessRequest, error) { + event := new(VRFCoordinatorRandomnessRequest) + if err := _VRFCoordinator.contract.UnpackLog(event, "RandomnessRequest", log); err != nil { + return nil, err + } + return event, nil +} diff --git a/core/services/vrf/generation/abigen.sh b/core/services/vrf/generation/abigen.sh new file mode 100755 index 00000000000..555e29fd56f --- /dev/null +++ b/core/services/vrf/generation/abigen.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +set -e + +# Usage: abigen.sh . 
See the following assignments for the argument list. +# $BIN_PATH, $ABI_PATH and $OUT_PATH must all be in the same directory + +BIN_PATH="$1" # Path to the contract binary, as 0x-hex +ABI_PATH="$2" # Path to the contract ABI JSON +OUT_PATH="$3" # Path at which to save the golang contract wrapper +CLASS_NAME="$4" # Name for the golang contract wrapper type +PKG_NAME="$5" # Name for the package containing the wrapper + +COMMON_PARENT_DIRECTORY=$(dirname "$BIN_PATH") +if [ "$(dirname "$ABI_PATH")" != "$COMMON_PARENT_DIRECTORY" ] || \ + [ "$(dirname "$OUT_PATH")" != "$COMMON_PARENT_DIRECTORY" ]; then + # shellcheck disable=SC2016 + echo '$BIN_PATH, $ABI_PATH and $OUT_PATH must all be in the same directory' + exit 1 +fi + +function cleanup() { + rm -rf "$CONTAINER_NAME_PATH" + if [ ! -z "$DOCKER_CONTAINER_NAME" ]; then + docker rm -f "$DOCKER_CONTAINER_NAME" > /dev/null + fi +} +trap cleanup EXIT + +# shellcheck source=common.sh +source "$(dirname "$0")/common.sh" + +ABIGEN_ARGS=( -bin "$BIN_PATH" -abi "$ABI_PATH" -out "$OUT_PATH" + -type "$CLASS_NAME" -pkg "$PKG_NAME") + +# Geth version from which native abigen was built, or v. +NATIVE_ABIGEN_VERSION=v"$( + abigen --version 2> /dev/null | \ + grep -E -o '([0-9]+\.[0-9]+\.[0-9]+)' +)" || true + +# Generate golang wrapper +if [ "$NATIVE_ABIGEN_VERSION" == "$GETH_VERSION" ]; then + abigen "${ABIGEN_ARGS[@]}" # We can use native abigen, which is much faster +else # Must use dockerized abigen + DOCKER_IMAGE="ethereum/client-go:alltools-$GETH_VERSION" + echo -n "Native abigen unavailable, broken, or wrong version (need version " + echo "$GETH_VERSION). Invoking abigen from $DOCKER_IMAGE docker image." 
+ CONTAINER_NAME_PATH="$(mktemp)" + rm -f "$CONTAINER_NAME_PATH" + docker run -v "${COMMON_PARENT_DIRECTORY}:${COMMON_PARENT_DIRECTORY}" \ + --cidfile="$CONTAINER_NAME_PATH" \ + "$DOCKER_IMAGE" \ + abigen "${ABIGEN_ARGS[@]}" + DOCKER_CONTAINER_NAME=$(< "$CONTAINER_NAME_PATH") + if [ "$(docker wait "$DOCKER_CONTAINER_NAME")" != "0" ] ; then + echo "Failed to build $CLASS_NAME golang wrapper" + exit 1 + fi + docker cp "$DOCKER_CONTAINER_NAME:${OUT_PATH}" "${OUT_PATH}" +fi diff --git a/core/services/vrf/generation/common.sh b/core/services/vrf/generation/common.sh new file mode 100644 index 00000000000..5264da35091 --- /dev/null +++ b/core/services/vrf/generation/common.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +# GETH_VERSION is the version of go-ethereum chainlink is using +GETH_VERSION=$(go list -json -m github.com/ethereum/go-ethereum | jq -r .Version) +export GETH_VERSION diff --git a/core/services/vrf/generation/compile_command.txt b/core/services/vrf/generation/compile_command.txt new file mode 100644 index 00000000000..92b076b8209 --- /dev/null +++ b/core/services/vrf/generation/compile_command.txt @@ -0,0 +1 @@ +yarn workspace @chainlink/contracts belt compile solc diff --git a/core/services/vrf/generation/compile_contracts.sh b/core/services/vrf/generation/compile_contracts.sh new file mode 100755 index 00000000000..479a8ff22cf --- /dev/null +++ b/core/services/vrf/generation/compile_contracts.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +CDIR=$(dirname "$0") +COMPILE_COMMAND=$(<"$CDIR/compile_command.txt") + +# Only print compilation output on failure. +OUT="$($COMPILE_COMMAND 2>&1)" +ERR="$?" + +# shellcheck disable=SC2181 +if [ "$ERR" != "0" ]; then + echo + echo "↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓" + echo "Error while compiling solidity contracts. See below for output." 
+ echo "You can reproduce this error directly by running the command" + echo + echo " " "$COMPILE_COMMAND" + echo + echo "in the directory $SOLIDITY_DIR" + echo + echo "This is probably a problem with a solidity contract, under the" + echo "directory evm-contracts/src/." + echo "↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑" + echo + echo "$OUT" + exit 1 +fi diff --git a/core/services/vrf/generation/generate.sh b/core/services/vrf/generation/generate.sh new file mode 100755 index 00000000000..fd46a5ac597 --- /dev/null +++ b/core/services/vrf/generation/generate.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +set -e + +# Usage: +# +# ./generate.sh +# +# This will output the generated file to .//.go, +# where ./ is in the same directory as this script + +SOL_PATH="$1" +PKG_NAME="$2" + +function cleanup() { # Release resources on script exit + rm -rf "$TMP_DIR" +} +trap cleanup EXIT + +CDIR=$(dirname "$0") +# shellcheck source=common.sh +source "$CDIR/common.sh" + +CLASS_NAME=$(basename "$SOL_PATH" .json) + +TMP_DIR=$(mktemp -d "/tmp/${CLASS_NAME}.XXXXXXXXX") + +ABI=$(jq -c -r .compilerOutput.abi < "$SOL_PATH") +ABI_PATH="${TMP_DIR}/abi.json" +echo "$ABI" > "$ABI_PATH" + +# We want the bytecode here, not the deployedByteCode. The latter does not +# include the initialization code. +# https://ethereum.stackexchange.com/questions/32234/difference-between-bytecode-and-runtime-bytecode +BIN=$(jq -r .compilerOutput.evm.bytecode.object < "$SOL_PATH") + +# Modern solc objects have metadata suffixes which vary depending on +# incidental compilation context like absolute paths to source files. See +# https://solidity.readthedocs.io/en/v0.6.2/metadata.html#encoding-of-the-metadata-hash-in-the-bytecode +# Since this suffix varies so much, it can't be included in a reliable check +# that the golang wrapper is up-to-date, so remove it from the message hash. 
+BINLEN="${#BIN}" +TRUNCLEN="$((BINLEN - 106))" # 106/2=53=length of metadata hash in bytes +TRUNCATED="${BIN:0:$TRUNCLEN}" +SUFFIX="${BIN:$TRUNCLEN:106}" # The actual metadata hash, in hex. + +# Verify that the suffix follows the pattern outlined in the above link, to +# ensure that we're actually truncating what we think we are. +SUFFIX_REGEXP='^a264697066735822[[:xdigit:]]{68}64736f6c6343[[:xdigit:]]{6}0033$' +if [[ ! $SUFFIX =~ $SUFFIX_REGEXP ]]; then + echo "binary suffix has unexpected format; giving up" + exit 1 +fi + +CONSTANT_SUFFIX="a264697066735822000000000000000000000000000000000000000000000" +CONSTANT_SUFFIX+="0000000000000000000000064736f6c63430000000033" + +BIN_PATH="${TMP_DIR}/bin" +echo "${TRUNCATED}${CONSTANT_SUFFIX}" > "$BIN_PATH" + +OUT_PATH="${TMP_DIR}/$PKG_NAME.go" + +"$CDIR"/abigen.sh "$BIN_PATH" "$ABI_PATH" "$OUT_PATH" "$CLASS_NAME" "$PKG_NAME" + +TARGET_DIR="./generated/$PKG_NAME/" +mkdir -p "$TARGET_DIR" +cp "$OUT_PATH" "$TARGET_DIR" + +"$CDIR/record_versions.sh" "$@" "$ABI" "$BIN" diff --git a/core/services/vrf/generation/generate_link.sh b/core/services/vrf/generation/generate_link.sh new file mode 100755 index 00000000000..3871a05c458 --- /dev/null +++ b/core/services/vrf/generation/generate_link.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# Generates the golang wrapper of the LINK ERC20 token, which is represented by +# a non-standard compiler argument. Takes no arguments. 
+
+function cleanup() { # Release resources on script exit
+  rm -rf "$TMP_DIR"
+}
+trap cleanup EXIT
+
+CDIR=$(dirname "$0")
+
+TMP_DIR=$(mktemp -d /tmp/link_token.XXXXXXXXX)
+
+LINK_COMPILER_ARTIFACT_PATH="$CDIR/../../../../evm-test-helpers/src/LinkToken.json"
+
+ABI=$(jq -c -r .abi < "$LINK_COMPILER_ARTIFACT_PATH")
+ABI_PATH="${TMP_DIR}/abi.json"
+echo "$ABI" > "$ABI_PATH"
+
+BIN=$(jq -r .bytecode < "$LINK_COMPILER_ARTIFACT_PATH")
+BIN_PATH="${TMP_DIR}/bin"
+echo "$BIN" > "$BIN_PATH"
+
+CLASS_NAME="LinkToken"
+PKG_NAME="link_token_interface"
+OUT_PATH="$TMP_DIR/$PKG_NAME.go"
+
+"$CDIR"/abigen.sh "$BIN_PATH" "$ABI_PATH" "$OUT_PATH" "$CLASS_NAME" "$PKG_NAME"
+
+TARGET_DIR="./generated/$PKG_NAME/"
+mkdir -p "$TARGET_DIR"
+cp "$OUT_PATH" "$TARGET_DIR"
+"$CDIR/record_versions.sh" "$LINK_COMPILER_ARTIFACT_PATH" link_token_interface \
+  "$ABI" "$BIN" dont_truncate_binary
diff --git a/core/services/vrf/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/services/vrf/generation/generated-wrapper-dependency-versions-do-not-edit.txt
new file mode 100644
index 00000000000..9badf91e612
--- /dev/null
+++ b/core/services/vrf/generation/generated-wrapper-dependency-versions-do-not-edit.txt
@@ -0,0 +1,6 @@
+GETH_VERSION: 1.9.11
+link_token_interface: ./generation/../../../../evm-test-helpers/src/LinkToken.json a16bfa4690d25769959dd59034f03d101a46ecbf7e774a57ca6874f8352c8568
+solidity_request_id: ../../../evm-contracts/abi/v0.6/VRFRequestIDBaseTestHelper.json 37323e90d064503ebfb0a2c9d6c40d4787fac916df659d082c3927f7f3de6b09
+solidity_verifier_wrapper: ../../../evm-contracts/abi/v0.6/VRFTestHelper.json c5f4617b801b7143b12d3062ebeeb5c9c3afc33a4ecdc14233ab6f2c0d95394c
+solidity_vrf_consumer_interface: ../../../evm-contracts/abi/v0.6/VRFConsumer.json 5111ba5e42f3ef1512065078fe017ada84f9b3b4c1a2680a85415ebbac08bdb9
+solidity_vrf_coordinator_interface: ../../../evm-contracts/abi/v0.6/VRFCoordinator.json 
584b33eb950331a8ff51f56e47deee7937db139cb4113f321eb6ccf7f1b82173 diff --git a/core/services/vrf/generation/record_versions.sh b/core/services/vrf/generation/record_versions.sh new file mode 100755 index 00000000000..4a7a6a7018f --- /dev/null +++ b/core/services/vrf/generation/record_versions.sh @@ -0,0 +1,81 @@ +#!/usr/bin/env bash + +set -e + +# Record versions of each contract, to avoid use of stale wrappers +# +# Usage: record_versions.sh \ +# \ +# [] +# +# The db is a flat file, one record per line. There is one line of the form +# "GETH_VERSION: : +# +# with each contract path followed by the current hash of that contract. These +# are checked in the golang test TestCheckContractHashesFromLastGoGenerate, to +# ensure that the golang wrappers are current. +# +# If something is passed in the slot, no attempt is made to +# remove the trailing metadata in the binary object. + +SOL_PATH="$1" +PKG_NAME="$2" +ABI="$3" +BIN="$4" +DONT_TRUNCATE="$5" + +if [[ $SOL_PATH =~ [[:space:]] ]]; then + # The golang parser splits on whitespace, so don't allow it in the pathname + echo "path to compiler artifact, '$SOL_PATH', cannot contain whitespace" + exit 1 +fi + +CDIR=$(dirname "$0") +# shellcheck source=common.sh +source "$CDIR/common.sh" + +VERSION_DB_PATH="$CDIR/generated-wrapper-dependency-versions-do-not-edit.txt" +touch "$VERSION_DB_PATH" + +function blow_away_version_record() { + TGT_RECORD="$1" + (grep -v "$TGT_RECORD": "$VERSION_DB_PATH" > "$VERSION_DB_PATH.tmp") || true + mv "$VERSION_DB_PATH.tmp" "$VERSION_DB_PATH" +} + +blow_away_version_record GETH_VERSION + +# go.mod geth version is of form v1.9.9. Strip leading v. +echo GETH_VERSION: "${GETH_VERSION//v/}" >> "$VERSION_DB_PATH" + +blow_away_version_record "$PKG_NAME" + +if [ -n "$DONT_TRUNCATE" ]; then # Caller has asked us not to trucate binary + MSG_BIN="$BIN" +else + # Modern solc objects have metadata suffixes which vary depending on + # incidental compilation context like absolute paths to source files. 
See + # https://solidity.readthedocs.io/en/v0.6.2/metadata.html#encoding-of-the-metadata-hash-in-the-bytecode + # Since this suffix varies so much, it can't be included in a reliable check + # that the golang wrapper is up-to-date, so remove it from the message hash. + BINLEN="${#BIN}" + TRUNCLEN="$((BINLEN - 106))" # 106/2=53=length of metadata hash in bytes + TRUNCATED="${BIN:0:$TRUNCLEN}" + SUFFIX="${BIN:$TRUNCLEN:106}" # The actual metadata hash, in hex. + + # Verify that the suffix follows the pattern outlined in the above link, to + # ensure that we're actually truncating what we think we are. + SUFFIX_REGEXP='^a264697066735822[[:xdigit:]]{68}64736f6c6343[[:xdigit:]]{6}0033$' + if [[ ! $SUFFIX =~ $SUFFIX_REGEXP ]]; then + echo "binary suffix has unexpected format; giving up" + exit 1 + fi + MSG_BIN="$TRUNCATED" +fi + +HASHMSG="$ABI$MSG_BIN" +echo "$PKG_NAME: $SOL_PATH $(sha256sum <<< "$HASHMSG" | cut -f 1 -d ' ')" >> \ + "$VERSION_DB_PATH" +sort -o "$VERSION_DB_PATH" "$VERSION_DB_PATH" diff --git a/core/services/vrf/go_generate.go b/core/services/vrf/go_generate.go new file mode 100644 index 00000000000..91dee41fac9 --- /dev/null +++ b/core/services/vrf/go_generate.go @@ -0,0 +1,45 @@ +package vrf + +// Make sure solidity compiler artifacts are up to date. Only output stdout on failure. +//go:generate ./generation/compile_contracts.sh + +//go:generate ./generation/generate.sh ../../../evm-contracts/abi/v0.6/VRFTestHelper.json solidity_verifier_wrapper +//go:generate ./generation/generate.sh ../../../evm-contracts/abi/v0.6/VRFCoordinator.json solidity_vrf_coordinator_interface +//go:generate ./generation/generate.sh ../../../evm-contracts/abi/v0.6/VRFConsumer.json solidity_vrf_consumer_interface +//go:generate ./generation/generate.sh ../../../evm-contracts/abi/v0.6/VRFRequestIDBaseTestHelper.json solidity_request_id +//go:generate ./generation/generate_link.sh + +// To run these commands, you must either install docker, or the correct version +// of abigen. 
The latter can be installed with these commands, at least on linux: +// +// git clone https://github.com/ethereum/go-ethereum +// cd go-ethereum/cmd/abigen +// git checkout v +// go install +// +// Here, is the version of go-ethereum specified in chainlink's +// go.mod. This will install abigen in "$GOPATH/bin", which you should add to +// your $PATH. +// +// To reduce explicit dependencies, and in case the system does not have the +// correct version of abigen installed , the above commands spin up docker +// containers. In my hands, total running time including compilation is about +// 13s. If you're modifying solidity code and testing against go code a lot, it +// might be worthwhile to generate the the wrappers using a static container +// with abigen and solc, which will complete much faster. E.g. +// +// abigen -sol ../../../evm-contracts/src/v0.6/VRFAll.sol -pkg vrf -out solidity_interfaces.go +// +// where VRFAll.sol simply contains `import "contract_path";` instructions for +// all the contracts you wish to target. This runs in about 0.25 seconds in my +// hands. +// +// If you're on linux, you can copy the correct version of solc out of the +// appropriate docker container. At least, the following works on ubuntu: +// +// $ docker run --name solc ethereum/solc:0.6.2 +// $ sudo docker cp solc:/usr/bin/solc /usr/bin +// $ docker rm solc +// +// If you need to point abigen at your solc executable, you can specify the path +// with the abigen --solc option. diff --git a/core/services/vrf/go_generate_test.go b/core/services/vrf/go_generate_test.go new file mode 100644 index 00000000000..1ffb6f8fdc2 --- /dev/null +++ b/core/services/vrf/go_generate_test.go @@ -0,0 +1,220 @@ +// package vrf_test verifies correct and up-to-date generation of golang wrappers +// for solidity contracts. See go_generate.go for the actual generation. 
+package vrf_test + +import ( + "bufio" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "strings" + "testing" + + "chainlink/core/utils" + + gethParams "github.com/ethereum/go-ethereum/params" + "github.com/pkg/errors" + "github.com/tidwall/gjson" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// contractVersion records information about the solidity compiler artifact a +// golang contract wrapper package depends on. +type contractVersion struct { + // path to compiler artifact used by generate.sh to create wrapper package + compilerArtifactPath string + // hash of the artifact at the timem the wrapper was last generated + hash string +} + +// integratedVersion carries the full versioning information checked in this test +type integratedVersion struct { + // Version of geth last used to generate the wrappers + gethVersion string + // { golang-pkg-name: version_info } + contractVersions map[string]contractVersion +} + +// TestCheckContractHashesFromLastGoGenerate compares the metadata recorded by +// record_versions.sh, and fails if it indicates that the corresponding golang +// wrappers are out of date with respect to the solidty contracts they wrap. See +// record_versions.sh for description of file format. 
+func TestCheckContractHashesFromLastGoGenerate(t *testing.T) { + versions := readVersionsDB(t) + require.NotEmpty(t, versions.gethVersion, + `version DB should have a "GETH_VERSION:" line`) + require.Equal(t, versions.gethVersion, gethParams.Version, + "please re-run `go generate` in core/services/vrf") + for _, contractVersionInfo := range versions.contractVersions { + compareCurrentCompilerAritfactAgainstRecordsAndSoliditySources( + t, contractVersionInfo) + } +} + +// compareCurrentCompilerAritfactAgainstRecordsAndSoliditySources checks that +// the file at each contractVersion.compilerArtifactPath hashes to its +// contractVersion.hash, and that the solidity source code recorded in the +// compiler artifact matches the current solidity contracts. +// +// Most of the compiler artifacts should contain output from sol-compiler, or +// "yarn compile". The relevant parts of its schema are +// +// { "sourceCodes": { "": "", ... } } +// +// where is the path to the contract, below the truffle contracts/ +// directory, and is the source code of the contract at the time the JSON +// file was generated. 
+func compareCurrentCompilerAritfactAgainstRecordsAndSoliditySources( + t *testing.T, versionInfo contractVersion, +) { + apath := versionInfo.compilerArtifactPath + // check the compiler outputs (abi and bytecode object) haven't changed + compilerJSON, err := ioutil.ReadFile(apath) + require.NoError(t, err, "failed to read JSON compiler artifact %s", apath) + abiPath := "compilerOutput.abi" + binPath := "compilerOutput.evm.bytecode.object" + isLINKCompilerOutput := + path.Base(versionInfo.compilerArtifactPath) == "LinkToken.json" + if isLINKCompilerOutput { + abiPath = "abi" + binPath = "bytecode" + } + // Normalize the whitespace in the ABI JSON + abiBytes := stripWhitespace(gjson.GetBytes(compilerJSON, abiPath).String(), "") + binBytes := gjson.GetBytes(compilerJSON, binPath).String() + if !isLINKCompilerOutput { + // Remove the varying contract metadata, as in ./generation/generate.sh + binBytes = binBytes[:len(binBytes)-106] + } + hasher := sha256.New() + hashMsg := string(abiBytes+binBytes) + "\n" // newline from <<< in record_versions.sh + _, err = io.WriteString(hasher, hashMsg) + require.NoError(t, err, "failed to hash compiler artifact %s", apath) + recompileCommand := fmt.Sprintf("`%s && go generate`", compileCommand(t)) + assert.Equal(t, versionInfo.hash, fmt.Sprintf("%x", hasher.Sum(nil)), + "compiler artifact %s has changed; please rerun %s for the vrf package", + apath, recompileCommand) + + var artifact struct { + Sources map[string]string `json:"sourceCodes"` + } + require.NoError(t, json.Unmarshal(compilerJSON, &artifact), + "could not read compiler artifact %s", apath) + + if !isLINKCompilerOutput { // No need to check contract source for LINK token + // Check that each of the contract source codes hasn't changed + soliditySourceRoot := filepath.Dir(filepath.Dir(filepath.Dir(apath))) + contractPath := filepath.Join(soliditySourceRoot, "src", "v0.6") + for sourcePath, sourceCode := range artifact.Sources { // compare to current source + sourcePath 
= filepath.Join(contractPath, sourcePath) + actualSource, err := ioutil.ReadFile(sourcePath) + require.NoError(t, err, "could not read "+sourcePath) + assert.Equal(t, string(actualSource), sourceCode, + "%s has changed; please rerun %s for the vrf package", + sourcePath, recompileCommand) + } + } +} + +func versionsDBLineReader() (*bufio.Scanner, error) { + dirOfThisTest, err := os.Getwd() + if err != nil { + return nil, err + } + dBBasename := "generated-wrapper-dependency-versions-do-not-edit.txt" + dbPath := filepath.Join(dirOfThisTest, "generation", dBBasename) + versionsDBFile, err := os.Open(dbPath) + if err != nil { + return nil, errors.Wrapf(err, "could not open versions database") + } + return bufio.NewScanner(versionsDBFile), nil + +} + +// readVersionsDB populates an integratedVersion with all the info in the +// versions DB +func readVersionsDB(t *testing.T) integratedVersion { + rv := integratedVersion{} + rv.contractVersions = make(map[string]contractVersion) + db, err := versionsDBLineReader() + require.NoError(t, err) + for db.Scan() { + line := strings.Fields(db.Text()) + require.True(t, strings.HasSuffix(line[0], ":"), + `each line in versions.txt should start with "$TOPIC:"`) + topic := stripTrailingColon(line[0], "") + if topic == "GETH_VERSION" { + require.Len(t, line, 2, + "GETH_VERSION line should contain geth version, and only that") + require.Empty(t, rv.gethVersion, "more than one geth version") + rv.gethVersion = line[1] + } else { // It's a wrapper from a json compiler artifact + require.Len(t, line, 3, + `"%s" should have three elements ": "`, + db.Text()) + _, alreadyExists := rv.contractVersions[topic] + require.False(t, alreadyExists, `topic "%s" already mentioned!`, topic) + rv.contractVersions[topic] = contractVersion{ + compilerArtifactPath: line[1], hash: line[2], + } + } + } + return rv +} + +// Ensure that solidity compiler artifacts are present before running this test, +// by compiling them if necessary. 
// init makes sure every solidity artifact listed in the versions DB exists on
// disk before any test in this package runs, recompiling the contracts if any
// artifact is missing.
func init() {
    db, err := versionsDBLineReader()
    if err != nil {
        panic(err)
    }
    var solidityArtifactsMissing []string
    for db.Scan() {
        line := strings.Fields(db.Text())
        // Skip the GETH_VERSION line; every other line's second field is an
        // artifact path whose existence we check.
        if stripTrailingColon(line[0], "") != "GETH_VERSION" {
            if os.IsNotExist(utils.JustError(os.Stat(line[1]))) {
                solidityArtifactsMissing = append(solidityArtifactsMissing, line[1])
            }
        }
    }
    if len(solidityArtifactsMissing) == 0 {
        return
    }
    fmt.Printf("some solidity artifacts missing (%s); rebuilding...",
        solidityArtifactsMissing)
    // Run the recorded compile command, streaming its output to our stdio.
    compileCmd := strings.Fields(compileCommand(nil))
    cmd := exec.Command(compileCmd[0], compileCmd[1:]...)
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    if err := cmd.Run(); err != nil {
        panic(err)
    }
}

// stripWhitespace and stripTrailingColon are bound ReplaceAllString methods,
// called as f(input, replacement): the first collapses whitespace runs, the
// second removes a trailing ":".
var (
    stripWhitespace    = regexp.MustCompile(`\s+`).ReplaceAllString
    stripTrailingColon = regexp.MustCompile(":$").ReplaceAllString
)

// compileCommand() is a shell command which compiles chainlink's solidity
// contracts. It fails the test (or panics, when t is nil, e.g. from init)
// if the recorded command file cannot be read.
func compileCommand(t *testing.T) string {
    cmd, err := ioutil.ReadFile("./generation/compile_command.txt")
    if err != nil {
        if t != nil {
            t.Fatal(err)
        }
        panic(err)
    }
    return string(cmd)
}

// ---- new file in this patch: core/services/vrf/solidity_ports.go ----

package vrf

import (
    "math/big"

    "chainlink/core/services/signatures/secp256k1"

    "go.dedis.ch/kyber/v3"
)

// This file contains golang re-implementations of functions on the VRF solidity
// contract. They are used to verify correct operation of those functions, and
// also to efficiently compute zInv off-chain, which makes computing the linear
// combination of c*gamma+s*hash onchain much more efficient.
+ +type fieldElt = *big.Int + +// neg(f) is the negation of f in the base field +func neg(f fieldElt) fieldElt { return sub(fieldSize, f) } + +// projectiveSub(x1, z1, x2, z2) is the projective coordinates of x1/z1 - x2/z2 +func projectiveSub(x1, z1, x2, z2 fieldElt) (fieldElt, fieldElt) { + num1 := mul(z2, x1) + num2 := neg(mul(z1, x2)) + return mod(add(num1, num2), fieldSize), mod(mul(z1, z2), fieldSize) +} + +// projectiveMul(x1, z1, x2, z2) is projective coordinates of (x1/z1)×(x2/z2) +func projectiveMul(x1, z1, x2, z2 fieldElt) (fieldElt, fieldElt) { + return mul(x1, x2), mul(z1, z2) +} + +// ProjectiveECAdd(px, py, qx, qy) duplicates the calculation in projective +// coordinates of VRF.sol#projectiveECAdd, so we can reliably get the +// denominator (i.e, z) +func ProjectiveECAdd(p, q kyber.Point) (x, y, z fieldElt) { + px, py := secp256k1.Coordinates(p) + qx, qy := secp256k1.Coordinates(q) + pz, qz := one, one + lx := sub(qy, py) + lz := sub(qx, px) + + sx, dx := projectiveMul(lx, lz, lx, lz) + sx, dx = projectiveSub(sx, dx, px, pz) + sx, dx = projectiveSub(sx, dx, qx, qz) + + sy, dy := projectiveSub(px, pz, sx, dx) + sy, dy = projectiveMul(sy, dy, lx, lz) + sy, dy = projectiveSub(sy, dy, py, pz) + + var sz fieldElt + if dx != dy { + sx = mul(sx, dy) + sy = mul(sy, dx) + sz = mul(dx, dy) + } else { + sz = dx + } + return mod(sx, fieldSize), mod(sy, fieldSize), mod(sz, fieldSize) +} diff --git a/core/services/vrf/solidity_proof.go b/core/services/vrf/solidity_proof.go new file mode 100644 index 00000000000..fecdbd4f3ae --- /dev/null +++ b/core/services/vrf/solidity_proof.go @@ -0,0 +1,131 @@ +package vrf + +// Logic for providing the precomputed values required by the solidity verifier, +// in binary-blob format. 
import (
    "fmt"
    "math/big"

    "chainlink/core/services/signatures/secp256k1"
    "chainlink/core/utils"

    "github.com/ethereum/go-ethereum/common"
    "github.com/pkg/errors"
    "go.dedis.ch/kyber/v3"
)

// SolidityProof contains precalculations which VRF.sol needs to verify proofs
type SolidityProof struct {
    P                           *Proof         // The core proof
    UWitness                    common.Address // Address of P.C*P.PK+P.S*G
    CGammaWitness, SHashWitness kyber.Point    // P.C*P.Gamma, P.S*HashToCurve(P.Seed)
    ZInv                        *big.Int       // Inverse of Z coord from ProjectiveECAdd(CGammaWitness, SHashWitness)
}

// String returns the values in p, in hexadecimal format
func (p *SolidityProof) String() string {
    return fmt.Sprintf(
        "SolidityProof{P: %s, UWitness: %x, CGammaWitness: %s, SHashWitness: %s, ZInv: %x}",
        p.P, p.UWitness, p.CGammaWitness, p.SHashWitness, p.ZInv)
}

// point returns a fresh secp256k1 point (kyber zero value), used as scratch
// space for curve arithmetic.
func point() kyber.Point {
    return secp256k1Curve.Point()
}
+func (p *Proof) SolidityPrecalculations() (*SolidityProof, error) { + var rv SolidityProof + rv.P = p + c := secp256k1.IntToScalar(p.C) + s := secp256k1.IntToScalar(p.S) + u := point().Add(point().Mul(c, p.PublicKey), point().Mul(s, Generator)) + var err error + rv.UWitness = secp256k1.EthereumAddress(u) + rv.CGammaWitness = point().Mul(c, p.Gamma) + hash, err := HashToCurve(p.PublicKey, p.Seed, func(*big.Int) {}) + if err != nil { + return nil, err + } + rv.SHashWitness = point().Mul(s, hash) + _, _, z := ProjectiveECAdd(rv.CGammaWitness, rv.SHashWitness) + rv.ZInv = z.ModInverse(z, fieldSize) + return &rv, nil +} + +// Length of marshaled proof, in bytes +const ProofLength = 64 + // PublicKey + 64 + // Gamma + 32 + // C + 32 + // S + 32 + // Seed + 32 + // uWitness (gets padded to 256 bits, even though it's only 160) + 64 + // cGammaWitness + 64 + // sHashWitness + 32 // zInv (Leave Output out, because that can be efficiently calculated) + +// MarshaledProof contains a VRF proof for randomValueFromVRFProof. +// +// NB: when passing one of these to randomValueFromVRFProof via the geth +// blockchain simulator, it must be passed as a slice ("proof[:]"). Passing it +// as-is sends hundreds of single bytes, each padded to their own 32-byte word. +type MarshaledProof [ProofLength]byte + +// String returns m as 0x-hex bytes +func (m MarshaledProof) String() string { + return fmt.Sprintf("0x%x", [ProofLength]byte(m)) +} + +// MarshalForSolidityVerifier renders p as required by randomValueFromVRFProof +func (p *SolidityProof) MarshalForSolidityVerifier() (proof MarshaledProof) { + cursor := proof[:0] + write := func(b []byte) { cursor = append(cursor, b...) 
} + write(secp256k1.LongMarshal(p.P.PublicKey)) + write(secp256k1.LongMarshal(p.P.Gamma)) + write(uint256ToBytes32(p.P.C)) + write(uint256ToBytes32(p.P.S)) + write(uint256ToBytes32(p.P.Seed)) + write(make([]byte, 12)) // Left-pad address to 32 bytes, with zeros + write(p.UWitness[:]) + write(secp256k1.LongMarshal(p.CGammaWitness)) + write(secp256k1.LongMarshal(p.SHashWitness)) + write(uint256ToBytes32(p.ZInv)) + if len(cursor) != ProofLength { + panic(fmt.Errorf("wrong proof length: %d", len(proof))) + } + return proof +} + +// MarshalForSolidityVerifier renders p as required by randomValueFromVRFProof +func (p *Proof) MarshalForSolidityVerifier() (MarshaledProof, error) { + var rv MarshaledProof + solidityProof, err := p.SolidityPrecalculations() + if err != nil { + return rv, err + } + return solidityProof.MarshalForSolidityVerifier(), nil +} + +func UnmarshalSolidityProof(proof []byte) (rv Proof, err error) { + failedProof := Proof{} + if len(proof) != ProofLength { + return failedProof, fmt.Errorf( + "VRF proof is %d bytes long, should be %d: \"%x\"", len(proof), + ProofLength, proof) + } + if rv.PublicKey, err = secp256k1.LongUnmarshal(proof[:64]); err != nil { + return failedProof, errors.Wrapf(err, "while reading proof public key") + } + rawGamma := proof[64:128] + if rv.Gamma, err = secp256k1.LongUnmarshal(rawGamma); err != nil { + return failedProof, errors.Wrapf(err, "while reading proof gamma") + } + rv.C = i().SetBytes(proof[128:160]) + rv.S = i().SetBytes(proof[160:192]) + rv.Seed = i().SetBytes(proof[192:224]) + rv.Output = utils.MustHash(string(vrfRandomOutputHashPrefix) + + string(rawGamma)).Big() + return rv, nil +} diff --git a/core/services/vrf/vrf.go b/core/services/vrf/vrf.go index 58e51cb195d..60f8cf2fa75 100644 --- a/core/services/vrf/vrf.go +++ b/core/services/vrf/vrf.go @@ -1,27 +1,33 @@ // Package vrf provides a cryptographically secure pseudo-random number generator. 
// (post-patch continuation of the vrf.go package comment)
//
// Numbers are deterministically generated from seeds and a secret key, and are
// statistically indistinguishable from uniform sampling from {0,...,2**256-1},
// to computationally-bounded observers who know the seeds, don't know the key,
// and only see the generated numbers. But each number also comes with a proof
// that it was generated according to the procedure mandated by a public key
// associated with that secret key.
//
// See VRF.sol for design notes.
//
// Usage
// -----
//
// You should probably not be using this directly.
// chainlink/store/core/models/vrfkey.PrivateKey provides a simple, more
// misuse-resistant interface to the same functionality, via the CreateKey and
// MarshaledProof methods.
//
// Nonetheless, a secret key sk should be securely sampled uniformly from
// {0,...,Order-1}. Its public key can be calculated from it by
//
//   secp256k1.Secp256k1{}.Point().Mul(secretKey, Generator)
//
// To generate random output from a big.Int seed, pass sk and the seed to
// GenerateProof, and use the Output field of the returned Proof object.
//
// To verify a Proof object p, run p.Verify(); or to verify it on-chain pass
// p.MarshalForSolidityVerifier() to randomValueFromVRFProof on the VRF solidity
// contract.
package vrf

import (
    // ... (import list unchanged by this hunk; elided from this view) ...
)

// ... (unchanged lines elided by the diff: body of bigFromHex) ...

// fieldSize is number of elements in secp256k1's base field, i.e. GF(fieldSize)
var fieldSize = bigFromHex(
    "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F")

// NOTE(review): these *big.Int constants are shared; they must never be
// mutated in place.
var bi = big.NewInt
var zero, one, two, three, four, seven = bi(0), bi(1), bi(2), bi(3), bi(4), bi(7)

// Compensate for awkward big.Int API. Can cause an extra allocation or two.
func i() *big.Int                             { return new(big.Int) }
func add(addend1, addend2 *big.Int) *big.Int  { return i().Add(addend1, addend2) }
func div(dividend, divisor *big.Int) *big.Int { return i().Div(dividend, divisor) }

// ... (unchanged helpers elided by the diff; equal, exp and mul are used by
// the visible code below) ...

func mod(dividend, divisor *big.Int) *big.Int   { return i().Mod(dividend, divisor) }
func sub(minuend, subtrahend *big.Int) *big.Int { return i().Sub(minuend, subtrahend) }

var (
    // (fieldSize-1)/2: Half Fermat's Little Theorem exponent
    eulersCriterionPower = div(sub(fieldSize, one), two)
    // (fieldSize+1)/4: As long as P%4==3 and n=x^2 in GF(fieldSize), n^sqrtPower=±x
    sqrtPower = div(add(fieldSize, one), four)
)

// IsSquare returns true iff x = y^2 for some y in GF(p)
func IsSquare(x *big.Int) bool {
    return equal(one, exp(x, eulersCriterionPower, fieldSize))
}

// SquareRoot returns a s.t. a^2=x, as long as x is a square
func SquareRoot(x *big.Int) *big.Int {
    return exp(x, sqrtPower, fieldSize)
}

// YSquared returns x^3+7 mod fieldSize, the right-hand side of the secp256k1
// curve equation.
func YSquared(x *big.Int) *big.Int {
    return mod(add(exp(x, three, fieldSize), seven), fieldSize)
}
// IsCurveXOrdinate returns true iff there is y s.t. y^2=x^3+7
func IsCurveXOrdinate(x *big.Int) bool {
    return IsSquare(YSquared(x))
}

// packUint256s returns xs serialized as concatenated uint256s, or an error
func packUint256s(xs ...*big.Int) ([]byte, error) {
    mem := []byte{}
    for _, x := range xs {
        // ... (loop body unchanged by the diff; elided from this view) ...
    }
    return mem, nil
}

var secp256k1Curve = &secp256k1.Secp256k1{}

// Generator is the generator point of secp256k1
var Generator = secp256k1Curve.Point().Base()

// HashUint256s returns a uint256 representing the hash of the concatenated byte
// representations of the inputs
func HashUint256s(xs ...*big.Int) (*big.Int, error) {
    // ... (first lines unchanged by the diff; elided — presumably
    // "packed, err := packUint256s(xs...)"; confirm against the full file) ...
    if err != nil {
        return &big.Int{}, err
    }
    return utils.MustHash(string(packed)).Big(), nil
}

// uint256ToBytes32 left-pads x to a 32-byte big-endian word; panics if x does
// not fit in a uint256.
func uint256ToBytes32(x *big.Int) []byte {
    if x.BitLen() > 256 {
        panic("vrf.uint256ToBytes32: too big to marshal to uint256")
    }
    return common.LeftPadBytes(x.Bytes(), 32)
}

// fieldHash hashes msg uniformly into {0, ..., fieldSize-1}. msg is assumed to
// already be a 256-bit hash
func fieldHash(msg []byte) *big.Int {
    rv := utils.MustHash(string(msg)).Big()
    // Hash recursively until rv < fieldSize. P(success per iteration) >= 0.5,
    // so number of extra hashes is geometrically distributed, with mean < 1.
    for rv.Cmp(fieldSize) >= 0 {
        rv = utils.MustHash(string(common.BigToHash(rv).Bytes())).Big()
    }
    return rv
}

// hashToCurveHashPrefix is domain-separation tag for initial HashToCurve hash.
// Corresponds to HASH_TO_CURVE_HASH_PREFIX in VRF.sol.
var hashToCurveHashPrefix = common.BigToHash(one).Bytes()

// HashToCurve is a cryptographic hash function which outputs a secp256k1 point,
// or an error. It passes each candidate x ordinate to ordinates function.
func HashToCurve(p kyber.Point, input *big.Int, ordinates func(x *big.Int),
) (kyber.Point, error) {
    if !(secp256k1.ValidPublicKey(p) && input.BitLen() <= 256 && input.Cmp(zero) >= 0) {
        return nil, fmt.Errorf("bad input to vrf.HashToCurve")
    }
    // Initial candidate x: H(prefix || p || input), reduced into the field.
    x := fieldHash(append(hashToCurveHashPrefix, append(secp256k1.LongMarshal(p),
        uint256ToBytes32(input)...)...))
    ordinates(x)
    for !IsCurveXOrdinate(x) { // Hash recursively until x^3+7 is a square
        x.Set(fieldHash(common.BigToHash(x).Bytes()))
        ordinates(x)
    }
    y := SquareRoot(YSquared(x))
    rv := secp256k1.SetCoordinates(x, y)
    if equal(i().Mod(y, two), one) { // Negate response if y odd
        rv = rv.Neg(rv)
    }
    return rv, nil
}
// scalarFromCurveHashPrefix is a domain-separation tag for the hash taken in
// ScalarFromCurve. Corresponds to SCALAR_FROM_CURVE_POINTS_HASH_PREFIX in
// VRF.sol.
var scalarFromCurveHashPrefix = common.BigToHash(two).Bytes()

// ScalarFromCurvePoints returns a hash for the curve points. Corresponds to the
// hash computed in VRF.sol#ScalarFromCurvePoints
func ScalarFromCurvePoints(
    hash, pk, gamma kyber.Point, uWitness [20]byte, v kyber.Point) *big.Int {
    if !(secp256k1.ValidPublicKey(hash) && secp256k1.ValidPublicKey(pk) &&
        secp256k1.ValidPublicKey(gamma) && secp256k1.ValidPublicKey(v)) {
        panic("bad arguments to vrf.ScalarFromCurvePoints")
    }
    // msg will contain abi.encodePacked(hash, pk, gamma, v, uWitness)
    // (append re-allocates past the prefix's capacity, so the shared prefix
    // slice itself is not mutated — presumably; confirm cap(prefix)==32)
    msg := scalarFromCurveHashPrefix
    for _, p := range []kyber.Point{hash, pk, gamma, v} {
        msg = append(msg, secp256k1.LongMarshal(p)...)
    }
    msg = append(msg, uWitness[:]...)
    return i().SetBytes(utils.MustHash(string(msg)).Bytes())
}

// linearCombination returns c*p1+s*p2
func linearCombination(c *big.Int, p1 kyber.Point,
    s *big.Int, p2 kyber.Point) kyber.Point {
    return secp256k1Curve.Point().Add(
        secp256k1Curve.Point().Mul(secp256k1.IntToScalar(c), p1),
        secp256k1Curve.Point().Mul(secp256k1.IntToScalar(s), p2))
}

// Proof represents a proof that Gamma was constructed from the Seed
// according to the process mandated by the PublicKey.
//
// N.B.: The kyber.Point fields must contain secp256k1.secp256k1Point values;
// C and S must represent secp256k1 scalars, and Output must be at most 256
// bits. See Proof.WellFormed.
type Proof struct {
    PublicKey kyber.Point // secp256k1 public key of private key used in proof
    Gamma     kyber.Point
    C         *big.Int
    S         *big.Int
    Seed      *big.Int // Seed input to verifiable random function
    Output    *big.Int // verifiable random function output; uniform uint256 sample
}

// String renders all proof fields in hexadecimal, for debugging.
func (p *Proof) String() string {
    return fmt.Sprintf(
        "vrf.Proof{PublicKey: %s, Gamma: %s, C: %x, S: %x, Seed: %x, Output: %x}",
        p.PublicKey, p.Gamma, p.C, p.S, p.Seed, p.Output)
}

// WellFormed is true iff p's attributes satisfy basic domain checks
func (p *Proof) WellFormed() bool {
    return (secp256k1.ValidPublicKey(p.PublicKey) &&
        secp256k1.ValidPublicKey(p.Gamma) && secp256k1.RepresentsScalar(p.C) &&
        secp256k1.RepresentsScalar(p.S) && p.Output.BitLen() <= 256)
}

// ErrCGammaEqualsSHash is returned when the proof would be rejected by the
// solidity verifier; callers retry with a fresh nonce.
var ErrCGammaEqualsSHash = fmt.Errorf(
    "pick a different nonce; c*gamma = s*hash, with this one")

// checkCGammaNotEqualToSHash checks c*gamma ≠ s*hash, as required by solidity
// verifier
func checkCGammaNotEqualToSHash(c *big.Int, gamma kyber.Point, s *big.Int,
    hash kyber.Point) error {
    cGamma := secp256k1Curve.Point().Mul(secp256k1.IntToScalar(c), gamma)
    sHash := secp256k1Curve.Point().Mul(secp256k1.IntToScalar(s), hash)
    if cGamma.Equal(sHash) {
        return ErrCGammaEqualsSHash
    }
    return nil
}

// vrfRandomOutputHashPrefix is a domain-separation tag for the hash used to
// compute the final VRF random output
var vrfRandomOutputHashPrefix = common.BigToHash(three).Bytes()

// VerifyVRFProof is true iff gamma was generated in the mandated way from the
// given publicKey and seed, and no error was encountered
func (p *Proof) VerifyVRFProof() (bool, error) {
    if !p.WellFormed() {
        return false, fmt.Errorf("badly-formatted proof")
    }
    h, err := HashToCurve(p.PublicKey, p.Seed, func(*big.Int) {})
    if err != nil {
        return false, err
    }
    err = checkCGammaNotEqualToSHash(p.C, p.Gamma, p.S, h)
    if err != nil {
        return false, fmt.Errorf("c*γ = s*hash (disallowed in solidity verifier)")
    }
    // publicKey = secretKey*Generator. See GenerateProof for u, v, m, s
    // c*secretKey*Generator + (m - c*secretKey)*Generator = m*Generator = u
    uPrime := linearCombination(p.C, p.PublicKey, p.S, Generator)
    // c*secretKey*h + (m - c*secretKey)*h = m*h = v
    vPrime := linearCombination(p.C, p.Gamma, p.S, h)
    uWitness := secp256k1.EthereumAddress(uPrime)
    cPrime := ScalarFromCurvePoints(h, p.PublicKey, p.Gamma, uWitness, vPrime)
    output := utils.MustHash(string(append(
        vrfRandomOutputHashPrefix, secp256k1.LongMarshal(p.Gamma)...)))
    return equal(p.C, cPrime) && equal(p.Output, output.Big()), nil
}
// generateProofWithNonce allows external nonce generation for testing purposes
//
// As with signatures, using nonces which are in any way predictable to an
// adversary will leak your secret key! Most people should use GenerateProof
// instead.
func generateProofWithNonce(secretKey, seed, nonce *big.Int) (*Proof, error) {
    if !(secp256k1.RepresentsScalar(secretKey) && seed.BitLen() <= 256) {
        return nil, fmt.Errorf("badly-formatted key or seed")
    }
    skAsScalar := secp256k1.IntToScalar(secretKey)
    publicKey := secp256k1Curve.Point().Mul(skAsScalar, nil)
    h, err := HashToCurve(publicKey, seed, func(*big.Int) {})
    if err != nil {
        return nil, errors.Wrap(err, "vrf.makeProof#HashToCurve")
    }
    gamma := secp256k1Curve.Point().Mul(skAsScalar, h)
    sm := secp256k1.IntToScalar(nonce)
    u := secp256k1Curve.Point().Mul(sm, Generator)
    uWitness := secp256k1.EthereumAddress(u)
    v := secp256k1Curve.Point().Mul(sm, h)
    c := ScalarFromCurvePoints(h, publicKey, gamma, uWitness, v)
    // (m - c*secretKey) % GroupOrder
    s := mod(sub(nonce, mul(c, secretKey)), secp256k1.GroupOrder)
    if err := checkCGammaNotEqualToSHash(c, gamma, s, h); err != nil {
        return nil, err
    }
    outputHash := utils.MustHash(string(append(vrfRandomOutputHashPrefix,
        secp256k1.LongMarshal(gamma)...)))
    rv := Proof{
        PublicKey: publicKey,
        Gamma:     gamma,
        C:         c,
        S:         s,
        Seed:      seed,
        Output:    outputHash.Big(),
    }
    // Sanity-check our own proof before handing it out.
    valid, err := rv.VerifyVRFProof()
    if !valid || err != nil {
        panic("constructed invalid proof")
    }
    return &rv, nil
}

// GenerateProof returns gamma, plus proof that gamma was constructed from seed
// as mandated from the given secretKey, with public key secretKey*Generator
//
// secretKey and seed must be less than secp256k1 group order. (Without this
// constraint on the seed, the samples and the possible public keys would
// deviate very slightly from uniform distribution.)
func GenerateProof(secretKey, seed common.Hash) (*Proof, error) {
    for {
        nonce, err := rand.Int(rand.Reader, secp256k1.GroupOrder)
        if err != nil {
            return nil, err
        }
        proof, err := generateProofWithNonce(secretKey.Big(), seed.Big(), nonce)
        switch {
        case err == ErrCGammaEqualsSHash:
            // This is cryptographically impossible, but if it were ever to happen, we
            // should try again with a different nonce.
            continue
        case err != nil: // Any other error indicates failure
            return nil, err
        default:
            return proof, err // err should be nil
        }
    }
}

// ---- new file in this patch: core/services/vrf/vrf_coordinator_abi_values.go ----

package vrf

import (
    "fmt"
    "strings"
    "sync"

    "github.com/ethereum/go-ethereum/accounts/abi"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/hexutil"

    "chainlink/core/services/vrf/generated/solidity_vrf_coordinator_interface"
)

// CoordinatorABI returns the ABI for the VRFCoordinator contract
func CoordinatorABI() abi.ABI {
    return coordinatorABIValues().coordinatorABI
}

// FulfillMethod returns the golang abstraction of the fulfillRandomnessRequest method
func FulfillMethod() abi.Method {
    return coordinatorABIValues().fulfillMethod
}

// FulfillSelector returns the signature of the fulfillRandomnessRequest method
// on the VRFCoordinator contract
func FulfillSelector() string {
    return coordinatorABIValues().fulfillSelector
}

// RandomnessRequestLogTopic returns the signature of the RandomnessRequest log
// emitted by the VRFCoordinator contract
func RandomnessRequestLogTopic() common.Hash {
    return coordinatorABIValues().randomnessRequestLogTopic
}

// randomnessRequestRawDataArgs returns a list of the arguments to the
// RandomnessRequest log emitted by the VRFCoordinator contract
func randomnessRequestRawDataArgs() abi.Arguments {
    return coordinatorABIValues().randomnessRequestRawDataArgs
}

var fulfillMethodName = "fulfillRandomnessRequest"

// abiValues is a singleton carrying information parsed once from the
// VRFCoordinator abi string
type abiValues struct {
    // CoordinatorABI is the ABI of the VRFCoordinator
    coordinatorABI  abi.ABI
    fulfillSelector string
    fulfillMethod   abi.Method
    // RandomnessRequestLogTopic is the signature of the RandomnessRequest log
    randomnessRequestLogTopic    common.Hash
    randomnessRequestRawDataArgs abi.Arguments
}

var dontUseThisUseGetterFunctionsAbove abiValues
var parseABIOnce sync.Once

// coordinatorABIValues parses the ABI at most once (thread-safe via sync.Once)
// and returns the populated singleton.
func coordinatorABIValues() *abiValues {
    parseABIOnce.Do(readCoordinatorABI)
    return &dontUseThisUseGetterFunctionsAbove
}

// readCoordinatorABI populates the singleton from the generated ABI string.
// Panics on failure: that can only mean the generated bindings are broken.
func readCoordinatorABI() {
    v := &dontUseThisUseGetterFunctionsAbove
    var err error
    v.coordinatorABI, err = abi.JSON(strings.NewReader(
        solidity_vrf_coordinator_interface.VRFCoordinatorABI))
    if err != nil {
        panic(err)
    }
    var found bool
    v.fulfillMethod, found = v.coordinatorABI.Methods[fulfillMethodName]
    if !found {
        panic(fmt.Errorf("could not find method %s in VRFCoordinator ABI",
            fulfillMethodName))
    }
    v.fulfillSelector = hexutil.Encode(v.fulfillMethod.ID())
    randomnessRequestABI := v.coordinatorABI.Events["RandomnessRequest"]
    v.randomnessRequestLogTopic = randomnessRequestABI.ID()
    for _, arg := range randomnessRequestABI.Inputs {
        // Only non-indexed args appear in the log's raw data section.
        if !arg.Indexed {
            v.randomnessRequestRawDataArgs = append(v.randomnessRequestRawDataArgs, arg)
        }
    }
}

// ---- new file in this patch: core/services/vrf/vrf_coordinator_interface.go ----

package vrf

import (
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/pkg/errors"

    "chainlink/core/assets"
    "chainlink/core/eth"
    "chainlink/core/services/vrf/generated/solidity_vrf_coordinator_interface"
    "chainlink/core/utils"
)

// RawRandomnessRequestLog is used to parse a RandomnessRequest log into types
// go-ethereum knows about.
type RawRandomnessRequestLog solidity_vrf_coordinator_interface.VRFCoordinatorRandomnessRequest

// RandomnessRequestLog contains the data for a RandomnessRequest log,
// represented as compatible golang types.
type RandomnessRequestLog struct {
    KeyHash common.Hash
    Seed    *big.Int // uint256
    JobID   common.Hash
    Sender  common.Address
    Fee     *assets.Link // uint256
    Raw     RawRandomnessRequestLog
}

// dummyCoordinator exists only for its log-parsing methods, which do not touch
// the (zero) address or (nil) backend it is constructed with.
var dummyCoordinator, _ = solidity_vrf_coordinator_interface.NewVRFCoordinator(
    common.Address{}, nil)

// toGethLog converts a chainlink eth.Log into the equivalent go-ethereum
// types.Log, field by field.
func toGethLog(log eth.Log) types.Log {
    return types.Log{
        Address:     log.Address,
        Topics:      log.Topics,
        Data:        []byte(log.Data),
        BlockNumber: log.BlockNumber,
        TxHash:      log.TxHash,
        TxIndex:     log.TxIndex,
        BlockHash:   log.BlockHash,
        Index:       log.Index,
        Removed:     log.Removed,
    }
}

// ParseRandomnessRequestLog returns the RandomnessRequestLog corresponding to
// the raw logData
func ParseRandomnessRequestLog(log eth.Log) (*RandomnessRequestLog, error) {
    rawLog, err := dummyCoordinator.ParseRandomnessRequest(toGethLog(log))
    if err != nil {
        return nil, errors.Wrapf(err,
            "while parsing %x as RandomnessRequestLog", log.Data)
    }
    return RawRandomnessRequestLogToRandomnessRequestLog(
        (*RawRandomnessRequestLog)(rawLog)), nil
}

// RawData returns the raw bytes corresponding to l in a solidity log
//
// This serialization does not include the JobID, because that's an indexed field.
func (l *RandomnessRequestLog) RawData() ([]byte, error) {
    return randomnessRequestRawDataArgs().Pack(l.KeyHash,
        l.Seed, l.Sender, (*big.Int)(l.Fee))
}
+func (l *RandomnessRequestLog) Equal(ol RandomnessRequestLog) bool { + return l.KeyHash == ol.KeyHash && equal(l.Seed, ol.Seed) && + l.JobID == ol.JobID && l.Sender == ol.Sender && l.Fee.Cmp(ol.Fee) == 0 +} + +func (l *RandomnessRequestLog) RequestID() common.Hash { + soliditySeed, err := utils.Uint256ToBytes(l.Seed) + if err != nil { + panic(errors.Wrapf(err, "vrf seed out of bounds in %#+v", l)) + } + return utils.MustHash(string(append(l.KeyHash[:], soliditySeed...))) +} + +func RawRandomnessRequestLogToRandomnessRequestLog( + l *RawRandomnessRequestLog) *RandomnessRequestLog { + return &RandomnessRequestLog{ + KeyHash: l.KeyHash, + Seed: l.Seed, + JobID: l.JobID, + Sender: l.Sender, + Fee: (*assets.Link)(l.Fee), + Raw: *l, + } +} diff --git a/core/services/vrf/vrf_coordinator_interface_test.go b/core/services/vrf/vrf_coordinator_interface_test.go new file mode 100644 index 00000000000..763ab175d25 --- /dev/null +++ b/core/services/vrf/vrf_coordinator_interface_test.go @@ -0,0 +1,51 @@ +package vrf_test + +import ( + "math/big" + "testing" + + "chainlink/core/assets" + "chainlink/core/eth" + "chainlink/core/services/vrf" + "chainlink/core/store/models/vrfkey" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + secretKey = vrfkey.NewPrivateKeyXXXTestingOnly(big.NewInt(1)) + keyHash = secretKey.PublicKey.Hash() + jobID = common.BytesToHash([]byte("1234567890abcdef1234567890abcdef")) + seed = big.NewInt(1) + sender = common.HexToAddress("0xecfcab0a285d3380e488a39b4bb21e777f8a4eac") + fee = assets.NewLink(100) + raw = vrf.RawRandomnessRequestLog{keyHash, seed, jobID, sender, + (*big.Int)(fee), types.Log{ + // A raw, on-the-wire RandomnessRequestLog is the concat of fields as uint256's + Data: append(append(append( + keyHash.Bytes(), + common.BigToHash(seed).Bytes()...), + sender.Hash().Bytes()...), + fee.ToHash().Bytes()...), + 
Topics: []common.Hash{common.Hash{}, jobID}, + }, + } +) + +func TestVRFParseRandomnessRequestLog(t *testing.T) { + r := vrf.RawRandomnessRequestLogToRandomnessRequestLog(&raw) + rawLog, err := r.RawData() + require.NoError(t, err) + assert.Equal(t, rawLog, raw.Raw.Data) + nR, err := vrf.ParseRandomnessRequestLog(eth.Log{ + Data: rawLog, + Topics: []common.Hash{common.Hash{}, jobID}, + }) + require.NoError(t, err) + require.True(t, r.Equal(*nR), + "Round-tripping RandomnessRequestLog through serialization and parsing "+ + "resulted in a different log.") +} diff --git a/core/services/vrf/vrf_coordinator_solidity_crosscheck_test.go b/core/services/vrf/vrf_coordinator_solidity_crosscheck_test.go new file mode 100644 index 00000000000..df5df75c708 --- /dev/null +++ b/core/services/vrf/vrf_coordinator_solidity_crosscheck_test.go @@ -0,0 +1,276 @@ +package vrf + +import ( + "math/big" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + gethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + chainlink_eth "chainlink/core/eth" + "chainlink/core/services/signatures/secp256k1" + "chainlink/core/utils" + + "chainlink/core/services/vrf/generated/link_token_interface" + "chainlink/core/services/vrf/generated/solidity_request_id" + "chainlink/core/services/vrf/generated/solidity_vrf_consumer_interface" + "chainlink/core/services/vrf/generated/solidity_vrf_coordinator_interface" +) + +func toCLEthLog(log gethTypes.Log) chainlink_eth.Log { + return chainlink_eth.Log{ + Address: log.Address, + Topics: log.Topics, + Data: chainlink_eth.UntrustedBytes(log.Data), + BlockNumber: log.BlockNumber, + 
TxHash: log.TxHash, + TxIndex: log.TxIndex, + BlockHash: log.BlockHash, + Index: log.Index, + Removed: log.Removed, + } +} + +// coordinator represents the universe in which a randomness request occurs and +// is fulfilled. +type coordinator struct { + // Golang wrappers ofr solidity contracts + rootContract *solidity_vrf_coordinator_interface.VRFCoordinator + linkContract *link_token_interface.LinkToken + consumerContract *solidity_vrf_consumer_interface.VRFConsumer + requestIDBase *solidity_request_id.VRFRequestIDBaseTestHelper + rootContractAddress common.Address + consumerContractAddress common.Address + // Abstraction representation of the ethereum blockchain + backend *backends.SimulatedBackend + coordinatorABI *abi.ABI + consumerABI *abi.ABI + // Cast of participants + sergey *bind.TransactOpts // Owns all the LINK initially + neil *bind.TransactOpts // Node operator running VRF service + carol *bind.TransactOpts // Author of consuming contract which requests randomness +} + +// newIdentity returns a go-ethereum abstraction of an ethereum account for +// interacting with contract golang wrappers +func newIdentity(t *testing.T) *bind.TransactOpts { + key, err := crypto.GenerateKey() + require.NoError(t, err, "failed to generate ethereum identity") + return bind.NewKeyedTransactor(key) +} + +// deployCoordinator sets up all identities and contracts associated with +// testing the solidity VRF contracts involved in randomness request workflow +func deployCoordinator(t *testing.T) coordinator { + var ( + sergey = newIdentity(t) + neil = newIdentity(t) + carol = newIdentity(t) + ) + oneEth := bi(1000000000000000000) + genesisData := core.GenesisAlloc{ + sergey.From: {Balance: oneEth}, + neil.From: {Balance: oneEth}, + carol.From: {Balance: oneEth}, + } + gasLimit := eth.DefaultConfig.Miner.GasCeil + consumerABI, err := abi.JSON(strings.NewReader( + solidity_vrf_consumer_interface.VRFConsumerABI)) + require.NoError(t, err) + coordinatorABI, err := 
abi.JSON(strings.NewReader( + solidity_vrf_coordinator_interface.VRFCoordinatorABI)) + require.NoError(t, err) + backend := backends.NewSimulatedBackend(genesisData, gasLimit) + linkAddress, _, linkContract, err := link_token_interface.DeployLinkToken( + sergey, backend) + require.NoError(t, err, "failed to deploy link contract to simulated ethereum blockchain") + coordinatorAddress, _, coordinatorContract, err := + solidity_vrf_coordinator_interface.DeployVRFCoordinator( + neil, backend, linkAddress) + require.NoError(t, err, "failed to deploy VRFCoordinator contract to simulated ethereum blockchain") + consumerContractAddress, _, consumerContract, err := + solidity_vrf_consumer_interface.DeployVRFConsumer( + carol, backend, coordinatorAddress, linkAddress) + require.NoError(t, err, "failed to deploy VRFConsumer contract to simulated ethereum blockchain") + _, _, requestIDBase, err := + solidity_request_id.DeployVRFRequestIDBaseTestHelper(neil, backend) + require.NoError(t, err, "failed to deploy VRFRequestIDBaseTestHelper contract to simulated ethereum blockchain") + _, err = linkContract.Transfer(sergey, consumerContractAddress, oneEth) // Actually, LINK + require.NoError(t, err, "failed to send LINK to VRFConsumer contract on simulated ethereum blockchain") + backend.Commit() + return coordinator{ + rootContract: coordinatorContract, + rootContractAddress: coordinatorAddress, + linkContract: linkContract, + consumerContract: consumerContract, + requestIDBase: requestIDBase, + consumerContractAddress: consumerContractAddress, + backend: backend, + coordinatorABI: &coordinatorABI, + consumerABI: &consumerABI, + sergey: sergey, + neil: neil, + carol: carol, + } +} + +func TestRequestIDMatches(t *testing.T) { + keyHash := common.HexToHash("0x01") + seed := big.NewInt(1) + baseContract := deployCoordinator(t).requestIDBase + solidityRequestID, err := baseContract.MakeRequestId(nil, keyHash, seed) + require.NoError(t, err, "failed to calculate VRF requestID on 
simulated ethereum blockchain") + goRequestLog := &RandomnessRequestLog{KeyHash: keyHash, Seed: seed} + assert.Equal(t, common.Hash(solidityRequestID), goRequestLog.RequestID(), + "solidity VRF requestID differs from golang requestID!") +} + +var ( + secretKey = one // never do this in production! + publicKey = secp256k1Curve.Point().Mul(secp256k1.IntToScalar(secretKey), nil) + seed = two + vrfFee = seven +) + +// registerProvingKey registers keyHash to neil in the VRFCoordinator universe +// represented by coordinator, with the given jobID and fee. +func registerProvingKey(t *testing.T, coordinator coordinator) ( + keyHash [32]byte, jobID [32]byte, fee *big.Int) { + copy(jobID[:], []byte("exactly 32 characters in length.")) + _, err := coordinator.rootContract.RegisterProvingKey( + coordinator.neil, vrfFee, pair(secp256k1.Coordinates(publicKey)), jobID) + require.NoError(t, err, "failed to register VRF proving key on VRFCoordinator contract") + coordinator.backend.Commit() + keyHash = utils.MustHash(string(secp256k1.LongMarshal(publicKey))) + return keyHash, jobID, vrfFee +} + +func TestRegisterProvingKey(t *testing.T) { + coordinator := deployCoordinator(t) + keyHash, jobID, fee := registerProvingKey(t, coordinator) + log, err := coordinator.rootContract.FilterNewServiceAgreement(nil) + require.NoError(t, err, "failed to subscribe to NewServiceAgreement logs on simulated ethereum blockchain") + logCount := 0 + for log.Next() { + logCount += 1 + assert.Equal(t, log.Event.KeyHash, keyHash, "VRFCoordinator logged a different keyHash than was registered") + assert.True(t, equal(fee, log.Event.Fee), "VRFCoordinator logged a different fee than was registered") + } + require.Equal(t, 1, logCount, "unexpected NewServiceAgreement log generated by key VRF key registration") + serviceAgreement, err := coordinator.rootContract.ServiceAgreements(nil, keyHash) + require.NoError(t, err, "failed to retrieve previously registered VRF service agreement from VRFCoordinator") + 
assert.Equal(t, coordinator.neil.From, serviceAgreement.VRFOracle, + "VRFCoordinator registered wrong provider, on service agreement!") + assert.Equal(t, jobID, serviceAgreement.JobID, + "VRFCoordinator registered wrong jobID, on service agreement!") + assert.True(t, equal(fee, serviceAgreement.Fee), + "VRFCoordinator registered wrong fee, on service agreement!") +} + +// requestRandomness sends a randomness request via Carol's consuming contract, +// in the VRFCoordinator universe represented by coordinator, specifying the +// given keyHash and seed, and paying the given fee. It returns the log emitted +// from the VRFCoordinator in response to the request +func requestRandomness(t *testing.T, coordinator coordinator, + keyHash common.Hash, fee, seed *big.Int) *RandomnessRequestLog { + _, err := coordinator.consumerContract.RequestRandomness(coordinator.carol, + keyHash, fee, seed) + require.NoError(t, err, "problem during initial VRF randomness request") + coordinator.backend.Commit() + log, err := coordinator.rootContract.FilterRandomnessRequest(nil, nil) + require.NoError(t, err, "failed to subscribe to RandomnessRequest logs") + logCount := 0 + for log.Next() { + logCount += 1 + } + require.Equal(t, 1, logCount, "unexpected log generated by randomness request to VRFCoordinator") + return RawRandomnessRequestLogToRandomnessRequestLog( + (*RawRandomnessRequestLog)(log.Event)) +} + +func TestRandomnessRequestLog(t *testing.T) { + coordinator := deployCoordinator(t) + keyHash_, jobID_, fee := registerProvingKey(t, coordinator) + keyHash := common.BytesToHash(keyHash_[:]) + jobID := common.BytesToHash(jobID_[:]) + log := requestRandomness(t, coordinator, keyHash, fee, seed) + assert.Equal(t, keyHash, log.KeyHash, "VRFCoordinator logged wrong KeyHash for randomness request") + nonce := zero + actualSeed, err := coordinator.requestIDBase.MakeVRFInputSeed(nil, keyHash, + seed, coordinator.consumerContractAddress, nonce) + require.NoError(t, err, "failure while using 
VRFCoordinator to calculate actual VRF input seed") + assert.True(t, equal(actualSeed, log.Seed), "VRFCoordinator logged wrong actual input seed from randomness request") + golangSeed := utils.MustHash(string(append(append(append( + keyHash[:], + common.BigToHash(seed).Bytes()...), + coordinator.consumerContractAddress.Hash().Bytes()...), + common.BigToHash(nonce).Bytes()...))) + assert.Equal(t, golangSeed, common.BigToHash((log.Seed)), "VRFCoordinator logged different actual input seed than expected by golang code!") + assert.Equal(t, jobID, log.JobID, "VRFCoordinator logged different JobID from randomness request!") + assert.Equal(t, coordinator.consumerContractAddress, log.Sender, "VRFCoordinator logged different requester address from randomness request!") + assert.True(t, equal(fee, (*big.Int)(log.Fee)), "VRFCoordinator logged different fee from randomness request!") + parsedLog, err := ParseRandomnessRequestLog(toCLEthLog(log.Raw.Raw)) + assert.NoError(t, err, "could not parse randomness request log generated by VRFCoordinator") + assert.True(t, parsedLog.Equal(*log), "got a different randomness request log by parsing the raw data than reported by simulated backend") +} + +// fulfillRandomnessRequest is neil fulfilling randomness requested by log. 
+func fulfillRandomnessRequest(t *testing.T, coordinator coordinator, + log RandomnessRequestLog) *Proof { + proof, err := generateProofWithNonce(secretKey, log.Seed, one /* nonce */) + require.NoError(t, err, "could not generate VRF proof!") + proofBlob, err := proof.MarshalForSolidityVerifier() + require.NoError(t, err, "could not marshal VRF proof for VRFCoordinator!") + _, err = coordinator.rootContract.FulfillRandomnessRequest( + coordinator.neil, proofBlob[:]) + require.NoError(t, err, "failed to fulfill randomness request!") + coordinator.backend.Commit() + return proof +} + +func TestFulfillRandomness(t *testing.T) { + coordinator := deployCoordinator(t) + keyHash, _, fee := registerProvingKey(t, coordinator) + randomnessRequestLog := requestRandomness(t, coordinator, keyHash, fee, seed) + proof := fulfillRandomnessRequest(t, coordinator, *randomnessRequestLog) + output, err := coordinator.consumerContract.RandomnessOutput(nil) + require.NoError(t, err, "failed to get VRF output from consuming contract, after randomness request was fulfilled") + assert.True(t, equal(proof.Output, output), "VRF output from randomness request fulfillment was different than provided!") + requestID, err := coordinator.consumerContract.RequestId(nil) + require.NoError(t, err, "failed to get requestId from VRFConsumer") + assert.Equal(t, randomnessRequestLog.RequestID(), common.Hash(requestID), "VRFConsumer has different request ID than logged from randomness request!") + neilBalance, err := coordinator.rootContract.WithdrawableTokens( + nil, coordinator.neil.From) + require.NoError(t, err, "failed to get neil's token balance, after he successfully fullfilled a randomness request") + assert.True(t, equal(neilBalance, fee), "neil's balance on VRFCoordinator was not paid his fee, despite succesfull fulfillment of randomness request!") +} + +func TestWithdraw(t *testing.T) { + coordinator := deployCoordinator(t) + keyHash, _, fee := registerProvingKey(t, coordinator) + log := 
requestRandomness(t, coordinator, keyHash, fee, seed) + fulfillRandomnessRequest(t, coordinator, *log) + payment := four + peteThePunter := common.HexToAddress("0xdeadfa11deadfa11deadfa11deadfa11deadfa11") + _, err := coordinator.rootContract.Withdraw(coordinator.neil, peteThePunter, payment) + require.NoError(t, err, "failed to withdraw LINK from neil's balance") + coordinator.backend.Commit() + peteBalance, err := coordinator.linkContract.BalanceOf(nil, peteThePunter) + require.NoError(t, err, "failed to get balance of payee on LINK contract, after payment") + assert.True(t, equal(payment, peteBalance), "LINK balance is wrong, following payment") + neilBalance, err := coordinator.rootContract.WithdrawableTokens( + nil, coordinator.neil.From) + require.NoError(t, err, "failed to get neil's balance on VRFCoordinator") + assert.True(t, equal(i().Sub(fee, payment), neilBalance), "neil's VRFCoordinator balance is wrong, after he's made a withdrawal!") + _, err = coordinator.rootContract.Withdraw(coordinator.neil, peteThePunter, fee) + assert.Error(t, err, "VRFcoordinator allowed overdraft") +} diff --git a/core/services/vrf/vrf_fulfillment_cost_test.go b/core/services/vrf/vrf_fulfillment_cost_test.go new file mode 100644 index 00000000000..5863bdb33f3 --- /dev/null +++ b/core/services/vrf/vrf_fulfillment_cost_test.go @@ -0,0 +1,29 @@ +package vrf + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMeasureFulfillmenttGasCost(t *testing.T) { + coordinator := deployCoordinator(t) + keyHash, _, fee := registerProvingKey(t, coordinator) + // Set up a request to fulfill + log := requestRandomness(t, coordinator, keyHash, fee, seed) + proof, err := generateProofWithNonce(secretKey, log.Seed, one /* nonce */) + require.NoError(t, err, "could not generate VRF proof!") + // Set up the proof with which to fulfill request + proofBlob, err := proof.MarshalForSolidityVerifier() + require.NoError(t, err, "could not 
marshal VRF proof for VRFCoordinator!") + + estimate := estimateGas(t, coordinator.backend, coordinator.neil.From, + coordinator.rootContractAddress, coordinator.coordinatorABI, + "fulfillRandomnessRequest", proofBlob[:]) + + assert.Greater(t, estimate, uint64(148000), + "fulfillRandomness tx cost less gas than expected") + assert.Less(t, estimate, uint64(250000), + "fulfillRandomness tx cost more gas than expected") +} diff --git a/core/services/vrf/vrf_hash_to_curve_cost_test.go b/core/services/vrf/vrf_hash_to_curve_cost_test.go new file mode 100644 index 00000000000..4c1276f844f --- /dev/null +++ b/core/services/vrf/vrf_hash_to_curve_cost_test.go @@ -0,0 +1,101 @@ +package vrf + +import ( + "context" + "crypto/ecdsa" + "math/big" + "strings" + "testing" + + "chainlink/core/services/signatures/secp256k1" + "chainlink/core/services/vrf/generated/solidity_verifier_wrapper" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type contract struct { + contract *bind.BoundContract + address common.Address + abi *abi.ABI + backend *backends.SimulatedBackend +} + +// deployVRFContract returns a deployed VRF contract, with some extra attributes +// which are useful for gas measurements. 
+func deployVRFContract(t *testing.T) (contract, common.Address) { + x, y := secp256k1.Coordinates(Generator) + key := ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{Curve: crypto.S256(), X: x, Y: y}, + D: big.NewInt(1), + } + auth := bind.NewKeyedTransactor(&key) + genesisData := core.GenesisAlloc{auth.From: {Balance: bi(1000000000)}} + gasLimit := eth.DefaultConfig.Miner.GasCeil + backend := backends.NewSimulatedBackend(genesisData, gasLimit) + parsed, err := abi.JSON(strings.NewReader( + solidity_verifier_wrapper.VRFTestHelperABI)) + require.NoError(t, err, "could not parse VRF ABI") + address, _, vRFContract, err := bind.DeployContract(auth, parsed, + common.FromHex(solidity_verifier_wrapper.VRFTestHelperBin), backend) + require.NoError(t, err, "failed to deploy VRF contract to simulated blockchain") + backend.Commit() + return contract{vRFContract, address, &parsed, backend}, crypto.PubkeyToAddress( + key.PublicKey) +} + +// estimateGas returns the estimated gas cost of running the given method on the +// contract at address to, on the given backend, with the given args, and given +// that the transaction is sent from the from address. +func estimateGas(t *testing.T, backend *backends.SimulatedBackend, + from, to common.Address, abi *abi.ABI, method string, args ...interface{}, +) uint64 { + rawData, err := abi.Pack(method, args...) 
+ require.NoError(t, err, "failed to construct raw %s transaction with args %s", + method, args) + callMsg := ethereum.CallMsg{From: from, To: &to, Data: rawData} + estimate, err := backend.EstimateGas(context.TODO(), callMsg) + require.NoError(t, err, "failed ot estimate gas from %s call with args %s", + method, args) + return estimate +} + +func measureHashToCurveGasCost(t *testing.T, contract contract, + owner common.Address, input int64) (gasCost, numOrdinates uint64) { + estimate := estimateGas(t, contract.backend, owner, contract.address, + contract.abi, "hashToCurve_", pair(secp256k1.Coordinates(Generator)), + big.NewInt(input)) + + _, err := HashToCurve(Generator, big.NewInt(input), + func(*big.Int) { numOrdinates += 1 }) + require.NoError(t, err, "corresponding golang HashToCurve calculation failed") + return estimate, numOrdinates +} + +var baseCost uint64 = 25000 +var marginalCost uint64 = 15555 + +func HashToCurveGasCostBound(numOrdinates uint64) uint64 { + return baseCost + marginalCost*numOrdinates +} + +func TestMeasureHashToCurveGasCost(t *testing.T) { + contract, owner := deployVRFContract(t) + numSamples := int64(numSamples()) + for i := int64(0); i < numSamples; i += 1 { + gasCost, numOrdinates := measureHashToCurveGasCost(t, contract, owner, i) + assert.Less(t, gasCost, HashToCurveGasCostBound(numOrdinates), + "on-chain hashToCurve gas cost exceeded estimate function") + } + require.Less(t, HashToCurveGasCostBound(128), uint64(2.017e6), + "estimate for on-chain hashToCurve gas cost with 128 iterations is greater "+ + "than stated in the VRF.sol documentation") +} diff --git a/core/services/vrf/vrf_randomness_output_cost_test.go b/core/services/vrf/vrf_randomness_output_cost_test.go new file mode 100644 index 00000000000..1a94aeade8d --- /dev/null +++ b/core/services/vrf/vrf_randomness_output_cost_test.go @@ -0,0 +1,30 @@ +package vrf + +import ( + mrand "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/common" + 
"github.com/stretchr/testify/require" + + "chainlink/core/services/signatures/secp256k1" +) + +func TestMeasureRandomValueFromVRFProofGasCost(t *testing.T) { + r := mrand.New(mrand.NewSource(10)) + sk := randomScalar(t, r) + skNum := secp256k1.ToInt(sk) + nonce := randomScalar(t, r) + seed := randomUint256(t, r) + proof, err := generateProofWithNonce(skNum, seed, secp256k1.ToInt(nonce)) + require.NoError(t, err, "failed to generate VRF proof") + mproof, err := proof.MarshalForSolidityVerifier() + require.NoError(t, err, "failed to marshal VRF proof for on-chain verification") + contract, _ := deployVRFContract(t) + + estimate := estimateGas(t, contract.backend, common.Address{}, + contract.address, contract.abi, "randomValueFromVRFProof_", mproof[:]) + + require.NoError(t, err, "failed to estimate gas cost for VRF verification") + require.Less(t, estimate, uint64(100000)) +} diff --git a/core/services/vrf/vrf_request_cost_test.go b/core/services/vrf/vrf_request_cost_test.go new file mode 100644 index 00000000000..ef45f6d1512 --- /dev/null +++ b/core/services/vrf/vrf_request_cost_test.go @@ -0,0 +1,22 @@ +package vrf + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" +) + +func TestMeasureRandomnessRequestGasCost(t *testing.T) { + coordinator := deployCoordinator(t) + keyHash_, _, fee := registerProvingKey(t, coordinator) + + estimate := estimateGas(t, coordinator.backend, common.Address{}, + coordinator.consumerContractAddress, coordinator.consumerABI, + "requestRandomness", common.BytesToHash(keyHash_[:]), fee, one) + + assert.Greater(t, estimate, uint64(175000), + "requestRandomness tx gas cost lower than expected") + assert.Less(t, estimate, uint64(176000), + "requestRandomness tx gas cost higher than expected") +} diff --git a/core/services/vrf/vrf_solidity_crosscheck_test.go b/core/services/vrf/vrf_solidity_crosscheck_test.go new file mode 100644 index 00000000000..b25b7ab379d --- /dev/null +++ 
b/core/services/vrf/vrf_solidity_crosscheck_test.go @@ -0,0 +1,386 @@ +package vrf + +import ( + "crypto/ecdsa" + "math/big" + mrand "math/rand" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.dedis.ch/kyber/v3" + + "chainlink/core/services/vrf/generated/solidity_verifier_wrapper" + + "chainlink/core/services/signatures/secp256k1" + "chainlink/core/utils" +) + +// Cross-checks of golang implementation details vs corresponding solidity +// details. +// +// It's worth automatically checking these implementation details because they +// can help to quickly locate any disparity between the solidity and golang +// implementations. + +// deployVRFContract returns the wrapper of the EVM verifier contract. +// +// NB: For changes to the VRF solidity code to be reflected here, "go generate" +// must be run in core/services/vrf. +// +// TODO(alx): This suit used to be much faster, presumably because all tests +// were sharing a common global verifier (which is fine, because all methods are +// pure.) Revert to that, and see if it helps. 
+func deployVRFTestHelper(t *testing.T) *solidity_verifier_wrapper.VRFTestHelper { + key, err := crypto.GenerateKey() + require.NoError(t, err, "failed to create root ethereum identity") + auth := bind.NewKeyedTransactor(key) + genesisData := core.GenesisAlloc{auth.From: {Balance: bi(1000000000)}} + gasLimit := eth.DefaultConfig.Miner.GasCeil + backend := backends.NewSimulatedBackend(genesisData, gasLimit) + _, _, verifier, err := solidity_verifier_wrapper.DeployVRFTestHelper(auth, backend) + require.NoError(t, err, "failed to deploy VRF contract to simulated blockchain") + backend.Commit() + return verifier +} + +// randomUint256 deterministically simulates a uniform sample of uint256's, +// given r's seed +// +// Never use this if cryptographic security is required +func randomUint256(t *testing.T, r *mrand.Rand) *big.Int { + b := make([]byte, 32) + _, err := r.Read(b) + require.NoError(t, err, "failed to read random sample") // deterministic, though + return i().SetBytes(b) +} + +// numSamples returns the number of examples which should be checked, in +// generative tests +func numSamples() int { + return 10 +} + +func TestVRF_CompareProjectiveECAddToVerifier(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(11)) + for j := 0; j < numSamples(); j++ { + p := randomPoint(t, r) + q := randomPoint(t, r) + px, py := secp256k1.Coordinates(p) + qx, qy := secp256k1.Coordinates(q) + actualX, actualY, actualZ := ProjectiveECAdd(p, q) + verifier := deployVRFTestHelper(t) + expectedX, expectedY, expectedZ, err := verifier.ProjectiveECAdd( + nil, px, py, qx, qy) + require.NoError(t, err, "failed to compute secp256k1 sum in projective coords") + assert.Equal(t, [3]*big.Int{expectedX, expectedY, expectedZ}, + [3]*big.Int{actualX, actualY, actualZ}, + "got different answers on-chain vs off-chain, for ProjectiveECAdd") + } +} + +func TestVRF_CompareBigModExpToVerifier(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(0)) + for j := 0; j < 
numSamples(); j++ { + base := randomUint256(t, r) + exponent := randomUint256(t, r) + actual, err := deployVRFTestHelper(t).BigModExp(nil, base, exponent) + require.NoError(t, err, "while computing bigmodexp on-chain") + expected := exp(base, exponent, fieldSize) + assert.Equal(t, expected, actual, + "%x ** %x %% %x = %x ≠ %x from solidity calculation", + base, exponent, fieldSize, expected, actual) + } +} + +func TestVRF_CompareSquareRoot(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(1)) + for j := 0; j < numSamples(); j++ { + maybeSquare := randomUint256(t, r) // Might not be square; should get same result anyway + squareRoot, err := deployVRFTestHelper(t).SquareRoot(nil, maybeSquare) + require.NoError(t, err, "failed to compute square root on-chain") + golangSquareRoot := SquareRoot(maybeSquare) + assert.Equal(t, golangSquareRoot, squareRoot, + "expected square root in GF(fieldSize) of %x to be %x, got %x on-chain", + maybeSquare, golangSquareRoot, squareRoot) + assert.True(t, + (!IsSquare(maybeSquare)) || equal(exp(squareRoot, two, fieldSize), maybeSquare), + "maybeSquare is a square, but failed to calculate its square root!") + assert.NotEqual(t, IsSquare(maybeSquare), IsSquare(sub(fieldSize, maybeSquare)), + "negative of a non square should be square, and vice-versa, since -1 is not a square") + } +} + +func TestVRF_CompareYSquared(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(2)) + for i := 0; i < numSamples(); i++ { + x := randomUint256(t, r) + actual, err := deployVRFTestHelper(t).YSquared(nil, x) + require.NoError(t, err, "failed to compute y² given x, on-chain") + assert.Equal(t, YSquared(x), actual, + "different answers for y², on-chain vs off-chain") + } +} + +func TestVRF_CompareFieldHash(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(3)) + msg := make([]byte, 32) + for j := 0; j < numSamples(); j++ { + _, err := r.Read(msg) + require.NoError(t, err, "failed to randomize intended hash message") + 
actual, err := deployVRFTestHelper(t).FieldHash(nil, msg) + require.NoError(t, err, "failed to compute fieldHash on-chain") + expected := fieldHash(msg) + require.Equal(t, expected, actual, + "fieldHash value on-chain differs from off-chain") + } +} + +// randomKey deterministically generates a secp256k1 key. +// +// Never use this if cryptographic security is required +func randomKey(t *testing.T, r *mrand.Rand) *ecdsa.PrivateKey { + secretKey := fieldSize + for secretKey.Cmp(fieldSize) >= 0 { // Keep picking until secretKey < fieldSize + secretKey = randomUint256(t, r) + } + cKey := crypto.ToECDSAUnsafe(secretKey.Bytes()) + return cKey +} + +// pair returns the inputs as a length-2 big.Int array. Useful for translating +// coordinates to the uint256[2]'s VRF.sol uses to represent secp256k1 points. +func pair(x, y *big.Int) [2]*big.Int { return [2]*big.Int{x, y} } +func asPair(p kyber.Point) [2]*big.Int { return pair(secp256k1.Coordinates(p)) } + +func TestVRF_CompareHashToCurve(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(4)) + for i := 0; i < numSamples(); i++ { + input := randomUint256(t, r) + cKey := randomKey(t, r) + pubKeyCoords := pair(cKey.X, cKey.Y) + actual, err := deployVRFTestHelper(t).HashToCurve(nil, pubKeyCoords, input) + require.NoError(t, err, "failed to compute hashToCurve on-chain") + pubKeyPoint := secp256k1.SetCoordinates(cKey.X, cKey.Y) + expected, err := HashToCurve(pubKeyPoint, input, func(*big.Int) {}) + require.NoError(t, err, "failed to compute HashToCurve in golang") + require.Equal(t, asPair(expected), actual, + "on-chain and off-chain calculations of HashToCurve gave different secp256k1 points") + } +} + +// randomPoint deterministically simulates a uniform sample of secp256k1 points, +// given r's seed +// +// Never use this if cryptographic security is required +func randomPoint(t *testing.T, r *mrand.Rand) kyber.Point { + p, err := HashToCurve(Generator, randomUint256(t, r), func(*big.Int) {}) + 
require.NoError(t, err, + "failed to hash random value to secp256k1 while generating random point") + if r.Int63n(2) == 1 { // Uniform sample of ±p + p.Neg(p) + } + return p +} + +// randomPointWithPair returns a random secp256k1, both as a kyber.Point and as +// a pair of *big.Int's. Useful for translating between the types needed by the +// golang contract wrappers. +func randomPointWithPair(t *testing.T, r *mrand.Rand) (kyber.Point, [2]*big.Int) { + p := randomPoint(t, r) + return p, asPair(p) +} + +// randomScalar deterministically simulates a uniform sample of secp256k1 +// scalars, given r's seed +// +// Never use this if cryptographic security is required +func randomScalar(t *testing.T, r *mrand.Rand) kyber.Scalar { + s := randomUint256(t, r) + for s.Cmp(secp256k1.GroupOrder) >= 0 { + s = randomUint256(t, r) + } + return secp256k1.IntToScalar(s) +} + +func TestVRF_CheckSolidityPointAddition(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(5)) + for j := 0; j < numSamples(); j++ { + p1 := randomPoint(t, r) + p2 := randomPoint(t, r) + p1x, p1y := secp256k1.Coordinates(p1) + p2x, p2y := secp256k1.Coordinates(p2) + psx, psy, psz, err := deployVRFTestHelper(t).ProjectiveECAdd( + nil, p1x, p1y, p2x, p2y) + require.NoError(t, err, "failed to compute ProjectiveECAdd, on-chain") + apx, apy, apz := ProjectiveECAdd(p1, p2) + require.Equal(t, []*big.Int{apx, apy, apz}, []*big.Int{psx, psy, psz}, + "got different values on-chain and off-chain for ProjectiveECAdd") + zInv := i().ModInverse(psz, fieldSize) + require.Equal(t, mod(mul(psz, zInv), fieldSize), one, + "failed to calculate correct inverse of z ordinate") + actualSum, err := deployVRFTestHelper(t).AffineECAdd( + nil, pair(p1x, p1y), pair(p2x, p2y), zInv) + require.NoError(t, err, + "failed to deploy VRF contract to simulated blockchain") + assert.Equal(t, asPair(point().Add(p1, p2)), actualSum, + "got different answers, on-chain vs off-chain, for secp256k1 sum in affine coordinates") + } +} + 
+func TestVRF_CheckSolidityECMulVerify(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(6)) + for j := 0; j < numSamples(); j++ { + p := randomPoint(t, r) + pxy := pair(secp256k1.Coordinates(p)) + s := randomScalar(t, r) + product := asPair(point().Mul(s, p)) + actual, err := deployVRFTestHelper(t).EcmulVerify(nil, pxy, secp256k1.ToInt(s), + product) + require.NoError(t, err, "failed to check on-chain that s*p=product") + assert.True(t, actual, + "EcmulVerify rejected a valid secp256k1 scalar product relation") + shouldReject, err := deployVRFTestHelper(t).EcmulVerify(nil, pxy, + add(secp256k1.ToInt(s), one), product) + require.NoError(t, err, "failed to check on-chain that (s+1)*p≠product") + assert.False(t, shouldReject, + "failed to reject a false secp256k1 scalar product relation") + } +} + +func TestVRF_CheckSolidityVerifyLinearCombinationWithGenerator(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(7)) + for j := 0; j < numSamples(); j++ { + c := randomScalar(t, r) + s := randomScalar(t, r) + p := randomPoint(t, r) + expectedPoint := point().Add(point().Mul(c, p), point().Mul(s, Generator)) // cp+sg + expectedAddress := secp256k1.EthereumAddress(expectedPoint) + pPair := asPair(p) + actual, err := deployVRFTestHelper(t).VerifyLinearCombinationWithGenerator(nil, + secp256k1.ToInt(c), pPair, secp256k1.ToInt(s), expectedAddress) + require.NoError(t, err, + "failed to check on-chain that secp256k1 linear relationship holds") + assert.True(t, actual, + "VerifyLinearCombinationWithGenerator rejected a valid secp256k1 linear relationship") + shouldReject, err := deployVRFTestHelper(t).VerifyLinearCombinationWithGenerator(nil, + add(secp256k1.ToInt(c), one), pPair, secp256k1.ToInt(s), expectedAddress) + require.NoError(t, err, + "failed to check on-chain that address((c+1)*p+s*g)≠expectedAddress") + assert.False(t, shouldReject, + "VerifyLinearCombinationWithGenerator accepted an invalid secp256k1 linear relationship!") + } +} + +func 
TestVRF_CheckSolidityLinearComination(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(8)) + for j := 0; j < numSamples(); j++ { + c := randomScalar(t, r) + cNum := secp256k1.ToInt(c) + p1, p1Pair := randomPointWithPair(t, r) + s := randomScalar(t, r) + sNum := secp256k1.ToInt(s) + p2, p2Pair := randomPointWithPair(t, r) + cp1 := point().Mul(c, p1) + cp1Pair := asPair(cp1) + sp2 := point().Mul(s, p2) + sp2Pair := asPair(sp2) + expected := asPair(point().Add(cp1, sp2)) + _, _, z := ProjectiveECAdd(cp1, sp2) + zInv := i().ModInverse(z, fieldSize) + actual, err := deployVRFTestHelper(t).LinearCombination(nil, cNum, p1Pair, + cp1Pair, sNum, p2Pair, sp2Pair, zInv) + require.NoError(t, err, "failed to compute c*p1+s*p2, on-chain") + assert.Equal(t, expected, actual, + "on-chain computation of c*p1+s*p2 gave wrong answer") + _, err = deployVRFTestHelper(t).LinearCombination(nil, add(cNum, one), + p1Pair, cp1Pair, sNum, p2Pair, sp2Pair, zInv) + assert.Error(t, err, + "on-chain LinearCombination accepted a bad product relation! ((c+1)*p1)") + assert.Contains(t, err.Error(), "First multiplication check failed", + "revert message wrong.") + _, err = deployVRFTestHelper(t).LinearCombination(nil, cNum, p1Pair, + cp1Pair, add(sNum, one), p2Pair, sp2Pair, zInv) + assert.Error(t, err, + "on-chain LinearCombination accepted a bad product relation! 
((s+1)*p2)") + assert.Contains(t, err.Error(), "Second multiplication check failed", + "revert message wrong.") + } +} + +func TestVRF_CompareSolidityScalarFromCurvePoints(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(9)) + for j := 0; j < numSamples(); j++ { + hash, hashPair := randomPointWithPair(t, r) + pk, pkPair := randomPointWithPair(t, r) + gamma, gammaPair := randomPointWithPair(t, r) + var uWitness [20]byte + require.NoError(t, utils.JustError(r.Read(uWitness[:])), + "failed to randomize uWitness") + v, vPair := randomPointWithPair(t, r) + expected := ScalarFromCurvePoints(hash, pk, gamma, uWitness, v) + actual, err := deployVRFTestHelper(t).ScalarFromCurvePoints(nil, hashPair, pkPair, + gammaPair, uWitness, vPair) + require.NoError(t, err, "on-chain ScalarFromCurvePoints calculation failed") + assert.Equal(t, expected, actual, + "on-chain ScalarFromCurvePoints output does not match off-chain output!") + } +} + +func TestVRF_MarshalProof(t *testing.T) { + t.Parallel() + r := mrand.New(mrand.NewSource(10)) + for j := 0; j < numSamples(); j++ { + sk := randomScalar(t, r) + skNum := secp256k1.ToInt(sk) + nonce := randomScalar(t, r) + seed := randomUint256(t, r) + proof, err := generateProofWithNonce(skNum, seed, secp256k1.ToInt(nonce)) + require.NoError(t, err, "failed to generate VRF proof!") + mproof, err := proof.MarshalForSolidityVerifier() + require.NoError(t, err, "failed to marshal VRF proof for on-chain verification") + response, err := deployVRFTestHelper(t).RandomValueFromVRFProof(nil, mproof[:]) + require.NoError(t, err, "failed on-chain to verify VRF proof / get its output") + require.True(t, equal(response, proof.Output), + "on-chain VRF output differs from off-chain!") + corruptionTargetByte := r.Int63n(int64(len(mproof))) + // Only the lower 160 bits of the word containing uWitness have any effect + inAddressZeroBytes := func(b int64) bool { return b >= 224 && b < 236 } + originalByte := mproof[corruptionTargetByte] + 
mproof[corruptionTargetByte] += 1 + _, err = deployVRFTestHelper(t).RandomValueFromVRFProof(nil, mproof[:]) + require.True(t, inAddressZeroBytes(corruptionTargetByte) || err != nil, + "VRF verfication accepted a bad proof! Changed byte %d from %d to %d in %s, which is of length %d", + corruptionTargetByte, originalByte, mproof[corruptionTargetByte], + mproof.String(), len(mproof)) + require.True(t, + inAddressZeroBytes(corruptionTargetByte) || + strings.Contains(err.Error(), "invZ must be inverse of z") || + strings.Contains(err.Error(), "First multiplication check failed") || + strings.Contains(err.Error(), "Second multiplication check failed") || + strings.Contains(err.Error(), "cGammaWitness is not on curve") || + strings.Contains(err.Error(), "sHashWitness is not on curve") || + strings.Contains(err.Error(), "gamma is not on curve") || + strings.Contains(err.Error(), "addr(c*pk+s*g)≠_uWitness") || + strings.Contains(err.Error(), "public key is not on curve"), + "VRF verification returned an unknown error: %s", err, + ) + } +} diff --git a/core/services/vrf/vrf_test.go b/core/services/vrf/vrf_test.go index 8af11d0f110..dfcb6e043f6 100644 --- a/core/services/vrf/vrf_test.go +++ b/core/services/vrf/vrf_test.go @@ -5,103 +5,23 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "chainlink/core/services/signatures/secp256k1" - "chainlink/core/utils" ) func TestVRF_IsSquare(t *testing.T) { - assert.True(t, IsSquare(big.NewInt(4))) - minusOneModP := new(big.Int).Sub(P, big.NewInt(1)) + assert.True(t, IsSquare(four)) + minusOneModP := i().Sub(fieldSize, one) assert.False(t, IsSquare(minusOneModP)) } func TestVRF_SquareRoot(t *testing.T) { - assert.Equal(t, big.NewInt(2), SquareRoot(big.NewInt(4))) + assert.Equal(t, two, SquareRoot(four)) } func TestVRF_YSquared(t *testing.T) { - assert.Equal(t, big.NewInt(15), YSquared(two)) + assert.Equal(t, add(mul(two, mul(two, two)), seven), YSquared(two)) // 2³+7 } func 
TestVRF_IsCurveXOrdinate(t *testing.T) { assert.True(t, IsCurveXOrdinate(big.NewInt(1))) assert.False(t, IsCurveXOrdinate(big.NewInt(5))) } - -func TestVRF_CoordsFromPoint(t *testing.T) { - x, y := CoordsFromPoint(Generator) - assert.Equal(t, x, bigFromHex( - "79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798")) - assert.Equal(t, y, bigFromHex( - "483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8")) -} - -func TestVRF_ZqHash(t *testing.T) { - var log2Mod uint = 256 - modulus := lsh(one, log2Mod-1) - hash := sub(lsh(one, log2Mod), one) - assert.Equal(t, 1, hash.Cmp(modulus), - `need an example which hashes to something bigger than the modulus, to test the rehash logic.`) - zqHash, err := ZqHash(modulus, hash.Bytes()) - if err != nil { - panic(err) - } - assert.Equal( - t, - bigFromHex("1ae61e33ec9365756efc1436222a72df7fdb74651e25c38bde613482291a0c69"), - zqHash, - ) -} - -func TestVRF_HashToCurve(t *testing.T) { - reHashTriggeringInput := one - iHash, err := utils.Keccak256(append(secp256k1.LongMarshal(Generator), - asUint256(reHashTriggeringInput)...)) - require.NoError(t, err) - x, err := ZqHash(P, iHash) - require.NoError(t, err) - assert.False(t, IsCurveXOrdinate(x), - `need an example where first hash is not an x-ordinate for any point on the curve, to exercise rehash logic.`) - p, err := HashToCurve(Generator, reHashTriggeringInput) - if err != nil { - panic(err) - } - x, y := CoordsFromPoint(p) - // See 'Hashes to the curve with the same results as the golang code' in Curve.js - eX := "530fddd863609aa12030a07c5fdb323bb392a88343cea123b7f074883d2654c4" - eY := "6fd4ee394bf2a3de542c0e5f3c86fc8f75b278a017701a59d69bdf5134dd6b70" - assert.Equal(t, bigFromHex(eX), x) - assert.Equal(t, bigFromHex(eY), y) -} - -func TestVRF_ScalarFromCurvePoints(t *testing.T) { - g := Generator - ga := secp256k1.EthereumAddress(g) - s := ScalarFromCurvePoints(g, g, g, ga, g) - eS := "2b1049accb1596a24517f96761b22600a690ee5c6b6cadae3fa522e7d95ba338" - // 
See 'Computes the same hashed scalar from curve points as the golang code' in Curve.js - assert.Equal(t, bigFromHex(eS), s) -} - -func TestVRF_GenerateProof(t *testing.T) { - secretKeyHaHaNeverDoThis := big.NewInt(1) - seed := one - nonce := one - // Can't test c & s: They vary from run to run. - proof, err := GenerateProof(secretKeyHaHaNeverDoThis, seed, nonce) - require.NoError(t, err) - publicKey := rcurve.Point().Mul( - secp256k1.IntToScalar(secretKeyHaHaNeverDoThis), Generator) - assert.True(t, publicKey.Equal(proof.PublicKey)) - gammaX, gammaY := CoordsFromPoint(proof.Gamma) - // See 'Accepts a valid VRF proof' in VRF.js. These outputs are used there - gX := "530fddd863609aa12030a07c5fdb323bb392a88343cea123b7f074883d2654c4" - gY := "6fd4ee394bf2a3de542c0e5f3c86fc8f75b278a017701a59d69bdf5134dd6b70" - assert.Equal(t, bigFromHex(gX), gammaX) - assert.Equal(t, bigFromHex(gY), gammaY) - verification, err := proof.Verify() - require.NoError(t, err) - assert.True(t, verification, "proof verification failed") -} diff --git a/core/store/dbutil/dbutil.go b/core/store/dbutil/dbutil.go index fb522e1a556..803b8bb2083 100644 --- a/core/store/dbutil/dbutil.go +++ b/core/store/dbutil/dbutil.go @@ -15,11 +15,6 @@ func IsPostgres(db *gorm.DB) bool { return db.Dialect().GetName() == "postgres" } -// IsSqlite returns true if the underlying database is sqlite. 
-func IsSqlite(db *gorm.DB) bool { - return strings.HasPrefix(db.Dialect().GetName(), "sqlite") -} - // SetTimezone sets the time zone to UTC func SetTimezone(db *gorm.DB) error { if IsPostgres(db) { @@ -27,24 +22,3 @@ func SetTimezone(db *gorm.DB) error { } return nil } - -// SetSqlitePragmas sets some optimization params for SQLite -func SetSqlitePragmas(db *gorm.DB) error { - if IsSqlite(db) { - return db.Exec(` - PRAGMA foreign_keys = ON; - PRAGMA journal_mode = WAL; - `).Error - } - return nil -} - -// LimitSqliteOpenConnections deliberately limits Sqlites concurrency -// to reduce contention, reduce errors, and improve performance: -// https://stackoverflow.com/a/35805826/639773 -func LimitSqliteOpenConnections(db *gorm.DB) error { - if IsSqlite(db) { - db.DB().SetMaxOpenConns(1) - } - return nil -} diff --git a/core/store/migrations/migrate.go b/core/store/migrations/migrate.go index e2024f61c24..96d5f4b8ff1 100644 --- a/core/store/migrations/migrate.go +++ b/core/store/migrations/migrate.go @@ -27,11 +27,13 @@ import ( "chainlink/core/store/migrations/migration1570675883" "chainlink/core/store/migrations/migration1573667511" "chainlink/core/store/migrations/migration1573812490" + "chainlink/core/store/migrations/migration1574659987" "chainlink/core/store/migrations/migration1575036327" "chainlink/core/store/migrations/migration1576022702" "chainlink/core/store/migrations/migration1579700934" "chainlink/core/store/migrations/migration1580904019" "chainlink/core/store/migrations/migration1581240419" + "chainlink/core/store/migrations/migration1584377646" "github.com/jinzhu/gorm" "github.com/pkg/errors" @@ -150,6 +152,10 @@ func MigrateTo(db *gorm.DB, migrationID string) error { ID: "1575036327", Migrate: migration1575036327.Migrate, }, + { + ID: "1574659987", + Migrate: migration1574659987.Migrate, + }, { ID: "1576022702", Migrate: migration1576022702.Migrate, @@ -166,6 +172,10 @@ func MigrateTo(db *gorm.DB, migrationID string) error { ID: "1581240419", 
Migrate: migration1581240419.Migrate, }, + { + ID: "1584377646", + Migrate: migration1584377646.Migrate, + }, } m := gormigrate.New(db, &options, migrations) diff --git a/core/store/migrations/migrate_test.go b/core/store/migrations/migrate_test.go index 095bb9b930c..465faea55ef 100644 --- a/core/store/migrations/migrate_test.go +++ b/core/store/migrations/migrate_test.go @@ -8,6 +8,7 @@ import ( "time" "chainlink/core/assets" + "chainlink/core/gracefulpanic" "chainlink/core/internal/cltest" "chainlink/core/store/migrations" "chainlink/core/store/migrations/migration0" @@ -30,7 +31,7 @@ func bootstrapORM(t *testing.T) (*orm.ORM, func()) { require.NoError(t, os.MkdirAll(config.RootDir(), 0700)) cleanupDB := cltest.PrepareTestDB(tc) - orm, err := orm.NewORM(orm.NormalizedDatabaseURL(config), config.DatabaseTimeout()) + orm, err := orm.NewORM(config.DatabaseURL(), config.DatabaseTimeout(), gracefulpanic.NewSignal()) require.NoError(t, err) orm.SetLogging(true) diff --git a/core/store/migrations/migration1574659987/migrate.go b/core/store/migrations/migration1574659987/migrate.go new file mode 100644 index 00000000000..39996fec05d --- /dev/null +++ b/core/store/migrations/migration1574659987/migrate.go @@ -0,0 +1,16 @@ +package migration1574659987 + +import ( + "github.com/jinzhu/gorm" + "github.com/pkg/errors" + + "chainlink/core/store/models" +) + +// Migrate adds VRF proving-key table, and related subtables. 
+func Migrate(db *gorm.DB) error { + if err := db.AutoMigrate(&models.EncryptedSecretVRFKey{}).Error; err != nil { + return errors.Wrap(err, "failed to create VRF proving-key table") + } + return nil +} diff --git a/core/store/migrations/migration1584377646/migrate.go b/core/store/migrations/migration1584377646/migrate.go new file mode 100644 index 00000000000..6acba3c033f --- /dev/null +++ b/core/store/migrations/migration1584377646/migrate.go @@ -0,0 +1,21 @@ +package migration1584377646 + +import ( + "github.com/jinzhu/gorm" + "github.com/pkg/errors" +) + +type logCursor struct { + Name string `gorm:"primary_key"` + Initialized bool `gorm:"not null;default true"` + BlockIndex uint `gorm:"not null;default 0"` + LogIndex uint `gorm:"not null;default 0"` +} + +// Migrate adds the LogCursor table +func Migrate(tx *gorm.DB) error { + if err := tx.AutoMigrate(&logCursor{}).Error; err != nil { + return errors.Wrap(err, "could not add log_cursor table") + } + return nil +} diff --git a/core/store/models/common.go b/core/store/models/common.go index c39e7564b6b..db0a37d6b99 100644 --- a/core/store/models/common.go +++ b/core/store/models/common.go @@ -183,32 +183,50 @@ func (j JSON) Bytes() []byte { return []byte(j.String()) } -// Add returns a new instance of JSON with the new value added. 
-func (j JSON) Add(insertKey string, insertValue interface{}) (JSON, error) { +// AsMap returns j as a map +func (j JSON) AsMap() (map[string]interface{}, error) { output := make(map[string]interface{}) - switch v := j.Result.Value().(type) { case map[string]interface{}: for key, value := range v { - if key != insertKey { - output[key] = value - } + output[key] = value } - output[insertKey] = insertValue case nil: - output[insertKey] = insertValue default: - return JSON{}, errors.New("can only add to JSON objects or null") + return nil, errors.New("can only add to JSON objects or null") } + return output, nil +} - bytes, err := json.Marshal(output) +// mapToJSON returns m as a JSON object, or errors +func mapToJSON(m map[string]interface{}) (JSON, error) { + bytes, err := json.Marshal(m) if err != nil { return JSON{}, err } - return JSON{Result: gjson.ParseBytes(bytes)}, nil } +// Add returns a new instance of JSON with the new value added. +func (j JSON) Add(insertKey string, insertValue interface{}) (JSON, error) { + return j.MultiAdd(KV{insertKey: insertValue}) +} + +// KV represents a key/value pair to be added to a JSON object +type KV map[string]interface{} + +// MultiAdd returns a new instance of j with the new values added. +func (j JSON) MultiAdd(keyValues KV) (JSON, error) { + output, err := j.AsMap() + if err != nil { + return JSON{}, err + } + for key, value := range keyValues { + output[key] = value + } + return mapToJSON(output) +} + // Delete returns a new instance of JSON with the specified key removed. 
func (j JSON) Delete(key string) (JSON, error) { js, err := sjson.Delete(j.String(), key) diff --git a/core/store/models/eth.go b/core/store/models/eth.go index 1c97d01ab7f..00d2bb73717 100755 --- a/core/store/models/eth.go +++ b/core/store/models/eth.go @@ -1,9 +1,7 @@ package models import ( - "errors" "fmt" - "io/ioutil" "math/big" "time" @@ -12,8 +10,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/jinzhu/gorm" - "github.com/tidwall/gjson" - "go.uber.org/multierr" null "gopkg.in/guregu/null.v3" ) @@ -202,34 +198,3 @@ func (l *Head) NextInt() *big.Int { } return new(big.Int).Add(l.ToInt(), big.NewInt(1)) } - -// Key holds the private key metadata for a given address that is used to unlock -// said key when given a password. -type Key struct { - Address EIP55Address `gorm:"primary_key;type:varchar(64)"` - JSON JSON `gorm:"type:text"` -} - -// NewKeyFromFile creates an instance in memory from a key file on disk. -func NewKeyFromFile(path string) (*Key, error) { - dat, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - - js := gjson.ParseBytes(dat) - address, err := NewEIP55Address(common.HexToAddress(js.Get("address").String()).Hex()) - if err != nil { - return nil, multierr.Append(errors.New("unable to create Key model"), err) - } - - return &Key{ - Address: address, - JSON: JSON{Result: js}, - }, nil -} - -// WriteToDisk writes this key to disk at the passed path. -func (k *Key) WriteToDisk(path string) error { - return ioutil.WriteFile(path, []byte(k.JSON.String()), 0700) -} diff --git a/core/store/models/job_spec.go b/core/store/models/job_spec.go index 09fd4a9589b..c41db0961d9 100644 --- a/core/store/models/job_spec.go +++ b/core/store/models/job_spec.go @@ -184,6 +184,8 @@ const ( // InitiatorFluxMonitor for tasks in a job to be run on price deviation // or request for a new round of prices. 
InitiatorFluxMonitor = "fluxmonitor" + // InitiatorRandomnessLog for tasks from a VRF specific contract + InitiatorRandomnessLog = "randomnesslog" ) // Initiator could be thought of as a trigger, defines how a Job can be @@ -285,8 +287,12 @@ func NewInitiatorFromRequest( // IsLogInitiated Returns true if triggered by event logs. func (i Initiator) IsLogInitiated() bool { - return i.Type == InitiatorEthLog || i.Type == InitiatorRunLog || - i.Type == InitiatorServiceAgreementExecutionLog + for _, logType := range LogBasedChainlinkJobInitiators { + if i.Type == logType { + return true + } + } + return false } // Feeds holds the json of the feeds parameter in the job spec. It is an array of diff --git a/core/store/models/key.go b/core/store/models/key.go new file mode 100644 index 00000000000..5644809ce31 --- /dev/null +++ b/core/store/models/key.go @@ -0,0 +1,45 @@ +package models + +import ( + "errors" + "io/ioutil" + + "github.com/ethereum/go-ethereum/common" + "github.com/tidwall/gjson" + "go.uber.org/multierr" + + "chainlink/core/store/models/vrfkey" +) + +// Key holds the private key metadata for a given address that is used to unlock +// said key when given a password. +// +// By default, a key is assumed to represent an ethereum account. +type Key struct { + Address EIP55Address `gorm:"primary_key;type:varchar(64)"` + JSON JSON `gorm:"type:text"` +} + +type EncryptedSecretVRFKey = vrfkey.EncryptedSecretKey +type PublicKey = vrfkey.PublicKey + +// NewKeyFromFile creates an instance in memory from a key file on disk. 
+func NewKeyFromFile(path string) (*Key, error) { + dat, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + js := gjson.ParseBytes(dat) + address, err := NewEIP55Address(common.HexToAddress(js.Get("address").String()).Hex()) + if err != nil { + return nil, multierr.Append(errors.New("unable to create Key model"), err) + } + + return &Key{Address: address, JSON: JSON{Result: js}}, nil +} + +// WriteToDisk writes this key to disk at the passed path. +func (k *Key) WriteToDisk(path string) error { + return ioutil.WriteFile(path, []byte(k.JSON.String()), 0700) +} diff --git a/core/store/models/log_events.go b/core/store/models/log_events.go index 928d68e3669..6f5281457d9 100644 --- a/core/store/models/log_events.go +++ b/core/store/models/log_events.go @@ -9,10 +9,12 @@ import ( "chainlink/core/assets" "chainlink/core/eth" "chainlink/core/logger" + "chainlink/core/services/vrf" "chainlink/core/utils" ethereum "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/whisper/whisperv6" "github.com/pkg/errors" ) @@ -25,13 +27,6 @@ const ( RequestLogTopicPayment ) -// Descriptive indices of the FluxAggregator's NewRound Topic Array: -// event NewRound(uint256 indexed roundId, address indexed startedBy); -const ( - NewRoundTopicSignature = iota - NewRoundTopicRoundID -) - const ( evmWordSize = common.HashLength requesterSize = evmWordSize @@ -56,8 +51,11 @@ var ( RunLogTopic20190207withoutIndexes = utils.MustHash("OracleRequest(bytes32,address,bytes32,uint256,address,bytes4,uint256,uint256,bytes)") // ServiceAgreementExecutionLogTopic is the signature for the // Coordinator.RunRequest(...) events which Chainlink nodes watch for. 
See - // https://chainlink/blob/master/evm/contracts/Coordinator.sol#RunRequest + // ../../../evm-contracts/src/v0.5/dev/Coordinator.sol#RunRequest ServiceAgreementExecutionLogTopic = utils.MustHash("ServiceAgreementExecution(bytes32,address,uint256,uint256,uint256,bytes)") + // RandomnessRequestLogTopic is the signature for the event log + // VRFCoordinator.RandomnessRequest. + RandomnessRequestLogTopic = vrf.RandomnessRequestLogTopic() // OracleFullfillmentFunctionID0original is the original function selector for fulfilling Ethereum requests. OracleFullfillmentFunctionID0original = utils.MustHash("fulfillData(uint256,bytes32)").Hex()[:10] // OracleFulfillmentFunctionID20190123withFulfillmentParams is the function selector for fulfilling Ethereum requests, @@ -66,14 +64,11 @@ var ( // OracleFulfillmentFunctionID20190128withoutCast is the function selector for fulfilling Ethereum requests, // as updated on 2019-01-28, removing the cast to uint256 for the requestId. OracleFulfillmentFunctionID20190128withoutCast = utils.MustHash("fulfillOracleRequest(bytes32,uint256,address,bytes4,uint256,bytes32)").Hex()[:10] - // AggregatorNewRoundLogTopic20191220 is the NewRound filter topic for - // the FluxAggregator as of Dec. 20th 2019. Eagerly fails if not found. 
- AggregatorNewRoundLogTopic20191220 = eth.MustGetV6ContractEventID("FluxAggregator", "NewRound") ) type logRequestParser interface { parseJSON(eth.Log) (JSON, error) - parseRequestID(eth.Log) string + parseRequestID(eth.Log) (string, error) } // topicFactoryMap maps the log topic to a factory method that returns an @@ -84,57 +79,76 @@ var topicFactoryMap = map[common.Hash]logRequestParser{ RunLogTopic0original: parseRunLog0original{}, RunLogTopic20190123withFullfillmentParams: parseRunLog20190123withFulfillmentParams{}, RunLogTopic20190207withoutIndexes: parseRunLog20190207withoutIndexes{}, + RandomnessRequestLogTopic: parseRandomnessRequest{}, } -// TopicFiltersForRunLog generates the two variations of RunLog IDs that could -// possibly be entered on a RunLog or a ServiceAgreementExecutionLog. There is the ID, -// hex encoded and the ID zero padded. -func TopicFiltersForRunLog(logTopics []common.Hash, jobID *ID) [][]common.Hash { - return [][]common.Hash{logTopics, {IDToTopic(jobID), IDToHexTopic(jobID)}} +// LogBasedChainlinkJobInitiators are initiators which kick off a user-specified +// chainlink job when an appropriate ethereum log is received. +// (InitiatorFluxMonitor kicks off work, but not a user-specified job.) +var LogBasedChainlinkJobInitiators = []string{InitiatorRunLog, InitiatorEthLog, + InitiatorServiceAgreementExecutionLog, InitiatorRandomnessLog} + +// topicsForInitiatorsWhichRequireJobSpecTopic are the log topics which kick off +// a user job with the given type of initiator. If chainlink has any jobs with +// these initiators, it subscribes on startup to logs which match both these +// topics and some representation of the job spec ID. 
+var TopicsForInitiatorsWhichRequireJobSpecIDTopic = map[string][]common.Hash{ + InitiatorRunLog: {RunLogTopic20190207withoutIndexes, + RunLogTopic20190123withFullfillmentParams, RunLogTopic0original}, + InitiatorServiceAgreementExecutionLog: {ServiceAgreementExecutionLogTopic}, + InitiatorRandomnessLog: {RandomnessRequestLogTopic}, } -// FilterQueryFactory returns the ethereum FilterQuery for this initiator. -func FilterQueryFactory(i Initiator, from *big.Int) (ethereum.FilterQuery, error) { - q := ethereum.FilterQuery{ - FromBlock: from, - Addresses: utils.WithoutZeroAddresses([]common.Address{i.Address}), +// initiationRequiresJobSpecId is true if jobs initiated by the given +// initiatiatorType require that their initiating logs match their JobSpecIDs. +func initiationRequiresJobSpecID(initiatorType string) bool { + _, ok := TopicsForInitiatorsWhichRequireJobSpecIDTopic[initiatorType] + return ok +} + +// jobSpecIDTopics lists the ways jsID could be represented as a log topic. This +// allows log subscriptions to respond to all possible representations. +func JobSpecIDTopics(jsID *ID) []common.Hash { + return []common.Hash{ + // The job to be initiated can be encoded in a log topic in two ways: + IDToTopic(jsID), // 16 full-range bytes, left padded to 32 bytes, + IDToHexTopic(jsID), // 32 ASCII hex chars representing the 16 bytes } +} - switch i.Type { - case InitiatorEthLog: +// FilterQueryFactory returns the ethereum FilterQuery for this initiator. 
+func FilterQueryFactory(i Initiator, from *big.Int) (q ethereum.FilterQuery, err error) { + q.FromBlock = from + q.Addresses = utils.WithoutZeroAddresses([]common.Address{i.Address}) + + switch { + case i.Type == InitiatorEthLog: if from == nil { q.FromBlock = i.InitiatorParams.FromBlock.ToInt() - } else if from != nil && i.InitiatorParams.FromBlock != nil { + } else if i.InitiatorParams.FromBlock != nil { q.FromBlock = utils.MaxBigs(from, i.InitiatorParams.FromBlock.ToInt()) } q.ToBlock = i.InitiatorParams.ToBlock.ToInt() if q.FromBlock != nil && q.ToBlock != nil && q.FromBlock.Cmp(q.ToBlock) >= 0 { - return q, fmt.Errorf("cannot generate a FilterQuery with fromBlock >= toBlock") + return ethereum.FilterQuery{}, fmt.Errorf( + "cannot generate a FilterQuery with fromBlock >= toBlock") } + // Copying the topics across (instead of coercing i.Topics to a + // [][]common.Hash) clarifies their type for reflect.DeepEqual q.Topics = make([][]common.Hash, len(i.Topics)) - copy(q.Topics, i.Topics) // Simply coercing i.Topics to the underlying type confuses reflect.DeepEqual - - return q, nil - - case InitiatorRunLog: - topics := []common.Hash{RunLogTopic20190207withoutIndexes, RunLogTopic20190123withFullfillmentParams, RunLogTopic0original} - q.Topics = TopicFiltersForRunLog(topics, i.JobSpecID) - return q, nil - - case InitiatorServiceAgreementExecutionLog: - topics := []common.Hash{ServiceAgreementExecutionLogTopic} - q.Topics = TopicFiltersForRunLog(topics, i.JobSpecID) - return q, nil - - case InitiatorFluxMonitor: - q.Topics = [][]common.Hash{{AggregatorNewRoundLogTopic20191220}} - return q, nil - + copy(q.Topics, i.Topics) + case initiationRequiresJobSpecID(i.Type): + q.Topics = [][]common.Hash{ + TopicsForInitiatorsWhichRequireJobSpecIDTopic[i.Type], + JobSpecIDTopics(i.JobSpecID), + } default: - return ethereum.FilterQuery{}, fmt.Errorf("Cannot generate a FilterQuery for initiator of type %T", i) + return ethereum.FilterQuery{}, + fmt.Errorf("cannot generate a 
FilterQuery for initiator of type %T", i) } + return q, nil } // LogRequest is the interface to allow polymorphic functionality of different @@ -155,22 +169,24 @@ type LogRequest interface { } // InitiatorLogEvent encapsulates all information as a result of a received log from an -// InitiatorSubscription. +// InitiatorSubscription, and acts as a base struct for other log-initiated events type InitiatorLogEvent struct { Log eth.Log Initiator Initiator } +var _ LogRequest = InitiatorLogEvent{} // InitiatorLogEvent implements LogRequest + // LogRequest is a factory method that coerces this log event to the correct // type based on Initiator.Type, exposed by the LogRequest interface. func (le InitiatorLogEvent) LogRequest() LogRequest { switch le.Initiator.Type { case InitiatorEthLog: return EthLogEvent{InitiatorLogEvent: le} - case InitiatorServiceAgreementExecutionLog: - fallthrough - case InitiatorRunLog: + case InitiatorRunLog, InitiatorServiceAgreementExecutionLog: return RunLogEvent{le} + case InitiatorRandomnessLog: + return RandomnessLogEvent{le} } logger.Warnw("LogRequest: Unable to discern initiator type for log request", le.ForLogger()...) return EthLogEvent{InitiatorLogEvent: le} @@ -217,26 +233,18 @@ func (le InitiatorLogEvent) ToDebug() { // BlockNumber returns the block number for the given InitiatorSubscriptionLogEvent. func (le InitiatorLogEvent) BlockNumber() *big.Int { - num := new(big.Int) - num.SetUint64(le.Log.BlockNumber) - return num + return new(big.Int).SetUint64(le.Log.BlockNumber) } // RunRequest returns a run request instance with the transaction hash, // present on all log initiated runs. 
func (le InitiatorLogEvent) RunRequest() (RunRequest, error) { - txHash := common.BytesToHash(le.Log.TxHash.Bytes()) - blockHash := common.BytesToHash(le.Log.BlockHash.Bytes()) - requestParams, err := le.JSON() if err != nil { return RunRequest{}, err } - return RunRequest{ - BlockHash: &blockHash, - TxHash: &txHash, - RequestParams: requestParams, - }, nil + return RunRequest{BlockHash: &le.Log.BlockHash, TxHash: &le.Log.TxHash, + RequestParams: requestParams}, nil } // Validate returns true, no validation on this log event type. @@ -290,7 +298,7 @@ func (le RunLogEvent) Validate() bool { func contractPayment(log eth.Log) (*assets.Link, error) { version, err := log.GetTopic(0) if err != nil { - return nil, fmt.Errorf("Missing RunLogEvent Topic#0: %v", err) + return nil, fmt.Errorf("missing RunLogEvent Topic#0: %v", err) } var encodedAmount common.Hash @@ -298,7 +306,11 @@ func contractPayment(log eth.Log) (*assets.Link, error) { encodedAmount = log.Topics[RequestLogTopicPayment] } else { paymentStart := requesterSize + idSize - encodedAmount = common.BytesToHash(log.Data[paymentStart : paymentStart+paymentSize]) + paymentData, err := log.Data.SafeByteSlice(paymentStart, paymentStart+paymentSize) + if err != nil { + return nil, err + } + encodedAmount = common.BytesToHash(paymentData) } payment, ok := new(assets.Link).SetString(encodedAmount.Hex(), 0) @@ -320,25 +332,33 @@ func (le RunLogEvent) ValidateRequester() error { if len(le.Initiator.Requesters) == 0 { return nil } + requester, err := le.Requester() + if err != nil { + return err + } for _, r := range le.Initiator.Requesters { - if le.Requester() == r { + if requester == r { return nil } } - return fmt.Errorf("Run Log didn't have have a valid requester: %v", le.Requester().Hex()) + return fmt.Errorf("run Log didn't have have a valid requester: %v", requester.Hex()) } // Requester pulls the requesting address out of the LogEvent's topics. 
-func (le RunLogEvent) Requester() common.Address { +func (le RunLogEvent) Requester() (common.Address, error) { version, err := le.Log.GetTopic(0) if err != nil { - return common.Address{} + return common.Address{}, nil } if oldRequestVersion(version) { - return common.BytesToAddress(le.Log.Topics[RequestLogTopicRequester].Bytes()) + return common.BytesToAddress(le.Log.Topics[RequestLogTopicRequester].Bytes()), nil + } + requesterData, err := le.Log.Data.SafeByteSlice(0, requesterSize) + if err != nil { + return common.Address{}, err } - return common.BytesToAddress(le.Log.Data[:requesterSize]) + return common.BytesToAddress(requesterData), nil } // RunRequest returns an RunRequest instance with all parameters @@ -360,15 +380,19 @@ func (le RunLogEvent) RunRequest() (RunRequest, error) { return RunRequest{}, err } - txHash := common.BytesToHash(le.Log.TxHash.Bytes()) - blockHash := common.BytesToHash(le.Log.BlockHash.Bytes()) - str := parser.parseRequestID(le.Log) - requester := le.Requester() + requestID, err := parser.parseRequestID(le.Log) + if err != nil { + return RunRequest{}, err + } + requester, err := le.Requester() + if err != nil { + return RunRequest{}, err + } return RunRequest{ - RequestID: &str, - TxHash: &txHash, - BlockHash: &blockHash, + RequestID: &requestID, + TxHash: &le.Log.TxHash, + BlockHash: &le.Log.BlockHash, Requester: &requester, Payment: payment, RequestParams: requestParams, @@ -387,7 +411,7 @@ func parserFromLog(log eth.Log) (logRequestParser, error) { } parser, ok := topicFactoryMap[topic] if !ok { - return nil, fmt.Errorf("No parser for the RunLogEvent topic %s", topic.String()) + return nil, fmt.Errorf("no parser for the RunLogEvent topic %s", topic.String()) } return parser, nil } @@ -413,25 +437,31 @@ func (p parseRunLog0original) parseJSON(log eth.Log) (JSON, error) { return JSON{}, errors.New("malformed data") } - js, err := ParseCBOR(data[start:]) + cborData, err := data.SafeByteSlice(start, len(data)) if err != nil { - 
return js, err + return JSON{}, err } - js, err = js.Add("address", log.Address.String()) + js, err := ParseCBOR(cborData) if err != nil { return js, err } - - js, err = js.Add("dataPrefix", bytesToHex(data[:idSize])) + idData, err := data.SafeByteSlice(0, idSize) if err != nil { - return js, err + return JSON{}, err } - - return js.Add("functionSelector", OracleFullfillmentFunctionID0original) + return js.MultiAdd(KV{ + "address": log.Address.String(), + "dataPrefix": bytesToHex(idData), + "functionSelector": OracleFullfillmentFunctionID0original, + }) } -func (parseRunLog0original) parseRequestID(log eth.Log) string { - return common.BytesToHash(log.Data[:idSize]).Hex() +func (parseRunLog0original) parseRequestID(log eth.Log) (string, error) { + idData, err := log.Data.SafeByteSlice(0, idSize) + if err != nil { + return "", err + } + return hexutil.Encode(idData), nil } // parseRunLog20190123withFulfillmentParams parses the OracleRequest log format @@ -447,31 +477,41 @@ func (parseRunLog20190123withFulfillmentParams) parseJSON(log eth.Log) (JSON, er if len(data) < cborStart { return JSON{}, errors.New("malformed data") } - js, err := ParseCBOR(data[cborStart:]) + cborData, err := data.SafeByteSlice(cborStart, len(data)) if err != nil { - return js, err + return JSON{}, err } - - js, err = js.Add("address", log.Address.String()) + js, err := ParseCBOR(cborData) if err != nil { return js, err } - callbackAndExpStart := idSize + versionSize callbackAndExpEnd := callbackAndExpStart + callbackAddrSize + callbackFuncSize + expirationSize - dataPrefix := bytesToHex(append(append(data[:idSize], - log.Topics[RequestLogTopicPayment].Bytes()...), - data[callbackAndExpStart:callbackAndExpEnd]...)) - js, err = js.Add("dataPrefix", dataPrefix) + + idData, err := data.SafeByteSlice(0, idSize) if err != nil { - return js, err + return JSON{}, err } - - return js.Add("functionSelector", OracleFulfillmentFunctionID20190123withFulfillmentParams) + callbackData, err := 
data.SafeByteSlice(callbackAndExpStart, callbackAndExpEnd) + if err != nil { + return JSON{}, err + } + dataPrefix := bytesToHex(append(append(idData, + log.Topics[RequestLogTopicPayment].Bytes()...), + callbackData...)) + return js.MultiAdd(KV{ + "address": log.Address.String(), + "dataPrefix": dataPrefix, + "functionSelector": OracleFulfillmentFunctionID20190123withFulfillmentParams, + }) } -func (parseRunLog20190123withFulfillmentParams) parseRequestID(log eth.Log) string { - return common.BytesToHash(log.Data[:idSize]).Hex() +func (parseRunLog20190123withFulfillmentParams) parseRequestID(log eth.Log) (string, error) { + idData, err := log.Data.SafeByteSlice(0, idSize) + if err != nil { + return "", err + } + return common.BytesToHash(idData).Hex(), nil } // parseRunLog20190207withoutIndexes parses the OracleRequest log format after @@ -493,43 +533,45 @@ func (parseRunLog20190207withoutIndexes) parseJSON(log eth.Log) (JSON, error) { return JSON{}, errors.New("malformed data") } - dataLength := whisperv6.BytesToUintBigEndian(data[dataLengthStart : dataLengthStart+32]) + dataLengthBytes, err := data.SafeByteSlice(dataLengthStart, dataLengthStart+32) + if err != nil { + return JSON{}, err + } + dataLength := whisperv6.BytesToUintBigEndian(dataLengthBytes) if len(log.Data) < cborStart+int(dataLength) { return JSON{}, errors.New("cbor too short") } - js, err := ParseCBOR(data[cborStart : cborStart+int(dataLength)]) + cborData, err := data.SafeByteSlice(cborStart, cborStart+int(dataLength)) if err != nil { - return js, fmt.Errorf("Error parsing CBOR: %v", err) + return JSON{}, err } - js, err = js.Add("address", log.Address.String()) + js, err := ParseCBOR(cborData) if err != nil { - return js, err + return js, fmt.Errorf("Error parsing CBOR: %v", err) } - js, err = js.Add("dataPrefix", bytesToHex(data[idStart:expirationEnd])) + dataPrefixBytes, err := data.SafeByteSlice(idStart, expirationEnd) if err != nil { - return js, err + return JSON{}, err } - return 
js.Add("functionSelector", OracleFulfillmentFunctionID20190128withoutCast) + return js.MultiAdd(KV{ + "address": log.Address.String(), + "dataPrefix": bytesToHex(dataPrefixBytes), + "functionSelector": OracleFulfillmentFunctionID20190128withoutCast, + }) } -func (parseRunLog20190207withoutIndexes) parseRequestID(log eth.Log) string { +func (parseRunLog20190207withoutIndexes) parseRequestID(log eth.Log) (string, error) { start := requesterSize - return common.BytesToHash(log.Data[start : start+idSize]).Hex() -} - -// ParseNewRoundLog pulls the round from the aggregator log event. -func ParseNewRoundLog(log eth.Log) (*big.Int, error) { - encodedRound := log.Topics[NewRoundTopicRoundID] - round, ok := new(big.Int).SetString(encodedRound.Hex(), 0) - if !ok { - return nil, fmt.Errorf("unable to parse new round log from %s", encodedRound.Hex()) + requestIDBytes, err := log.Data.SafeByteSlice(start, start+idSize) + if err != nil { + return "", err } - return round, nil + return common.BytesToHash(requestIDBytes).Hex(), nil } func bytesToHex(data []byte) string { @@ -546,3 +588,10 @@ func IDToTopic(id *ID) common.Hash { func IDToHexTopic(id *ID) common.Hash { return common.BytesToHash([]byte(id.String())) } + +type LogCursor struct { + Name string `gorm:"primary_key"` + Initialized bool `gorm:"not null;default true"` + BlockIndex uint64 `gorm:"not null;default 0"` + LogIndex uint64 `gorm:"not null;default 0"` +} diff --git a/core/store/models/log_events_test.go b/core/store/models/log_events_test.go index 7a4d2da1eec..dc06b7a70bf 100644 --- a/core/store/models/log_events_test.go +++ b/core/store/models/log_events_test.go @@ -84,30 +84,6 @@ func TestParseRunLog(t *testing.T) { } } -func TestParseNewRoundLog(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - log eth.Log - want *big.Int - }{ - { - name: "round 1", - log: cltest.LogFromFixture(t, "testdata/newRoundLog.json"), - want: big.NewInt(1), - }, - } - - for _, test := range tests { - 
t.Run(test.name, func(t *testing.T) { - output, err := models.ParseNewRoundLog(test.log) - assert.NoError(t, err) - assert.Equal(t, test.want, output) - }) - } -} - func TestEthLogEvent_JSON(t *testing.T) { t.Parallel() @@ -393,30 +369,6 @@ func TestFilterQueryFactory_InitiatorRunLog(t *testing.T) { assert.Equal(t, want, filter) } -func TestFilterQueryFactory_InitiatorFluxMonitor(t *testing.T) { - t.Parallel() - - id, err := models.NewIDFromString("4a1eb0e8df314cb894024a38991cff0f") - require.NoError(t, err) - i := models.Initiator{ - Type: models.InitiatorFluxMonitor, - JobSpecID: id, - } - fromBlock := big.NewInt(42) - filter, err := models.FilterQueryFactory(i, fromBlock) - assert.NoError(t, err) - - want := ethereum.FilterQuery{ - FromBlock: fromBlock.Add(fromBlock, big.NewInt(1)), - Topics: [][]common.Hash{ - { - models.AggregatorNewRoundLogTopic20191220, - }, - }, - } - assert.Equal(t, want, filter) -} - func TestRunLogEvent_ContractPayment(t *testing.T) { t.Parallel() @@ -491,7 +443,8 @@ func TestRunLogEvent_Requester(t *testing.T) { t.Run(test.name, func(t *testing.T) { rle := models.RunLogEvent{models.InitiatorLogEvent{Log: test.log}} - received := rle.Requester() + received, err := rle.Requester() + require.NoError(t, err) assert.Equal(t, test.want, received) }) diff --git a/core/store/models/parse_randomness_request.go b/core/store/models/parse_randomness_request.go new file mode 100644 index 00000000000..ac992112bc1 --- /dev/null +++ b/core/store/models/parse_randomness_request.go @@ -0,0 +1,49 @@ +package models + +import ( + "github.com/pkg/errors" + + "chainlink/core/eth" + "chainlink/core/services/vrf" + "chainlink/core/utils" +) + +// parseRandomnessRequest parses the RandomnessRequest log format. 
+type parseRandomnessRequest struct{} + +var _ logRequestParser = parseRandomnessRequest{} // Implements logRequestParser + +// parseJSON returns the inputs to be passed as a JSON object to Random adapter +func (parseRandomnessRequest) parseJSON(log eth.Log) (js JSON, err error) { + parsedLog, err := vrf.ParseRandomnessRequestLog(log) + if err != nil { + return JSON{}, errors.Wrapf(err, + "could not parse log data %#+v as RandomnessRequest log", log) + } + fullSeedString, err := utils.Uint256ToHex(parsedLog.Seed) + if err != nil { + return JSON{}, errors.Wrap(err, "vrf seed out of bounds") + } + return js.MultiAdd(KV{ + // Address of log emitter + "address": log.Address.String(), + // Signature of callback function on consuming contract + "functionSelector": vrf.FulfillSelector(), + // Hash of the public key for the VRF to be used + "keyHash": parsedLog.KeyHash.Hex(), + // Raw input seed for the VRF (includes requester, nonce, etc.) + "seed": fullSeedString, + // The chainlink job corresponding to this VRF + "jobID": parsedLog.JobID.Hex(), + // Address of consuming contract which initially made the request + "sender": parsedLog.Sender.Hex(), + }) +} + +func (parseRandomnessRequest) parseRequestID(log eth.Log) (string, error) { + parsedLog, err := vrf.ParseRandomnessRequestLog(log) + if err != nil { + return "", errors.Wrapf(err, "while extracting randomness requestID from %#+v", log) + } + return parsedLog.RequestID().Hex(), nil +} diff --git a/core/store/models/randomness_log_event.go b/core/store/models/randomness_log_event.go new file mode 100644 index 00000000000..bf446352b10 --- /dev/null +++ b/core/store/models/randomness_log_event.go @@ -0,0 +1,88 @@ +package models + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + + "chainlink/core/logger" + "chainlink/core/services/vrf" +) + +// RandomnessLogEvent provides functionality specific to a log event emitted +// for a run log initiator. 
+type RandomnessLogEvent struct{ InitiatorLogEvent } + +var _ LogRequest = RandomnessLogEvent{} // implements LogRequest interface + +// Validate() is true if the contained log is parseable as a RandomnessRequest, +// and it's from the address specified by the job's initiator. The log filter +// and the go-ethereum parser should prevent any invalid logs from reacching +// this point, so Validate emits an error log on failure. +func (le RandomnessLogEvent) Validate() bool { + _, err := vrf.ParseRandomnessRequestLog(le.Log) + switch { + case err != nil: + logger.Errorf("error while parsing RandomnessRequest log: %s on log %#+v", + err, le.Log) + return false + // Following should be guaranteed by log query filterer, but doesn't hurt to + // check again. + case le.Log.Address != le.Initiator.Address: + logger.Errorf( + "RandomnessRequest log received from address %s, but expect logs from %s", + le.Log.Address.String(), le.Initiator.Address.String()) + return false + } + return true +} + +// ValidateRequester never errors, because the requester is not important to the +// node's functionality. A requesting contract cannot request the VRF output on +// behalf of another contract, because the initial input seed is hashed with the +// requesting contract's address (plus a nonce) to get the actual VRF input. +func (le RandomnessLogEvent) ValidateRequester() error { + return nil +} + +// Requester pulls the requesting address out of the LogEvent's topics. +func (le RandomnessLogEvent) Requester() common.Address { + log, err := vrf.ParseRandomnessRequestLog(le.Log) + if err != nil { + logger.Errorf("error while parsing RandomnessRequest log: %s on log %#+v", + err, le.Log) + return common.Address{} + } + return log.Sender +} + +// RunRequest returns a RunRequest instance with all parameters +// from a run log topic, like RequestID. 
+func (le RandomnessLogEvent) RunRequest() (RunRequest, error) { + parsedLog, err := vrf.ParseRandomnessRequestLog(le.Log) + if err != nil { + return RunRequest{}, errors.Wrapf(err, + "while parsing log for VRF run request") + } + requestParams, err := le.JSON() + if err != nil { + return RunRequest{}, errors.Wrapf(err, + "while parsing request params for VRF run request") + } + + str := parsedLog.RequestID().Hex() + requester := le.Requester() + return RunRequest{ + RequestID: &str, + TxHash: &le.Log.TxHash, + BlockHash: &le.Log.BlockHash, + Requester: &requester, + Payment: parsedLog.Fee, + RequestParams: requestParams, + }, nil +} + +// JSON returns the JSON from this RandomnessRequest log, as it will be passed +// to the Randomn adapter +func (le RandomnessLogEvent) JSON() (js JSON, err error) { + return parseRandomnessRequest{}.parseJSON(le.Log) +} diff --git a/core/store/models/topics_test.go b/core/store/models/topics_test.go index 9e71a9e2588..371f3ff2b80 100644 --- a/core/store/models/topics_test.go +++ b/core/store/models/topics_test.go @@ -10,18 +10,14 @@ import ( "github.com/stretchr/testify/require" ) -func TestTopicFiltersForRunLog(t *testing.T) { +func TestJobSpecIDTopics(t *testing.T) { t.Parallel() jobID, err := models.NewIDFromString("4a1eb0e8df314cb894024a38991cff0f") require.NoError(t, err) - topics := models.TopicFiltersForRunLog([]common.Hash{models.RunLogTopic0original}, jobID) + topics := models.JobSpecIDTopics(jobID) assert.Equal(t, 2, len(topics)) - assert.Equal( - t, - []common.Hash{models.RunLogTopic0original}, - topics[models.RequestLogTopicSignature]) assert.Equal( t, @@ -29,7 +25,7 @@ func TestTopicFiltersForRunLog(t *testing.T) { common.HexToHash("0x4a1eb0e8df314cb894024a38991cff0f00000000000000000000000000000000"), common.HexToHash("0x3461316562306538646633313463623839343032346133383939316366663066"), }, - topics[1]) + topics) } func TestRunLogTopic0original(t *testing.T) { diff --git a/core/store/models/vrfkey/doc.go 
b/core/store/models/vrfkey/doc.go new file mode 100644 index 00000000000..754dc7d7934 --- /dev/null +++ b/core/store/models/vrfkey/doc.go @@ -0,0 +1,34 @@ +// Package vrfkey tracks the secret keys associated with VRF proofs. It +// is a separate package from ../store to increase encapsulation of the keys, +// reduce the risk of them leaking, and reduce confusion between VRF keys and +// ethereum keys. +// +// The three types, PrivateKey (private_key.go), PublicKey (public_key.go) and +// EncryptedSecretKey (serialzation.go) are all aspects of the one keypair. +// +// The details of the secret key in a keypair should remain private to this +// package. If you need to access the secret key, you should add a method to +// PrivateKey which does the crypto requiring it, without leaking the secret. +// See PrivateKey#MarshaledProof in private_key.go, for an example. +// +// PrivateKey#PublicKey represents the associated public key, and, in the +// context of a VRF, represents a public commitment to a particular "verifiable +// random function." +// +// EncryptedSecretKey is used to store a public/private keypair in a database, +// in encrypted form. +// +// Usage +// +// Call vrfkey.CreateKey() to generate a PrivateKey with cryptographically +// secure randomness. +// +// Call PrivateKey#Encrypt(passphrase) to create a representation of the key +// which is encrypted for storage. +// +// Call PrivateKey#MarshaledProof(seed) to generate the VRF proof for the given +// seed and private key. The proof is marshaled in the format expected by the +// on-chain verification mechanism in VRF.sol. If you want to know the VRF +// output independently of the on-chain verification mechanism, you can get it +// from vrf.UnmarshalSolidityProof(p).Output. 
+package vrfkey diff --git a/core/store/models/vrfkey/private_key.go b/core/store/models/vrfkey/private_key.go new file mode 100644 index 00000000000..78a042f4e53 --- /dev/null +++ b/core/store/models/vrfkey/private_key.go @@ -0,0 +1,118 @@ +package vrfkey + +import ( + "crypto/ecdsa" + + "chainlink/core/services/signatures/secp256k1" + "chainlink/core/services/vrf" + + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "go.dedis.ch/kyber/v3" +) + +// PrivateKey represents the secret used to construct a VRF proof. +// +// Don't serialize directly, use Encrypt method, with user-supplied passphrase. +// The unencrypted PrivateKey struct should only live in-memory. +// +// Only use it if you absolutely need it (i.e., for a novel crypto protocol.) +// Implement whatever cryptography you need on this struct, so your callers +// don't need to know the secret key explicitly. (See, e.g., MarshaledProof.) +type PrivateKey struct { + k kyber.Scalar + PublicKey PublicKey +} + +var suite = secp256k1.NewBlakeKeccackSecp256k1() + +// newPrivateKey(k) is k wrapped in a PrivateKey along with corresponding +// PublicKey, or an error. Internal use only. Use cltest.StoredVRFKey for stable +// testing key, or CreateKey if you don't need determinism. 
+func newPrivateKey(rawKey *big.Int) (*PrivateKey, error) { + if rawKey.Cmp(secp256k1.GroupOrder) >= 0 || rawKey.Cmp(big.NewInt(0)) < 0 { + return nil, fmt.Errorf("secret key must be in {0, ..., #secp256k1 - 1}") + } + sk := &PrivateKey{} + sk.k = secp256k1.IntToScalar(rawKey) + pk, err := suite.Point().Mul(sk.k, nil).MarshalBinary() + if err != nil { + panic(errors.Wrapf(err, "could not marshal public key")) + } + if len(pk) != CompressedPublicKeyLength { + panic(fmt.Errorf("public key %x has wrong length", pk)) + } + if l := copy(sk.PublicKey[:], pk[:]); l != CompressedPublicKeyLength { + panic(fmt.Errorf("failed to copy correct length in serialized public key")) + } + return sk, nil +} + +// k.MarshaledProof(seed) is a VRF proof of randomness using k and seed, in the +// form required by VRF.sol's randomValueFromVRFProof +func (k *PrivateKey) MarshaledProof(seed *big.Int) (vrf.MarshaledProof, error) { + proof, err := vrf.GenerateProof(secp256k1.ScalarToHash(k.k), common.BigToHash(seed)) + if err != nil { + return vrf.MarshaledProof{}, err + } + rv, err := proof.MarshalForSolidityVerifier() + if err != nil { + return vrf.MarshaledProof{}, err + } + return rv, nil +} + +// gethKey returns the geth keystore representation of k. Do not abuse this to +// convert a VRF key to an ethereum key! +func (k *PrivateKey) gethKey() *keystore.Key { + return &keystore.Key{ + Address: k.PublicKey.Address(), + PrivateKey: &ecdsa.PrivateKey{D: secp256k1.ToInt(k.k)}, + } +} + +// fromGethKey returns the vrfkey representation of gethKey. Do not abuse this +// to convert an ethereum key into a VRF key! 
+func fromGethKey(gethKey *keystore.Key) *PrivateKey { + secretKey := secp256k1.IntToScalar(gethKey.PrivateKey.D) + rawPublicKey, err := secp256k1.ScalarToPublicPoint(secretKey).MarshalBinary() + if err != nil { + panic(err) // Only way this can happen is out-of-memory failure + } + var publicKey PublicKey + copy(publicKey[:], rawPublicKey) + return &PrivateKey{secretKey, publicKey} +} + +// CreateKey makes a new VRF proving key from cryptographically secure entropy +func CreateKey() (key *PrivateKey) { + sk := suite.Scalar().Pick(suite.RandomStream()) + k, err := newPrivateKey(secp256k1.ToInt(sk)) + if err != nil { + panic(errors.Wrapf(err, "should not be possible to error, here")) + } + return k +} + +// NewPrivateKeyXXXTestingOnly is for testing purposes only! +func NewPrivateKeyXXXTestingOnly(k *big.Int) *PrivateKey { + rv, err := newPrivateKey(k) + if err != nil { + panic(err) + } + return rv +} + +// String reduces the risk of accidentally logging the private key +func (k *PrivateKey) String() string { + return fmt.Sprintf("PrivateKey{k: , PublicKey: 0x%x}", k.PublicKey) +} + +// GoStringer reduces the risk of accidentally logging the private key +func (k *PrivateKey) GoStringer() string { + return k.String() +} diff --git a/core/store/models/vrfkey/private_key_test.go b/core/store/models/vrfkey/private_key_test.go new file mode 100644 index 00000000000..d393e7f72c7 --- /dev/null +++ b/core/store/models/vrfkey/private_key_test.go @@ -0,0 +1,62 @@ +package vrfkey + +import ( + "encoding/hex" + "fmt" + "math/big" + "regexp" + "testing" + + "chainlink/core/services/vrf/generated/solidity_verifier_wrapper" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var sk = 
0xdeadbeefdeadbee +var k = mustNewPrivateKey(big.NewInt(int64(sk))) +var pkr = regexp.MustCompile(fmt.Sprintf( + `PrivateKey\{k: , PublicKey: 0x[[:xdigit:]]{%d}\}`, + 2*CompressedPublicKeyLength)) + +func TestPrintingDoesNotLeakKey(t *testing.T) { + v := fmt.Sprintf("%v", k) + assert.Equal(t, v+"\n", fmt.Sprintln(k)) + assert.Regexp(t, pkr, v) + assert.NotContains(t, v, fmt.Sprintf("%x", sk)) + // Other verbs just give the corresponding encoding of .String() + assert.Equal(t, fmt.Sprintf("%x", k), hex.EncodeToString([]byte(v))) +} + +func TestMarshaledProof(t *testing.T) { + proof, err := k.MarshaledProof(big.NewInt(1)) + require.NoError(t, err) + // NB: For changes to the VRF solidity code to be reflected here, "go generate" + // must be run in core/services/vrf. + ethereumKey, _ := crypto.GenerateKey() + auth := bind.NewKeyedTransactor(ethereumKey) + genesisData := core.GenesisAlloc{auth.From: {Balance: big.NewInt(1000000000)}} + gasLimit := eth.DefaultConfig.Miner.GasCeil + backend := backends.NewSimulatedBackend(genesisData, gasLimit) + _, _, verifier, err := solidity_verifier_wrapper.DeployVRFTestHelper(auth, backend) + if err != nil { + panic(errors.Wrapf(err, "while initializing EVM contract wrapper")) + } + backend.Commit() + _, err = verifier.RandomValueFromVRFProof(nil, proof[:]) + require.NoError(t, err) +} + +func mustNewPrivateKey(rawKey *big.Int) *PrivateKey { + k, err := newPrivateKey(rawKey) + if err != nil { + panic(err) + } + return k +} diff --git a/core/store/models/vrfkey/public_key.go b/core/store/models/vrfkey/public_key.go new file mode 100644 index 00000000000..3ef2d29b6a9 --- /dev/null +++ b/core/store/models/vrfkey/public_key.go @@ -0,0 +1,87 @@ +package vrfkey + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "go.dedis.ch/kyber/v3" + + "chainlink/core/services/signatures/secp256k1" + "chainlink/core/utils" +) + +// PublicKey is a secp256k1 point in compressed format 
+type PublicKey [CompressedPublicKeyLength]byte + +// CompressedPublicKeyLength is the length of a secp256k1 public key's x +// ordinate as a uint256, concatenated with 00 if y is even, 01 if odd. +const CompressedPublicKeyLength = 33 + +func init() { + if CompressedPublicKeyLength != (&secp256k1.Secp256k1{}).Point().MarshalSize() { + panic("disparity in expected public key lengths") + } +} + +// Set sets k to the public key represented by l +func (k *PublicKey) Set(l PublicKey) { + if copy(k[:], l[:]) != CompressedPublicKeyLength { + panic(fmt.Errorf("failed to copy entire public key %x to %x", l, k)) + } +} + +// Point returns the secp256k1 point corresponding to k +func (k *PublicKey) Point() (kyber.Point, error) { + p := (&secp256k1.Secp256k1{}).Point() + return p, p.UnmarshalBinary(k[:]) +} + +// NewPublicKey returns the PublicKey corresponding to rawKey +func NewPublicKey(rawKey [CompressedPublicKeyLength]byte) *PublicKey { + rv := PublicKey(rawKey) + return &rv +} + +// NewPublicKeyFromHex returns the PublicKey encoded by 0x-hex string hex, or errors +func NewPublicKeyFromHex(hex string) (*PublicKey, error) { + rawKey, err := hexutil.Decode(hex) + if err != nil { + return nil, err + } + if l := len(rawKey); l != CompressedPublicKeyLength { + return nil, fmt.Errorf("wrong length for public key: %s of length %d", rawKey, l) + } + k := &PublicKey{} + if c := copy(k[:], rawKey[:]); c != CompressedPublicKeyLength { + panic(fmt.Errorf("failed to copy entire key to return value")) + } + return k, err +} + +// SetFromHex sets k to the public key represented by hex, which must represent +// the uncompressed binary format +func (k *PublicKey) SetFromHex(hex string) error { + nk, err := NewPublicKeyFromHex(hex) + if err != nil { + return err + } + k.Set(*nk) + return nil +} + +// String returns k's binary uncompressed representation, as 0x-hex +func (k *PublicKey) String() string { + return hexutil.Encode(k[:]) +} + +// Hash returns the solidity Keccak256 hash of k. 
Corresponds to hashOfKey on +// VRFCoordinator. +func (k *PublicKey) Hash() common.Hash { + return utils.MustHash(string(k[:])) +} + +// Address returns the Ethereum address of k +func (k *PublicKey) Address() common.Address { + return common.BytesToAddress(k.Hash().Bytes()[12:]) +} diff --git a/core/store/models/vrfkey/public_key_test.go b/core/store/models/vrfkey/public_key_test.go new file mode 100644 index 00000000000..1dd14819821 --- /dev/null +++ b/core/store/models/vrfkey/public_key_test.go @@ -0,0 +1,34 @@ +package vrfkey + +import ( + "testing" + + "chainlink/core/services/signatures/cryptotest" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValueScanIdentityPointSet(t *testing.T) { + randomStream := cryptotest.NewStream(t, 0) + for i := 0; i < 10; i++ { + p := suite.Point().Pick(randomStream) + var pk, nPk, nnPk PublicKey + marshaledKey, err := p.MarshalBinary() + require.NoError(t, err, "failed to marshal public key") + require.Equal(t, copy(pk[:], marshaledKey), + CompressedPublicKeyLength, "failed to copy marshaled key to pk") + assert.NotEqual(t, pk, nPk, "equality test succeeds on different keys!") + np, err := pk.Point() + require.NoError(t, err, "failed to marshal public key") + assert.True(t, p.Equal(np), "Point should give the point we constructed pk from") + value, err := pk.Value() + require.NoError(t, err, "failed to serialize public key for database") + nPk.Scan(value) + assert.Equal(t, pk, nPk, + "roundtripping public key through db Value/Scan gave different key!") + nnPk.Set(pk) + assert.Equal(t, pk, nnPk, + "setting one PubliKey to another should result in equal keys") + } +} diff --git a/core/store/models/vrfkey/serialization.go b/core/store/models/vrfkey/serialization.go new file mode 100644 index 00000000000..1f16bed5506 --- /dev/null +++ b/core/store/models/vrfkey/serialization.go @@ -0,0 +1,164 @@ +package vrfkey + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + 
"io/ioutil" + "os" + + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/pkg/errors" +) + +// EncryptedSecretKey contains encrypted private key to be serialized to DB +// +// We could re-use geth's key handling, here, but this makes it much harder to +// misuse VRF proving keys as ethereum keys or vice versa. +type EncryptedSecretKey struct { + PublicKey PublicKey `gorm:"primary_key;type:varchar(68)"` + VRFKey gethKeyStruct `json:"vrf_key" gorm:"type:text"` +} + +// passwordPrefix is added to the beginning of the passwords for +// EncryptedSecretKey's, so that VRF keys can't casually be used as ethereum +// keys, and vice-versa. If you want to do that, DON'T. +var passwordPrefix = "don't mix VRF and Ethereum keys!" + +func adulteratedPassword(auth string) string { + return passwordPrefix + auth +} + +type ScryptParams struct{ N, P int } + +var defaultScryptParams = ScryptParams{ + N: keystore.StandardScryptN, P: keystore.StandardScryptP} + +// FastScryptParams is for use in tests, where you don't want to wear out your +// CPU with expensive key derivations, do not use it in production, or your +// encrypted VRF keys will be easy to brute-force! 
+var FastScryptParams = ScryptParams{N: 2, P: 1} + +// Encrypt returns the key encrypted with passphrase auth +func (k *PrivateKey) Encrypt(auth string, p ...ScryptParams, +) (*EncryptedSecretKey, error) { + var scryptParams ScryptParams + switch len(p) { + case 0: + scryptParams = defaultScryptParams + case 1: + scryptParams = p[0] + default: + return nil, fmt.Errorf("can take at most one set of ScryptParams") + } + keyJSON, err := keystore.EncryptKey(k.gethKey(), adulteratedPassword(auth), + scryptParams.N, scryptParams.P) + if err != nil { + return nil, errors.Wrapf(err, "could not encrypt vrf key") + } + rv := EncryptedSecretKey{} + if err := json.Unmarshal(keyJSON, &rv.VRFKey); err != nil { + return nil, errors.Wrapf(err, "geth returned unexpected key material") + } + rv.PublicKey = k.PublicKey + roundTripKey, err := rv.Decrypt(auth) + if err != nil { + return nil, errors.Wrapf(err, "could not decrypt just-encrypted key!") + } + if !roundTripKey.k.Equal(k.k) || roundTripKey.PublicKey != k.PublicKey { + panic(fmt.Errorf("roundtrip of key resulted in different value")) + } + return &rv, nil +} + +// JSON returns the JSON representation of e, or errors +func (e *EncryptedSecretKey) JSON() ([]byte, error) { + keyJSON, err := json.Marshal(e) + if err != nil { + return nil, errors.Wrapf(err, "could not marshal encrypted key to JSON") + } + return keyJSON, nil +} + +// Decrypt returns the PrivateKey in e, decrypted via auth, or an error +func (e *EncryptedSecretKey) Decrypt(auth string) (*PrivateKey, error) { + keyJSON, err := json.Marshal(e.VRFKey) + if err != nil { + return nil, errors.Wrapf(err, "while marshaling key for decryption") + } + gethKey, err := keystore.DecryptKey(keyJSON, adulteratedPassword(auth)) + if err != nil { + return nil, errors.Wrapf(err, "could not decrypt key %s", + e.PublicKey.String()) + } + return fromGethKey(gethKey), nil +} + +// WriteToDisk writes the JSON representation of e to given file path, and +// ensures the file has appropriate 
access permissions +func (e *EncryptedSecretKey) WriteToDisk(path string) error { + keyJSON, err := e.JSON() + if err != nil { + return errors.Wrapf(err, "while marshaling key to save to %s", path) + } + userReadWriteOtherNoAccess := os.FileMode(0600) + return ioutil.WriteFile(path, keyJSON, userReadWriteOtherNoAccess) +} + +// MarshalText renders k as a text string +func (k PublicKey) MarshalText() ([]byte, error) { + return []byte(k.String()), nil +} + +// UnmarshalText reads a PublicKey into k from text, or errors +func (k *PublicKey) UnmarshalText(text []byte) error { + if err := k.SetFromHex(string(text)); err != nil { + return errors.Wrapf(err, "while parsing %s as public key", text) + } + return nil +} + +// Value marshals PublicKey to be saved in the DB +func (k PublicKey) Value() (driver.Value, error) { + return k.String(), nil +} + +// Scan reconstructs a PublicKey from a DB record of it. +func (k *PublicKey) Scan(value interface{}) error { + rawKey, ok := value.(string) + if !ok { + return errors.Wrap(fmt.Errorf("unable to convert %+v of type %T to PublicKey", value, value), "scan failure") + } + if err := k.SetFromHex(rawKey); err != nil { + return errors.Wrapf(err, "while scanning %s as PublicKey", rawKey) + } + return nil +} + +// Copied from go-ethereum/accounts/keystore/key.go's encryptedKeyJSONV3 +type gethKeyStruct struct { + Address string `json:"address"` + Crypto keystore.CryptoJSON `json:"crypto"` + Version int `json:"version"` +} + +func (k gethKeyStruct) Value() (driver.Value, error) { + return json.Marshal(&k) +} + +func (k *gethKeyStruct) Scan(value interface{}) error { + // With sqlite gorm driver, we get a []byte, here. With postgres, a string! 
+ // https://github.com/jinzhu/gorm/issues/2276 + var toUnmarshal []byte + switch s := value.(type) { + case []byte: + toUnmarshal = s + case string: + toUnmarshal = []byte(s) + default: + return errors.Wrap( + fmt.Errorf("unable to convert %+v of type %T to gethKeyStruct", + value, value), "scan failure") + } + return json.Unmarshal(toUnmarshal, k) +} diff --git a/core/store/models/vrfkey/serialization_test.go b/core/store/models/vrfkey/serialization_test.go new file mode 100644 index 00000000000..9bedce0c5f5 --- /dev/null +++ b/core/store/models/vrfkey/serialization_test.go @@ -0,0 +1,31 @@ +package vrfkey + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var phrase = "as3r8phu82u9ru843cdi4298yf" + +var serialSK = 0xdeadbeefdeadbee +var serialK = mustNewPrivateKey(big.NewInt(int64(sk))) + +func TestEncryptDecryptRoundTrip(t *testing.T) { + // Encrypt already does a roundtrip to make sure it can decrypt, anyway + _, err := serialK.Encrypt(phrase, FastScryptParams) + assert.NoError(t, err, + "failed to roundtrip secret key through enecryption/decryption") +} + +func TestPublicKeyRoundTrip(t *testing.T) { + pk := serialK.PublicKey + serialized, err := pk.Value() + require.NoError(t, err, "failed to serialize public key for db") + var npk PublicKey + require.NoError(t, npk.Scan(serialized), + "could not deserialize serialized public key") + assert.Equal(t, pk, npk, "should get same key back after Value/Scan roundtrip") +} diff --git a/core/store/orm/config.go b/core/store/orm/config.go index b10f96cf32c..bba2533688d 100644 --- a/core/store/orm/config.go +++ b/core/store/orm/config.go @@ -18,6 +18,7 @@ import ( "chainlink/core/utils" "github.com/ethereum/go-ethereum/common" + ethCore "github.com/ethereum/go-ethereum/core" "github.com/gin-gonic/contrib/sessions" "github.com/gin-gonic/gin" "github.com/gorilla/securecookie" @@ -28,6 +29,9 @@ import ( "go.uber.org/zap/zapcore" ) +// this permission 
grants read / write accccess to file owners only +const readWritePerms = os.FileMode(0600) + // Config holds parameters used by the application which can be overridden by // setting environment variables. // @@ -76,6 +80,20 @@ func newConfigWithViper(v *viper.Viper) *Config { return config } +// Validate performs basic sanity checks on config and returns error if any +// misconfiguration would be fatal to the application +func (c *Config) Validate() error { + ethGasBumpPercent := c.EthGasBumpPercent() + if uint64(ethGasBumpPercent) < ethCore.DefaultTxPoolConfig.PriceBump { + logger.Warnf( + "ETH_GAS_BUMP_PERCENT of %v is less than Geth's default of %v, transactions may fail with underpriced replacement errors", + c.EthGasBumpPercent(), + ethCore.DefaultTxPoolConfig.PriceBump, + ) + } + return nil +} + // SetRuntimeStore tells the configuration system to use a store for retrieving // configuration variables that can be configured at runtime. func (c *Config) SetRuntimeStore(orm *ORM) { @@ -122,17 +140,26 @@ func (c Config) DatabaseTimeout() time.Duration { } // DatabaseURL configures the URL for chainlink to connect to. This must be -// a properly formatted URL, with a valid scheme (postgres://, file://), or -// an empty string, so the application defaults to .chainlink/db.sqlite. +// a properly formatted URL, with a valid scheme (postgres://) func (c Config) DatabaseURL() string { return c.viper.GetString(EnvVarName("DatabaseURL")) } +// DefaultMaxHTTPAttempts defines the limit for HTTP requests. +func (c Config) DefaultMaxHTTPAttempts() uint { + return c.viper.GetUint(EnvVarName("DefaultMaxHTTPAttempts")) +} + // DefaultHTTPLimit defines the limit for HTTP requests. 
func (c Config) DefaultHTTPLimit() int64 { return c.viper.GetInt64(EnvVarName("DefaultHTTPLimit")) } +// DefaultHTTPTimeout defines the default timeout for http requests +func (c Config) DefaultHTTPTimeout() time.Duration { + return c.viper.GetDuration(EnvVarName("DefaultHTTPTimeout")) +} + // Dev configures "development" mode for chainlink. func (c Config) Dev() bool { return c.viper.GetBool(EnvVarName("Dev")) @@ -440,15 +467,6 @@ func (c Config) getWithFallback(name string, parser func(string) (interface{}, e return v } -// NormalizedDatabaseURL returns the DatabaseURL with the empty default -// coerced to a sqlite3 URL. -func NormalizedDatabaseURL(c ConfigReader) string { - if c.DatabaseURL() == "" { - return filepath.ToSlash(filepath.Join(c.RootDir(), "db.sqlite3")) - } - return c.DatabaseURL() -} - // SecretGenerator is the interface for objects that generate a secret // used to sign or encrypt. type SecretGenerator interface { @@ -468,7 +486,7 @@ func (f filePersistedSecretGenerator) Generate(c Config) ([]byte, error) { } key := securecookie.GenerateRandomKey(32) str := base64.StdEncoding.EncodeToString(key) - return key, ioutil.WriteFile(sessionPath, []byte(str), 0644) + return key, ioutil.WriteFile(sessionPath, []byte(str), readWritePerms) } func parseAddress(str string) (interface{}, error) { diff --git a/core/store/orm/config_eth_gas_price_test.go b/core/store/orm/config_eth_gas_price_test.go new file mode 100644 index 00000000000..95fd967b3f7 --- /dev/null +++ b/core/store/orm/config_eth_gas_price_test.go @@ -0,0 +1,45 @@ +package orm_test + +import ( + "math/big" + "testing" + + "chainlink/core/internal/cltest" + + "github.com/stretchr/testify/require" +) + +func TestConfig_EthGasPriceDefault(t *testing.T) { + store, cleanup := cltest.NewStore(t) + defer cleanup() + config := store.Config + + // Get default value + def := config.EthGasPriceDefault() + + // No orm installed + err := config.SetEthGasPriceDefault(big.NewInt(0)) + require.Error(t, err) + + 
// Install ORM + config.SetRuntimeStore(store.ORM) + + // Value still stays as the default + require.Equal(t, def, config.EthGasPriceDefault()) + + // Override + newValue := new(big.Int).Add(def, big.NewInt(1)) + err = config.SetEthGasPriceDefault(newValue) + require.NoError(t, err) + + // Value changes + require.Equal(t, newValue, config.EthGasPriceDefault()) + + // Set again + newerValue := new(big.Int).Add(def, big.NewInt(2)) + err = config.SetEthGasPriceDefault(newerValue) + require.NoError(t, err) + + // Value changes + require.Equal(t, newerValue, config.EthGasPriceDefault()) +} diff --git a/core/store/orm/config_reader.go b/core/store/orm/config_reader.go index 34460dee999..aeb49f2a3c0 100644 --- a/core/store/orm/config_reader.go +++ b/core/store/orm/config_reader.go @@ -20,7 +20,9 @@ type ConfigReader interface { ClientNodeURL() string DatabaseTimeout() time.Duration DatabaseURL() string + DefaultMaxHTTPAttempts() uint DefaultHTTPLimit() int64 + DefaultHTTPTimeout() time.Duration Dev() bool FeatureExternalInitiators() bool FeatureFluxMonitor() bool diff --git a/core/store/orm/config_test.go b/core/store/orm/config_test.go index 46b9022a29e..7ac9e933038 100644 --- a/core/store/orm/config_test.go +++ b/core/store/orm/config_test.go @@ -9,10 +9,8 @@ import ( "time" "chainlink/core/assets" - "chainlink/core/store/migrations/migration1564007745" "github.com/ethereum/go-ethereum/common" - "github.com/jinzhu/gorm" "github.com/spf13/viper" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -20,9 +18,8 @@ import ( ) func TestStore_ConfigDefaults(t *testing.T) { - t.Parallel() config := NewConfig() - assert.Equal(t, big.NewInt(0), config.ChainID()) + assert.Equal(t, big.NewInt(1), config.ChainID()) assert.Equal(t, false, config.FeatureExternalInitiators()) assert.Equal(t, big.NewInt(20000000000), config.EthGasPriceDefault()) assert.Equal(t, "0x514910771AF9Ca656af840dff83E8264EcF986CA", 
common.HexToAddress(config.LinkContractAddress()).String()) @@ -160,75 +157,3 @@ func TestStore_urlParser(t *testing.T) { }) } } - -func TestConfig_NormalizedDatabaseURL(t *testing.T) { - t.Parallel() - - tests := []struct { - name, uri, expect string - }{ - {"default", "", "/root/db.sqlite3"}, - {"root", "/root/db.sqlite3", "/root/db.sqlite3"}, - {"windows root", `C:\root\db.sqlite3`, `C:\root\db.sqlite3`}, - {"garbage", "89324*$*#@(=", "89324*$*#@(="}, - {"relative path", "store/db/here", "store/db/here"}, - {"file uri", "file://host/path", "file://host/path"}, - {"postgres uri", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full"}, - {"postgres string", "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full", "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full"}, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - config := NewConfig() - config.Set("ROOT", "/root") - config.Set("DATABASE_URL", test.uri) - assert.Equal(t, test.expect, NormalizedDatabaseURL(config)) - }) - } -} - -func TestConfig_EthGasPriceDefault(t *testing.T) { - t.Parallel() - - config := NewConfig() - - // Get default value - def := config.EthGasPriceDefault() - - // No orm installed - err := config.SetEthGasPriceDefault(big.NewInt(0)) - require.Error(t, err) - - // ORM installed - require.NoError(t, os.MkdirAll(config.RootDir(), 0700)) - defer os.RemoveAll(config.RootDir()) - orm, err := NewORM(NormalizedDatabaseURL(config), config.DatabaseTimeout()) - require.NoError(t, err) - require.NotNil(t, orm) - orm.SetLogging(true) - err = orm.RawDB(func(db *gorm.DB) error { - return migration1564007745.Migrate(db) - }) - require.NoError(t, err) - - config.SetRuntimeStore(orm) - - // Value still stays as the default - require.Equal(t, def, config.EthGasPriceDefault()) - - // Override - newValue := new(big.Int).Add(def, big.NewInt(1)) - err = 
config.SetEthGasPriceDefault(newValue) - require.NoError(t, err) - - // Value changes - require.Equal(t, newValue, config.EthGasPriceDefault()) - - // Set again - newerValue := new(big.Int).Add(def, big.NewInt(2)) - err = config.SetEthGasPriceDefault(newerValue) - require.NoError(t, err) - - // Value changes - require.Equal(t, newerValue, config.EthGasPriceDefault()) -} diff --git a/core/store/orm/helpers_test.go b/core/store/orm/helpers_test.go index 7a37bffa915..48f1df054eb 100644 --- a/core/store/orm/helpers_test.go +++ b/core/store/orm/helpers_test.go @@ -1,5 +1,7 @@ package orm +import "chainlink/core/gracefulpanic" + func (o *ORM) LockingStrategyHelperSimulateDisconnect() (error, error) { err1 := o.lockingStrategy.(*PostgresLockingStrategy).conn.Close() err2 := o.lockingStrategy.(*PostgresLockingStrategy).db.Close() @@ -7,3 +9,7 @@ func (o *ORM) LockingStrategyHelperSimulateDisconnect() (error, error) { o.lockingStrategy.(*PostgresLockingStrategy).db = nil return err1, err2 } + +func (o *ORM) ShutdownSignal() gracefulpanic.Signal { + return o.shutdownSignal +} diff --git a/core/store/orm/locking_strategies.go b/core/store/orm/locking_strategies.go index d7703bb12fc..4b1e34c96e6 100644 --- a/core/store/orm/locking_strategies.go +++ b/core/store/orm/locking_strategies.go @@ -4,11 +4,9 @@ import ( "context" "database/sql" "fmt" - "path/filepath" "sync" "time" - "github.com/gofrs/flock" "github.com/pkg/errors" "go.uber.org/multierr" ) @@ -19,8 +17,6 @@ func NewLockingStrategy(dialect DialectName, dbpath string) (LockingStrategy, er switch dialect { case DialectPostgres: return NewPostgresLockingStrategy(dbpath) - case DialectSqlite: - return NewFileLockingStrategy(dbpath) } return nil, fmt.Errorf("unable to create locking strategy for dialect %s and path %s", dialect, dbpath) @@ -33,44 +29,6 @@ type LockingStrategy interface { Unlock(timeout time.Duration) error } -// FileLockingStrategy uses a file lock on disk to ensure exclusive access. 
-type FileLockingStrategy struct { - path string - fileLock *flock.Flock - m *sync.Mutex -} - -// NewFileLockingStrategy creates a new instance of FileLockingStrategy -// at the passed path. -func NewFileLockingStrategy(dbpath string) (LockingStrategy, error) { - directory := filepath.Dir(dbpath) - lockPath := filepath.Join(directory, "chainlink.lock") - return &FileLockingStrategy{ - path: lockPath, - fileLock: flock.New(lockPath), - m: &sync.Mutex{}, - }, nil -} - -// Lock returns immediately and assumes is always unlocked. -func (s *FileLockingStrategy) Lock(timeout time.Duration) error { - s.m.Lock() - defer s.m.Unlock() - - var err error - locked := make(chan struct{}) - go func() { - err = s.fileLock.Lock() - close(locked) - }() - select { - case <-locked: - case <-normalizedTimeout(timeout): - return fmt.Errorf("file locking strategy timed out for %s", s.path) - } - return err -} - func normalizedTimeout(timeout time.Duration) <-chan time.Time { if timeout == 0 { return make(chan time.Time) // never time out @@ -78,13 +36,6 @@ func normalizedTimeout(timeout time.Duration) <-chan time.Time { return time.After(timeout) } -// Unlock is a noop. -func (s *FileLockingStrategy) Unlock(timeout time.Duration) error { - s.m.Lock() - defer s.m.Unlock() - return s.fileLock.Unlock() -} - // PostgresLockingStrategy uses a postgres advisory lock to ensure exclusive // access. 
type PostgresLockingStrategy struct { diff --git a/core/store/orm/locking_strategies_test.go b/core/store/orm/locking_strategies_test.go index 7f0fb61fb17..ee8075b4d33 100644 --- a/core/store/orm/locking_strategies_test.go +++ b/core/store/orm/locking_strategies_test.go @@ -1,8 +1,6 @@ package orm_test import ( - "os" - "path/filepath" "reflect" "testing" "time" @@ -20,17 +18,12 @@ import ( ) func TestNewLockingStrategy(t *testing.T) { - tc, cleanup := cltest.NewConfig(t) - defer cleanup() - c := tc.Config - tests := []struct { name string dialectName orm.DialectName path string expect reflect.Type }{ - {"sqlite", orm.DialectSqlite, c.RootDir(), reflect.ValueOf(&orm.FileLockingStrategy{}).Type()}, {"postgres", orm.DialectPostgres, "postgres://something:5432", reflect.ValueOf(&orm.PostgresLockingStrategy{}).Type()}, } @@ -44,31 +37,6 @@ func TestNewLockingStrategy(t *testing.T) { } } -func TestFileLockingStrategy_Lock(t *testing.T) { - const delay = 10 * time.Millisecond - - tc, cleanup := cltest.NewConfig(t) - defer cleanup() - c := tc.Config - - require.NoError(t, os.MkdirAll(c.RootDir(), 0700)) - defer os.RemoveAll(c.RootDir()) - - dbpath := filepath.ToSlash(filepath.Join(c.RootDir(), "db.sqlite3")) - ls, err := orm.NewFileLockingStrategy(dbpath) - require.NoError(t, err) - require.NoError(t, ls.Lock(delay), "should get exclusive lock") - - ls2, err := orm.NewFileLockingStrategy(dbpath) - require.NoError(t, err) - require.Error(t, ls2.Lock(delay), "should not get 2nd exclusive lock") - - require.NoError(t, ls.Unlock(delay)) - - require.NoError(t, ls2.Lock(delay), "allow another to lock after unlock") - require.NoError(t, ls2.Unlock(delay)) -} - func TestPostgresLockingStrategy_Lock(t *testing.T) { tc, cleanup := cltest.NewConfig(t) defer cleanup() @@ -130,14 +98,15 @@ func TestPostgresLockingStrategy_CanBeReacquiredByNewNodeAfterDisconnect(t *test defer cleanup() if store.Config.DatabaseURL() == "" { - t.Skip("No postgres DatabaseURL set.") + panic("No postgres 
DatabaseURL set.") } connErr, dbErr := store.ORM.LockingStrategyHelperSimulateDisconnect() require.NoError(t, connErr) require.NoError(t, dbErr) - orm2, err := orm.NewORM(orm.NormalizedDatabaseURL(store.Config), store.Config.DatabaseTimeout()) + orm2ShutdownSignal := gracefulpanic.NewSignal() + orm2, err := orm.NewORM(store.Config.DatabaseURL(), store.Config.DatabaseTimeout(), orm2ShutdownSignal) require.NoError(t, err) defer orm2.Close() @@ -147,7 +116,7 @@ func TestPostgresLockingStrategy_CanBeReacquiredByNewNodeAfterDisconnect(t *test require.NoError(t, err) _ = store.ORM.RawDB(func(db *gorm.DB) error { return nil }) - gomega.NewGomegaWithT(t).Eventually(gracefulpanic.Wait()).Should(gomega.BeClosed()) + gomega.NewGomegaWithT(t).Eventually(store.ORM.ShutdownSignal().Wait()).Should(gomega.BeClosed()) } func TestPostgresLockingStrategy_WhenReacquiredOriginalNodeErrors(t *testing.T) { @@ -173,5 +142,5 @@ func TestPostgresLockingStrategy_WhenReacquiredOriginalNodeErrors(t *testing.T) defer lock.Unlock(delay) _ = store.ORM.RawDB(func(db *gorm.DB) error { return nil }) - gomega.NewGomegaWithT(t).Eventually(gracefulpanic.Wait()).Should(gomega.BeClosed()) + gomega.NewGomegaWithT(t).Eventually(store.ORM.ShutdownSignal().Wait()).Should(gomega.BeClosed()) } diff --git a/core/store/orm/orm.go b/core/store/orm/orm.go index e693d4c22a0..f3bd63ea220 100644 --- a/core/store/orm/orm.go +++ b/core/store/orm/orm.go @@ -22,15 +22,14 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/jinzhu/gorm" _ "github.com/jinzhu/gorm/dialects/postgres" // http://doc.gorm.io/database.html#connecting-to-a-database - _ "github.com/jinzhu/gorm/dialects/sqlite" // http://doc.gorm.io/database.html#connecting-to-a-database "github.com/lib/pq" - "github.com/mattn/go-sqlite3" "github.com/pkg/errors" "go.uber.org/multierr" ) // BatchSize is the safe number of records to cache during Batch calls for // SQLite without causing load problems. 
+// NOTE: Now we no longer support SQLite, perhaps this can be tuned? const BatchSize = 100 var ( @@ -46,8 +45,6 @@ type DialectName string const ( // DialectPostgres represents the postgres dialect. DialectPostgres DialectName = "postgres" - // DialectSqlite represents the sqlite dialect. - DialectSqlite = "sqlite3" ) // ORM contains the database object used by Chainlink. @@ -57,6 +54,7 @@ type ORM struct { advisoryLockTimeout time.Duration dialectName DialectName closeOnce sync.Once + shutdownSignal gracefulpanic.Signal } var ( @@ -67,16 +65,14 @@ var ( // mapError tries to coerce the error into package defined errors. func mapError(err error) error { err = errors.Cause(err) - if v, ok := err.(sqlite3.Error); ok && v.Code == sqlite3.ErrConstraint { - return ErrorConflict - } else if v, ok := err.(*pq.Error); ok && v.Code.Class() == "23" { + if v, ok := err.(*pq.Error); ok && v.Code.Class() == "23" { return ErrorConflict } return err } // NewORM initializes a new database file at the configured uri. 
-func NewORM(uri string, timeout time.Duration) (*ORM, error) { +func NewORM(uri string, timeout time.Duration, shutdownSignal gracefulpanic.Signal) (*ORM, error) { dialect, err := DeduceDialect(uri) if err != nil { return nil, err @@ -93,6 +89,7 @@ func NewORM(uri string, timeout time.Duration) (*ORM, error) { lockingStrategy: lockingStrategy, advisoryLockTimeout: timeout, dialectName: dialect, + shutdownSignal: shutdownSignal, } orm.MustEnsureAdvisoryLock() @@ -113,7 +110,7 @@ func (orm *ORM) MustEnsureAdvisoryLock() { err := orm.lockingStrategy.Lock(orm.advisoryLockTimeout) if err != nil { logger.Errorf("unable to lock ORM: %v", err) - gracefulpanic.Panic() + orm.shutdownSignal.Panic() } } @@ -136,14 +133,6 @@ func initializeDatabase(dialect, path string) (*gorm.DB, error) { return nil, err } - if err := dbutil.SetSqlitePragmas(db); err != nil { - return nil, err - } - - if err := dbutil.LimitSqliteOpenConnections(db); err != nil { - return nil, err - } - return db, nil } @@ -157,16 +146,9 @@ func DeduceDialect(path string) (DialectName, error) { switch scheme { case "postgresql", "postgres": return DialectPostgres, nil - case "file", "": - if len(strings.Split(url.Path, " ")) > 1 { - return "", errors.New("error deducing ORM dialect, no spaces allowed, please use a postgres URL or file path") - } - return DialectSqlite, nil - case "sqlite3", "sqlite": - return "", fmt.Errorf("do not have full support for the sqlite URL, please use file:// instead for path %s", path) + default: + return "", fmt.Errorf("missing or unsupported database path: \"%s\". Did you forget to specify DATABASE_URL?", path) } - - return DialectSqlite, nil } func ignoreRecordNotFound(db *gorm.DB) error { @@ -325,8 +307,6 @@ func (orm *ORM) AllSyncEvents(cb func(*models.SyncEvent) error) error { // Encourages the use of transactions for gorm calls that translate // into multiple sql calls, i.e. orm.SaveJobRun(run), which are better suited // in a database transaction. 
-// Improves efficiency in sqlite by preventing autocommit on each line, instead -// Batch committing at the end of the transaction. func (orm *ORM) convenientTransaction(callback func(*gorm.DB) error) error { orm.MustEnsureAdvisoryLock() dbtx := orm.db.Begin() @@ -724,6 +704,19 @@ func (orm *ORM) FindAllTxsInNonceRange(beginningNonce uint, endingNonce uint) ([ return txs, err } +// FindTxsBySenderAndRecipient returns an array of transactions sent by `sender` to `recipient` +func (orm *ORM) FindTxsBySenderAndRecipient(sender, recipient common.Address, offset, limit uint) ([]models.Tx, error) { + orm.MustEnsureAdvisoryLock() + var txs []models.Tx + err := orm.db. + Where(`"from" = ? AND "to" = ?`, sender, recipient). + Order("nonce DESC"). + Offset(offset). + Limit(limit). + Find(&txs).Error + return txs, err +} + // FindTxByAttempt returns the specific transaction attempt with the hash. func (orm *ORM) FindTxByAttempt(hash common.Hash) (*models.Tx, *models.TxAttempt, error) { orm.MustEnsureAdvisoryLock() @@ -1109,8 +1102,6 @@ func (orm *ORM) DeleteTransaction(ethtx *models.Tx) error { func (orm *ORM) BulkDeleteRuns(bulkQuery *models.BulkDeleteRunRequest) error { orm.MustEnsureAdvisoryLock() return orm.convenientTransaction(func(dbtx *gorm.DB) error { - // NOTE: SQLite doesn't support compound delete statements, so delete run - // results for job_runs ... err := dbtx.Exec(` DELETE FROM run_results @@ -1175,6 +1166,45 @@ func (orm *ORM) FirstOrCreateKey(k *models.Key) error { return orm.db.FirstOrCreate(k).Error } +// FirstOrCreateEncryptedSecretKey returns the first key found or creates a new one in the orm. 
+func (orm *ORM) FirstOrCreateEncryptedSecretVRFKey(k *models.EncryptedSecretVRFKey) error { + orm.MustEnsureAdvisoryLock() + return orm.db.FirstOrCreate(k).Error +} + +// DeleteEncryptedSecretKey deletes k from the encrypted keys table, or errors +func (orm *ORM) DeleteEncryptedSecretVRFKey(k *models.EncryptedSecretVRFKey) error { + orm.MustEnsureAdvisoryLock() + return orm.db.Delete(k).Error +} + +// FindEncryptedSecretKeys retrieves matches to where from the encrypted keys table, or errors +func (orm *ORM) FindEncryptedSecretVRFKeys(where ...models.EncryptedSecretVRFKey) ( + retrieved []*models.EncryptedSecretVRFKey, err error) { + orm.MustEnsureAdvisoryLock() + var anonWhere []interface{} // Find needs "where" contents coerced to interface{} + for _, constraint := range where { + anonWhere = append(anonWhere, &constraint) + } + return retrieved, orm.db.Find(&retrieved, anonWhere...).Error +} + +// SaveLogCursor saves the log cursor. +func (orm *ORM) SaveLogCursor(logCursor *models.LogCursor) error { + orm.MustEnsureAdvisoryLock() + return orm.db.Save(logCursor).Error +} + +// FindLogCursor will find the given log cursor. +func (orm *ORM) FindLogCursor(name string) (models.LogCursor, error) { + orm.MustEnsureAdvisoryLock() + lc := models.LogCursor{} + err := orm.db. + Where("name = ?", name). + First(&lc).Error + return lc, err +} + // ClobberDiskKeyStoreWithDBKeys writes all keys stored in the orm to // the keys folder on disk, deleting anything there prior. 
func (orm *ORM) ClobberDiskKeyStoreWithDBKeys(keysDir string) error { diff --git a/core/store/orm/orm_test.go b/core/store/orm/orm_test.go index fd7666abf13..5816beb26cc 100644 --- a/core/store/orm/orm_test.go +++ b/core/store/orm/orm_test.go @@ -1045,6 +1045,33 @@ func TestBulkDeleteRuns(t *testing.T) { require.NoError(t, err) } +func TestORM_FindTxsBySenderAndRecipient(t *testing.T) { + t.Parallel() + + store, cleanup := cltest.NewStore(t) + _, err := store.KeyStore.NewAccount(cltest.Password) + require.NoError(t, err) + defer cleanup() + + from := cltest.GetAccountAddress(t, store) + to := cltest.NewAddress() + tx1 := cltest.CreateTxWithNonceGasPriceAndRecipient(t, store, from, to, 0, 0, 1) + tx2 := cltest.CreateTxWithNonceGasPriceAndRecipient(t, store, from, to, 0, 1, 1) + cltest.CreateTxWithNonceGasPriceAndRecipient(t, store, from, cltest.NewAddress(), 0, 2, 1) + cltest.CreateTxWithNonceGasPriceAndRecipient(t, store, cltest.NewAddress(), to, 0, 3, 1) + + txs, err := store.FindTxsBySenderAndRecipient(from, to, 0, 4) + require.NoError(t, err) + + require.Len(t, txs, 2) + expectedTxs := []*models.Tx{tx2, tx1} + for i, expected := range expectedTxs { + require.Equal(t, expected.To, txs[i].To) + require.Equal(t, expected.From, txs[i].From) + require.Equal(t, expected.Nonce, txs[i].Nonce) + } +} + func TestORM_FindTxAttempt_CurrentAttempt(t *testing.T) { t.Parallel() @@ -1166,10 +1193,12 @@ func TestORM_DeduceDialect(t *testing.T) { expect orm.DialectName wantError bool }{ - {"windows full path", `D:/node-0/node/db.sqlite3`, `sqlite3`, false}, - {"relative file", "db.sqlite", "sqlite3", false}, - {"relative dir path", "store/db/here", "sqlite3", false}, - {"file url", "file://host/path", "sqlite3", false}, + // Old sqlite URLs included to verify that they error since sqlite + // support has been dropped + {"windows full path", `D:/node-0/node/db.sqlite3`, ``, true}, + {"relative file", "db.sqlite", "", true}, + {"relative dir path", "store/db/here", "", true}, + 
{"file url", "file://host/path", "", true}, {"sqlite url", "sqlite:///path/to/sqlite.db", "", true}, {"sqlite3 url", "sqlite3:///path/to/sqlite.db", "", true}, {"postgres url", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full", "postgres", false}, diff --git a/core/store/orm/schema.go b/core/store/orm/schema.go index a398fe634d5..536f7196269 100644 --- a/core/store/orm/schema.go +++ b/core/store/orm/schema.go @@ -16,11 +16,12 @@ import ( type ConfigSchema struct { AllowOrigins string `env:"ALLOW_ORIGINS" default:"http://localhost:3000,http://localhost:6688"` BridgeResponseURL url.URL `env:"BRIDGE_RESPONSE_URL"` - ChainID big.Int `env:"ETH_CHAIN_ID" default:"0"` + ChainID big.Int `env:"ETH_CHAIN_ID" default:"1"` ClientNodeURL string `env:"CLIENT_NODE_URL" default:"http://localhost:6688"` DatabaseTimeout time.Duration `env:"DATABASE_TIMEOUT" default:"500ms"` DatabaseURL string `env:"DATABASE_URL"` DefaultHTTPLimit int64 `env:"DEFAULT_HTTP_LIMIT" default:"32768"` + DefaultHTTPTimeout time.Duration `env:"DEFAULT_HTTP_TIMEOUT" default:"15s"` Dev bool `env:"CHAINLINK_DEV" default:"false"` FeatureExternalInitiators bool `env:"FEATURE_EXTERNAL_INITIATORS" default:"false"` FeatureFluxMonitor bool `env:"FEATURE_FLUX_MONITOR" default:"false"` @@ -41,6 +42,7 @@ type ConfigSchema struct { LogToDisk bool `env:"LOG_TO_DISK" default:"true"` LogSQLStatements bool `env:"LOG_SQL" default:"false"` LogSQLMigrations bool `env:"LOG_SQL_MIGRATIONS" default:"true"` + DefaultMaxHTTPAttempts uint `env:"MAX_HTTP_ATTEMPTS" default:"5"` MinIncomingConfirmations uint32 `env:"MIN_INCOMING_CONFIRMATIONS" default:"3"` MinOutgoingConfirmations uint64 `env:"MIN_OUTGOING_CONFIRMATIONS" default:"12"` MinimumContractPayment assets.Link `env:"MINIMUM_CONTRACT_PAYMENT" default:"1000000000000000000"` diff --git a/core/store/presenters/presenters.go b/core/store/presenters/presenters.go index 84ec57e7efe..b1b84d060ac 100644 --- a/core/store/presenters/presenters.go +++ 
b/core/store/presenters/presenters.go @@ -387,6 +387,8 @@ func initiatorParams(i Initiator) (interface{}, error) { Precision int32 `json:"precision"` PollingInterval models.Duration `json:"pollingInterval"` }{i.Address, i.RequestData, i.Feeds, i.Threshold, i.Precision, i.PollingInterval}, nil + case models.InitiatorRandomnessLog: + return struct{ Address common.Address }{i.Address}, nil default: return nil, fmt.Errorf("Cannot marshal unsupported initiator type '%v'", i.Type) } diff --git a/core/store/prometheus.go b/core/store/prometheus.go new file mode 100644 index 00000000000..3223d2734d2 --- /dev/null +++ b/core/store/prometheus.go @@ -0,0 +1,45 @@ +package store + +import ( + "chainlink/core/assets" + "chainlink/core/eth" + "chainlink/core/logger" + "errors" + "fmt" + "math" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var promETHBalance = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "eth_balance", + Help: "Each Ethereum account's balance", + }, + []string{"account"}, +) + +func promUpdateEthBalance(balance *assets.Eth, from common.Address) { + balanceFloat, err := approximateFloat64(balance) + + if err != nil { + logger.Error(fmt.Errorf("updatePrometheusEthBalance: %v", err)) + return + } + + promETHBalance.WithLabelValues(from.Hex()).Set(balanceFloat) +} + +func approximateFloat64(e *assets.Eth) (float64, error) { + ef := new(big.Float).SetInt(e.ToInt()) + weif := new(big.Float).SetInt(eth.WeiPerEth) + bf := new(big.Float).Quo(ef, weif) + f64, _ := bf.Float64() + if f64 == math.Inf(1) || f64 == math.Inf(-1) { + return math.Inf(1), errors.New("assets.Eth.Float64: Could not approximate Eth value into float") + } + return f64, nil +} diff --git a/core/store/prometheus_test.go b/core/store/prometheus_test.go new file mode 100644 index 00000000000..e7861d1142f --- /dev/null +++ b/core/store/prometheus_test.go @@ -0,0 +1,33 @@ 
+package store + +import ( + "chainlink/core/assets" + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_approximateFloat64(t *testing.T) { + tests := []struct { + name string + input string + want float64 + wantError bool + }{ + {"zero", "0", 0, false}, + {"small", "1", 0.000000000000000001, false}, + {"rounding", "12345678901234567890", 12.345678901234567, false}, + {"large", "123456789012345678901234567890", 123456789012.34567, false}, + {"extreme", "1234567890123456789012345678901234567890123456789012345678901234567890", 1.2345678901234568e+51, false}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + eth := assets.NewEth(0) + eth.SetString(test.input, 10) + float, err := approximateFloat64(eth) + require.NoError(t, err) + require.Equal(t, test.want, float) + }) + } +} diff --git a/core/store/store.go b/core/store/store.go index 18710966bee..72e573262b2 100644 --- a/core/store/store.go +++ b/core/store/store.go @@ -10,6 +10,7 @@ import ( "time" "chainlink/core/eth" + "chainlink/core/gracefulpanic" "chainlink/core/logger" "chainlink/core/store/migrations" "chainlink/core/store/models" @@ -28,11 +29,12 @@ import ( // for keeping the application state in sync with the database. type Store struct { *orm.ORM - Config *orm.Config - Clock utils.AfterNower - KeyStore *KeyStore - TxManager TxManager - closeOnce sync.Once + Config *orm.Config + Clock utils.AfterNower + KeyStore *KeyStore + VRFKeyStore *VRFKeyStore + TxManager TxManager + closeOnce sync.Once } type lazyRPCWrapper struct { @@ -124,38 +126,38 @@ func (ed *EthDialer) Dial(urlString string) (eth.CallerSubscriber, error) { return newLazyRPCWrapper(urlString, ed.limiter) } -// NewStore will create a new database file at the config's RootDir if -// it is not already present, otherwise it will use the existing db.sqlite3 -// file. 
-func NewStore(config *orm.Config) *Store { - return NewStoreWithDialer(config, NewEthDialer(config.MaxRPCCallsPerSecond())) +// NewStore will create a new store using the Eth dialer +func NewStore(config *orm.Config, shutdownSignal gracefulpanic.Signal) *Store { + return NewStoreWithDialer(config, NewEthDialer(config.MaxRPCCallsPerSecond()), shutdownSignal) } // NewStoreWithDialer creates a new store with the given config and dialer -func NewStoreWithDialer(config *orm.Config, dialer Dialer) *Store { +func NewStoreWithDialer(config *orm.Config, dialer Dialer, shutdownSignal gracefulpanic.Signal) *Store { keyStore := func() *KeyStore { return NewKeyStore(config.KeysDir()) } - return newStoreWithDialerAndKeyStore(config, dialer, keyStore) + return newStoreWithDialerAndKeyStore(config, dialer, keyStore, shutdownSignal) } // NewInsecureStore creates a new store with the given config and // dialer, using an insecure keystore. // NOTE: Should only be used for testing! -func NewInsecureStore(config *orm.Config) *Store { +func NewInsecureStore(config *orm.Config, shutdownSignal gracefulpanic.Signal) *Store { dialer := NewEthDialer(config.MaxRPCCallsPerSecond()) keyStore := func() *KeyStore { return NewInsecureKeyStore(config.KeysDir()) } - return newStoreWithDialerAndKeyStore(config, dialer, keyStore) + return newStoreWithDialerAndKeyStore(config, dialer, keyStore, shutdownSignal) } func newStoreWithDialerAndKeyStore( config *orm.Config, dialer Dialer, - keyStoreGenerator func() *KeyStore) *Store { + keyStoreGenerator func() *KeyStore, + shutdownSignal gracefulpanic.Signal, +) *Store { err := os.MkdirAll(config.RootDir(), os.FileMode(0700)) if err != nil { logger.Fatal(fmt.Sprintf("Unable to create project root dir: %+v", err)) } - orm, err := initializeORM(config) + orm, err := initializeORM(config, shutdownSignal) if err != nil { logger.Fatal(fmt.Sprintf("Unable to initialize ORM: %+v", err)) } @@ -177,6 +179,7 @@ func newStoreWithDialerAndKeyStore( ORM: orm, TxManager: 
txManager, } + store.VRFKeyStore = NewVRFKeyStore(store) return store } @@ -233,8 +236,8 @@ func (s *Store) SyncDiskKeyStoreToDB() error { return merr } -func initializeORM(config *orm.Config) (*orm.ORM, error) { - orm, err := orm.NewORM(orm.NormalizedDatabaseURL(config), config.DatabaseTimeout()) +func initializeORM(config *orm.Config, shutdownSignal gracefulpanic.Signal) (*orm.ORM, error) { + orm, err := orm.NewORM(config.DatabaseURL(), config.DatabaseTimeout(), shutdownSignal) if err != nil { return nil, errors.Wrap(err, "initializeORM#NewORM") } diff --git a/core/store/tx_manager.go b/core/store/tx_manager.go index fd9a2ed978c..dc7156d534c 100644 --- a/core/store/tx_manager.go +++ b/core/store/tx_manager.go @@ -21,6 +21,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/tevino/abool" "go.uber.org/multierr" "gopkg.in/guregu/null.v3" @@ -39,8 +41,30 @@ const ( nonceReloadBackoffBaseTime = 3 * time.Second ) -// ErrPendingConnection is the error returned if TxManager is not connected. -var ErrPendingConnection = errors.New("Cannot talk to chain, pending connection") +var ( + // ErrPendingConnection is the error returned if TxManager is not connected. + ErrPendingConnection = errors.New("Cannot talk to chain, pending connection") + + promNumGasBumps = promauto.NewCounter(prometheus.CounterOpts{ + Name: "tx_manager_num_gas_bumps", + Help: "Number of gas bumps", + }) + + promGasBumpExceedsLimit = promauto.NewCounter(prometheus.CounterOpts{ + Name: "tx_manager_gas_bump_exceeds_limit", + Help: "Number of times gas bumping failed from exceeding the configured limit. 
Any counts of this type indicate a serious problem.", + }) + + promGasBumpUnderpricedReplacement = promauto.NewCounter(prometheus.CounterOpts{ + Name: "tx_manager_gas_bump_underpriced_replacement", + Help: "Number of underpriced replacement errors received while trying to bump gas. Counts of this type most likely indicate some kind of misconfiguration or problem.", + }) + + promTxAttemptFailed = promauto.NewCounter(prometheus.CounterOpts{ + Name: "tx_manager_tx_attempt_failed", + Help: "Number of tx attempts that failed. Tx attempts should not fail in normal operation.", + }) +) //go:generate mockery -name TxManager -output ../internal/mocks/ -case=underscore @@ -493,21 +517,11 @@ func (txm *EthTxManager) ContractLINKBalance(wr models.WithdrawalRequest) (asset return *linkBalance, nil } -// GetETHAndLINKBalances attempts to retrieve the ethereum node's perception of -// the latest ETH and LINK balances for the active account on the txm, or an -// error on failure. -func (txm *EthTxManager) GetETHAndLINKBalances(address common.Address) (*assets.Eth, *assets.Link, error) { - linkBalance, linkErr := txm.GetLINKBalance(address) - ethBalance, ethErr := txm.GetEthBalance(address) - merr := multierr.Append(linkErr, ethErr) - return ethBalance, linkBalance, merr -} - // WithdrawLINK withdraws the given amount of LINK from the contract to the // configured withdrawal address. If wr.ContractAddress is empty (zero address), // funds are withdrawn from configured OracleContractAddress. func (txm *EthTxManager) WithdrawLINK(wr models.WithdrawalRequest) (common.Hash, error) { - oracle, err := eth.GetContract("Oracle") + oracle, err := eth.GetContractCodec("Oracle") if err != nil { return common.Hash{}, err } @@ -619,6 +633,14 @@ func (txm *EthTxManager) processAttempt( "jobRunId", jobRunID, ) + // Update prometheus metric here as waiting on the transaction + // to be marked 'Safe' may be too delayed due to possible + // backlog of transaction confirmations. 
+ ethBalance, err := txm.GetEthBalance(tx.From) + if err != nil { + return receipt, state, errors.Wrap(err, "confirming confirmation attempt") + } + promUpdateEthBalance(ethBalance, tx.From) return receipt, state, nil case Unconfirmed: @@ -705,8 +727,12 @@ func (txm *EthTxManager) handleSafe( return errors.Wrap(err, "handleSafe MarkTxSafe failed") } + var balanceErr error minimumConfirmations := txm.config.MinOutgoingConfirmations() - ethBalance, linkBalance, balanceErr := txm.GetETHAndLINKBalances(tx.From) + ethBalance, err := txm.GetEthBalance(tx.From) + balanceErr = multierr.Append(balanceErr, err) + linkBalance, err := txm.GetLINKBalance(tx.From) + balanceErr = multierr.Append(balanceErr, err) logger.Infow( fmt.Sprintf("Tx #%d is safe", attemptIndex), @@ -721,8 +747,9 @@ func (txm *EthTxManager) handleSafe( return nil } -// BumpGasByIncrement returns a new gas price increased by the larger of either -// a percentage bump or a fixed size bump +// BumpGasByIncrement returns a new gas price increased by the larger of: +// - A configured percentage bump (ETH_GAS_BUMP_PERCENT) +// - A configured fixed amount of Wei (ETH_GAS_PRICE_WEI) func (txm *EthTxManager) BumpGasByIncrement(originalGasPrice *big.Int) *big.Int { // Similar logic is used in geth // See: https://github.com/ethereum/go-ethereum/blob/8d7aa9078f8a94c2c10b1d11e04242df0ea91e5b/core/tx_list.go#L255 @@ -742,6 +769,7 @@ func (txm *EthTxManager) BumpGasByIncrement(originalGasPrice *big.Int) *big.Int return minimumGasBumpByIncrement } +// bumpGas attempts a new transaction with an increased gas cost func (txm *EthTxManager) bumpGas(tx *models.Tx, attemptIndex int, blockHeight uint64) error { txAttempt := tx.Attempts[attemptIndex] @@ -750,16 +778,21 @@ func (txm *EthTxManager) bumpGas(tx *models.Tx, attemptIndex int, blockHeight ui bumpedGasPrice := txm.BumpGasByIncrement(originalGasPrice) for { + promNumGasBumps.Inc() if bumpedGasPrice.Cmp(txm.config.EthMaxGasPriceWei()) > 0 { // NOTE: In the current design, 
a new tx attempt will be created even if this one returns error. // If we do hit this scenario, we will keep creating new attempts that are guaranteed to fail // until CHAINLINK_TX_ATTEMPT_LIMIT is reached - return fmt.Errorf("bumped gas price of %v would exceed maximum configured limit of %v, set by ETH_GAS_PRICE_WEI", bumpedGasPrice, txm.config.EthMaxGasPriceWei()) + promGasBumpExceedsLimit.Inc() + err := fmt.Errorf("bumped gas price of %v would exceed maximum configured limit of %v, set by ETH_GAS_PRICE_WEI", bumpedGasPrice, txm.config.EthMaxGasPriceWei()) + logger.Error(err) + return err } bumpedTxAttempt, err := txm.createAttempt(tx, bumpedGasPrice, blockHeight) if isUnderPricedReplacementError(err) { // This is not expected if we have bumped at least geth's required // amount. + promGasBumpUnderpricedReplacement.Inc() logger.Warnw(fmt.Sprintf("Gas bump was rejected by ethereum node as underpriced, bumping again. Your value of ETH_GAS_BUMP_PERCENT (%v) may be set too low", txm.config.EthGasBumpPercent()), "originalGasPrice", originalGasPrice, "bumpedGasPrice", bumpedGasPrice, ) @@ -767,7 +800,10 @@ func (txm *EthTxManager) bumpGas(tx *models.Tx, attemptIndex int, blockHeight ui continue } if err != nil { - return errors.Wrapf(err, "bumpGas from Tx #%s", txAttempt.Hash.Hex()) + promTxAttemptFailed.Inc() + err := errors.Wrapf(err, "bumpGas from Tx #%s", txAttempt.Hash.Hex()) + logger.Error(err) + return err } logger.Infow( diff --git a/core/store/tx_manager_test.go b/core/store/tx_manager_test.go index 8eaff657887..c7694c76cb7 100644 --- a/core/store/tx_manager_test.go +++ b/core/store/tx_manager_test.go @@ -593,6 +593,7 @@ func TestTxManager_BumpGasUntilSafe_confirmed(t *testing.T) { require.Greater(t, len(tx.Attempts), 0) app.EthMock.Register("eth_getTransactionReceipt", eth.TxReceipt{Hash: cltest.NewHash(), BlockNumber: cltest.Int(gasThreshold)}) + app.EthMock.Register("eth_getBalance", "0x0100") receipt, state, err := 
txm.BumpGasUntilSafe(tx.Attempts[0].Hash) assert.NoError(t, err) @@ -770,7 +771,7 @@ func TestTxManager_BumpGasUntilSafe_erroring(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - app, cleanup := cltest.NewApplicationWithConfigAndKey(t, config) + app, cleanup := cltest.NewApplicationWithConfigAndKey(t, config, cltest.NoRegisterGetBlockNumber) defer cleanup() store := app.Store @@ -783,6 +784,7 @@ func TestTxManager_BumpGasUntilSafe_erroring(t *testing.T) { ethMock := app.EthMock ethMock.ShouldCall(test.mockSetup).During(func() { require.NoError(t, app.Store.ORM.CreateHead(cltest.Head(test.blockHeight))) + ethMock.Register("eth_blockNumber", hexutil.Uint64(1)) ethMock.Register("eth_chainId", store.Config.ChainID()) ethMock.Register("eth_sendRawTransaction", cltest.NewHash()) @@ -1124,7 +1126,7 @@ func TestTxManager_LogsETHAndLINKBalancesAfterSuccessfulTx(t *testing.T) { manager.OnNewHead(cltest.Head(confirmedAt)) ethClient.On("GetTxReceipt", tx.Attempts[0].Hash).Return(&confirmedReceipt, nil) ethClient.On("GetERC20Balance", from, mock.Anything).Return(nil, nil) - ethClient.On("GetEthBalance", from).Return(nil, nil) + ethClient.On("GetEthBalance", from).Return(cltest.NewEth(t, "10000000"), nil) receipt, state, err := manager.BumpGasUntilSafe(tx.Attempts[0].Hash) require.NoError(t, err) diff --git a/core/store/vrf_key_store.go b/core/store/vrf_key_store.go new file mode 100644 index 00000000000..8012fb65702 --- /dev/null +++ b/core/store/vrf_key_store.go @@ -0,0 +1,91 @@ +package store + +import ( + "fmt" + "math/big" + "sync" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "chainlink/core/services/vrf" + "chainlink/core/store/models/vrfkey" +) + +// VRFKeyStore tracks auxillary VRF secret keys, and generates their VRF proofs +// +// VRF proofs need access to the actual secret key, which geth does not expose. 
+// Similar to the way geth's KeyStore exposes signing capability, VRFKeyStore +// exposes VRF proof generation without the caller needing explicit knowledge of +// the secret key. +type VRFKeyStore struct { + lock sync.RWMutex + keys InMemoryKeyStore + store *Store +} + +type InMemoryKeyStore = map[vrfkey.PublicKey]vrfkey.PrivateKey + +// NewVRFKeyStore returns an empty VRFKeyStore +func NewVRFKeyStore(store *Store) *VRFKeyStore { + return &VRFKeyStore{ + lock: sync.RWMutex{}, + keys: make(InMemoryKeyStore), + store: store, + } +} + +// GenerateProof(k, seed) is marshaled randomness proof given public key k and +// VRF input seed. +// +// k must have already been unlocked in ks, as constructing the VRF proof +// requires the secret key. +func (ks *VRFKeyStore) GenerateProof(k *vrfkey.PublicKey, seed *big.Int) ( + vrf.MarshaledProof, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + privateKey, found := ks.keys[*k] + if !found { + return vrf.MarshaledProof{}, fmt.Errorf("key %s has not been unlocked", k) + } + return privateKey.MarshaledProof(seed) +} + +// Unlock tries to unlock each vrf key in the db, using the given pass phrase, +// and returns any keys it manages to unlock, and any errors which result. +func (ks *VRFKeyStore) Unlock(phrase string) (keysUnlocked []vrfkey.PublicKey, + merr error) { + ks.lock.Lock() + defer ks.lock.Unlock() + keys, err := ks.get(nil) + if err != nil { + return nil, errors.Wrap(err, "while retrieving vrf keys from db") + } + for _, k := range keys { + key, err := k.Decrypt(phrase) + if err != nil { + merr = multierr.Append(merr, err) + continue + } + ks.keys[key.PublicKey] = *key + keysUnlocked = append(keysUnlocked, key.PublicKey) + } + return keysUnlocked, merr +} + +// Forget removes the in-memory copy of the secret key of k, or errors if not +// present. Caller is responsible for taking ks.lock. 
+func (ks *VRFKeyStore) forget(k *vrfkey.PublicKey) error { + if _, found := ks.keys[*k]; !found { + return fmt.Errorf("public key %s is not unlocked; can't forget it", k) + } else { + delete(ks.keys, *k) + return nil + } +} + +func (ks *VRFKeyStore) Forget(k *vrfkey.PublicKey) error { + ks.lock.Lock() + defer ks.lock.Unlock() + return ks.forget(k) +} diff --git a/core/store/vrf_key_store_db.go b/core/store/vrf_key_store_db.go new file mode 100644 index 00000000000..888d25c368b --- /dev/null +++ b/core/store/vrf_key_store_db.go @@ -0,0 +1,178 @@ +package store + +import ( + "encoding/json" + "fmt" + + "github.com/pkg/errors" + "go.uber.org/multierr" + + "chainlink/core/store/models/vrfkey" +) + +// CreateKey returns a public key which is immediately unlocked in memory, and +// saved in DB encrypted with phrase. If p is given, its parameters are used for +// key derivation from the phrase. +func (ks *VRFKeyStore) CreateKey(phrase string, p ...vrfkey.ScryptParams, +) (*vrfkey.PublicKey, error) { + key := vrfkey.CreateKey() + if err := ks.Store(key, phrase, p...); err != nil { + return nil, err + } + return &key.PublicKey, nil +} + +// CreateWeakInMemoryEncryptedKeyXXXTestingOnly is for testing only! It returns +// an encrypted key which is fast to unlock, but correspondingly easy to brute +// force. It is not persisted to the DB, because no one should be keeping such +// keys lying around. 
+func (ks *VRFKeyStore) CreateWeakInMemoryEncryptedKeyXXXTestingOnly( + phrase string) (*vrfkey.EncryptedSecretKey, error) { + key := vrfkey.CreateKey() + encrypted, err := key.Encrypt(phrase, vrfkey.FastScryptParams) + if err != nil { + return nil, errors.Wrap(err, "while creating testing key") + } + return encrypted, nil +} + +// Store saves key to ks (in memory), and to the DB, encrypted with phrase +func (ks *VRFKeyStore) Store(key *vrfkey.PrivateKey, phrase string, + p ...vrfkey.ScryptParams) error { + ks.lock.Lock() + defer ks.lock.Unlock() + encrypted, err := key.Encrypt(phrase, p...) + if err != nil { + return errors.Wrap(err, "failed to encrypt key") + } + if err := ks.store.FirstOrCreateEncryptedSecretVRFKey(encrypted); err != nil { + return errors.Wrap(err, "failed to save encrypted key to db") + } + ks.keys[key.PublicKey] = *key + return nil +} + +// StoreInMemoryXXXTestingOnly memorizes key, only in in-memory store. +func (ks *VRFKeyStore) StoreInMemoryXXXTestingOnly(key *vrfkey.PrivateKey) { + ks.lock.Lock() + defer ks.lock.Unlock() + ks.keys[key.PublicKey] = *key +} + +var zeroPublicKey = vrfkey.PublicKey{} + +// Delete removes keys with this public key from the keystore and the DB, if present. 
+func (ks *VRFKeyStore) Delete(key *vrfkey.PublicKey) (err error) { + ks.lock.Lock() + defer ks.lock.Unlock() + if *key == zeroPublicKey { + return fmt.Errorf("cannot delete the empty public key") + } + if _, found := ks.keys[*key]; found { + err = ks.forget(key) // Destroy in-memory representation of key + delete(ks.keys, *key) + } + matches, err := ks.get(key) + if err != nil { + return errors.Wrapf(err, "while checking for existence of key %s in DB", + key.String()) + } + if len(matches) == 0 { + return AttemptToDeleteNonExistentKeyFromDB + } + return multierr.Append(err, ks.store.ORM.DeleteEncryptedSecretVRFKey( + &vrfkey.EncryptedSecretKey{PublicKey: *key})) +} + +// Import adds this encrypted key to the DB and unlocks it in in-memory store +// with passphrase auth, and returns any resulting errors +func (ks *VRFKeyStore) Import(keyjson []byte, auth string) error { + ks.lock.Lock() + defer ks.lock.Unlock() + enckey := &vrfkey.EncryptedSecretKey{} + if err := json.Unmarshal(keyjson, enckey); err != nil { + return fmt.Errorf("could not parse %s as EncryptedSecretKey json", keyjson) + } + extantMatchingKeys, err := ks.get(&enckey.PublicKey) + if err != nil { + return errors.Wrapf(err, "while checking for matching extant key in DB") + } + if len(extantMatchingKeys) != 0 { + return MatchingVRFKeyError + } + key, err := enckey.Decrypt(auth) + if err != nil { + return errors.Wrapf(err, + "while attempting to decrypt key with public key %s", + key.PublicKey.String()) + } + if err := ks.store.FirstOrCreateEncryptedSecretVRFKey(enckey); err != nil { + return errors.Wrapf(err, "while saving encrypted key to DB") + } + ks.keys[key.PublicKey] = *key + return nil +} + +// get retrieves all EncryptedSecretKey's associated with k, or all encrypted +// keys if k is nil, or errors. 
Caller is responsible for locking the store +func (ks *VRFKeyStore) get(k *vrfkey.PublicKey) ([]*vrfkey.EncryptedSecretKey, error) { + var where []vrfkey.EncryptedSecretKey + if k != nil { // Search for this specific public key + where = append(where, vrfkey.EncryptedSecretKey{PublicKey: *k}) + } + keys, err := ks.store.FindEncryptedSecretVRFKeys(where...) + if err != nil { + return nil, errors.Wrapf(err, "failed to find public key %s in DB", k) + } + return keys, nil +} + +// Get retrieves all EncryptedSecretKey's associated with k, or all encrypted +// keys if k is nil, or errors +func (ks *VRFKeyStore) Get(k *vrfkey.PublicKey) ([]*vrfkey.EncryptedSecretKey, error) { + ks.lock.RLock() + defer ks.lock.RUnlock() + return ks.get(k) +} + +func (ks *VRFKeyStore) GetSpecificKey( + k *vrfkey.PublicKey) (*vrfkey.EncryptedSecretKey, error) { + if k == nil { + return nil, fmt.Errorf("can't retrieve nil key") + } + encryptedKey, err := ks.Get(k) + if err != nil { + return nil, errors.Wrapf(err, "could not retrieve %s from db", k) + } + if len(encryptedKey) == 0 { + return nil, fmt.Errorf("could not find any keys with public key %s", + k.String()) + } + if len(encryptedKey) > 1 { + // This is impossible, as long as the public key is the primary key on the + // EncryptedSecretKey table. 
+ panic(fmt.Errorf("found more than one key with public key %s", k.String())) + } + return encryptedKey[0], nil +} + +// ListKey lists the public keys contained in the db +func (ks *VRFKeyStore) ListKeys() (publicKeys []*vrfkey.PublicKey, err error) { + enc, err := ks.Get(nil) + if err != nil { + return nil, errors.Wrapf(err, "while listing db keys") + } + for _, enckey := range enc { + publicKeys = append(publicKeys, &enckey.PublicKey) + } + return publicKeys, nil +} + +// MatchingVRFKeyError is returned when Import attempts to import key with a +// PublicKey matching one already in the database +var MatchingVRFKeyError = errors.New( + `key with matching public key already stored in DB`) + +// AttemptToDeleteNonExistentKeyFromDB is returned when Delete is asked to +// delete a key it can't find in the DB. +var AttemptToDeleteNonExistentKeyFromDB = errors.New("key is not present in DB") diff --git a/core/store/vrf_key_store_test.go b/core/store/vrf_key_store_test.go new file mode 100644 index 00000000000..27403d9127f --- /dev/null +++ b/core/store/vrf_key_store_test.go @@ -0,0 +1,110 @@ +package store_test + +import ( + "bytes" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "chainlink/core/internal/cltest" + "chainlink/core/services/signatures/secp256k1" + "chainlink/core/services/vrf/generated/solidity_verifier_wrapper" + strpkg "chainlink/core/store" + "chainlink/core/store/models/vrfkey" +) + +var suite = secp256k1.NewBlakeKeccackSecp256k1() + +// NB: For changes to the VRF solidity code to be reflected here, "go generate" +// must be run in core/services/vrf. 
+func vrfVerifier() *solidity_verifier_wrapper.VRFTestHelper { + ethereumKey, _ := crypto.GenerateKey() + auth := bind.NewKeyedTransactor(ethereumKey) + genesisData := core.GenesisAlloc{auth.From: {Balance: big.NewInt(1000000000)}} + gasLimit := eth.DefaultConfig.Miner.GasCeil + backend := backends.NewSimulatedBackend(genesisData, gasLimit) + _, _, verifier, err := solidity_verifier_wrapper.DeployVRFTestHelper(auth, backend) + if err != nil { + panic(errors.Wrapf(err, "while initializing EVM contract wrapper")) + } + backend.Commit() + return verifier +} + +var phrase = "englebert humperdinck is the greatest musician of all time" + +func TestKeyStoreEndToEnd(t *testing.T) { + store, cleanup := cltest.NewStore(t) + defer cleanup() + ks := strpkg.NewVRFKeyStore(store) + key, err := ks.CreateKey(phrase, vrfkey.FastScryptParams) // NB: Varies from run to run. Shouldn't matter, though + require.NoError(t, err, "could not create encrypted key") + require.NoError(t, ks.Forget(key), + "could not forget a created key from in-memory store") + keys, err := ks.Get(nil) // Test generic Get + require.NoError(t, err, "failed to retrieve expected key from db") + assert.True(t, len(keys) == 1 && keys[0].PublicKey == *key, + "did not get back the expected key from db retrial") + ophrase := phrase + "corruption" // Extra key; make sure it's not returned by Get + newKey, err := ks.CreateKey(ophrase, vrfkey.FastScryptParams) + require.NoError(t, err, "could not create extra key") + keys, err = ks.Get(key) // Test targeted Get + require.NoError(t, err, "key databese retrieval failed") + require.NoError(t, ks.Forget(newKey), + "failed to forget in-memory copy of second key") + require.Equal(t, keys[0].PublicKey, *key, "retrieved wrong key from db") + require.Len(t, keys, 1, "retrieved more keys than expected from db") + keys, err = ks.Get(nil) // Verify both keys are present in the db + require.NoError(t, err, "could not retrieve keys from db") + require.Len(t, keys, 2, "failed to 
remember both the keys just created") + unlockedKeys, err := ks.Unlock(phrase) // Unlocking enables generation of proofs + require.Contains(t, err.Error(), "could not decrypt key with given password", + "should have a complaint about not being able to unlock the key with a different password") + assert.Contains(t, err.Error(), newKey.String(), + "complaint about inability to unlock should pertain to the key with a different password") + assert.Len(t, unlockedKeys, 1, "should have only unlocked one key") + assert.Equal(t, unlockedKeys[0], *key, + "should have only unlocked the key with the offered password") + proof, err := ks.GenerateProof(key, big.NewInt(10)) + assert.NoError(t, err, + "should be able to generate VRF proofs with unlocked keys") + _, err = ks.GenerateProof(newKey, big.NewInt(10)) // ...but only for unlocked keys + require.Error(t, err, + "should not be able to generate VRF proofs unless key has been unlocked") + require.Contains(t, err.Error(), "has not been unlocked", + "complaint when attempting to generate VRF proof with unclocked key should be that it's locked") + encryptedKey, err := ks.GetSpecificKey(key) // Can export a key to bytes + require.NoError(t, err, "should be able to get a specific key") + assert.True(t, bytes.Equal(encryptedKey.PublicKey[:], key[:]), + "should have recovered the encrypted key for the requested public key") + verifier := vrfVerifier() // Generated proof is valid + _, err = verifier.RandomValueFromVRFProof(nil, proof[:]) + require.NoError(t, err, + "failed to get VRF proof output from solidity VRF contract") + require.NoError(t, ks.Delete(key), "failed to delete VRF key") + _, err = ks.GenerateProof(key, big.NewInt(10)) + require.Error(t, err, + "should not be able to generate VRF proofs with a deleted key") + require.Contains(t, err.Error(), "has not been unlocked", + "complaint when trying to prove with deleted key should be that it's locked") + keys, err = ks.Get(key) // Deleted key is removed from DB + 
require.NoError(t, err, "failed to query db for key") + require.Len(t, keys, 0, "deleted key should not be retrieved by db query") + keyjson, err := encryptedKey.JSON() + require.NoError(t, err, "failed to serialize key to JSON") + require.NoError(t, ks.Import(keyjson, phrase), + "failed to import encrypted key to database") + err = ks.Import(keyjson, phrase) + require.Equal(t, strpkg.MatchingVRFKeyError, err, + "should be prevented from importing a key with a public key already present in the DB") + _, err = ks.GenerateProof(key, big.NewInt(10)) + require.NoError(t, err, "should be able to generate proof with unlocked key") +} diff --git a/core/utils/atomic.go b/core/utils/atomic.go new file mode 100644 index 00000000000..2146353cac9 --- /dev/null +++ b/core/utils/atomic.go @@ -0,0 +1,19 @@ +package utils + +import ( + "sync/atomic" +) + +type AtomicBool struct { + atomic.Value +} + +func (b *AtomicBool) Get() bool { + if as, is := b.Load().(bool); is { + return as + } + return false +} +func (b *AtomicBool) Set(val bool) { + b.Store(val) +} diff --git a/core/utils/big.go b/core/utils/big.go index 35f0db87dba..bd9c0b51713 100644 --- a/core/utils/big.go +++ b/core/utils/big.go @@ -61,7 +61,11 @@ func (b *Big) MarshalText() ([]byte, error) { // MarshalJSON marshals this instance to base 10 number as string. func (b *Big) MarshalJSON() ([]byte, error) { - return b.MarshalText() + text, err := b.MarshalText() + if err != nil { + return nil, err + } + return json.Marshal(string(text)) } // UnmarshalText implements encoding.TextUnmarshaler. 
diff --git a/core/utils/big_test.go b/core/utils/big_test.go index 5d60bb4698e..39dc2dc211d 100644 --- a/core/utils/big_test.go +++ b/core/utils/big_test.go @@ -112,12 +112,17 @@ func TestBig_UnmarshalTextErrors(t *testing.T) { func TestBig_MarshalJSON(t *testing.T) { t.Parallel() + plusOneTo64bit, ok := new(big.Int).SetString("9223372036854775808", 10) + require.True(t, ok) + tests := []struct { name string input *big.Int want string }{ - {"number", big.NewInt(1234), `1234`}, + {"zero", big.NewInt(0), `"0"`}, + {"number", big.NewInt(1234), `"1234"`}, + {"big number", plusOneTo64bit, `"9223372036854775808"`}, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -129,6 +134,50 @@ func TestBig_MarshalJSON(t *testing.T) { } } +func TestBig_UnMarshalJSON(t *testing.T) { + t.Parallel() + + plusOneTo64bit, ok := new(big.Int).SetString("9223372036854775808", 10) + require.True(t, ok) + + tests := []struct { + name string + input string + want *Big + }{ + {"zero", `"0"`, (*Big)(big.NewInt(0))}, + {"number", `"1234"`, (*Big)(big.NewInt(1234))}, + {"big number", `"9223372036854775808"`, (*Big)(plusOneTo64bit)}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + i := new(Big) + err := json.Unmarshal([]byte(test.input), &i) + assert.NoError(t, err) + assert.Equal(t, test.want, i) + }) + } +} + +func TestBig_UnMarshalJSON_errors(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + input string + }{ + {"empty", `""`}, + {"NaN", `"NaN"`}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + i := new(Big) + err := json.Unmarshal([]byte(test.input), &i) + assert.Error(t, err) + }) + } +} + func TestBig_Scan(t *testing.T) { t.Parallel() diff --git a/core/utils/ethabi.go b/core/utils/ethabi.go index f0f454db83d..034206e6c8d 100644 --- a/core/utils/ethabi.go +++ b/core/utils/ethabi.go @@ -3,6 +3,7 @@ package utils import ( "bytes" "encoding/binary" + "encoding/hex" "fmt" "math/big" "strconv" @@ 
-16,6 +17,8 @@ import ( const ( // FormatBytes encodes the output as bytes FormatBytes = "bytes" + // FormatPreformatted encodes the output, assumed to be hex, as bytes. + FormatPreformatted = "preformatted" // FormatUint256 encodes the output as bytes containing a uint256 FormatUint256 = "uint256" // FormatInt256 encodes the output as bytes containing an int256 @@ -185,7 +188,8 @@ func EVMTranscodeJSONWithFormat(value gjson.Result, format string) ([]byte, erro switch format { case FormatBytes: return EVMTranscodeBytes(value) - + case FormatPreformatted: + return hex.DecodeString(RemoveHexPrefix(value.Str)) case FormatUint256: data, err := EVMTranscodeUint256(value) if err != nil { @@ -219,6 +223,17 @@ func EVMWordUint64(val uint64) []byte { return word } +// EVMWordUint128 returns a uint128 as an EVM word byte array. +func EVMWordUint128(val *big.Int) ([]byte, error) { + bytes := val.Bytes() + if val.BitLen() > 128 { + return nil, fmt.Errorf("Overflow saving uint128 to EVM word: %v", val) + } else if val.Sign() == -1 { + return nil, fmt.Errorf("Invalid attempt to save negative value as uint128 to EVM word: %v", val) + } + return common.LeftPadBytes(bytes, EVMWordByteLen), nil +} + // EVMWordSignedBigInt returns a big.Int as an EVM word byte array, with // support for a signed representation. Returns error on overflow. 
func EVMWordSignedBigInt(val *big.Int) ([]byte, error) { diff --git a/core/utils/ethabi_test.go b/core/utils/ethabi_test.go index a993b62589b..7dd40a4658b 100644 --- a/core/utils/ethabi_test.go +++ b/core/utils/ethabi_test.go @@ -7,9 +7,14 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/tidwall/gjson" ) +func pow2(arg int64) *big.Int { + return new(big.Int).Exp(big.NewInt(2), big.NewInt(arg), nil) +} + func TestEVMWordUint64(t *testing.T) { assert.Equal(t, hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000001"), @@ -22,6 +27,57 @@ func TestEVMWordUint64(t *testing.T) { EVMWordUint64(math.MaxUint64)) } +func TestEVMWordUint128(t *testing.T) { + tests := []struct { + name string + val *big.Int + exp string + }{ + { + name: "1", + val: big.NewInt(1), + exp: "0x0000000000000000000000000000000000000000000000000000000000000001", + }, + { + name: "256", + val: big.NewInt(256), + exp: "0x0000000000000000000000000000000000000000000000000000000000000100", + }, + { + name: "Max Uint 128", + val: new(big.Int).Sub(pow2(128), big.NewInt(1)), + exp: "0x00000000000000000000000000000000ffffffffffffffffffffffffffffffff", + }, + } + for _, test := range tests { + t.Log(test.name) + ret, err := EVMWordUint128(test.val) + assert.Equal(t, hexutil.MustDecode(test.exp), ret) + require.NoError(t, err) + } +} + +func TestEVMWordUint128_Error(t *testing.T) { + tests := []struct { + name string + val *big.Int + }{ + { + name: "Negative number", + val: big.NewInt(-1), + }, + { + name: "Number too large: 128", + val: pow2(128), + }, + } + for _, test := range tests { + t.Log(test.name) + _, err := EVMWordUint128(test.val) + assert.Error(t, err) + } +} + func TestEVMWordSignedBigInt(t *testing.T) { val, err := EVMWordSignedBigInt(&big.Int{}) assert.NoError(t, err) @@ -438,6 +494,12 @@ func TestEVMTranscodeJSONWithFormat(t *testing.T) { 
"0000000000000000000000000000000000000000000000000000000000000020" + "0000000000000000000000000000000000000000000000000000000000000001", }, + { + "result is preformatted", + FormatPreformatted, + `{"result": "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"}`, + "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", + }, } for _, tt := range tests { @@ -445,7 +507,7 @@ func TestEVMTranscodeJSONWithFormat(t *testing.T) { t.Run(test.name, func(t *testing.T) { input := gjson.GetBytes([]byte(test.input), "result") out, err := EVMTranscodeJSONWithFormat(input, test.format) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, test.output, hexutil.Encode(out)) }) } diff --git a/core/utils/utils.go b/core/utils/utils.go index 1ec15a7c1e1..f44613eba0f 100644 --- a/core/utils/utils.go +++ b/core/utils/utils.go @@ -3,7 +3,6 @@ package utils import ( - "chainlink/core/logger" "crypto/rand" "encoding/base64" "encoding/hex" @@ -16,6 +15,8 @@ import ( "strings" "time" + "chainlink/core/logger" + ethereum "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -24,6 +25,7 @@ import ( "github.com/jpillora/backoff" "github.com/pkg/errors" uuid "github.com/satori/go.uuid" + "github.com/shopspring/decimal" "golang.org/x/crypto/bcrypt" "golang.org/x/crypto/sha3" null "gopkg.in/guregu/null.v3" @@ -67,6 +69,20 @@ func Uint64ToHex(i uint64) string { return fmt.Sprintf("0x%x", i) } +var maxUint256 = common.HexToHash("0x" + strings.Repeat("f", 64)).Big() + +// Uint256ToBytes(x) is x represented as the bytes of a uint256 +func Uint256ToBytes(x *big.Int) (uint256 []byte, err error) { + if x.Cmp(maxUint256) > 0 { + return nil, fmt.Errorf("too large to convert to uint256") + } + uint256 = common.LeftPadBytes(x.Bytes(), EVMWordByteLen) + if x.Cmp(big.NewInt(0).SetBytes(uint256)) != 0 { + panic("failed to round-trip uint256 back to source big.Int") + } + return uint256, err +} + 
// ISO8601UTC formats given time to ISO8601. func ISO8601UTC(t time.Time) string { return logger.ISO8601UTC(t) @@ -468,3 +484,39 @@ func FileContents(path string) (string, error) { func JustError(_ interface{}, err error) error { return err } + +var zero = big.NewInt(0) + +// CheckUint256(n) returns an error if n is out of bounds for a uint256 +func CheckUint256(n *big.Int) error { + if n.Cmp(zero) < 0 || n.Cmp(maxUint256) >= 0 { + return fmt.Errorf("number out of range for uint256") + } + return nil +} + +// HexToUint256 returns the uint256 represented by s, or an error if it doesn't +// represent one. +func HexToUint256(s string) (*big.Int, error) { + rawNum, err := hexutil.Decode(s) + if err != nil { + return nil, errors.Wrapf(err, "while parsing %s as hex: ", s) + } + rv := big.NewInt(0).SetBytes(rawNum) // can't be negative number + if err := CheckUint256(rv); err != nil { + return nil, err + } + return rv, nil +} + +// Uint256ToHex returns the hex representation of n, or error if out of bounds +func Uint256ToHex(n *big.Int) (string, error) { + if err := CheckUint256(n); err != nil { + return "", err + } + return common.BigToHash(n).Hex(), nil +} + +func DecimalFromBigInt(i *big.Int, precision int32) decimal.Decimal { + return decimal.NewFromBigInt(i, -precision) +} diff --git a/core/web/job_specs_controller.go b/core/web/job_specs_controller.go index 7e4f71e8400..44d7fae918d 100644 --- a/core/web/job_specs_controller.go +++ b/core/web/job_specs_controller.go @@ -50,25 +50,34 @@ func (jsc *JobSpecsController) requireImplemented(js models.JobSpec) error { return nil } -// Create adds validates, saves, and starts a new JobSpec. -// Example: -// "/specs" -func (jsc *JobSpecsController) Create(c *gin.Context) { +// getAndCheckJobSpec(c) returns a validated job spec from c, or errors. The +// httpStatus return value is only meaningful on error, and in that case +// reflects the type of failure to be reported back to the client. 
+func (jsc *JobSpecsController) getAndCheckJobSpec( + c *gin.Context) (js models.JobSpec, httpStatus int, err error) { var jsr models.JobSpecRequest if err := c.ShouldBindJSON(&jsr); err != nil { // TODO(alx): Better parsing and more specific error messages // https://www.pivotaltracker.com/story/show/171164115 - jsonAPIError(c, http.StatusBadRequest, err) - return + return models.JobSpec{}, http.StatusBadRequest, err } - - js := models.NewJobFromRequest(jsr) + js = models.NewJobFromRequest(jsr) if err := jsc.requireImplemented(js); err != nil { - jsonAPIError(c, http.StatusNotImplemented, err) - return + return models.JobSpec{}, http.StatusNotImplemented, err } if err := services.ValidateJob(js, jsc.App.GetStore()); err != nil { - jsonAPIError(c, http.StatusBadRequest, err) + return models.JobSpec{}, http.StatusBadRequest, err + } + return js, 0, nil +} + +// Create adds validates, saves, and starts a new JobSpec. +// Example: +// "/specs" +func (jsc *JobSpecsController) Create(c *gin.Context) { + js, httpStatus, err := jsc.getAndCheckJobSpec(c) + if err != nil { + jsonAPIError(c, httpStatus, err) return } if err := NotifyExternalInitiator(js, jsc.App.GetStore()); err != nil { @@ -79,7 +88,7 @@ func (jsc *JobSpecsController) Create(c *gin.Context) { jsonAPIError(c, http.StatusInternalServerError, err) return } - // https://www.pivotaltracker.com/story/show/171169052 + // TODO: https://www.pivotaltracker.com/story/show/171169052 jsonAPIResponse(c, presenters.JobSpec{JobSpec: js}, "job") } diff --git a/core/web/router.go b/core/web/router.go index 7d0114919b3..b16d87dd56c 100644 --- a/core/web/router.go +++ b/core/web/router.go @@ -67,7 +67,7 @@ func explorerStatus(app chainlink.Application) gin.HandlerFunc { panic(err) } - c.SetCookie("explorer", (string)(b), 0, "", "", false, false) + c.SetCookie("explorer", (string)(b), 0, "", "", http.SameSiteStrictMode, false, false) c.Next() } } diff --git a/core/web/withdrawals_controller_test.go 
b/core/web/withdrawals_controller_test.go index 875b458c6c6..62fa8724f46 100644 --- a/core/web/withdrawals_controller_test.go +++ b/core/web/withdrawals_controller_test.go @@ -43,7 +43,7 @@ func TestWithdrawalsController_CreateSuccess(t *testing.T) { subscription := cltest.EmptyMockSubscription() txManager := new(mocks.TxManager) - txManager.On("SubscribeToNewHeads", mock.Anything).Maybe().Return(subscription, nil) + txManager.On("SubscribeToNewHeads", mock.Anything, mock.Anything).Maybe().Return(subscription, nil) txManager.On("GetChainID").Maybe().Return(big.NewInt(3), nil) txManager.On("Register", mock.Anything).Return(big.NewInt(3), nil) txManager.On("ContractLINKBalance", wr).Return(*wr.Amount, nil) diff --git a/design/nodeslogos.sketch b/design/nodeslogos.sketch index f0cca51cc2b..c9bad2608a8 100644 Binary files a/design/nodeslogos.sketch and b/design/nodeslogos.sketch differ diff --git a/design/sponsorslogos.sketch b/design/sponsorslogos.sketch index 6adf0fbe4e4..177d2b4261a 100644 Binary files a/design/sponsorslogos.sketch and b/design/sponsorslogos.sketch differ diff --git a/evm-contracts/app.config.json b/evm-contracts/app.config.json index fe1f8037e19..08a4ba7568a 100644 --- a/evm-contracts/app.config.json +++ b/evm-contracts/app.config.json @@ -8,6 +8,9 @@ "v0.4": "0.4.24", "v0.5": "0.5.0", "v0.6": "0.6.2" + }, + "optimizer": { + "runs": 200 } }, "publicVersions": ["0.4.24", "0.5.0"] diff --git a/evm-contracts/package.json b/evm-contracts/package.json index 807fbbdfb90..fe7558b7662 100644 --- a/evm-contracts/package.json +++ b/evm-contracts/package.json @@ -18,8 +18,8 @@ "@chainlink/belt": "0.0.1", "@chainlink/test-helpers": "0.0.2", "@types/jest": "^25.1.1", - "@types/node": "^13.7.0", - "ethers": "^4.0.44", + "@types/node": "^13.9.1", + "ethers": "^4.0.45", "jest": "^25.1.0", "ts-jest": "^25.2.0", "typescript": "^3.7.5" @@ -33,9 +33,6 @@ ], "optionalDependencies": { "@truffle/contract": "^4.1.8", - "ethers": "^4.0.44" - }, - "dependencies": { - 
"@types/inquirer": "^6.5.0" + "ethers": "^4.0.45" } } diff --git a/evm-contracts/src/v0.5/dev/VRF.sol b/evm-contracts/src/v0.5/dev/VRF.sol deleted file mode 100644 index 5cc3a2fc77d..00000000000 --- a/evm-contracts/src/v0.5/dev/VRF.sol +++ /dev/null @@ -1,382 +0,0 @@ -pragma solidity 0.5.0; - -//////////////////////////////////////////////////////////////////////////////// -// XXX: Do not use in production until this code has been audited. -//////////////////////////////////////////////////////////////////////////////// - -/** **************************************************************************** - @notice on-chain verification of verifiable-random-function (VRF) proofs as - described in https://eprint.iacr.org/2017/099.pdf (security proofs) - and https://tools.ietf.org/html/draft-goldbe-vrf-01#section-5 (spec) - **************************************************************************** - @dev PURPOSE - - @dev Reggie the Random Oracle (not his real job) wants to provide randomness - to Vera the verifier in such a way that Vera can be sure he's not - making his output up to suit himself. Reggie provides Vera a public key - to which he knows the secret key. Each time Vera provides a seed to - Reggie, he gives back a value which is computed completely - deterministically from the seed and the secret key, but which is - indistinguishable from randomness to Vera. Nonetheless, Vera is able to - verify that Reggie's output came from her seed and his secret key. - - @dev The purpose of this contract is to perform that verification. - **************************************************************************** - @dev USAGE - - @dev The main entry point is isValidVRFOutput. See its docstring. - Design notes - ------------ - - An elliptic curve point is generally represented in the solidity code as a - uint256[2], corresponding to its affine coordinates in GF(fieldSize). 
- - For the sake of efficiency, this implementation deviates from the spec in - some minor ways: - - - Keccak hash rather than SHA256. This is because it's provided natively by - the EVM, and therefore costs much less gas. The impact on security should - be minor. - - - Secp256k1 curve instead of P-256. It abuses ECRECOVER for the most - expensive ECC arithmetic. - - - scalarFromCurve recursively hashes and takes the relevant hash bits until - it finds a point less than the group order. This results in uniform - sampling over the the possible values scalarFromCurve could take. The spec - recommends just uing the first hash output as a uint256, which is a - slightly biased sample. See the zqHash function. - - - hashToCurve recursively hashes until it finds a curve x-ordinate. The spec - recommends that the initial input should be concatenated with a nonce and - then hashed, and this input should be rehashed with the nonce updated - until an x-ordinate is found. Recursive hashing is slightly more - efficient. The spec also recommends - (https://tools.ietf.org/html/rfc8032#section-5.1.3 , by the specification - of RS2ECP) that the x-ordinate should be rejected if it is greater than - the modulus. - - - In the calculation of the challenge value "c", the "u" value (or "k*g", if - you know the secret nonce) - - The spec also requires the y ordinate of the hashToCurve to be negated if y - is odd. See http://www.secg.org/sec1-v2.pdf#page=17 . This sacrifices one - bit of entropy in the random output. Instead, here y is chosen based on - whether an extra hash of the inputs is even or odd. */ - -contract VRF { - - // See https://en.bitcoin.it/wiki/Secp256k1 for these constants. 
- uint256 constant public GROUP_ORDER = // Number of points in Secp256k1 - // solium-disable-next-line indentation - 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; - // Prime characteristic of the galois field over which Secp256k1 is defined - // solium-disable-next-line zeppelin/no-arithmetic-operations - uint256 constant public FIELD_SIZE = - // solium-disable-next-line indentation - 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F; - - // solium-disable zeppelin/no-arithmetic-operations - uint256 constant public MINUS_ONE = FIELD_SIZE - 1; - uint256 constant public MULTIPLICATIVE_GROUP_ORDER = FIELD_SIZE - 1; - // pow(x, SQRT_POWER, FIELD_SIZE) == √x, since FIELD_SIZE % 4 = 3 - // https://en.wikipedia.org/wiki/Modular_square_root#Prime_or_prime_power_modulus - uint256 constant public SQRT_POWER = (FIELD_SIZE + 1) >> 2; - - uint256 constant public WORD_LENGTH_BYTES = 0x20; - - // (base**exponent) % modulus - // Cribbed from https://medium.com/@rbkhmrcr/precompiles-solidity-e5d29bd428c4 - function bigModExp(uint256 base, uint256 exponent, uint256 modulus) - public view returns (uint256 exponentiation) { - uint256 callResult; - uint256[6] memory bigModExpContractInputs; - bigModExpContractInputs[0] = WORD_LENGTH_BYTES; // Length of base - bigModExpContractInputs[1] = WORD_LENGTH_BYTES; // Length of exponent - bigModExpContractInputs[2] = WORD_LENGTH_BYTES; // Length of modulus - bigModExpContractInputs[3] = base; - bigModExpContractInputs[4] = exponent; - bigModExpContractInputs[5] = modulus; - uint256[1] memory output; - assembly { // solhint-disable-line no-inline-assembly - callResult := - staticcall(13056, // Gas cost. See EIP-198's 1st e.g. - 0x05, // Bigmodexp contract address - bigModExpContractInputs, - 0xc0, // Length of input segment - output, - 0x20) // Length of output segment - } - if (callResult == 0) {revert("bigModExp failure!");} - return output[0]; - } - - // Computes a s.t. a^2 = x in the field. 
Assumes x is a square. - function squareRoot(uint256 x) public view returns (uint256) { - return bigModExp(x, SQRT_POWER, FIELD_SIZE); - } - - function ySquared(uint256 x) public view returns (uint256) { - // Curve equation is y^2=x^3+7. See - return (bigModExp(x, 3, FIELD_SIZE) + 7) % FIELD_SIZE; - } - - // Hash x uniformly into {0, ..., q-1}. Expects x to ALREADY have the - // necessary entropy... If x < q, returns x! - function zqHash(uint256 q, uint256 x) public pure returns (uint256 x_) { - x_ = x; - while (x_ >= q) { - x_ = uint256(keccak256(abi.encodePacked(x_))); - } - } - - // One-way hash function onto the curve. - function hashToCurve(uint256[2] memory k, uint256 input) - public view returns (uint256[2] memory rv) { - bytes32 hash = keccak256(abi.encodePacked(k, input)); - rv[0] = zqHash(FIELD_SIZE, uint256(hash)); - while (true) { - rv[0] = zqHash(FIELD_SIZE, uint256(keccak256(abi.encodePacked(rv[0])))); - rv[1] = squareRoot(ySquared(rv[0])); - if (mulmod(rv[1], rv[1], FIELD_SIZE) == ySquared(rv[0])) { - break; - } - } - // Two possible y ordinates for x ordinate rv[0]; pick one "randomly" - if (uint256(keccak256(abi.encodePacked(rv[0], input))) % 2 == 0) { - rv[1] = -rv[1]; - } - } - - // Bits used in Ethereum address - uint256 constant public BOTTOM_160_BITS = 2**161 - 1; - - // Returns the ethereum address associated with point. - function pointAddress(uint256[2] calldata point) external pure returns(address) { - bytes memory packedPoint = abi.encodePacked(point); - // Lower 160 bits of the keccak hash of (x,y) as 64 bytes - return address(uint256(keccak256(packedPoint)) & BOTTOM_160_BITS); - } - - // Returns true iff q==scalar*x, with cryptographically high probability. - // Based on Vitalik Buterin's idea in above ethresear.ch post. - function ecmulVerify(uint256[2] memory x, uint256 scalar, uint256[2] memory q) - public pure returns(bool) { - // This ecrecover returns the address associated with c*R. 
See - // https://ethresear.ch/t/you-can-kinda-abuse-ecrecover-to-do-ecmul-in-secp256k1-today/2384/9 - // The point corresponding to the address returned by ecrecover(0,v,r,s=c*r) - // is (r⁻¹ mod Q) * (c*r * R - 0 * g) = c * R, where R is the point - // specified by (v, r). See https://crypto.stackexchange.com/a/18106 - bytes32 cTimesX0 = bytes32(mulmod(scalar, x[0], GROUP_ORDER)); - uint8 parity = x[1] % 2 != 0 ? 28 : 27; - return ecrecover(bytes32(0), parity, bytes32(x[0]), cTimesX0) == - address(uint256(keccak256(abi.encodePacked(q))) & BOTTOM_160_BITS); - } - - // Returns x1/z1+x2/z2=(x1z2+x2z1)/(z1z2) in projective coordinates on P¹(𝔽ₙ) - function projectiveAdd(uint256 x1, uint256 z1, uint256 x2, uint256 z2) - external pure returns(uint256 x3, uint256 z3) { - uint256 crossMultNumerator1 = mulmod(z2, x1, FIELD_SIZE); - uint256 crossMultNumerator2 = mulmod(z1, x2, FIELD_SIZE); - uint256 denom = mulmod(z1, z2, FIELD_SIZE); - uint256 numerator = addmod(crossMultNumerator1, crossMultNumerator2, FIELD_SIZE); - return (numerator, denom); - } - - // Returns x1/z1-x2/z2=(x1z2+x2z1)/(z1z2) in projective coordinates on P¹(𝔽ₙ) - function projectiveSub(uint256 x1, uint256 z1, uint256 x2, uint256 z2) - public pure returns(uint256 x3, uint256 z3) { - uint256 num1 = mulmod(z2, x1, FIELD_SIZE); - uint256 num2 = mulmod(FIELD_SIZE - x2, z1, FIELD_SIZE); - (x3, z3) = (addmod(num1, num2, FIELD_SIZE), mulmod(z1, z2, FIELD_SIZE)); - } - - // Returns x1/z1*x2/z2=(x1x2)/(z1z2), in projective coordinates on P¹(𝔽ₙ) - function projectiveMul(uint256 x1, uint256 z1, uint256 x2, uint256 z2) - public pure returns(uint256 x3, uint256 z3) { - (x3, z3) = (mulmod(x1, x2, FIELD_SIZE), mulmod(z1, z2, FIELD_SIZE)); - } - - // Returns x1/z1/(x2/z2)=(x1z2)/(x2z1), in projective coordinates on P¹(𝔽ₙ) - function projectiveDiv(uint256 x1, uint256 z1, uint256 x2, uint256 z2) - external pure returns(uint256 x3, uint256 z3) { - (x3, z3) = (mulmod(x1, z2, FIELD_SIZE), mulmod(z1, x2, FIELD_SIZE)); - } - - 
/** ************************************************************************** - @notice Computes elliptic-curve sum, in projective co-ordinates - - @dev Using projective coordinates avoids costly divisions - - @dev To use this with x and y in affine coordinates, compute - projectiveECAdd(x[0], x[1], 1, y[0], y[1], 1) - - @dev This can be used to calculate the z which is the inverse to zInv in - isValidVRFOutput. But consider using a faster re-implementation. - - @dev This function assumes [x1,y1,z1],[x2,y2,z2] are valid projective - coordinates of secp256k1 points. That is safe in this contract, - because this method is only used by linearCombination, which checks - points are on the curve via ecrecover, and ensures valid projective - coordinates by passing z1=z2=1. - ************************************************************************** - @param x1 The first affine coordinate of the first summand - @param y1 The second affine coordinate of the first summand - @param x2 The first affine coordinate of the second summand - @param y2 The second affine coordinate of the second summand - ************************************************************************** - @return [x1,y1,z1]+[x2,y2,z2] as points on secp256k1, in P²(𝔽ₙ) - */ - function projectiveECAdd(uint256 x1, uint256 y1, uint256 x2, uint256 y2) - public pure returns(uint256 x3, uint256 y3, uint256 z3) { - // See "Group law for E/K : y^2 = x^3 + ax + b", in section 3.1.2, p. 80, - // "Guide to Elliptic Curve Cryptography" by Hankerson, Menezes and Vanstone - // We take the equations there for (x3,y3), and homogenize them to - // projective coordinates. That way, no inverses are required, here, and we - // only need the one inverse in affineECAdd. - - // We only need the "point addition" equations from Hankerson et al. Can - // skip the "point doubling" equations because p1 == p2 is cryptographically - // impossible, and require'd not to be the case in linearCombination. 
- - // Add extra "projective coordinate" to the two points - (uint256 z1, uint256 z2) = (1, 1); - - // (lx, lz) = (y2-y1)/(x2-x1), i.e., gradient of secant line. - uint256 lx = addmod(y2, FIELD_SIZE - y1, FIELD_SIZE); - uint256 lz = addmod(x2, FIELD_SIZE - x1, FIELD_SIZE); - - uint256 dx; // Accumulates denominator from x3 calculation - // x3=((y2-y1)/(x2-x1))^2-x1-x2 - (x3, dx) = projectiveMul(lx, lz, lx, lz); // ((y2-y1)/(x2-x1))^2 - (x3, dx) = projectiveSub(x3, dx, x1, z1); // ((y2-y1)/(x2-x1))^2-x1 - (x3, dx) = projectiveSub(x3, dx, x2, z2); // ((y2-y1)/(x2-x1))^2-x1-x2 - - uint256 dy; // Accumulates denominator from y3 calculation - // y3=((y2-y1)/(x2-x1))(x1-x3)-y1 - (y3, dy) = projectiveSub(x1, z1, x3, dx); // x1-x3 - (y3, dy) = projectiveMul(y3, dy, lx, lz); // ((y2-y1)/(x2-x1))(x1-x3) - (y3, dy) = projectiveSub(y3, dy, y1, z1); // ((y2-y1)/(x2-x1))(x1-x3)-y1 - - if (dx != dy) { // Cross-multiply to put everything over a common denominator - x3 = mulmod(x3, dy, FIELD_SIZE); - y3 = mulmod(y3, dx, FIELD_SIZE); - z3 = mulmod(dx, dy, FIELD_SIZE); - } else { - z3 = dx; - } - } - - // Returns p1+p2, as affine points on secp256k1. invZ must be the inverse of - // the z returned by projectiveECAdd(p1, p2). It is computed off-chain to - // save gas. - function affineECAdd( - uint256[2] memory p1, uint256[2] memory p2, - uint256 invZ) public pure returns (uint256[2] memory) { - uint256 x; - uint256 y; - uint256 z; - (x, y, z) = projectiveECAdd(p1[0], p1[1], p2[0], p2[1]); - require(mulmod(z, invZ, FIELD_SIZE) == 1, "_invZ must be inverse of z"); - // Clear the z ordinate of the projective representation by dividing through - // by it, to obtain the affine representation - return [mulmod(x, invZ, FIELD_SIZE), mulmod(y, invZ, FIELD_SIZE)]; - } - - // Returns true iff address(c*p+s*g) == lcWitness, where g is generator. 
- function verifyLinearCombinationWithGenerator( - uint256 c, uint256[2] memory p, uint256 s, address lcWitness) - public pure returns (bool) { - // ecrecover returns 0x0 in certain failure modes. Ensure witness differs. - require(lcWitness != address(0), "bad witness"); - // https://ethresear.ch/t/you-can-kinda-abuse-ecrecover-to-do-ecmul-in-secp256k1-today/2384/9 - // The point corresponding to the address returned by - // ecrecover(-s*p[0],v,_p[0],_c*p[0]) is - // (p[0]⁻¹ mod GROUP_ORDER)*(c*p[0]-(-s)*p[0]*g)=_c*p+s*g, where v - // is the parity of p[1]. See https://crypto.stackexchange.com/a/18106 - bytes32 pseudoHash = bytes32(GROUP_ORDER - mulmod(p[0], s, GROUP_ORDER)); - // https://bitcoin.stackexchange.com/questions/38351/ecdsa-v-r-s-what-is-v - uint8 v = (p[1] % 2 == 0) ? 27 : 28; - bytes32 pseudoSignature = bytes32(mulmod(c, p[0], GROUP_ORDER)); - address computed = ecrecover(pseudoHash, v, bytes32(p[0]), pseudoSignature); - return computed == lcWitness; - } - - // c*p1 + s*p2 - function linearCombination( - uint256 c, uint256[2] memory p1, uint256[2] memory cp1Witness, - uint256 s, uint256[2] memory p2, uint256[2] memory sp2Witness, - uint256 zInv) - public pure returns (uint256[2] memory) { - require(cp1Witness[0] != sp2Witness[0], "points must differ in sum"); - require(ecmulVerify(p1, c, cp1Witness), "First multiplication check failed"); - require(ecmulVerify(p2, s, sp2Witness), "Second multiplication check failed"); - return affineECAdd(cp1Witness, sp2Witness, zInv); - } - - // Pseudo-random number from inputs. Corresponds to vrf.go/scalarFromCurve. - function scalarFromCurve( - uint256[2] memory hash, uint256[2] memory pk, uint256[2] memory gamma, - address uWitness, uint256[2] memory v) - public pure returns (uint256 s) { - bytes32 iHash = keccak256(abi.encodePacked(hash, pk, gamma, v, uWitness)); - return zqHash(GROUP_ORDER, uint256(iHash)); - } - - // True if (gamma, c, s) is a correctly constructed randomness proof from pk - // and seed. 
zInv must be the inverse of the third ordinate from - // projectiveECAdd applied to cGammaWitness and sHashWitness - function verifyVRFProof( - uint256[2] memory pk, uint256[2] memory gamma, uint256 c, uint256 s, - uint256 seed, address uWitness, uint256[2] memory cGammaWitness, - uint256[2] memory sHashWitness, uint256 zInv) - public view returns (bool) { - // NB: Curve operations already check that (pkX, pkY), (gammaX, gammaY) - // are valid curve points. No need to do that explicitly. - require( - verifyLinearCombinationWithGenerator(c, pk, s, uWitness), - "Could not verify that address(c*pk+s*generator)=_uWitness"); - uint256[2] memory hash = hashToCurve(pk, seed); - uint256[2] memory v = linearCombination( - c, gamma, cGammaWitness, s, hash, sHashWitness, zInv); - return (c == scalarFromCurve(hash, pk, gamma, uWitness, v)); - } - - /** ************************************************************************** - @notice isValidVRFOutput returns true iff the proof can be verified as - showing that output was generated as mandated. - - @dev See the invocation of verifyVRFProof in VRF.js, for an example. - ************************************************************************** - @dev Let x be the secret key associated with the public key pk - - @param pk Affine coordinates of the secp256k1 public key for this VRF - @param gamma Intermediate output of the VRF as an affine secp256k1 point - @param c The challenge value for proof that gamma = x*hashToCurve(seed) - See the variable c on p. 28 of - https://www.cs.bu.edu/~goldbe/papers/VRF_ietf99_print.pdf - @param s The response value for the proof. See s on p. 
28 - @param seed The input seed from which the VRF output is computed - @param uWitness The ethereum address of c*pk + s*, in - elliptic-curve arithmetic - @param cGammaWitness c*gamma on the elliptic-curve - @param sHashWitness s*hashToCurve(seed) on the elliptic-curve - @param zInv Inverse of the third ordinate of the return value from - projectiveECAdd(c*gamma, s*hashToCurve(seed)). Passed in here - to save gas, because computing modular inverses is expensive in the - EVM. - @param output The actual output of the VRF. - ************************************************************************** - @return True iff all the above parameters are correct - */ - function isValidVRFOutput( - uint256[2] calldata pk, uint256[2] calldata gamma, uint256 c, uint256 s, - uint256 seed, address uWitness, uint256[2] calldata cGammaWitness, - uint256[2] calldata sHashWitness, uint256 zInv, uint256 output) - external view returns (bool) { - return verifyVRFProof( - pk, gamma, c, s, seed, uWitness, cGammaWitness, sHashWitness, - zInv) && - (uint256(keccak256(abi.encodePacked(gamma))) == output); - } -} diff --git a/evm-contracts/src/v0.6/VRF.sol b/evm-contracts/src/v0.6/VRF.sol new file mode 100644 index 00000000000..ac906389c29 --- /dev/null +++ b/evm-contracts/src/v0.6/VRF.sol @@ -0,0 +1,532 @@ +pragma solidity 0.6.2; + +/** **************************************************************************** + * @notice Verification of verifiable-random-function (VRF) proofs, following + * @notice https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.3 + * @notice See https://eprint.iacr.org/2017/099.pdf for security proofs. 
+ + * @dev Bibliographic references: + + * @dev Goldberg, et al., "Verifiable Random Functions (VRFs)", Internet Draft + * @dev draft-irtf-cfrg-vrf-05, IETF, Aug 11 2019, + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05 + + * @dev Papadopoulos, et al., "Making NSEC5 Practical for DNSSEC", Cryptology + * @dev ePrint Archive, Report 2017/099, https://eprint.iacr.org/2017/099.pdf + * **************************************************************************** + * @dev USAGE + + * @dev The main entry point is randomValueFromVRFProof. See its docstring. + * **************************************************************************** + * @dev PURPOSE + + * @dev Reggie the Random Oracle (not his real job) wants to provide randomness + * @dev to Vera the verifier in such a way that Vera can be sure he's not + * @dev making his output up to suit himself. Reggie provides Vera a public key + * @dev to which he knows the secret key. Each time Vera provides a seed to + * @dev Reggie, he gives back a value which is computed completely + * @dev deterministically from the seed and the secret key. + + * @dev Reggie provides a proof by which Vera can verify that the output was + * @dev correctly computed once Reggie tells it to her, but without that proof, + * @dev the output is computationally indistinguishable to her from a uniform + * @dev random sample from the output space. + + * @dev The purpose of this contract is to perform that verification. + * **************************************************************************** + * @dev DESIGN NOTES + + * @dev The VRF algorithm verified here satisfies the full unqiqueness, full + * @dev collision resistance, and full pseudorandomness security properties. 
+ * @dev See "SECURITY PROPERTIES" below, and + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-3 + + * @dev An elliptic curve point is generally represented in the solidity code + * @dev as a uint256[2], corresponding to its affine coordinates in + * @dev GF(FIELD_SIZE). + + * @dev For the sake of efficiency, this implementation deviates from the spec + * @dev in some minor ways: + + * @dev - Keccak hash rather than the SHA256 hash recommended in + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.5 + * @dev Keccak costs much less gas on the EVM, and provides similar security. + + * @dev - Secp256k1 curve instead of the P-256 or ED25519 curves recommended in + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.5 + * @dev For curve-point multiplication, it's much cheaper to abuse ECRECOVER + + * @dev - hashToCurve recursively hashes until it finds a curve x-ordinate. On + * @dev the EVM, this is slightly more efficient than the recommendation in + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.4.1.1 + * @dev step 5, to concatenate with a nonce then hash, and rehash with the + * @dev nonce updated until a valid x-ordinate is found. + + * @dev - hashToCurve does not include a cipher version string or the byte 0x1 + * @dev in the hash message, as recommended in step 5.B of the draft + * @dev standard. They are unnecessary here because no variation in the + * @dev cipher suite is allowed. + + * @dev - Similarly, the hash input in scalarFromCurvePoints does not include a + * @dev commitment to the cipher suite, either, which differs from step 2 of + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.4.3 + * @dev . Also, the hash input is the concatenation of the uncompressed + * @dev points, not the compressed points as recommended in step 3. + + * @dev - In the calculation of the challenge value "c", the "u" value (i.e. 
+ * @dev the value computed by Reggie as the nonce times the secp256k1 + * @dev generator point, see steps 5 and 7 of + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.3 + * @dev ) is replaced by its ethereum address, i.e. the lower 160 bits of the + * @dev keccak hash of the original u. This is because we only verify the + * @dev calculation of u up to its address, by abusing ECRECOVER. + * **************************************************************************** + * @dev SECURITY PROPERTIES + + * @dev Here are the security properties for this VRF: + + * @dev Full uniqueness: For any seed and valid VRF public key, there is + * @dev exactly one VRF output which can be proved to come from that seed, in + * @dev the sense that the proof will pass verifyVRFProof. + + * @dev Full collision resistance: It's cryptographically infeasible to find + * @dev two seeds with same VRF output from a fixed, valid VRF key + + * @dev Full pseudorandomness: Absent the proofs that the VRF outputs are + * @dev derived from a given seed, the outputs are computationally + * @dev indistinguishable from randomness. + + * @dev https://eprint.iacr.org/2017/099.pdf, Appendix B contains the proofs + * @dev for these properties. + + * @dev For secp256k1, the key validation described in section + * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.6 + * @dev is unnecessary, because secp256k1 has cofactor 1, and the + * @dev representation of the public key used here (affine x- and y-ordinates + * @dev of the secp256k1 point on the standard y^2=x^3+7 curve) cannot refer to + * @dev the point at infinity. 
+ * **************************************************************************** + * @dev OTHER SECURITY CONSIDERATIONS + * + * @dev The seed input to the VRF could in principle force an arbitrary amount + * @dev of work in hashToCurve, by requiring extra rounds of hashing and + * @dev checking whether that's yielded the x ordinate of a secp256k1 point. + * @dev However, under the Random Oracle Model the probability of choosing a + * @dev point which forces n extra rounds in hashToCurve is 2⁻ⁿ. The base cost + * @dev for calling hashToCurve is about 25,000 gas, and each round of checking + * @dev for a valid x ordinate costs about 15,555 gas, so to find a seed for + * @dev which hashToCurve would cost more than 2,017,000 gas, one would have to + * @dev try, in expectation, about 2¹²⁸ seeds, which is infeasible for any + * @dev foreseeable computational resources. (25,000 + 128 * 15,555 < 2,017,000.) + + * @dev Since the gas block limit for the Ethereum main net is 10,000,000 gas, + * @dev this means it is infeasible for an adversary to prevent correct + * @dev operation of this contract by choosing an adverse seed. + + * @dev (See TestMeasureHashToCurveGasCost for verification of the gas cost for + * @dev hashToCurve.) + + * @dev It may be possible to make a secure constant-time hashToCurve function. + * @dev See notes in hashToCurve docstring. +*/ +contract VRF { + + // See https://www.secg.org/sec2-v2.pdf, section 2.4.1, for these constants. 
+ uint256 constant private GROUP_ORDER = // Number of points in Secp256k1 + // solium-disable-next-line indentation + 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; + // Prime characteristic of the galois field over which Secp256k1 is defined + uint256 constant private FIELD_SIZE = + // solium-disable-next-line indentation + 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F; + uint256 constant private WORD_LENGTH_BYTES = 0x20; + + // (base^exponent) % FIELD_SIZE + // Cribbed from https://medium.com/@rbkhmrcr/precompiles-solidity-e5d29bd428c4 + function bigModExp(uint256 base, uint256 exponent) + internal view returns (uint256 exponentiation) { + uint256 callResult; + uint256[6] memory bigModExpContractInputs; + bigModExpContractInputs[0] = WORD_LENGTH_BYTES; // Length of base + bigModExpContractInputs[1] = WORD_LENGTH_BYTES; // Length of exponent + bigModExpContractInputs[2] = WORD_LENGTH_BYTES; // Length of modulus + bigModExpContractInputs[3] = base; + bigModExpContractInputs[4] = exponent; + bigModExpContractInputs[5] = FIELD_SIZE; + uint256[1] memory output; + assembly { // solhint-disable-line no-inline-assembly + callResult := staticcall( + not(0), // Gas cost: no limit + 0x05, // Bigmodexp contract address + bigModExpContractInputs, + 0xc0, // Length of input segment: 6*0x20-bytes + output, + 0x20 // Length of output segment + ) + } + if (callResult == 0) {revert("bigModExp failure!");} + return output[0]; + } + + // Let q=FIELD_SIZE. q % 4 = 3, ∴ x≡r^2 mod q ⇒ x^SQRT_POWER≡±r mod q. See + // https://en.wikipedia.org/wiki/Modular_square_root#Prime_or_prime_power_modulus + uint256 constant private SQRT_POWER = (FIELD_SIZE + 1) >> 2; + + // Computes a s.t. a^2 = x in the field. Assumes a exists + function squareRoot(uint256 x) internal view returns (uint256) { + return bigModExp(x, SQRT_POWER); + } + + // The value of y^2 given that (x,y) is on secp256k1. 
+ function ySquared(uint256 x) internal pure returns (uint256) { + // Curve is y^2=x^3+7. See section 2.4.1 of https://www.secg.org/sec2-v2.pdf + uint256 xCubed = mulmod(x, mulmod(x, x, FIELD_SIZE), FIELD_SIZE); + return addmod(xCubed, 7, FIELD_SIZE); + } + + // True iff p is on secp256k1 + function isOnCurve(uint256[2] memory p) internal pure returns (bool) { + return ySquared(p[0]) == mulmod(p[1], p[1], FIELD_SIZE); + } + + // Hash x uniformly into {0, ..., FIELD_SIZE-1}. + function fieldHash(bytes memory b) internal pure returns (uint256 x_) { + x_ = uint256(keccak256(b)); + // Rejecting if x >= FIELD_SIZE corresponds to step 2.1 in section 2.3.4 of + // http://www.secg.org/sec1-v2.pdf , which is part of the definition of + // string_to_point in the IETF draft + while (x_ >= FIELD_SIZE) { + x_ = uint256(keccak256(abi.encodePacked(x_))); + } + } + + // Hash b to a random point which hopefully lies on secp256k1. The y ordinate + // is always even, due to + // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.4.1.1 + // step 5.C, which references arbitrary_string_to_point, defined in + // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.5 as + // returning the point with given x ordinate, and even y ordinate. + function newCandidateSecp256k1Point(bytes memory b) + internal view returns (uint256[2] memory p) { + p[0] = fieldHash(b); + p[1] = squareRoot(ySquared(p[0])); + if (p[1] % 2 == 1) { + p[1] = FIELD_SIZE - p[1]; + } + } + + // Domain-separation tag for initial hash in hashToCurve. Corresponds to + // vrf.go/hashToCurveHashPrefix + uint256 constant HASH_TO_CURVE_HASH_PREFIX = 1; + + // Cryptographic hash function onto the curve. + // + // Corresponds to algorithm in section 5.4.1.1 of the draft standard. (But see + // DESIGN NOTES above for slight differences.) 
+ // + // TODO(alx): Implement a bounded-computation hash-to-curve, as described in + // "Construction of Rational Points on Elliptic Curves over Finite Fields" + // http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.831.5299&rep=rep1&type=pdf + // and suggested by + // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-01#section-5.2.2 + // (Though we can't use exactly that because secp256k1's j-invariant is 0.) + // + // This would greatly simplify the analysis in "OTHER SECURITY CONSIDERATIONS" + // https://www.pivotaltracker.com/story/show/171120900 + function hashToCurve(uint256[2] memory pk, uint256 input) + internal view returns (uint256[2] memory rv) { + rv = newCandidateSecp256k1Point(abi.encodePacked(HASH_TO_CURVE_HASH_PREFIX, + pk, input)); + while (!isOnCurve(rv)) { + rv = newCandidateSecp256k1Point(abi.encodePacked(rv[0])); + } + } + + /** ********************************************************************* + * @notice Check that product==scalar*multiplicand + * + * @dev Based on Vitalik Buterin's idea in ethresear.ch post cited below. + * + * @param multiplicand: secp256k1 point + * @param scalar: non-zero GF(GROUP_ORDER) scalar + * @param product: secp256k1 expected to be scalar * multiplicand + * @return verifies true iff product==scalar*multiplicand, with cryptographically high probability + */ + function ecmulVerify(uint256[2] memory multiplicand, uint256 scalar, + uint256[2] memory product) internal pure returns(bool verifies) + { + require(scalar != 0); // Rules out an ecrecover failure case + uint256 x = multiplicand[0]; // x ordinate of multiplicand + uint8 v = multiplicand[1] % 2 == 0 ? 27 : 28; // parity of y ordinate + // https://ethresear.ch/t/you-can-kinda-abuse-ecrecover-to-do-ecmul-in-secp256k1-today/2384/9 + // Point corresponding to address ecrecover(0, v, x, s=scalar*x) is + // (x⁻¹ mod GROUP_ORDER) * (scalar * x * multiplicand - 0 * g), i.e. + // scalar*multiplicand. 
See https://crypto.stackexchange.com/a/18106 + bytes32 scalarTimesX = bytes32(mulmod(scalar, x, GROUP_ORDER)); + address actual = ecrecover(bytes32(0), v, bytes32(x), scalarTimesX); + // Explicit conversion to address takes bottom 160 bits + address expected = address(uint256(keccak256(abi.encodePacked(product)))); + return (actual == expected); + } + + // Returns x1/z1-x2/z2=(x1z2-x2z1)/(z1z2) in projective coordinates on P¹(𝔽ₙ) + function projectiveSub(uint256 x1, uint256 z1, uint256 x2, uint256 z2) + internal pure returns(uint256 x3, uint256 z3) { + uint256 num1 = mulmod(z2, x1, FIELD_SIZE); + uint256 num2 = mulmod(FIELD_SIZE - x2, z1, FIELD_SIZE); + (x3, z3) = (addmod(num1, num2, FIELD_SIZE), mulmod(z1, z2, FIELD_SIZE)); + } + + // Returns x1/z1*x2/z2=(x1x2)/(z1z2), in projective coordinates on P¹(𝔽ₙ) + function projectiveMul(uint256 x1, uint256 z1, uint256 x2, uint256 z2) + internal pure returns(uint256 x3, uint256 z3) { + (x3, z3) = (mulmod(x1, x2, FIELD_SIZE), mulmod(z1, z2, FIELD_SIZE)); + } + + /** ************************************************************************** + @notice Computes elliptic-curve sum, in projective co-ordinates + + @dev Using projective coordinates avoids costly divisions + + @dev To use this with p and q in affine coordinates, call + @dev projectiveECAdd(px, py, qx, qy). This will return + @dev the addition of (px, py, 1) and (qx, qy, 1), in the + @dev secp256k1 group. + + @dev This can be used to calculate the z which is the inverse to zInv + @dev in isValidVRFOutput. But consider using a faster + @dev re-implementation such as ProjectiveECAdd in the golang vrf package. + + @dev This function assumes [px,py,1],[qx,qy,1] are valid projective + coordinates of secp256k1 points. That is safe in this contract, + because this method is only used by linearCombination, which checks + points are on the curve via ecrecover. 
+ ************************************************************************** + @param px The first affine coordinate of the first summand + @param py The second affine coordinate of the first summand + @param qx The first affine coordinate of the second summand + @param qy The second affine coordinate of the second summand + + (px,py) and (qx,qy) must be distinct, valid secp256k1 points. + ************************************************************************** + Return values are projective coordinates of [px,py,1]+[qx,qy,1] as points + on secp256k1, in P²(𝔽ₙ) + @return sx + @return sy + @return sz + */ + function projectiveECAdd(uint256 px, uint256 py, uint256 qx, uint256 qy) + internal pure returns(uint256 sx, uint256 sy, uint256 sz) { + // See "Group law for E/K : y^2 = x^3 + ax + b", in section 3.1.2, p. 80, + // "Guide to Elliptic Curve Cryptography" by Hankerson, Menezes and Vanstone + // We take the equations there for (sx,sy), and homogenize them to + // projective coordinates. That way, no inverses are required, here, and we + // only need the one inverse in affineECAdd. + + // We only need the "point addition" equations from Hankerson et al. Can + // skip the "point doubling" equations because p1 == p2 is cryptographically + // impossible, and require'd not to be the case in linearCombination. + + // Add extra "projective coordinate" to the two points + (uint256 z1, uint256 z2) = (1, 1); + + // (lx, lz) = (qy-py)/(qx-px), i.e., gradient of secant line. 
+ uint256 lx = addmod(qy, FIELD_SIZE - py, FIELD_SIZE); + uint256 lz = addmod(qx, FIELD_SIZE - px, FIELD_SIZE); + + uint256 dx; // Accumulates denominator from sx calculation + // sx=((qy-py)/(qx-px))^2-px-qx + (sx, dx) = projectiveMul(lx, lz, lx, lz); // ((qy-py)/(qx-px))^2 + (sx, dx) = projectiveSub(sx, dx, px, z1); // ((qy-py)/(qx-px))^2-px + (sx, dx) = projectiveSub(sx, dx, qx, z2); // ((qy-py)/(qx-px))^2-px-qx + + uint256 dy; // Accumulates denominator from sy calculation + // sy=((qy-py)/(qx-px))(px-sx)-py + (sy, dy) = projectiveSub(px, z1, sx, dx); // px-sx + (sy, dy) = projectiveMul(sy, dy, lx, lz); // ((qy-py)/(qx-px))(px-sx) + (sy, dy) = projectiveSub(sy, dy, py, z1); // ((qy-py)/(qx-px))(px-sx)-py + + if (dx != dy) { // Cross-multiply to put everything over a common denominator + sx = mulmod(sx, dy, FIELD_SIZE); + sy = mulmod(sy, dx, FIELD_SIZE); + sz = mulmod(dx, dy, FIELD_SIZE); + } else { // Already over a common denominator, use that for z ordinate + sz = dx; + } + } + + // p1+p2, as affine points on secp256k1. + // + // invZ must be the inverse of the z returned by projectiveECAdd(p1, p2). + // It is computed off-chain to save gas. + // + // p1 and p2 must be distinct, because projectiveECAdd doesn't handle + // point doubling. + function affineECAdd( + uint256[2] memory p1, uint256[2] memory p2, + uint256 invZ) internal pure returns (uint256[2] memory) { + uint256 x; + uint256 y; + uint256 z; + (x, y, z) = projectiveECAdd(p1[0], p1[1], p2[0], p2[1]); + require(mulmod(z, invZ, FIELD_SIZE) == 1, "invZ must be inverse of z"); + // Clear the z ordinate of the projective representation by dividing through + // by it, to obtain the affine representation + return [mulmod(x, invZ, FIELD_SIZE), mulmod(y, invZ, FIELD_SIZE)]; + } + + // True iff address(c*p+s*g) == lcWitness, where g is generator. (With + // cryptographically high probability.) 
+ function verifyLinearCombinationWithGenerator( + uint256 c, uint256[2] memory p, uint256 s, address lcWitness) + internal pure returns (bool) { + // Rule out ecrecover failure modes which return address 0. + require(lcWitness != address(0), "bad witness"); + uint8 v = (p[1] % 2 == 0) ? 27 : 28; // parity of y-ordinate of p + bytes32 pseudoHash = bytes32(GROUP_ORDER - mulmod(p[0], s, GROUP_ORDER)); // -s*p[0] + bytes32 pseudoSignature = bytes32(mulmod(c, p[0], GROUP_ORDER)); // c*p[0] + // https://ethresear.ch/t/you-can-kinda-abuse-ecrecover-to-do-ecmul-in-secp256k1-today/2384/9 + // The point corresponding to the address returned by + // ecrecover(-s*p[0],v,p[0],c*p[0]) is + // (p[0]⁻¹ mod GROUP_ORDER)*(c*p[0]-(-s)*p[0]*g)=c*p+s*g. + // See https://crypto.stackexchange.com/a/18106 + // https://bitcoin.stackexchange.com/questions/38351/ecdsa-v-r-s-what-is-v + address computed = ecrecover(pseudoHash, v, bytes32(p[0]), pseudoSignature); + return computed == lcWitness; + } + + // c*p1 + s*p2. Requires cp1Witness=c*p1 and sp2Witness=s*p2. Also + // requires cp1Witness != sp2Witness (which is fine for this application, + // since it is cryptographically impossible for them to be equal. In the + // (cryptographically impossible) case that a prover accidentally derives + // a proof with equal c*p1 and s*p2, they should retry with a different + // proof nonce.) Assumes that all points are on secp256k1 + // (which is checked in verifyVRFProof below.) 
+ function linearCombination( + uint256 c, uint256[2] memory p1, uint256[2] memory cp1Witness, + uint256 s, uint256[2] memory p2, uint256[2] memory sp2Witness, + uint256 zInv) + internal pure returns (uint256[2] memory) { + require((cp1Witness[0] - sp2Witness[0]) % FIELD_SIZE != 0, + "points in sum must be distinct"); + require(ecmulVerify(p1, c, cp1Witness), "First multiplication check failed"); + require(ecmulVerify(p2, s, sp2Witness), "Second multiplication check failed"); + return affineECAdd(cp1Witness, sp2Witness, zInv); + } + + // Domain-separation tag for the hash taken in scalarFromCurvePoints. + // Corresponds to scalarFromCurveHashPrefix in vrf.go + uint256 constant SCALAR_FROM_CURVE_POINTS_HASH_PREFIX = 2; + + // Pseudo-random number from inputs. Matches vrf.go/scalarFromCurvePoints, and + // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.4.3 + // The draft calls (in step 7, via the definition of string_to_int, in + // https://datatracker.ietf.org/doc/html/rfc8017#section-4.2 ) for taking the + // first hash without checking that it corresponds to a number less than the + // group order, which will lead to a slight bias in the sample. + // + // TODO(alx): We could save a bit of gas by following the standard here and + // using the compressed representation of the points, if we collated the y + // parities into a single bytes32. + // https://www.pivotaltracker.com/story/show/171120588 + function scalarFromCurvePoints( + uint256[2] memory hash, uint256[2] memory pk, uint256[2] memory gamma, + address uWitness, uint256[2] memory v) + internal pure returns (uint256 s) { + return uint256( + keccak256(abi.encodePacked(SCALAR_FROM_CURVE_POINTS_HASH_PREFIX, + hash, pk, gamma, v, uWitness))); + } + + // True if (gamma, c, s) is a correctly constructed randomness proof from pk + // and seed. zInv must be the inverse of the third ordinate from + // projectiveECAdd applied to cGammaWitness and sHashWitness. 
Corresponds to + // section 5.3 of the IETF draft. + // + // TODO(alx): Since I'm only using pk in the ecrecover call, I could only pass + // the x ordinate, and the parity of the y ordinate in the top bit of uWitness + // (which I could make a uint256 without using any extra space.) Would save + // about 2000 gas. https://www.pivotaltracker.com/story/show/170828567 + function verifyVRFProof( + uint256[2] memory pk, uint256[2] memory gamma, uint256 c, uint256 s, + uint256 seed, address uWitness, uint256[2] memory cGammaWitness, + uint256[2] memory sHashWitness, uint256 zInv) + internal view { + require(isOnCurve(pk), "public key is not on curve"); + require(isOnCurve(gamma), "gamma is not on curve"); + require(isOnCurve(cGammaWitness), "cGammaWitness is not on curve"); + require(isOnCurve(sHashWitness), "sHashWitness is not on curve"); + // Step 5. of IETF draft section 5.3 (pk corresponds to 5.3's Y, and here + // we use the address of u instead of u itself. Also, here we add the + // terms instead of taking the difference, and in the proof construction in + // vrf.GenerateProof, we correspondingly take the difference instead of + // taking the sum as they do in step 7 of section 5.1.) + require( + verifyLinearCombinationWithGenerator(c, pk, s, uWitness), + "addr(c*pk+s*g)≠_uWitness" + ); + // Step 4. of IETF draft section 5.3 (pk corresponds to Y, seed to alpha_string) + uint256[2] memory hash = hashToCurve(pk, seed); + // Step 6. of IETF draft section 5.3, but see note for step 5 about +/- terms + uint256[2] memory v = linearCombination( + c, gamma, cGammaWitness, s, hash, sHashWitness, zInv); + // Steps 7. and 8. of IETF draft section 5.3 + uint256 derivedC = scalarFromCurvePoints(hash, pk, gamma, uWitness, v); + require(c == derivedC, "invalid proof"); + } + + // Domain-separation tag for the hash used as the final VRF output. 
+ // Corresponds to vrfRandomOutputHashPrefix in vrf.go + uint256 constant VRF_RANDOM_OUTPUT_HASH_PREFIX = 3; + + // Length of proof marshaled to bytes array. Shows layout of proof + uint private constant PROOF_LENGTH = 64 + // PublicKey (uncompressed format.) + 64 + // Gamma + 32 + // C + 32 + // S + 32 + // Seed + 0 + // Dummy entry: The following elements are included for gas efficiency: + 32 + // uWitness (gets padded to 256 bits, even though it's only 160) + 64 + // cGammaWitness + 64 + // sHashWitness + 32; // zInv (Leave Output out, because that can be efficiently calculated) + + /* *************************************************************************** + * @notice Returns proof's output, if proof is valid. Otherwise reverts + + * @param proof A binary-encoded proof, as output by vrf.Proof.MarshalForSolidityVerifier + * + * Throws if proof is invalid, otherwise: + * @return output i.e., the random output implied by the proof + * *************************************************************************** + * @dev See the calculation of PROOF_LENGTH for the binary layout of proof. 
+ */ + function randomValueFromVRFProof(bytes memory proof) + internal view returns (uint256 output) { + require(proof.length == PROOF_LENGTH, "wrong proof length"); + + uint256[2] memory pk; // parse proof contents into these variables + uint256[2] memory gamma; + // c, s and seed combined (prevents "stack too deep" compilation error) + uint256[3] memory cSSeed; + address uWitness; + uint256[2] memory cGammaWitness; + uint256[2] memory sHashWitness; + uint256 zInv; + (pk, gamma, cSSeed, uWitness, cGammaWitness, sHashWitness, zInv) = abi.decode( + proof, (uint256[2], uint256[2], uint256[3], address, uint256[2], + uint256[2], uint256)); + verifyVRFProof( + pk, + gamma, + cSSeed[0], // c + cSSeed[1], // s + cSSeed[2], // seed + uWitness, + cGammaWitness, + sHashWitness, + zInv + ); + output = uint256(keccak256(abi.encode(VRF_RANDOM_OUTPUT_HASH_PREFIX, gamma))); + } +} diff --git a/evm-contracts/src/v0.6/VRFConsumerBase.sol b/evm-contracts/src/v0.6/VRFConsumerBase.sol new file mode 100644 index 00000000000..862cd7f6a58 --- /dev/null +++ b/evm-contracts/src/v0.6/VRFConsumerBase.sol @@ -0,0 +1,149 @@ +pragma solidity 0.6.2; + +import "./vendor/SafeMath.sol"; + +import "./interfaces/LinkTokenInterface.sol"; + +import "./VRFRequestIDBase.sol"; + +/** **************************************************************************** + * @notice Interface for contracts using VRF randomness + * ***************************************************************************** + * @dev PURPOSE + * + * @dev Reggie the Random Oracle (not his real job) wants to provide randomness + * @dev to Vera the verifier in such a way that Vera can be sure he's not + * @dev making his output up to suit himself. Reggie provides Vera a public key + * @dev to which he knows the secret key. Each time Vera provides a seed to + * @dev Reggie, he gives back a value which is computed completely + * @dev deterministically from the seed and the secret key. 
+ * + * @dev Reggie provides a proof by which Vera can verify that the output was + * @dev correctly computed once Reggie tells it to her, but without that proof, + * @dev the output is indistinguishable to her from a uniform random sample + * @dev from the output space. + * + * @dev The purpose of this contract is to make it easy for unrelated contracts + * @dev to talk to Vera the verifier about the work Reggie is doing, to provide + * @dev simple access to a verifiable source of randomness. + * ***************************************************************************** + * @dev USAGE + * + * @dev Calling contracts must inherit from VRFConsumerInterface, and can + * @dev initialize VRFConsumerInterface's attributes in their constructor as + * @dev shown: + * + * @dev contract VRFConsumer { + * @dev constructor(, address _vrfCoordinator, address _link) + * @dev VRFConsumerBase(_vrfCoordinator, _link) public { + * @dev + * @dev } + * @dev } + * + * @dev The oracle will have given you an ID for the VRF keypair they have + * @dev committed to (let's call it keyHash), and have told you the minimum LINK + * @dev price for VRF service. Make sure your contract has sufficient LINK, and + * @dev call requestRandomness(keyHash, fee, seed), where seed is the input you + * @dev want to generate randomness from. + * + * @dev Once the VRFCoordinator has received and validated the oracle's response + * @dev to your request, it will call your contract's fulfillRandomness method. + * + * @dev The randomness argument to fulfillRandomness is the actual random value + * @dev generated from your seed. + * + * @dev The requestId argument is generated from the keyHash and the seed by + * @dev makeRequestId(keyHash, seed). If your contract could have concurrent + * @dev requests open, you can use the requestId to track which seed is + * @dev associated with which randomness. See VRFRequestIDBase.sol for more + * @dev details. 
+ * + * @dev Colliding `requestId`s are cryptographically impossible as long as seeds + * @dev differ. (Which is critical to making unpredictable randomness! See the + * @dev next section.) + * + * ***************************************************************************** + * @dev SECURITY CONSIDERATIONS + * + * @dev To increase trust in your contract, the source of your seeds should be + * @dev hard for anyone to influence or predict. Any party who can influence + * @dev them could in principle collude with the oracle (who can instantly + * @dev compute the VRF output for any given seed) to bias the outcomes from + * @dev your contract in their favor. For instance, the block hash is a natural + * @dev choice of seed for many applications, but miners in control of a + * @dev substantial fraction of hashing power and with access to VRF outputs + * @dev could check the result of prospective block hashes as they are mined, + * @dev and decide not to publish a block if they don't like the outcome it will + * @dev lead to. + * + * @dev On the other hand, using block hashes as the seed makes it particularly + * @dev easy to estimate the economic cost to a miner for this kind of cheating + * @dev (namely, the block reward and transaction fees they forgo by refraining + * @dev from publishing a block.) + */ +abstract contract VRFConsumerBase is VRFRequestIDBase { + + using SafeMath for uint256; + + /** + * @notice fulfillRandomness handles the VRF response. Your contract must + * @notice implement it. + * + * @dev The VRFCoordinator expects a calling contract to have a method with + * @dev this signature, and will call it once it has verified the proof + * @dev associated with the randomness. 
+ * + * @param requestId The Id initially returned by requestRandomness + * @param randomness the VRF output + */ + function fulfillRandomness(bytes32 requestId, uint256 randomness) + external virtual; + + /** + * @notice requestRandomness initiates a request for VRF output given _seed + * + * @dev The source of the seed data must be something which the oracle + * @dev cannot anticipate. See "SECURITY CONSIDERATIONS" above. + * + * @dev The fulfillRandomness method receives the output, once it's provided + * @dev by the Oracle, and verified by the vrfCoordinator. + * + * @dev The _keyHash must already be registered with the VRFCoordinator, and + * @dev the _fee must exceed the fee specified during registration of the + * @dev _keyHash. + * + * @param _keyHash ID of public key against which randomness is generated + * @param _fee The amount of LINK to send with the request + * @param _seed Random seed from which output randomness is determined + * + * @return requestId which will be returned with the response to this request + * + * @dev The returned requestId can be used to distinguish responses to * + * @dev concurrent requests. It is passed as the first argument to + * @dev fulfillRandomness. 
+ */ + function requestRandomness(bytes32 _keyHash, uint256 _fee, uint256 _seed) + public returns (bytes32 requestId) + { + LINK.transferAndCall(vrfCoordinator, _fee, abi.encode(_keyHash, _seed)); + // This is the seed actually passed to the VRF in VRFCoordinator + uint256 vRFSeed = makeVRFInputSeed(_keyHash, _seed, address(this), nonces[_keyHash]); + // nonces[_keyHash] must stay in sync with + // VRFCoordinator.nonces[_keyHash][this], which was incremented by the above + // successful LINK.transferAndCall (in VRFCoordinator.randomnessRequest) + nonces[_keyHash] = nonces[_keyHash].add(1); + return makeRequestId(_keyHash, vRFSeed); + } + + LinkTokenInterface internal LINK; + address internal vrfCoordinator; + + // Nonces for each VRF key from which randomness has been requested. + // + // Must stay in sync with VRFCoordinator[_keyHash][this] + mapping(bytes32 /* keyHash */ => uint256 /* nonce */) public nonces; + constructor(address _vrfCoordinator, address _link) public { + vrfCoordinator = _vrfCoordinator; + LINK = LinkTokenInterface(_link); + } +} diff --git a/evm-contracts/src/v0.6/VRFCoordinator.sol b/evm-contracts/src/v0.6/VRFCoordinator.sol new file mode 100644 index 00000000000..2a0dbe56511 --- /dev/null +++ b/evm-contracts/src/v0.6/VRFCoordinator.sol @@ -0,0 +1,225 @@ +pragma solidity 0.6.2; + +import "./vendor/SafeMath.sol"; + +import "./interfaces/LinkTokenInterface.sol"; + +import "./VRF.sol"; +import "./VRFRequestIDBase.sol"; +import "./VRFConsumerBase.sol"; + +/** + * @title VRFCoordinator coordinates on-chain verifiable-randomness requests + * @title with off-chain responses + */ +contract VRFCoordinator is VRF, VRFRequestIDBase { + + using SafeMath for uint256; + + LinkTokenInterface internal LINK; + + constructor(address _link) public { + LINK = LinkTokenInterface(_link); + } + + struct Callback { // Tracks an ongoing request + address callbackContract; // Requesting contract, which will receive response + uint256 randomnessFee; // Amount of LINK 
paid at request time + // Seed for the *oracle* to use in generating this random value. It is the + // hash of the seed provided as input during a randomnessRequest, plus the + // address of the contract making the request, plus an increasing nonce + // specific to the VRF proving key and the calling contract. Including this + // extra data in the VRF input seed helps to prevent unauthorized queries + // against a VRF by any party who has prior knowledge of the requester's + // prospective seed. Only the specified contract can make that request. + uint256 seed; + } + + struct ServiceAgreement { // Tracks oracle commitments to VRF service + address vRFOracle; // Oracle committing to respond with VRF service + bytes32 jobID; // ID of corresponding chainlink job in oracle's DB + uint256 fee; // Minimum payment for oracle response + } + + mapping(bytes32 /* (provingKey, seed) */ => Callback) public callbacks; + mapping(bytes32 /* provingKey */ => ServiceAgreement) + public serviceAgreements; + mapping(address /* oracle */ => uint256 /* LINK balance */) + public withdrawableTokens; + mapping(bytes32 /* provingKey */ => mapping(address /* consumer */ => uint256)) + private nonces; + + // The oracle only needs the jobID to look up the VRF, but specifying public + // key as well prevents a malicious oracle from inducing VRF outputs from + // another oracle by reusing the jobID. 
+ event RandomnessRequest( + bytes32 keyHash, + uint256 seed, + bytes32 indexed jobID, + address sender, + uint256 fee); + + event NewServiceAgreement(bytes32 keyHash, uint256 fee); + + /** + * @notice Commits calling address to serve randomness + * @param _fee minimum LINK payment required to serve randomness + * @param _publicProvingKey public key used to prove randomness + * @param _jobID ID of the corresponding chainlink job in the oracle's db + */ + function registerProvingKey( + uint256 _fee, uint256[2] calldata _publicProvingKey, bytes32 _jobID + ) + external + { + bytes32 keyHash = hashOfKey(_publicProvingKey); + address oldVRFOracle = serviceAgreements[keyHash].vRFOracle; + require(oldVRFOracle == address(0), "please register a new key"); + serviceAgreements[keyHash].vRFOracle = msg.sender; + serviceAgreements[keyHash].jobID = _jobID; + serviceAgreements[keyHash].fee = _fee; + emit NewServiceAgreement(keyHash, _fee); + } + + /** + * @notice Called by LINK.transferAndCall, on successful LINK transfer + * + * @dev To invoke this, use the requestRandomness method in VRFConsumerBase. + * + * @dev The VRFCoordinator will call back to the calling contract when the + * @dev oracle responds, on the method fulfillRandomness. See + * @dev VRFConsumerBase.fulfillRandomness for its signature. Your + * @dev consuming contract should inherit from VRFConsumerBase, and implement + * @dev fulfillRandomness. 
+ * + * @param _sender address: who sent the LINK (must be a contract) + * @param _fee amount of LINK sent + * @param _data abi-encoded call to randomnessRequest + */ + function onTokenTransfer(address _sender, uint256 _fee, bytes memory _data) + public + onlyLINK + { + (bytes32 keyHash, uint256 seed) = abi.decode(_data, (bytes32, uint256)); + randomnessRequest(keyHash, seed, _fee, _sender); + } + + /** + * @notice creates the chainlink request for randomness + * + * @param _keyHash ID of the VRF public key against which to generate output + * @param _seed Input to the VRF, from which randomness is generated + * @param _feePaid Amount of LINK sent with request. Must exceed fee for key + * @param _sender Requesting contract; to be called back with VRF output + */ + function randomnessRequest( + bytes32 _keyHash, + uint256 _seed, + uint256 _feePaid, + address _sender + ) + internal + sufficientLINK(_feePaid, _keyHash) + { + uint256 nonce = nonces[_keyHash][_sender]; + uint256 seed = makeVRFInputSeed(_keyHash, _seed, _sender, nonce); + bytes32 requestId = makeRequestId(_keyHash, seed); + // Cryptographically guaranteed by seed including an increasing nonce + assert(callbacks[requestId].callbackContract == address(0)); + callbacks[requestId].callbackContract = _sender; + callbacks[requestId].randomnessFee = _feePaid; + callbacks[requestId].seed = seed; + emit RandomnessRequest(_keyHash, seed, serviceAgreements[_keyHash].jobID, + _sender, _feePaid); + nonces[_keyHash][_sender] = nonces[_keyHash][_sender].add(1); + } + + /** + * @notice Called by the chainlink node to fullfil requests + * @param _proof the proof of randomness. Actual random output built from this + * + * @dev This is the main entrypoint for chainlink. If you change this, you + * @dev should also change the solidityABISstring in solidity_proof.go. 
+ */ + function fulfillRandomnessRequest(bytes memory _proof) public returns (bool) { + // TODO(alx): Replace the public key in the above proof with an argument + // specifying the keyHash. Splice the key in here before sending it to + // VRF.sol. Should be able to save about 2,000 gas that way. + // https://www.pivotaltracker.com/story/show/170828567 + // + // TODO(alx): Move this parsing into VRF.sol, where the bytes layout is recorded. + // https://www.pivotaltracker.com/story/show/170828697 + uint256[2] memory publicKey; + uint256 seed; + // solhint-disable-next-line no-inline-assembly + assembly { // Extract the public key and seed from proof + publicKey := add(_proof, 0x20) // Skip length word for first 64 bytes + seed := mload(add(_proof, 0xe0)) // Seed is 7th word in proof, plus word for length + } + bytes32 currentKeyHash = hashOfKey(publicKey); + bytes32 requestId = makeRequestId(currentKeyHash, seed); + Callback memory callback = callbacks[requestId]; + require(callback.callbackContract != address(0), "no corresponding request"); + uint256 randomness = VRF.randomValueFromVRFProof(_proof); // Reverts on failure + address oadd = serviceAgreements[currentKeyHash].vRFOracle; + withdrawableTokens[oadd] = withdrawableTokens[oadd].add(callback.randomnessFee); + // Dummy variable; allows access to method selector in next line. 
See + // https://github.com/ethereum/solidity/issues/3506#issuecomment-553727797 + VRFConsumerBase v; + bytes memory resp = abi.encodeWithSelector( + v.fulfillRandomness.selector, requestId, randomness); + // solhint-disable-next-line avoid-low-level-calls + (bool success,) = callback.callbackContract.call(resp); + delete callbacks[requestId]; // Be a good ethereum citizen + return success; + } + + /** + * @dev Allows the oracle operator to withdraw their LINK + * @param _recipient is the address the funds will be sent to + * @param _amount is the amount of LINK transfered from the Coordinator contract + */ + function withdraw(address _recipient, uint256 _amount) + external + hasAvailableFunds(_amount) + { + withdrawableTokens[msg.sender] = withdrawableTokens[msg.sender].sub(_amount); + assert(LINK.transfer(_recipient, _amount)); + } + + /** + * @notice Returns the serviceAgreements key associated with this public key + * @param _publicKey the key to return the address for + */ + function hashOfKey(uint256[2] memory _publicKey) public pure returns (bytes32) { + return keccak256(abi.encodePacked(_publicKey)); + } + + /** + * @dev Reverts if amount is not at least what was agreed upon in the service agreement + * @param _feePaid The payment for the request + * @param _keyHash The key which the request is for + */ + modifier sufficientLINK(uint256 _feePaid, bytes32 _keyHash) { + require(_feePaid >= serviceAgreements[_keyHash].fee, "Below agreed payment"); + _; + } + +/** + * @dev Reverts if not sent from the LINK token + */ + modifier onlyLINK() { + require(msg.sender == address(LINK), "Must use LINK token"); + _; + } + + /** + * @dev Reverts if amount requested is greater than withdrawable balance + * @param _amount The given amount to compare to `withdrawableTokens` + */ + modifier hasAvailableFunds(uint256 _amount) { + require(withdrawableTokens[msg.sender] >= _amount, "can't withdraw more than balance"); + _; + } + +} diff --git 
a/evm-contracts/src/v0.6/VRFRequestIDBase.sol b/evm-contracts/src/v0.6/VRFRequestIDBase.sol new file mode 100644 index 00000000000..8585c31d408 --- /dev/null +++ b/evm-contracts/src/v0.6/VRFRequestIDBase.sol @@ -0,0 +1,44 @@ +pragma solidity 0.6.2; + +contract VRFRequestIDBase { + + /** + * @notice returns the seed which is actually input to the VRF + * + * @dev To prevent repetition of VRF output due to repetition against the + * @dev user-supplied seed, that seed is combined in a hash with the a + * @dev user-specific nonce, and the address of the consuming contract. + * + * @dev Of course, crucial security guranatees would be broken by repetition + * @dev of the user-supplied seed, as all the other inputs can be anticipated + * @dev and the user-specified seed is public once the initial request is + * @dev made, so if the oracle has reason to belive that a user-specified seed + * @dev will be repeated, it may be able to anticipate its future outputs. So + * @dev it may make sense, for certain applications, for the VRF framework to + * @dev simply refuse to operate, if given a seed it's seen before. 
+ * + * @param _userSeed VRF seed input provided by user + * @param _requester Address of the requesting contract + * @param _nonce User-specific nonce at the time of the request + */ + function makeVRFInputSeed(bytes32 _keyHash, uint256 _userSeed, + address _requester, uint256 _nonce) + internal pure returns (uint256) + { + return uint256(keccak256(abi.encode(_keyHash, _userSeed, _requester, _nonce))); + } + + /** + * @notice Returns the id for this request + * @param _keyHash The serviceAgreement ID to be used for this request + * @param _vRFInputSeed The seed to be passed directly to the VRF + * @return The id for this request + * + * @dev Note that _vRFInputSeed is not the seed passed by the consuming + * @dev contract, but the one generated by makeVRFInputSeed + */ + function makeRequestId( + bytes32 _keyHash, uint256 _vRFInputSeed) internal pure returns (bytes32) { + return keccak256(abi.encodePacked(_keyHash, _vRFInputSeed)); + } +} diff --git a/evm-contracts/src/v0.6/dev/FluxAggregator.sol b/evm-contracts/src/v0.6/dev/FluxAggregator.sol index 37e80cef76a..7c09c62a412 100644 --- a/evm-contracts/src/v0.6/dev/FluxAggregator.sol +++ b/evm-contracts/src/v0.6/dev/FluxAggregator.sol @@ -112,6 +112,7 @@ contract FluxAggregator is AggregatorInterface, Owned { timeout = _timeout; decimals = _decimals; description = _description; + rounds[0].updatedAt = uint64(block.timestamp.sub(uint256(_timeout))); } /** @@ -128,7 +129,7 @@ contract FluxAggregator is AggregatorInterface, Owned { recordSubmission(_answer, uint32(_round)); updateRoundAnswer(uint32(_round)); payOracle(uint32(_round)); - deleteRound(uint32(_round)); + deleteRoundDetails(uint32(_round)); } /** @@ -153,12 +154,9 @@ contract FluxAggregator is AggregatorInterface, Owned { onlyOwner() onlyUnenabledAddress(_oracle) { - require(oracleCount() < 42, "cannot add more than 42 oracles"); - require(_admin != address(0), "admin address must not be 0x0"); - require( - oracles[_oracle].admin == address(0) || 
oracles[_oracle].admin == _admin, - "cannot modify previously-set admin address" - ); + require(oracleCount() < 42); + require(_admin != address(0)); + require(oracles[_oracle].admin == address(0) || oracles[_oracle].admin == _admin); oracles[_oracle].startingRound = getStartingRound(_oracle); oracles[_oracle].endingRound = ROUND_MAX; oracleAddresses.push(_oracle); @@ -359,8 +357,8 @@ contract FluxAggregator is AggregatorInterface, Owned { return answeredIn > 0 && answeredIn != roundId; } - /** - * @notice get the start time of the current reporting round + /** + * @notice get the start time of the current reporting round */ function reportingRoundStartedAt() external @@ -415,11 +413,11 @@ contract FluxAggregator is AggregatorInterface, Owned { function withdraw(address _oracle, address _recipient, uint256 _amount) external { - require(oracles[_oracle].admin == msg.sender, "Only admin can withdraw"); + require(oracles[_oracle].admin == msg.sender); uint128 amount = uint128(_amount); uint128 available = oracles[_oracle].withdrawable; - require(available >= amount, "Insufficient balance"); + require(available >= amount); oracles[_oracle].withdrawable = available.sub(amount); allocatedFunds = allocatedFunds.sub(amount); @@ -436,8 +434,8 @@ contract FluxAggregator is AggregatorInterface, Owned { external onlyOwner() { - require(availableFunds >= _amount, "Insufficient funds"); - require(LINK.transfer(_recipient, _amount), "LINK transfer failed"); + require(availableFunds >= _amount); + require(LINK.transfer(_recipient, _amount)); updateAvailableFunds(); } @@ -473,7 +471,7 @@ contract FluxAggregator is AggregatorInterface, Owned { function updateAdmin(address _oracle, address _newAdmin) external { - require(oracles[_oracle].admin == msg.sender, "Only admin can update admin"); + require(oracles[_oracle].admin == msg.sender); oracles[_oracle].admin = _newAdmin; emit OracleAdminUpdated(_oracle, _newAdmin); @@ -488,7 +486,7 @@ contract FluxAggregator is AggregatorInterface, 
Owned { { uint32 current = reportingRoundId; - require(rounds[current].updatedAt > 0 || timedOut(current), 'Cannot start a round mid-round'); + require(rounds[current].updatedAt > 0 || timedOut(current)); initializeNewRound(current.add(1)); } @@ -636,7 +634,7 @@ contract FluxAggregator is AggregatorInterface, Owned { emit SubmissionReceived(_answer, _id, msg.sender); } - function deleteRound(uint32 _id) + function deleteRoundDetails(uint32 _id) private ifMaxAnswersReceived(_id) { @@ -673,16 +671,72 @@ contract FluxAggregator is AggregatorInterface, Owned { return currentRound.add(1); } + function roundState(address _oracle) + external + view + returns ( + uint32 _reportableRoundId, + bool _eligibleToSubmit, + int256 _latestRoundAnswer, + uint64 _timesOutAt, + uint128 _availableFunds, + uint128 _paymentAmount + ) + { + bool finishedOrTimedOut = rounds[reportingRoundId].details.answers.length >= rounds[reportingRoundId].details.maxAnswers || timedOut(reportingRoundId); + _reportableRoundId = finishedOrTimedOut ? reportingRoundId.add(1) : reportingRoundId; + return ( + _reportableRoundId, + eligibleToSubmit(_oracle, _reportableRoundId, finishedOrTimedOut), + rounds[latestRoundId].answer, + finishedOrTimedOut ? 
0 : rounds[_reportableRoundId].startedAt + rounds[_reportableRoundId].details.timeout, + availableFunds, + rounds[_reportableRoundId].details.paymentAmount + ); + } + + function eligibleToSubmit(address _oracle, uint32 reportableRoundId, bool finishedOrTimedOut) + private + view + returns (bool) + { + uint32 startingRound = oracles[_oracle].startingRound; + if (startingRound == 0) { + return false; + } + if (startingRound > reportableRoundId) { + return false; + } else if (oracles[_oracle].endingRound < reportableRoundId) { + return false; + } else if (oracles[_oracle].lastReportedRound >= reportableRoundId) { + return false; + } + if (finishedOrTimedOut) { + uint32 lastStartedRound = oracles[_oracle].lastStartedRound; + if (reportableRoundId <= lastStartedRound + restartDelay && lastStartedRound > 0) { + return false; + } else if (maxAnswerCount == 0) { + return false; + } + } else { + if (rounds[reportableRoundId].details.maxAnswers == 0) { + return false; + } + } + + return true; + } + /** * Modifiers */ modifier onlyValidOracleRound(uint32 _id) { uint32 startingRound = oracles[msg.sender].startingRound; - require(startingRound != 0, "Only updatable by whitelisted oracles"); - require(startingRound <= _id, "New oracles cannot participate in in-progress rounds"); - require(oracles[msg.sender].endingRound >= _id, "Oracle has been removed from whitelist"); - require(oracles[msg.sender].lastReportedRound < _id, "Cannot update round reports"); + require(startingRound != 0); + require(startingRound <= _id); + require(oracles[msg.sender].endingRound >= _id); + require(oracles[msg.sender].lastReportedRound < _id); _; } @@ -699,7 +753,7 @@ contract FluxAggregator is AggregatorInterface, Owned { } modifier onlyWhenAcceptingAnswers(uint32 _id) { - require(rounds[_id].details.maxAnswers != 0, "Round not currently eligible for reporting"); + require(rounds[_id].details.maxAnswers != 0); _; } @@ -717,26 +771,26 @@ contract FluxAggregator is AggregatorInterface, Owned { } 
modifier onlyValidRoundId(uint32 _id) { - require(_id == reportingRoundId || _id == reportingRoundId.add(1), "Must report on current round"); - require(_id == 1 || finished(_id.sub(1)) || timedOut(_id.sub(1)), "Not eligible to bump round"); + require(_id == reportingRoundId || _id == reportingRoundId.add(1)); + require(_id == 1 || finished(_id.sub(1)) || timedOut(_id.sub(1))); _; } modifier onlyValidRange(uint32 _min, uint32 _max, uint32 _restartDelay) { uint32 oracleNum = oracleCount(); // Save on storage reads - require(oracleNum >= _max, "Cannot have the answer max higher oracle count"); - require(_max >= _min, "Cannot have the answer minimum higher the max"); - require(oracleNum == 0 || oracleNum > _restartDelay, "Restart delay must be less than oracle count"); + require(oracleNum >= _max); + require(_max >= _min); + require(oracleNum == 0 || oracleNum > _restartDelay); _; } modifier onlyUnenabledAddress(address _oracle) { - require(oracles[_oracle].endingRound != ROUND_MAX, "Address is already recorded as an oracle"); + require(oracles[_oracle].endingRound != ROUND_MAX); _; } modifier onlyEnabledAddress(address _oracle) { - require(oracles[_oracle].endingRound == ROUND_MAX, "Address is not a whitelisted oracle"); + require(oracles[_oracle].endingRound == ROUND_MAX); _; } @@ -747,12 +801,12 @@ contract FluxAggregator is AggregatorInterface, Owned { } modifier onlyWithPreviousAnswer(uint32 _id) { - require(rounds[_id.sub(1)].updatedAt != 0, "Must have a previous answer to pull from"); + require(rounds[_id.sub(1)].updatedAt != 0); _; } modifier onlyAuthorizedRequesters() { - require(authorizedRequesters[msg.sender], "Only authorized requesters can call"); + require(authorizedRequesters[msg.sender]); _; } diff --git a/evm-contracts/src/v0.6/tests/VRFConsumer.sol b/evm-contracts/src/v0.6/tests/VRFConsumer.sol new file mode 100644 index 00000000000..dbf05982cd5 --- /dev/null +++ b/evm-contracts/src/v0.6/tests/VRFConsumer.sol @@ -0,0 +1,22 @@ +pragma solidity 0.6.2; 
+ +import "../interfaces/LinkTokenInterface.sol"; +import "../VRFCoordinator.sol"; +import "../VRFConsumerBase.sol"; + +contract VRFConsumer is VRFConsumerBase { + + uint256 public randomnessOutput; + bytes32 public requestId; + + constructor(address _vrfCoordinator, address _link) public + // solhint-disable-next-line no-empty-blocks + VRFConsumerBase(_vrfCoordinator, _link) { /* empty */ } + + function fulfillRandomness(bytes32 _requestId, uint256 _randomness) + external override + { + randomnessOutput = _randomness; + requestId = _requestId; + } +} diff --git a/evm-contracts/src/v0.6/tests/VRFRequestIDBaseTestHelper.sol b/evm-contracts/src/v0.6/tests/VRFRequestIDBaseTestHelper.sol new file mode 100644 index 00000000000..c73078a9a4f --- /dev/null +++ b/evm-contracts/src/v0.6/tests/VRFRequestIDBaseTestHelper.sol @@ -0,0 +1,17 @@ +pragma solidity 0.6.2; + +import "../VRFRequestIDBase.sol"; + +contract VRFRequestIDBaseTestHelper is VRFRequestIDBase { + + function makeVRFInputSeed_(bytes32 _keyHash, uint256 _userSeed, + address _requester, uint256 _nonce) + public pure returns (uint256) { + return makeVRFInputSeed(_keyHash, _userSeed, _requester, _nonce); + } + + function makeRequestId_( + bytes32 _keyHash, uint256 _vRFInputSeed) public pure returns (bytes32) { + return makeRequestId(_keyHash, _vRFInputSeed); + } +} diff --git a/evm-contracts/src/v0.6/tests/VRFTestHelper.sol b/evm-contracts/src/v0.6/tests/VRFTestHelper.sol new file mode 100644 index 00000000000..51a0fc9c124 --- /dev/null +++ b/evm-contracts/src/v0.6/tests/VRFTestHelper.sol @@ -0,0 +1,54 @@ +pragma solidity 0.6.2; + +import "../VRF.sol"; + +/** *********************************************************************** + @notice Testing harness for VRF.sol, exposing its internal methods. Not to + @notice be used for production. 
+*/ +contract VRFTestHelper is VRF { + function bigModExp_(uint256 base, uint256 exponent) public view returns (uint256) { + return super.bigModExp(base, exponent); + } + function squareRoot_(uint256 x) public view returns (uint256) { + return super.squareRoot(x); + } + function ySquared_(uint256 x) public pure returns (uint256) { + return super.ySquared(x); + } + function fieldHash_(bytes memory b) public pure returns (uint256) { + return super.fieldHash(b); + } + function hashToCurve_(uint256[2] memory pk, uint256 x) public view returns(uint256[2] memory) { + return super.hashToCurve(pk, x); + } + function ecmulVerify_(uint256[2] memory x, uint256 scalar, uint256[2] memory q) public pure returns (bool) { + return super.ecmulVerify(x, scalar, q); + } + function projectiveECAdd_(uint256 px, uint256 py, uint256 qx, uint256 qy) public pure returns(uint256, uint256, uint256) { + return super.projectiveECAdd(px, py, qx, qy); + } + function affineECAdd_(uint256[2] memory p1, uint256[2] memory p2, uint256 invZ) public pure returns (uint256[2] memory) { + return super.affineECAdd(p1, p2, invZ); + } + function verifyLinearCombinationWithGenerator_(uint256 c, uint256[2] memory p, uint256 s, address lcWitness) public pure returns (bool) { + return super.verifyLinearCombinationWithGenerator(c, p, s, lcWitness); + } + function linearCombination_(uint256 c, uint256[2] memory p1, uint256[2] memory cp1Witness, uint256 s, uint256[2] memory p2, uint256[2] memory sp2Witness, uint256 zInv) public pure returns (uint256[2] memory) { + return super.linearCombination(c, p1, cp1Witness, s, p2, sp2Witness, zInv); + } + function scalarFromCurvePoints_(uint256[2] memory hash, uint256[2] memory pk, uint256[2] memory gamma, address uWitness, uint256[2] memory v) public pure returns (uint256) { + return super.scalarFromCurvePoints(hash, pk, gamma, uWitness, v); + } + function verifyVRFProof_( + uint256[2] memory pk, uint256[2] memory gamma, uint256 c, uint256 s, + uint256 seed, address 
uWitness, uint256[2] memory cGammaWitness, + uint256[2] memory sHashWitness, uint256 zInv) + public view { + super.verifyVRFProof(pk, gamma, c, s, seed, uWitness, cGammaWitness, sHashWitness, zInv); + } + function randomValueFromVRFProof_(bytes memory proof) + public view returns (uint256 output) { + return super.randomValueFromVRFProof(proof); + } +} diff --git a/evm-contracts/test/v0.5/VRF/VRF.test.ts b/evm-contracts/test/v0.5/VRF/VRF.test.ts deleted file mode 100644 index f8c295aa767..00000000000 --- a/evm-contracts/test/v0.5/VRF/VRF.test.ts +++ /dev/null @@ -1,212 +0,0 @@ -import { - contract, - extensions, - helpers as h, - matchers, - setup, -} from '@chainlink/test-helpers' -import { assert } from 'chai' -import { ethers } from 'ethers' -import { VRFFactory } from '../../../ethers/v0.5/VRFFactory' -import * as f from './fixtures' - -const { bigNumberify: bn } = ethers.utils -extensions.ethers.BigNumber.extend(ethers.utils.BigNumber) - -const big1 = bn(1) -const big2 = bn(2) -const big3 = bn(3) - -function assertPointsEqual( - x: ethers.utils.BigNumber[], - y: ethers.utils.BigNumber[], -) { - matchers.bigNum(x[0], y[0]) - matchers.bigNum(x[1], y[1]) -} - -const vrfFactory = new VRFFactory() -const provider = setup.provider() - -let defaultAccount: ethers.Wallet -beforeAll(async () => { - const users = await setup.users(provider) - defaultAccount = users.roles.defaultAccount -}) - -describe('VRF', () => { - let VRF: contract.Instance - const deployment = setup.snapshot(provider, async () => { - VRF = await vrfFactory.connect(defaultAccount).deploy() - }) - - beforeEach(async () => { - await deployment() - }) - - it('Accurately calculates simple and obvious bigModExp test inputs', async () => { - const rawExp = 3 ** 2 // Appease prettier but clarify operator precedence - matchers.bigNum(await VRF.bigModExp(3, 2, 5), rawExp % 5) - }) - - it('accurately calculates the sum of g and 2g (i.e., 3g)', async () => { - const projectiveResult = await VRF.projectiveECAdd( 
- f.generator[0], - f.generator[1], - f.twiceGenerator[0], - f.twiceGenerator[1], - ) - const zInv = projectiveResult.z3.invm(f.fieldSize) - const affineResult = await VRF.affineECAdd( - f.generator, - f.twiceGenerator, - zInv, - ) - assertPointsEqual(f.thriceGenerator, affineResult) - }) - - it('Accurately verifies multiplication of a point by a scalar', async () => { - assert(await VRF.ecmulVerify(f.generator, 2, f.twiceGenerator)) - }) - - it('Can compute square roots', async () => { - matchers.bigNum(2, await VRF.squareRoot(4), '4=2^2') // 4**((fieldSize-1)/2) - }) - - it('Can compute the square of the y ordinate given the x ordinate', async () => { - matchers.bigNum(8, await VRF.ySquared(1), '8=1^3+7') - }) - - it('Hashes to the curve with the same results as the golang code', async () => { - let result = await VRF.hashToCurve(f.generator, 1) - matchers.bigNum( - bn(result[0]) - .pow(big3) - .add(bn(7)) - .umod(f.fieldSize), - bn(result[1]) - .pow(big2) - .umod(f.fieldSize), - 'y^2=x^3+7', - ) - // See golang code - result = await VRF.hashToCurve(f.generator, 1) - matchers.bigNum( - result[0], - '0x530fddd863609aa12030a07c5fdb323bb392a88343cea123b7f074883d2654c4', - 'mismatch with output from services/vrf/vrf_test.go/TestVRF_HashToCurve', - ) - matchers.bigNum( - result[1], - '0x6fd4ee394bf2a3de542c0e5f3c86fc8f75b278a017701a59d69bdf5134dd6b70', - 'mismatch with output from services/vrf/vrf_test.go/TestVRF_HashToCurve', - ) - }) - - it('Correctly verifies linear combinations with generator', async () => { - assert( - await VRF.verifyLinearCombinationWithGenerator( - 5, - f.twiceGenerator, - 7, - h.pubkeyToAddress(f.seventeenTimesGenerator), - ), - '5*(2*g)+7*g=17*g?', - ) - }) - - it('Correctly computes full linear combinations', async () => { - const projSum = await VRF.projectiveECAdd( - f.eightTimesGenerator[0], - f.eightTimesGenerator[1], - f.nineTimesGenerator[0], - f.nineTimesGenerator[1], - ) - const zInv = projSum[2].invm(f.fieldSize) - 
assertPointsEqual( - f.seventeenTimesGenerator, - - // '4*(2*g)+3*(3*g)=17*g?' - await VRF.linearCombination( - 4, - f.twiceGenerator, - f.eightTimesGenerator, - 3, - f.thriceGenerator, - f.nineTimesGenerator, - zInv, - ), - ) - }) - - it('Computes the same hashed scalar from curve points as the golang code', async () => { - const scalar = await VRF.scalarFromCurve( - f.generator, - f.generator, - f.generator, - h.pubkeyToAddress(f.generator), - f.generator, - ) - matchers.bigNum( - '0x2b1049accb1596a24517f96761b22600a690ee5c6b6cadae3fa522e7d95ba338', - scalar, - 'mismatch with output from services/vrf/vrf_test.go/TestVRF_ScalarFromCurve', - ) - }) - - it('Knows a good VRF proof from bad', async () => { - const x = big1 // "secret" key in Goldberg's notation - const pk = f.generator - const seed = 1 - const hash = await VRF.hashToCurve(pk, seed) - const gamma = hash // Since gamma = x * hash = hash - const k = big1 // "Random" nonce, ha ha - const u = f.generator // Since u = k * generator = generator - const v = hash // Since v = k * hash = hash - const c = await VRF.scalarFromCurve( - hash, - pk, - gamma, - h.pubkeyToAddress(u), - v, - ) - const s = k.sub(c.mul(x)).umod(f.groupOrder) // s = k - c * x mod group size - const cGamma = [ - // >>> print("'0x%x',\n'0x%x'" % tuple(s.multiply(gamma, c))) - bn('0xa2e03a05b089db7b79cd0f6655d6af3e2d06bd0129f87f9f2155612b4e2a41d8'), - bn('0xa1dadcabf900bdfb6484e9a4390bffa6ccd666a565a991f061faf868cc9fce8'), - ] - const sHash = [ - // >>> print("'0x%x',\n'0x%x'" % tuple(s.multiply(hash, signature))) - bn('0xf82b4f9161ab41ae7c11e7deb628024ef9f5e9a0bca029f0ccb5cb534c70be31'), - bn('0xf26e7c0b4f039ca54cfa100b3457b301acb3e0b6c690d7ea5a86f8e1c481057e'), - ] - const projSum = await VRF.projectiveECAdd( - cGamma[0], - cGamma[1], - sHash[0], - sHash[1], - ) - const zInv = projSum[2].invm(f.fieldSize) - - const checkOutput = async (o: ethers.utils.BigNumberish) => - VRF.isValidVRFOutput( - pk, - gamma, - c, - s, - seed, - 
h.pubkeyToAddress(u), - cGamma, - sHash, - zInv, - o, - ) - assert(!(await checkOutput(0)), 'accepted a bad proof') - const output = ethers.utils.keccak256( - Buffer.concat(gamma.map(x => ethers.utils.arrayify(x))), - ) - - assert(await checkOutput(bn(output)), 'rejected good proof') - }) -}) diff --git a/evm-contracts/test/v0.5/VRF/fixtures.ts b/evm-contracts/test/v0.5/VRF/fixtures.ts deleted file mode 100644 index 1c50e91ee66..00000000000 --- a/evm-contracts/test/v0.5/VRF/fixtures.ts +++ /dev/null @@ -1,43 +0,0 @@ -import { extensions } from '@chainlink/test-helpers' -import { ethers } from 'ethers' -extensions.ethers.BigNumber.extend(ethers.utils.BigNumber) - -const { bigNumberify: bn } = ethers.utils - -// Group elements are {(x,y) in GF(fieldSize)^2 | y^2=x^3+3}, where -// GF(fieldSize) is arithmetic modulo fieldSize on {0, 1, ..., fieldSize-1} -export const fieldSize = bn( - '0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F', -) -export const groupOrder = bn( - // Number of elements in the set {(x,y) in GF(fieldSize)^2 | y^2=x^3+3} - '0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141', -) - -export const generator = [ - bn('0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798'), - bn('0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8'), -] // Point in EC group -export const twiceGenerator = [ - // '>>>' means "computed in python" - // >>> import py_ecc.secp256k1.secp256k1 as s - // >>> print("'0x%x',\n'0x%x'" % tuple(s.multiply(s.G, 2))) - bn('0xC6047F9441ED7D6D3045406E95C07CD85C778E4B8CEF3CA7ABAC09B95C709EE5'), - bn('0x1AE168FEA63DC339A3C58419466CEAEEF7F632653266D0E1236431A950CFE52A'), -] -export const thriceGenerator = [ - bn('0xF9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9'), - bn('0x388F7B0F632DE8140FE337E62A37F3566500A99934C2231B6CB9FD7584B8E672'), -] -export const eightTimesGenerator = [ - bn('0x2F01E5E15CCA351DAFF3843FB70F3C2F0A1BDD05E5AF888A67784EF3E10A2A01'), - 
bn('0x5C4DA8A741539949293D082A132D13B4C2E213D6BA5B7617B5DA2CB76CBDE904'), -] -export const nineTimesGenerator = [ - bn('0xACD484E2F0C7F65309AD178A9F559ABDE09796974C57E714C35F110DFC27CCBE'), - bn('0xCC338921B0A7D9FD64380971763B61E9ADD888A4375F8E0F05CC262AC64F9C37'), -] -export const seventeenTimesGenerator = [ - bn('0xDEFDEA4CDB677750A420FEE807EACF21EB9898AE79B9768766E4FAA04A2D4A34'), - bn('0x4211AB0694635168E997B0EAD2A93DAECED1F4A04A95C0F6CFB199F69E56EB77'), -] diff --git a/evm-contracts/test/v0.6/FluxAggregator.test.ts b/evm-contracts/test/v0.6/FluxAggregator.test.ts index d2ac8700725..16d0e9effc8 100644 --- a/evm-contracts/test/v0.6/FluxAggregator.test.ts +++ b/evm-contracts/test/v0.6/FluxAggregator.test.ts @@ -109,6 +109,7 @@ describe('FluxAggregator', () => { 'reportingRound', 'reportingRoundStartedAt', 'restartDelay', + 'roundState', 'startNewRound', 'timeout', 'updateAdmin', @@ -271,10 +272,7 @@ describe('FluxAggregator', () => { }) it('reverts', async () => { - await matchers.evmRevert( - aggregator.updateAnswer(nextRound + 1, answer), - 'Not eligible to bump round', - ) + await matchers.evmRevert(aggregator.updateAnswer(nextRound + 1, answer)) }) }) @@ -296,8 +294,9 @@ describe('FluxAggregator', () => { }) it('updates the updated timestamp', async () => { + const deployTime = aggregator.deployTransaction.chainId / 1000 const originalTimestamp = await aggregator.latestTimestamp() - assert.equal(0, originalTimestamp.toNumber()) + assert.closeTo(deployTime, originalTimestamp.toNumber(), timeout) await aggregator.connect(personas.Nelly).updateAnswer(nextRound, answer) @@ -339,7 +338,6 @@ describe('FluxAggregator', () => { await matchers.evmRevert( aggregator.connect(personas.Neil).updateAnswer(nextRound, answer), - 'Cannot update round reports', ) }) }) @@ -353,7 +351,6 @@ describe('FluxAggregator', () => { it('reverts', async () => { await matchers.evmRevert( aggregator.connect(personas.Ned).updateAnswer(nextRound, answer), - 'Round not currently eligible for 
reporting', ) }) }) @@ -408,7 +405,6 @@ describe('FluxAggregator', () => { it('reverts', async () => { await matchers.evmRevert( aggregator.connect(personas.Neil).updateAnswer(nextRound + 1, answer), - 'Must report on current round', ) }) }) @@ -417,7 +413,6 @@ describe('FluxAggregator', () => { it('reverts', async () => { await matchers.evmRevert( aggregator.connect(personas.Carol).updateAnswer(nextRound, answer), - 'Only updatable by whitelisted oracles', ) }) }) @@ -432,7 +427,6 @@ describe('FluxAggregator', () => { it('reverts', async () => { await matchers.evmRevert( aggregator.connect(personas.Neil).updateAnswer(nextRound, answer), - 'SafeMath: subtraction overflow', ) }) }) @@ -499,7 +493,6 @@ describe('FluxAggregator', () => { await matchers.evmRevert( aggregator.connect(personas.Neil).updateAnswer(nextRound, answer), - 'Not eligible to bump round', ) }) }) @@ -538,24 +531,37 @@ describe('FluxAggregator', () => { it('reverts', async () => { await matchers.evmRevert( aggregator.connect(personas.Ned).updateAnswer(nextRound, answer), - 'Round not currently eligible for reporting', ) await matchers.evmRevert( aggregator.connect(personas.Nelly).updateAnswer(nextRound, answer), - 'Round not currently eligible for reporting', ) }) }) }) describe('when the price is not updated for a round', () => { + describe('before the third round', () => { + beforeEach(async () => { + await aggregator + .connect(personas.Neil) + .updateAnswer(nextRound, answer) + await h.increaseTimeBy(timeout + 1, provider) + nextRound++ + }) + + it('allows a new round to be started', async () => { + await aggregator + .connect(personas.Nelly) + .updateAnswer(nextRound, answer) + }) + }) + // For a round to timeout, it needs a previous round to pull an answer // from, so the second round is the earliest round that can timeout, // pulling its answer from the first. 
The start of the third round is // the trigger that timesout the second round, so the start of the // third round is the earliest we can test a timeout. - describe('on the third round or later', () => { beforeEach(async () => { await updateFutureRounds(aggregator, { @@ -629,7 +635,6 @@ describe('FluxAggregator', () => { // expected to revert because the sender started the last round await matchers.evmRevert( aggregator.connect(personas.Ned).updateAnswer(nextRound, answer), - 'Round not currently eligible for reporting', ) }) @@ -643,33 +648,6 @@ describe('FluxAggregator', () => { .updateAnswer(nextRound, answer) }) }) - - describe('earlier than the third round', () => { - beforeEach(async () => { - await aggregator - .connect(personas.Neil) - .updateAnswer(nextRound, answer) - await aggregator - .connect(personas.Nelly) - .updateAnswer(nextRound, answer) - assert.equal( - nextRound, - (await aggregator.reportingRound()).toNumber(), - ) - - await h.increaseTimeBy(timeout + 1, provider) - - nextRound++ - assert.equal(2, nextRound) - }) - - it('does not allow a round to be started', async () => { - await matchers.evmRevert( - aggregator.connect(personas.Nelly).updateAnswer(nextRound, answer), - 'Must have a previous answer to pull from', - ) - }) - }) }) }) @@ -847,7 +825,6 @@ describe('FluxAggregator', () => { maxAns, rrDelay, ), - 'Address is already recorded as an oracle', ) }) }) @@ -864,7 +841,6 @@ describe('FluxAggregator', () => { maxAns, rrDelay, ), - 'Only callable by owner', ) }) }) @@ -900,7 +876,6 @@ describe('FluxAggregator', () => { it('does not allow the oracle to update the round', async () => { await matchers.evmRevert( aggregator.connect(personas.Nelly).updateAnswer(nextRound, answer), - 'New oracles cannot participate in in-progress rounds', ) }) @@ -1025,7 +1000,6 @@ describe('FluxAggregator', () => { 1, rrDelay, ), - 'cannot modify previously-set admin address', ) }) }) @@ -1051,7 +1025,6 @@ describe('FluxAggregator', () => { limit + 1, rrDelay, 
), - `cannot add more than ${limit} oracles`, ) }) }) @@ -1132,7 +1105,6 @@ describe('FluxAggregator', () => { aggregator .connect(personas.Carol) .removeOracle(personas.Neil.address, minAns, maxAns, rrDelay), - 'Address is not a whitelisted oracle', ) }) }) @@ -1155,7 +1127,6 @@ describe('FluxAggregator', () => { aggregator .connect(personas.Ned) .removeOracle(personas.Neil.address, 0, 0, rrDelay), - 'Only callable by owner', ) }) }) @@ -1176,7 +1147,6 @@ describe('FluxAggregator', () => { // cannot participate in future rounds await matchers.evmRevert( aggregator.connect(personas.Nelly).updateAnswer(nextRound, answer), - 'Oracle has been removed from whitelist', ) }) }) @@ -1315,7 +1285,6 @@ describe('FluxAggregator', () => { aggregator .connect(personas.Carol) .withdrawFunds(personas.Carol.address, deposit), - 'Insufficient funds', ) }) @@ -1340,7 +1309,6 @@ describe('FluxAggregator', () => { aggregator .connect(personas.Carol) .withdrawFunds(personas.Carol.address, deposit), - 'Insufficient funds', ) matchers.bigNum( @@ -1357,7 +1325,6 @@ describe('FluxAggregator', () => { aggregator .connect(personas.Eddy) .withdrawFunds(personas.Carol.address, deposit), - 'Only callable by owner', ) matchers.bigNum(deposit, await aggregator.availableFunds()) @@ -1441,7 +1408,6 @@ describe('FluxAggregator', () => { updateFutureRounds(aggregator, { maxAnswers: 4, }), - 'Cannot have the answer max higher oracle count', ) }) }) @@ -1453,7 +1419,6 @@ describe('FluxAggregator', () => { minAnswers: 3, maxAnswers: 2, }), - 'Cannot have the answer minimum higher the max', ) }) }) @@ -1464,7 +1429,6 @@ describe('FluxAggregator', () => { updateFutureRounds(aggregator, { restartDelay: 3, }), - 'Restart delay must be less than oracle count', ) }) }) @@ -1473,7 +1437,6 @@ describe('FluxAggregator', () => { it('reverts', async () => { await matchers.evmRevert( updateFutureRounds(aggregator.connect(personas.Ned)), - 'Only callable by owner', ) }) }) @@ -1592,7 +1555,6 @@ 
describe('FluxAggregator', () => { personas.Neil.address, paymentAmount.add(ethers.utils.bigNumberify(1)), ), - 'Insufficient balance', ) }) }) @@ -1607,7 +1569,6 @@ describe('FluxAggregator', () => { personas.Nelly.address, ethers.utils.bigNumberify(1), ), - 'Only admin can withdraw', ) }) }) @@ -1653,7 +1614,6 @@ describe('FluxAggregator', () => { aggregator .connect(personas.Carol) .updateAdmin(personas.Ned.address, personas.Nelly.address), - 'Only admin can update admin', ) }) }) @@ -1664,7 +1624,6 @@ describe('FluxAggregator', () => { aggregator .connect(personas.Ned) .updateAdmin(personas.Ned.address, personas.Nelly.address), - 'Only admin can update admin', ) }) }) @@ -1716,10 +1675,7 @@ describe('FluxAggregator', () => { }) it('reverts', async () => { - await matchers.evmRevert( - aggregator.startNewRound(), - 'Cannot start a round mid-round', - ) + await matchers.evmRevert(aggregator.startNewRound()) }) describe('when that round has timed out', () => { @@ -1784,7 +1740,6 @@ describe('FluxAggregator', () => { await matchers.evmRevert( aggregator.connect(personas.Neil).startNewRound(), - 'Only authorized requesters can call', ) }) @@ -1812,12 +1767,10 @@ describe('FluxAggregator', () => { aggregator .connect(personas.Neil) .setAuthorization(personas.Neil.address, true), - 'Only callable by owner', ) await matchers.evmRevert( aggregator.connect(personas.Neil).startNewRound(), - 'Only authorized requesters can call', ) }) }) diff --git a/evm-contracts/test/v0.6/WhitelistedAggregator.test.ts b/evm-contracts/test/v0.6/WhitelistedAggregator.test.ts index 62a43a7a780..3ced92df2c8 100644 --- a/evm-contracts/test/v0.6/WhitelistedAggregator.test.ts +++ b/evm-contracts/test/v0.6/WhitelistedAggregator.test.ts @@ -82,6 +82,7 @@ describe('WhitelistedAggregator', () => { 'reportingRound', 'reportingRoundStartedAt', 'restartDelay', + 'roundState', 'setAuthorization', 'startNewRound', 'timeout', diff --git a/evm-test-helpers/package.json b/evm-test-helpers/package.json 
index ecae53ad45d..f7ff0b71086 100644 --- a/evm-test-helpers/package.json +++ b/evm-test-helpers/package.json @@ -22,20 +22,20 @@ "chai": "^4.2.0", "chalk": "^2.4.2", "debug": "^4.1.1", - "ethers": "^4.0.44" + "ethers": "^4.0.45" }, "devDependencies": { "@types/cbor": "^5.0.0", - "@types/chai": "^4.2.4", + "@types/chai": "^4.2.9", "@types/debug": "^4.1.5", "@types/ganache-core": "^2.7.0", "@types/jest": "^24.0.25", - "@types/node": "^12.7.5", + "@types/node": "^13.9.1", "jest": "^24.9.0", - "jest-circus": "^24.9.0", + "jest-circus": "^25.1.0", "rimraf": "^3.0.1", "ts-jest": "^24.1.0", - "typechain": "^1.0.4", + "typechain": "^1.0.5", "typechain-target-ethers": "^1.0.3", "typescript": "^3.7.4" }, diff --git a/explorer/Dockerfile b/explorer/Dockerfile index 98a500d119a..9ddfe7eb503 100755 --- a/explorer/Dockerfile +++ b/explorer/Dockerfile @@ -1,5 +1,7 @@ FROM node:10.16.3 as builder +RUN apt-get update && apt-get install -y libudev-dev libusb-dev libusb-1.0-0 + RUN mkdir -p /usr/src/app WORKDIR /usr/src/app ENV PATH /usr/src/app/node_modules/.bin:$PATH diff --git a/explorer/README.md b/explorer/README.md index 65b4bd4c2c2..7a19572308c 100644 --- a/explorer/README.md +++ b/explorer/README.md @@ -1,5 +1,9 @@ # Explorer +## Environment variables + +See [./src/config.ts](./src/config.ts) for the available list of environment variables. + ## Deployment ### Build Docker image @@ -55,6 +59,53 @@ $ yarn run dev # in another terminal Please see [TypeORM's migration guide](https://typeorm.io/#/migrations). +## Running on seperate origins locally + +The client is able to run on a different origin than the server. The steps below outline a +quick way of testing this locally via [ngrok.](https://ngrok.com/) + +### Configure ngrok + +In a terminal pane: + +```sh +# Setup ngrok to proxy the default server settings. +ngrok http 8080 +``` + +In a seperate terminal pane: + +```sh +# Setup ngrok to proxy the default client settings. 
+ngrok http 3001 +``` + +### Configuring the server + +```sh +# replace http://1b623c12.ngrok.io with the forwarded url that the previous step gave you for +# forwarding the client via ngrok +EXPLORER_CLIENT_ORIGIN=http://1b623c12.ngrok.io yarn dev:server +``` + +### Configuring the client + +```sh +# replace http://03045a9a.ngrok.io with the forwarded url that the previous step gave you for +# forwarding the server via ngrok +DANGEROUSLY_DISABLE_HOST_CHECK=true REACT_APP_EXPLORER_BASEURL=http://03045a9a.ngrok.io yarn start +``` + +Note the usage of `DANGEROUSLY_DISABLE_HOST_CHECK`, it is described here: https://create-react-app.dev/docs/proxying-api-requests-in-development/#invalid-host-header-errors-after-configuring-proxy +Using the safe `HOST` variable does not work with ngrok, so unforunately this is the only way of using ngrok with +create react app. Consider running the client dev server in a VM or remote machine that is sandboxed. + +Another way of testing a seperate domain is to not use ngrok to forward the client, and to just use it locally via +`localhost:3001`. Make sure to set `EXPLORER_CLIENT_ORIGIN` to `http://localhost:3001` if so. + +You should now be able to visit the client via browser by using the forwarded ngrok url, or localhost. +Observe network requests using the api having a different origin than the client, and successfully returning data. 
+ ## Typescript If you would like to add an npm package that doesn't have Typescript support you will need diff --git a/explorer/client/@types/globals.d.ts b/explorer/client/@types/globals.d.ts new file mode 100644 index 00000000000..50e2094503a --- /dev/null +++ b/explorer/client/@types/globals.d.ts @@ -0,0 +1,4 @@ +declare const __EXPLORER_SERVER_VERSION__: string +declare const __EXPLORER_CLIENT_VERSION__: string +declare const __GIT_SHA__: string +declare const __GIT_BRANCH__: string diff --git a/explorer/client/craco.config.js b/explorer/client/craco.config.js index 17ee0661dcc..70804deda83 100644 --- a/explorer/client/craco.config.js +++ b/explorer/client/craco.config.js @@ -2,6 +2,11 @@ const HtmlWebpackPlugin = require('html-webpack-plugin') const DynamicCdnWebpackPlugin = require('dynamic-cdn-webpack-plugin') const CompressionPlugin = require('compression-webpack-plugin') const BrotliPlugin = require('brotli-webpack-plugin') +const webpack = require('webpack') +const clientPkg = require('./package.json') +const serverPkg = require('../package.json') +const GitRevisionPlugin = require('git-revision-webpack-plugin') +const gitRevisionPlugin = new GitRevisionPlugin({ branch: true }) module.exports = { webpack: { @@ -21,6 +26,12 @@ module.exports = { threshold: 0, minRatio: 0.8, }), + new webpack.DefinePlugin({ + __EXPLORER_CLIENT_VERSION__: JSON.stringify(clientPkg.version), + __EXPLORER_SERVER_VERSION__: JSON.stringify(serverPkg.version), + __GIT_SHA__: JSON.stringify(gitRevisionPlugin.commithash()), + __GIT_BRANCH__: JSON.stringify(gitRevisionPlugin.branch()) + }) ], }, eslint: { diff --git a/explorer/client/package.json b/explorer/client/package.json index bfe8b348221..ff1f0c94eeb 100644 --- a/explorer/client/package.json +++ b/explorer/client/package.json @@ -1,7 +1,7 @@ { "name": "@chainlink/explorer-client", "private": true, - "version": "0.0.1", + "version": "0.8.1", "keywords": [], "browserslist": { "production": [ @@ -36,7 +36,8 @@ 
"@chainlink/styleguide": "0.0.1", "@material-ui/core": "^3.9.2", "@material-ui/icons": "^4.5.1", - "@reach/router": "^1.2.1", + "@reach/router": "^1.3.0", + "autobind-decorator": "^2.4.0", "change-case": "^4.1.1", "classnames": "^2.2.6", "jsbi": "^3.1.1", @@ -54,20 +55,19 @@ "title-case": "^3.0.2" }, "devDependencies": { - "@chainlink/ts-test-helpers": "0.0.1", + "@chainlink/ts-helpers": "0.0.1", "@craco/craco": "^5.5.0", "@types/classnames": "^2.2.7", - "@types/enzyme": "^3.9.1", + "@types/enzyme": "^3.10.5", "@types/enzyme-adapter-react-16": "^1.0.5", "@types/jest": "^24.0.25", "@types/material-ui": "^0.21.6", - "@types/reach__router": "^1.2.3", + "@types/reach__router": "^1.3.0", "@types/react": "^16.9.17", "@types/react-dom": "^16.8.2", "@types/react-hot-loader": "^4.1.0", "@types/react-redux": "^7.0.5", "@types/react-resize-detector": "^3.1.1", - "@types/redux-logger": "^3.0.7", "@types/redux-thunk": "^2.1.0", "brotli-webpack-plugin": "^1.1.0", "compression-webpack-plugin": "^3.1.0", @@ -76,13 +76,13 @@ "dynamic-cdn-webpack-plugin": "^5.0.0", "enzyme": "^3.11.0", "enzyme-adapter-react-16": "^1.15.1", + "git-revision-webpack-plugin": "^3.0.4", "html-webpack-plugin": "4.0.0-beta.5", "jest": "^24.9.0", "module-to-cdn": "^3.1.2", "react-hot-loader": "^4.12.19", "react-scripts": "^3.1.0", "redux-devtools-extension": "^2.13.8", - "redux-logger": "^3.0.6", "rimraf": "^3.0.1", "ts-jest": "^24.1.0", "typescript": "^3.7.4", diff --git a/explorer/client/src/__tests__/components/JobRuns/Details.test.tsx b/explorer/client/src/__tests__/components/JobRuns/Details.test.tsx index fc7f85f3147..aa802a3d13b 100644 --- a/explorer/client/src/__tests__/components/JobRuns/Details.test.tsx +++ b/explorer/client/src/__tests__/components/JobRuns/Details.test.tsx @@ -1,6 +1,6 @@ import { mount } from 'enzyme' import React from 'react' -import { partialAsFull } from '@chainlink/ts-test-helpers' +import { partialAsFull } from '@chainlink/ts-helpers' import { JobRun, ChainlinkNode } from 
'explorer/models' import Details from '../../../components/JobRuns/Details' diff --git a/explorer/client/src/__tests__/components/Table.test.tsx b/explorer/client/src/__tests__/components/Table.test.tsx index acaf3ec3b83..540bbe0cd69 100644 --- a/explorer/client/src/__tests__/components/Table.test.tsx +++ b/explorer/client/src/__tests__/components/Table.test.tsx @@ -60,7 +60,9 @@ describe('components/Table', () => { {}} />, ) - expect(wrapper.text()).toContain('No results') + expect(wrapper.text()).toContain( + 'There are no results added to the Explorer yet', + ) }) it('can override the default empty message', () => { @@ -73,7 +75,9 @@ describe('components/Table', () => { />, ) - expect(wrapper.text()).not.toContain('No results') + expect(wrapper.text()).not.toContain( + 'There are no results added to the Explorer yet', + ) expect(wrapper.text()).toContain('EMPTY') }) }) diff --git a/explorer/client/src/__tests__/reducers/adminOperators.test.ts b/explorer/client/src/__tests__/reducers/adminOperators.test.ts index f6042d3d445..d6ececd9708 100644 --- a/explorer/client/src/__tests__/reducers/adminOperators.test.ts +++ b/explorer/client/src/__tests__/reducers/adminOperators.test.ts @@ -1,4 +1,4 @@ -import { partialAsFull } from '@chainlink/ts-test-helpers' +import { partialAsFull } from '@chainlink/ts-helpers' import { ChainlinkNode } from 'explorer/models' import reducer, { INITIAL_STATE as initialRootState, diff --git a/explorer/client/src/__tests__/reducers/adminOperatorsIndex.test.ts b/explorer/client/src/__tests__/reducers/adminOperatorsIndex.test.ts index 4779dc397c0..060f574d864 100644 --- a/explorer/client/src/__tests__/reducers/adminOperatorsIndex.test.ts +++ b/explorer/client/src/__tests__/reducers/adminOperatorsIndex.test.ts @@ -1,12 +1,16 @@ +import { partialAsFull } from '@chainlink/ts-helpers' import reducer, { INITIAL_STATE as initialRootState, AppState, } from '../../reducers' -import { FetchAdminOperatorsSucceededAction } from '../../reducers/actions' 
+import { + FetchAdminOperatorsSucceededAction, + FetchAdminOperatorsErrorAction, +} from '../../reducers/actions' const INITIAL_STATE: AppState = { ...initialRootState, - adminOperatorsIndex: { items: ['replace-me'] }, + adminOperatorsIndex: { items: ['replace-me'], loaded: false }, } describe('reducers/adminOperatorsIndex', () => { @@ -17,26 +21,35 @@ describe('reducers/adminOperatorsIndex', () => { expect(state.adminOperatorsIndex).toEqual(INITIAL_STATE.adminOperatorsIndex) }) - describe('FETCH_ADMIN_OPERATORS_SUCCEEDED', () => { - it('can replace items', () => { - const action: FetchAdminOperatorsSucceededAction = { - type: 'FETCH_ADMIN_OPERATORS_SUCCEEDED', - data: { - chainlinkNodes: [], - meta: { - currentPageOperators: { - data: [{ id: '9b7d791a-9a1f-4c55-a6be-b4231cf9fd4e' }], - meta: { count: 100 }, - }, + it('FETCH_ADMIN_OPERATORS_SUCCEEDED can replace items', () => { + const action: FetchAdminOperatorsSucceededAction = { + type: 'FETCH_ADMIN_OPERATORS_SUCCEEDED', + data: { + chainlinkNodes: [], + meta: { + currentPageOperators: { + data: [{ id: '9b7d791a-9a1f-4c55-a6be-b4231cf9fd4e' }], + meta: { count: 100 }, }, }, - } - const state = reducer(INITIAL_STATE, action) + }, + } + const state = reducer(INITIAL_STATE, action) + + expect(state.adminOperatorsIndex.items).toEqual([ + '9b7d791a-9a1f-4c55-a6be-b4231cf9fd4e', + ]) + expect(state.adminOperatorsIndex.count).toEqual(100) + expect(state.adminOperatorsIndex.loaded).toEqual(true) + }) - expect(state.adminOperatorsIndex).toEqual({ - items: ['9b7d791a-9a1f-4c55-a6be-b4231cf9fd4e'], - count: 100, - }) + it('FETCH_ADMIN_OPERATORS_ERROR sets loaded', () => { + const action = partialAsFull({ + type: 'FETCH_ADMIN_OPERATORS_ERROR', + error: new Error(), }) + const state = reducer(INITIAL_STATE, action) + + expect(state.adminOperatorsIndex.loaded).toEqual(true) }) }) diff --git a/explorer/client/src/__tests__/reducers/jobRuns.test.ts b/explorer/client/src/__tests__/reducers/jobRuns.test.ts index 
0fbdf9de9a2..92d0d2cbf00 100644 --- a/explorer/client/src/__tests__/reducers/jobRuns.test.ts +++ b/explorer/client/src/__tests__/reducers/jobRuns.test.ts @@ -1,4 +1,4 @@ -import { partialAsFull } from '@chainlink/ts-test-helpers' +import { partialAsFull } from '@chainlink/ts-helpers' import { JobRun } from 'explorer/models' import reducer, { INITIAL_STATE as initialRootState, diff --git a/explorer/client/src/__tests__/reducers/jobRunsIndex.test.ts b/explorer/client/src/__tests__/reducers/jobRunsIndex.test.ts index ec4126dea0a..0e0f245a196 100644 --- a/explorer/client/src/__tests__/reducers/jobRunsIndex.test.ts +++ b/explorer/client/src/__tests__/reducers/jobRunsIndex.test.ts @@ -1,4 +1,4 @@ -import { partialAsFull } from '@chainlink/ts-test-helpers' +import { partialAsFull } from '@chainlink/ts-helpers' import reducer, { INITIAL_STATE as initialRootState, AppState, diff --git a/explorer/client/src/__tests__/reducers/notifications.test.ts b/explorer/client/src/__tests__/reducers/notifications.test.ts index 8c9a82b58f3..68b5d0f0b44 100644 --- a/explorer/client/src/__tests__/reducers/notifications.test.ts +++ b/explorer/client/src/__tests__/reducers/notifications.test.ts @@ -1,5 +1,5 @@ import * as jsonapi from '@chainlink/json-api-client' -import { partialAsFull } from '@chainlink/ts-test-helpers' +import { partialAsFull } from '@chainlink/ts-helpers' import reducer, { INITIAL_STATE } from '../../reducers' import { FetchAdminSigninErrorAction } from '../../reducers/actions' diff --git a/explorer/client/src/__tests__/utils/status.test.ts b/explorer/client/src/__tests__/utils/status.test.ts index 57b1e46ebe6..2c74cf3d81f 100644 --- a/explorer/client/src/__tests__/utils/status.test.ts +++ b/explorer/client/src/__tests__/utils/status.test.ts @@ -1,4 +1,4 @@ -import { partialAsFull } from '@chainlink/ts-test-helpers' +import { partialAsFull } from '@chainlink/ts-helpers' import status from '../../utils/status' import { JobRun, TaskRun } from 'explorer/models' diff --git 
a/explorer/client/src/api/v1/admin/auth.ts b/explorer/client/src/api/v1/admin/auth.ts index cbdbd0f8c3c..c45b37706c0 100644 --- a/explorer/client/src/api/v1/admin/auth.ts +++ b/explorer/client/src/api/v1/admin/auth.ts @@ -1,5 +1,6 @@ import * as jsonapi from '@chainlink/json-api-client' - +import { Api } from '@chainlink/json-api-client' +import { boundMethod } from 'autobind-decorator' const SIGN_IN_ENDPOINT = '/api/v1/admin/login' interface SignInRequestParams { @@ -7,26 +8,32 @@ interface SignInRequestParams { password: string } -/** - * SignIn authenticates an admin user. - * @param username The admin username - * @param password The plain text password - */ -export function signIn( - username: string, - password: string, -): Promise> { - return jsonapi.createResource(SIGN_IN_ENDPOINT)({ - username, - password, - }) -} - const SIGN_OUT_ENDPOINT = '/api/v1/admin/logout' -/** - * SignOut authenticates an admin user. - */ -export function signOut(): Promise<{}> { - return jsonapi.deleteResource<{}, {}>(SIGN_OUT_ENDPOINT)() +export class Auth { + constructor(private api: Api) {} + + /** + * SignIn authenticates an admin user. + * @param username The admin username + * @param password The plain text password + */ + @boundMethod + public signIn( + username: string, + password: string, + ): Promise> { + return this.api.POST(SIGN_IN_ENDPOINT)({ + username, + password, + }) + } + + /** + * SignOut signs out an admin user. 
+ */ + @boundMethod + public signOut(): Promise<{}> { + return this.api.DELETE<{}, {}>(SIGN_OUT_ENDPOINT)() + } } diff --git a/explorer/client/src/api/v1/admin/operators.ts b/explorer/client/src/api/v1/admin/operators.ts index 71217c72bed..517c8fb234e 100644 --- a/explorer/client/src/api/v1/admin/operators.ts +++ b/explorer/client/src/api/v1/admin/operators.ts @@ -1,4 +1,6 @@ import * as jsonapi from '@chainlink/json-api-client' +import { Api } from '@chainlink/json-api-client' +import { boundMethod } from 'autobind-decorator' import * as models from 'explorer/models' /** @@ -8,32 +10,43 @@ import * as models from 'explorer/models' */ const INDEX_ENDPOINT = '/api/v1/admin/nodes' type IndexRequestParams = jsonapi.PaginatedRequestParams -const index = jsonapi.fetchResource( - INDEX_ENDPOINT, -) - -/** - * Index lists Operators, one page at a time. - * @param page The page number to fetch - * @param size The maximum number of operators in the page - */ -export function getOperators( - page: number, - size: number, -): Promise> { - return index({ page, size }) -} interface ShowPathParams { id: string } const SHOW_ENDPOINT = '/api/v1/admin/nodes/:id' -const show = jsonapi.fetchResource<{}, models.ChainlinkNode, ShowPathParams>( - SHOW_ENDPOINT, -) -export function getOperator( - id: string, -): Promise> { - return show({}, { id }) +export class Operators { + constructor(private api: Api) {} + + /** + * Index lists Operators, one page at a time. 
+ * @param page The page number to fetch + * @param size The maximum number of operators in the page + */ + @boundMethod + public getOperators( + page: number, + size: number, + ): Promise> { + return this.index({ page, size }) + } + + @boundMethod + public getOperator( + id: string, + ): Promise> { + return this.show({}, { id }) + } + + private index = this.api.fetchResource< + IndexRequestParams, + models.ChainlinkNode[] + >(INDEX_ENDPOINT) + + private show = this.api.fetchResource< + {}, + models.ChainlinkNode, + ShowPathParams + >(SHOW_ENDPOINT) } diff --git a/explorer/client/src/api/v1/index.ts b/explorer/client/src/api/v1/index.ts index 590f6d8d4ea..9a5e2ca9620 100644 --- a/explorer/client/src/api/v1/index.ts +++ b/explorer/client/src/api/v1/index.ts @@ -1,5 +1,14 @@ -import * as adminAuth from './admin/auth' -import * as adminOperators from './admin/operators' -import * as jobRuns from './jobRuns' +import { Api } from '@chainlink/json-api-client' +import { Auth } from './admin/auth' +import { Operators } from './admin/operators' +import { JobRuns } from './jobRuns' + +const api = new Api({ + base: process.env.REACT_APP_EXPLORER_BASEURL, +}) + +const adminAuth = new Auth(api) +const adminOperators = new Operators(api) +const jobRuns = new JobRuns(api) export { adminAuth, adminOperators, jobRuns } diff --git a/explorer/client/src/api/v1/jobRuns.ts b/explorer/client/src/api/v1/jobRuns.ts index f8782fe1761..0d607ff6882 100644 --- a/explorer/client/src/api/v1/jobRuns.ts +++ b/explorer/client/src/api/v1/jobRuns.ts @@ -1,4 +1,6 @@ import * as jsonapi from '@chainlink/json-api-client' +import { Api } from '@chainlink/json-api-client' +import { boundMethod } from 'autobind-decorator' import * as models from 'explorer/models' /** @@ -10,9 +12,6 @@ const INDEX_ENDPOINT = '/api/v1/job_runs' interface IndexRequestParams extends jsonapi.PaginatedRequestParams { query: string | undefined } -const index = jsonapi.fetchResource( - INDEX_ENDPOINT, -) /** * Show returns the 
details of a JobRun. @@ -23,30 +22,40 @@ interface ShowPathParams { id: string } const SHOW_ENDPOINT = `/api/v1/job_runs/:id` -const show = jsonapi.fetchResource<{}, models.JobRun, ShowPathParams>( - SHOW_ENDPOINT, -) -/** - * Index lists JobRuns, one page at a time. - * @param query The token to search - * @param page The page number to fetch - * @param size The maximum number of job runs in the page - */ -export function getJobRuns( - query: string | undefined, - page: number, - size: number, -): Promise> { - return index({ query, page, size }) -} +export class JobRuns { + constructor(private api: Api) {} -/** - * Get the details of a single JobRun by id - * @param id The id of the JobRun to obtain - */ -export function getJobRun( - id: string, -): Promise> { - return show({}, { id }) + /** + * Index lists JobRuns, one page at a time. + * @param query The token to search + * @param page The page number to fetch + * @param size The maximum number of job runs in the page + */ + @boundMethod + public getJobRuns( + query: string | undefined, + page: number, + size: number, + ): Promise> { + return this.index({ query, page, size }) + } + + /** + * Get the details of a single JobRun by id + * @param id The id of the JobRun to obtain + */ + @boundMethod + public getJobRun(id: string): Promise> { + return this.show({}, { id }) + } + + private index = this.api.fetchResource< + IndexRequestParams, + models.ChainlinkNode[] + >(INDEX_ENDPOINT) + + private show = this.api.fetchResource<{}, models.JobRun, ShowPathParams>( + SHOW_ENDPOINT, + ) } diff --git a/explorer/client/src/components/Admin/Operators/Footer.tsx b/explorer/client/src/components/Admin/Operators/Footer.tsx new file mode 100644 index 00000000000..6080a32d8ed --- /dev/null +++ b/explorer/client/src/components/Admin/Operators/Footer.tsx @@ -0,0 +1,45 @@ +import Card from '@material-ui/core/Card' +import { + createStyles, + Theme, + withStyles, + WithStyles, +} from '@material-ui/core/styles' +import Typography 
from '@material-ui/core/Typography' +import React from 'react' + +const styles = (theme: Theme) => + createStyles({ + style: { + textAlign: 'center', + padding: theme.spacing.unit * 2.5, + position: 'fixed', + left: '0', + bottom: '0', + width: '100%', + }, + }) + +interface Props extends WithStyles {} + +const Footnote: React.FC = ({ classes }) => { + const backendVersion = `Backend v${__EXPLORER_SERVER_VERSION__}` + const clientVersion = `Client v${__EXPLORER_CLIENT_VERSION__}` + + return ( + + + {backendVersion} {' | '} + + + {clientVersion} {' | '} + + + {__GIT_SHA__} {' | '} + + {__GIT_BRANCH__} + + ) +} + +export default withStyles(styles)(Footnote) diff --git a/explorer/client/src/components/Admin/Operators/List.tsx b/explorer/client/src/components/Admin/Operators/List.tsx index 21e27440389..30d7dda90df 100644 --- a/explorer/client/src/components/Admin/Operators/List.tsx +++ b/explorer/client/src/components/Admin/Operators/List.tsx @@ -6,9 +6,10 @@ import Table, { ChangePageEvent } from '../../Table' import { LinkColumn, TextColumn, TimeAgoColumn } from '../../Table/TableCell' import { ChainlinkNode } from 'explorer/models' -const HEADERS = ['Name', 'URL', 'Created At'] +const HEADERS = ['Name', 'URL', 'Created At'] as const const LOADING_MSG = 'Loading operators...' const EMPTY_MSG = 'There are no operators added to the Explorer yet.' +const ERROR_MSG = 'Error loading operators.' 
function buildNameCol(operator: ChainlinkNode): UrlColumn { return { @@ -50,12 +51,14 @@ function rows( interface Props { currentPage: number onChangePage: (event: ChangePageEvent, page: number) => void + loaded: boolean operators?: ChainlinkNode[] count?: number className?: string } const List: React.FC = ({ + loaded, operators, count, currentPage, @@ -71,8 +74,10 @@ const List: React.FC = ({ rows={rows(operators)} count={count} onChangePage={onChangePage} + loaded={loaded} loadingMsg={LOADING_MSG} emptyMsg={EMPTY_MSG} + errorMsg={ERROR_MSG} /> diff --git a/explorer/client/src/components/Table.tsx b/explorer/client/src/components/Table.tsx index 7d13cdc5ad4..0175003b051 100644 --- a/explorer/client/src/components/Table.tsx +++ b/explorer/client/src/components/Table.tsx @@ -18,31 +18,36 @@ import PaginationActions from './Table/PaginationActions' export const DEFAULT_ROWS_PER_PAGE = 10 export const DEFAULT_CURRENT_PAGE = 0 -interface LoadingProps { +interface MsgProps { colCount: number msg?: string } -const Loading = ({ colCount, msg }: LoadingProps) => { +const Loading: React.FC = ({ colCount, msg }) => { return ( - {msg || 'Loading...'} + {msg ?? 'Loading...'} ) } -interface EmptyProps { - colCount: number - msg?: string +const Empty: React.FC = ({ colCount, msg }) => { + return ( + + + {msg ?? 'There are no results added to the Explorer yet.'} + + + ) } -const Empty = ({ colCount, msg }: EmptyProps) => { +const Error: React.FC = ({ colCount, msg }) => { return ( - {msg || 'No results'} + {msg ?? 
'Error loading resources.'} ) @@ -66,18 +71,29 @@ const styles = (theme: Theme) => export type ChangePageEvent = React.MouseEvent | null interface Props extends WithStyles { - headers: string[] + headers: readonly string[] rowsPerPage: number currentPage: number onChangePage: (event: ChangePageEvent, page: number) => void + loaded?: boolean rows?: Column[][] count?: number loadingMsg?: string emptyMsg?: string + errorMsg?: string } -const renderRows = ({ headers, rows, loadingMsg, emptyMsg }: Props) => { - if (!rows) { +const renderRows = ({ + loaded, + headers, + rows, + loadingMsg, + emptyMsg, + errorMsg, +}: Props) => { + if (loaded && !rows) { + return + } else if (!rows) { return } else if (rows.length === 0) { return @@ -107,7 +123,7 @@ const Table = (props: Props) => { ({ - lightTooltip: { - background: palette.primary.contrastText, - color: palette.text.primary, - boxShadow: shadows[24], - ...typography.h6, - }, -}) - -interface Props extends WithStyles { - title: string - children: React.ReactElement -} - -const Tooltip = ({ title, children, classes }: Props) => { - return ( - - {children} - - ) -} - -export default withStyles(styles)(Tooltip) diff --git a/explorer/client/src/containers/Admin/Operator/Index.tsx b/explorer/client/src/containers/Admin/Operator/Index.tsx index de33dcdd4e6..27aaed90ec4 100644 --- a/explorer/client/src/containers/Admin/Operator/Index.tsx +++ b/explorer/client/src/containers/Admin/Operator/Index.tsx @@ -10,12 +10,12 @@ import { ChainlinkNode } from 'explorer/models' import React, { useEffect, useState } from 'react' import { connect, MapDispatchToProps, MapStateToProps } from 'react-redux' import build from 'redux-object' +import { DispatchBinding } from '@chainlink/ts-helpers' import { fetchAdminOperators } from '../../../actions/adminOperators' import List from '../../../components/Admin/Operators/List' import { ChangePageEvent } from '../../../components/Table' import Title from '../../../components/Title' import { AppState } 
from '../../../reducers' -import { DispatchBinding } from '../../../utils/types' const styles = ({ breakpoints, spacing }: Theme) => createStyles({ @@ -33,8 +33,9 @@ interface OwnProps { } interface StateProps { - adminOperators?: ChainlinkNode[] + loaded: boolean count: AppState['adminOperatorsIndex']['count'] + adminOperators?: ChainlinkNode[] } interface DispatchProps { @@ -50,6 +51,7 @@ interface Props export const Index: React.FC = ({ classes, + loaded, adminOperators, fetchAdminOperators, count, @@ -75,6 +77,7 @@ export const Index: React.FC = ({ Endorsed Operators createStyles({ diff --git a/explorer/client/src/containers/Admin/Private.tsx b/explorer/client/src/containers/Admin/Private.tsx index 265a056b1ce..9f31fdd8cee 100644 --- a/explorer/client/src/containers/Admin/Private.tsx +++ b/explorer/client/src/containers/Admin/Private.tsx @@ -17,8 +17,12 @@ interface Props DispatchProps, OwnProps {} -const Private: React.FC = ({ authenticated }) => { - return authenticated ? <> : +const Private: React.FC = ({ authenticated, children }) => { + return authenticated ? 
( + <>{children} + ) : ( + + ) } const mapStateToProps: MapStateToProps< diff --git a/explorer/client/src/containers/Admin/SignIn.tsx b/explorer/client/src/containers/Admin/SignIn.tsx index 72c9b7998ab..149023b8a89 100644 --- a/explorer/client/src/containers/Admin/SignIn.tsx +++ b/explorer/client/src/containers/Admin/SignIn.tsx @@ -1,10 +1,10 @@ import React from 'react' import { connect, MapDispatchToProps, MapStateToProps } from 'react-redux' import { Redirect, RouteComponentProps } from '@reach/router' +import { DispatchBinding } from '@chainlink/ts-helpers' import { SignIn as SignInForm } from '../../components/Forms/SignIn' import { signIn } from '../../actions/adminAuth' import { AppState } from '../../reducers' -import { DispatchBinding } from '../../utils/types' interface OwnProps {} diff --git a/explorer/client/src/containers/Admin/SignOut.tsx b/explorer/client/src/containers/Admin/SignOut.tsx index 62342b09932..9e25fbad7cc 100644 --- a/explorer/client/src/containers/Admin/SignOut.tsx +++ b/explorer/client/src/containers/Admin/SignOut.tsx @@ -1,9 +1,9 @@ import React, { useEffect } from 'react' import { connect, MapDispatchToProps, MapStateToProps } from 'react-redux' import { Redirect, RouteComponentProps } from '@reach/router' +import { DispatchBinding } from '@chainlink/ts-helpers' import { signOut } from '../../actions/adminAuth' import { AppState } from '../../reducers' -import { DispatchBinding } from '../../utils/types' interface OwnProps {} diff --git a/explorer/client/src/containers/JobRuns/Index.tsx b/explorer/client/src/containers/JobRuns/Index.tsx index 821dd11fa7e..8aabf936339 100644 --- a/explorer/client/src/containers/JobRuns/Index.tsx +++ b/explorer/client/src/containers/JobRuns/Index.tsx @@ -4,15 +4,15 @@ import { withStyles, WithStyles, } from '@material-ui/core/styles' -import { JobRun } from 'explorer/models' import React, { useEffect, useState } from 'react' import { connect, MapDispatchToProps, MapStateToProps } from 'react-redux' 
import build from 'redux-object' +import { DispatchBinding } from '@chainlink/ts-helpers' +import { JobRun } from 'explorer/models' import { fetchJobRuns } from '../../actions/jobRuns' import List from '../../components/JobRuns/List' import { ChangePageEvent } from '../../components/Table' import { AppState } from '../../reducers' -import { DispatchBinding } from '../../utils/types' const EMPTY_MSG = "We couldn't find any results for your search query. Try again with the job id, run id, requester, requester id or transaction hash" diff --git a/explorer/client/src/containers/JobRuns/Show.tsx b/explorer/client/src/containers/JobRuns/Show.tsx index 8cb5c45091f..0815aed6091 100644 --- a/explorer/client/src/containers/JobRuns/Show.tsx +++ b/explorer/client/src/containers/JobRuns/Show.tsx @@ -11,6 +11,7 @@ import TableBody from '@material-ui/core/TableBody' import TableCell from '@material-ui/core/TableCell' import TableRow from '@material-ui/core/TableRow' import { RouteComponentProps } from '@reach/router' +import { DispatchBinding } from '@chainlink/ts-helpers' import { JobRun } from 'explorer/models' import React, { useEffect } from 'react' import { connect, MapDispatchToProps, MapStateToProps } from 'react-redux' @@ -21,7 +22,6 @@ import Details from '../../components/JobRuns/Details' import RegionalNav from '../../components/JobRuns/RegionalNav' import RunStatus from '../../components/JobRuns/RunStatus' import { AppState } from '../../reducers' -import { DispatchBinding } from '../../utils/types' const Loading = () => (
diff --git a/explorer/client/src/layouts/Admin.tsx b/explorer/client/src/layouts/Admin.tsx index 8c180613a9c..a5f547c6ad4 100644 --- a/explorer/client/src/layouts/Admin.tsx +++ b/explorer/client/src/layouts/Admin.tsx @@ -1,15 +1,16 @@ -import React, { useState } from 'react' -import { RouteComponentProps } from '@reach/router' +import Grid from '@material-ui/core/Grid' import { createStyles, Theme, withStyles, WithStyles, } from '@material-ui/core/styles' -import Grid from '@material-ui/core/Grid' -import AdminPrivate from '../containers/Admin/Private' -import Header from '../containers/Admin/Header' +import { RouteComponentProps } from '@reach/router' +import React, { useState } from 'react' +import Footer from '../components/Admin/Operators/Footer' import { DEFAULT_HEADER_HEIGHT } from '../constants' +import Header from '../containers/Admin/Header' +import AdminPrivate from '../containers/Admin/Private' const styles = (theme: Theme) => createStyles({ @@ -32,9 +33,7 @@ export const Admin: React.FC = ({ children, classes }) => { const onHeaderResize = (_width: number, height: number) => setHeight(height) return ( - <> - - +
@@ -46,7 +45,8 @@ export const Admin: React.FC = ({ children, classes }) => { - +
+ ) } diff --git a/explorer/client/src/reducers/actions.ts b/explorer/client/src/reducers/actions.ts index 0378a91f6f0..f82ec9cd7a0 100644 --- a/explorer/client/src/reducers/actions.ts +++ b/explorer/client/src/reducers/actions.ts @@ -58,6 +58,15 @@ export type FetchAdminOperatorsSucceededAction = { data: AdminOperatorsNormalizedData } +/** + * FETCH_ADMIN_OPERATORS_ERROR + */ + +export type FetchAdminOperatorsErrorAction = { + type: 'FETCH_ADMIN_OPERATORS_ERROR' + error: Error +} + /** * FETCH_ADMIN_OPERATOR_SUCCEEDED */ @@ -132,6 +141,7 @@ export type Actions = | FetchAdminSigninErrorAction | FetchAdminSignoutSucceededAction | FetchAdminOperatorsSucceededAction + | FetchAdminOperatorsErrorAction | FetchAdminOperatorSucceededAction | FetchJobRunsSucceededAction | FetchJobRunSucceededAction diff --git a/explorer/client/src/reducers/adminOperatorsIndex.ts b/explorer/client/src/reducers/adminOperatorsIndex.ts index f18b8c1d972..889425cf4ef 100644 --- a/explorer/client/src/reducers/adminOperatorsIndex.ts +++ b/explorer/client/src/reducers/adminOperatorsIndex.ts @@ -4,9 +4,12 @@ import { Reducer } from 'redux' export interface State { items?: string[] count?: number + loaded: boolean } -const INITIAL_STATE: State = {} +const INITIAL_STATE: State = { + loaded: false, +} export const adminOperatorsIndex: Reducer = ( state = INITIAL_STATE, @@ -15,9 +18,18 @@ export const adminOperatorsIndex: Reducer = ( switch (action.type) { case 'FETCH_ADMIN_OPERATORS_SUCCEEDED': return { + ...state, items: action.data.meta.currentPageOperators.data.map(o => o.id), count: action.data.meta.currentPageOperators.meta.count, + loaded: true, } + + case 'FETCH_ADMIN_OPERATORS_ERROR': + return { + ...state, + loaded: true, + } + default: return state } diff --git a/explorer/client/src/reducers/adminOperatorsShow.ts b/explorer/client/src/reducers/adminOperatorsShow.ts index 05a69109e8e..b5a5210af36 100644 --- a/explorer/client/src/reducers/adminOperatorsShow.ts +++ 
b/explorer/client/src/reducers/adminOperatorsShow.ts @@ -15,7 +15,7 @@ export interface OperatorShowData { } } -interface State { +export interface State { id?: { attributes: OperatorShowData } diff --git a/explorer/client/tsconfig.json b/explorer/client/tsconfig.json index 9d7ad120c44..7cf31ca2a7a 100644 --- a/explorer/client/tsconfig.json +++ b/explorer/client/tsconfig.json @@ -10,15 +10,9 @@ }, "include": ["src", "@types", "test"], "references": [ - { - "path": "../../tools/json-api-client" - }, - { - "path": "../../tools/redux" - }, - { - "path": "../../tools/ts-test-helpers" - }, + { "path": "../../tools/json-api-client" }, + { "path": "../../tools/redux" }, + { "path": "../../tools/ts-helpers" }, { "path": "../../styleguide" } ] } diff --git a/explorer/package.json b/explorer/package.json index fa4c2b56843..c7d05dba9da 100644 --- a/explorer/package.json +++ b/explorer/package.json @@ -1,7 +1,7 @@ { "name": "@chainlink/explorer", "private": true, - "version": "0.0.1", + "version": "0.8.1", "description": "LINK Explorer", "author": "Chainlink Dev Team", "license": "MIT", @@ -15,10 +15,10 @@ "admin:clnodes:delete": "ts-node src/bin/clnodes.ts delete", "depcheck": "echo \"@chainlink/explorer\" && depcheck --ignore-dirs=client || exit 0", "predev": "yarn automigrate", - "dev": "concurrently \"cd client && yarn start\" \"ts-node-dev --respawn --transpileOnly ./src/index.ts\"", + "dev": "concurrently \"yarn dev:server\" \"yarn dev:client\" ", "dev:client": "yarn workspace @chainlink/explorer-client run start", - "dev:server": "ts-node-dev --respawn --transpileOnly ./src/index.ts", - "dev:compose": "COMPOSE_MODE=TRUE yarn migration:run && ts-node ./src/index.ts", + "dev:server": "EXPLORER_DEV=TRUE ts-node-dev --respawn --transpileOnly ./src/index.ts", + "dev:compose": "COMPOSE_MODE=TRUE EXPLORER_DEV=TRUE yarn migration:run && ts-node ./src/index.ts", "setup": "tsc -b", "clean": "rimraf .migrations.*.md5", "prod": "yarn migration:run && ts-node ./src/index.ts", @@ 
-35,44 +35,48 @@ "argon2": "^0.25.1", "class-validator": "^0.11.0", "cookie-session": "^1.3.3", + "cors": "^2.8.5", "express": "^4.16.4", - "express-winston": "^3.4.0", - "helmet": "^3.20.0", + "express-pino-logger": "^4.0.0", + "helmet": "^3.21.3", "jayson": "^3.1.2", "js-sha256": "^0.9.0", "jsonapi-serializer": "^3.6.6", "local-storage-fallback": "^4.1.1", "mime-types": "^2.1.26", "pg": "^7.17.1", + "pino": "^5.17.0", "reflect-metadata": "^0.1.13", "ts-node": "^8.6.2", - "typeorm": "^0.2.20", - "winston": "^3.2.1", + "typeorm": "^0.2.24", "ws": "^7.2.0", "yargs": "^15.1.0" }, "devDependencies": { "@types/bcrypt": "^3.0.0", "@types/cookie-session": "^2.0.38", + "@types/cors": "^2.8.6", "@types/express": "^4.16.1", - "@types/express-winston": "^3.0.1", + "@types/express-pino-logger": "^4.0.2", "@types/helmet": "0.0.45", "@types/jest": "^24.0.25", "@types/jsonapi-serializer": "^3.6.1", "@types/material-ui": "^0.21.6", "@types/mime-types": "^2.1.0", - "@types/node": "^11.11.3", + "@types/node": "^13.9.1", "@types/node-fetch": "^2.5.0", + "@types/pino": "^5.17.0", "@types/supertest": "^2.0.7", "@types/uuid": "^3.4.7", "@types/ws": "^7.2.0", - "@types/yargs": "^15.0.3", + "@types/yargs": "^15.0.4", "concurrently": "^5.0.2", "cross-env": "^6.0.3", "depcheck": "^0.9.1", "http-status-codes": "^1.3.2", "jest": "^24.7.0", "node-fetch": "^2.6.0", + "pino-pretty": "^3.6.1", "rimraf": "^3.0.1", "supertest": "^4.0.2", "ts-jest": "^24.0.0", diff --git a/explorer/src/__tests__/controllers/admin/login.test.ts b/explorer/src/__tests__/controllers/admin/login.test.ts index 2c780281695..58c1144d170 100644 --- a/explorer/src/__tests__/controllers/admin/login.test.ts +++ b/explorer/src/__tests__/controllers/admin/login.test.ts @@ -1,11 +1,11 @@ -import request from 'supertest' import http from 'http' +import request from 'supertest' import { Connection } from 'typeorm' import { getDb } from '../../../database' -import { clearDb } from '../../testdatabase' import { createAdmin } from 
'../../../support/admin' -import { start, stop } from '../../../support/server' import { requestBuilder, RequestBuilder } from '../../../support/requestBuilder' +import { start, stop } from '../../../support/server' +import { clearDb } from '../../testdatabase' const USERNAME = 'myadmin' const PASSWORD = 'validpassword' diff --git a/explorer/src/__tests__/controllers/admin/nodes.test.ts b/explorer/src/__tests__/controllers/admin/nodes.test.ts index 5f579e8dc9b..e849043be45 100644 --- a/explorer/src/__tests__/controllers/admin/nodes.test.ts +++ b/explorer/src/__tests__/controllers/admin/nodes.test.ts @@ -112,7 +112,6 @@ describe('GET /api/v1/admin/nodes/:id', () => { rb.sendGet(path(node.id), USERNAME, PASSWORD) .expect(httpStatus.OK) .expect(res => { - console.log(res.body) expect(res.body.data.id).toBeDefined() }) .end(done) diff --git a/explorer/src/__tests__/middleware/adminAuth.test.ts b/explorer/src/__tests__/middleware/adminAuth.test.ts index a56f9abf15e..c453e966c25 100644 --- a/explorer/src/__tests__/middleware/adminAuth.test.ts +++ b/explorer/src/__tests__/middleware/adminAuth.test.ts @@ -1,5 +1,6 @@ import bodyParser from 'body-parser' import cookieSession from 'cookie-session' +import { randomBytes } from 'crypto' import express from 'express' import http from 'http' import httpStatus from 'http-status-codes' @@ -28,7 +29,7 @@ app.use( cookieSession({ name: 'explorer', maxAge: 60_000, - keys: ['key1', 'key2'], + secret: randomBytes(32).toString(), }), ) app.use(ROUTE_PATH, adminAuth) diff --git a/explorer/src/config.ts b/explorer/src/config.ts new file mode 100644 index 00000000000..6af6c6e82d7 --- /dev/null +++ b/explorer/src/config.ts @@ -0,0 +1,74 @@ +/** + * Application configuration for the explorer + */ +export interface ExplorerConfig { + /** + * The port to run the server on + */ + port: number + /** + * Whether dev mode is enabled or not + */ + dev: boolean + /** + * The origin of the client, used for CORS purposes + */ + clientOrigin: string + 
/** + * The value of the secret used to sign cookies. + * Must be at least 32 characters. + * + * For production usage, make sure this value is kept secret + * and has sufficient entropy + * + */ + cookieSecret: string + /** + * The cookie expiration time in milliseconds + */ + cookieExpirationMs: number +} + +/** + * Get application configuration for the explorer app + */ +export function getConfig(): ExplorerConfig { + const { env } = process + + const conf: ExplorerConfig = { + port: parseInt(env.EXPLORER_SERVER_PORT) || 8080, + dev: !!env.EXPLORER_DEV, + clientOrigin: env.EXPLORER_CLIENT_ORIGIN ?? '', + cookieSecret: env.EXPLORER_COOKIE_SECRET, + cookieExpirationMs: 86_400_000, // 1 day in ms + } + + validateCookieSecret(conf.cookieSecret) + + for (const [k, v] of Object.entries(conf)) { + if (v == undefined) { + throw Error( + `Expected environment variable for ${k} to be set. Got "${v}".`, + ) + } + } + + return conf +} + +/** + * Assert that a cookie secret is at least 32 characters in length. + * + * @param secret The secret value to validate. + */ +function validateCookieSecret(secret?: string): asserts secret is string { + if (!secret) { + throw Error( + 'Cookie secret is not set! 
Set via environment variable EXPLORER_COOKIE_SECRET', + ) + } + + if (secret.length < 32) { + throw Error('Cookie secret must be at least 32 characters') + } +} diff --git a/explorer/src/controllers/jobRuns.ts b/explorer/src/controllers/jobRuns.ts index 64e6597cfa8..fca870c082a 100644 --- a/explorer/src/controllers/jobRuns.ts +++ b/explorer/src/controllers/jobRuns.ts @@ -5,20 +5,16 @@ import jobRunsSerializer from '../serializers/jobRunsSerializer' import jobRunSerializer from '../serializers/jobRunSerializer' import { getCustomRepository } from 'typeorm' import { JobRunRepository } from '../repositories/JobRunRepository' +import * as pagination from '../utils/pagination' const router = Router() -const DEFAULT_PAGE = 1 -const DEFAULT_SIZE = 10 - const searchParams = (req: Request): SearchParams => { - const page = parseInt(req.query.page, 10) || DEFAULT_PAGE - const size = parseInt(req.query.size, 10) || DEFAULT_SIZE + const params = pagination.parseParams(req.query) return { + ...params, searchQuery: req.query.query, - page, - limit: size, } } diff --git a/explorer/src/database.ts b/explorer/src/database.ts index bfefbed13ff..a90633ad6ff 100644 --- a/explorer/src/database.ts +++ b/explorer/src/database.ts @@ -1,7 +1,8 @@ import 'reflect-metadata' -import { createConnection, Connection } from 'typeorm' +import { Connection, createConnection } from 'typeorm' import { PostgresConnectionOptions } from 'typeorm/driver/postgres/PostgresConnectionOptions' import options from '../ormconfig.json' +import { TypeOrmLogger } from './logging' const overridableKeys = ['host', 'port', 'username', 'password', 'database'] @@ -26,7 +27,7 @@ const loadOptions = (env?: string) => { // Loads the following ENV vars, giving them precedence. // i.e. TYPEORM_PORT will replace "port" in ormconfig.json. 
const mergeOptions = (): PostgresConnectionOptions => { - const envOptions: { [key: string]: string } = {} + const envOptions: Record = {} for (const v of overridableKeys) { const envVar = process.env[`TYPEORM_${v.toUpperCase()}`] if (envVar) { @@ -36,6 +37,7 @@ const mergeOptions = (): PostgresConnectionOptions => { return { ...loadOptions(), ...envOptions, + logger: new TypeOrmLogger(), } as PostgresConnectionOptions } diff --git a/explorer/src/entity/ChainlinkNode.ts b/explorer/src/entity/ChainlinkNode.ts index 8138e51c6c2..ac7251f7a27 100644 --- a/explorer/src/entity/ChainlinkNode.ts +++ b/explorer/src/entity/ChainlinkNode.ts @@ -175,40 +175,32 @@ export async function uptime(db: Connection, node: ChainlinkNode | number) { // uptime from completed sessions async function historicUptime(db: Connection, id: number): Promise { - const { seconds } = await db + const queryResult = await db .createQueryBuilder() .select( - `FLOOR(SUM( - (31536000 * DATE_PART('year', session.finishedAt - session.createdAt)) + - (86400 * DATE_PART('day', session.finishedAt - session.createdAt)) + - (3600 * DATE_PART('hour', session.finishedAt - session.createdAt)) + - (60 * DATE_PART('minute', session.finishedAt - session.createdAt)) + - (DATE_PART('second', session.finishedAt - session.createdAt)) - )) as seconds`, + `EXTRACT(EPOCH FROM session."finishedAt" - session."createdAt") as seconds`, ) .from(Session, 'session') .where({ chainlinkNodeId: id }) .andWhere('session.finishedAt is not null') .getRawOne() - return parseInt(seconds) || 0 + // NOTE: If there are no sessions, SELECT EXTRACT... returns null + const seconds = queryResult?.seconds ?? 
0 + return Math.max(0, seconds) } // uptime from current open session async function currentUptime(db: Connection, id: number): Promise { - const { seconds } = await db + const queryResult = await db .createQueryBuilder() .select( - `FLOOR(SUM( - (31536000 * DATE_PART('year', now() - session.createdAt)) + - (86400 * DATE_PART('day', now() - session.createdAt)) + - (3600 * DATE_PART('hour', now() - session.createdAt)) + - (60 * DATE_PART('minute', now() - session.createdAt)) + - (DATE_PART('second', now() - session.createdAt)) - )) as seconds`, + `FLOOR(EXTRACT(EPOCH FROM (now() - session."createdAt"))) as seconds`, ) .from(Session, 'session') .where({ chainlinkNodeId: id }) - .andWhere('session.finishedAt is null') + .andWhere('session."finishedAt" is null') .getRawOne() - return parseInt(seconds) || 0 + // NOTE: If there are no sessions, SELECT EXTRACT... returns null + const seconds = queryResult?.seconds ?? 0 + return Math.max(0, seconds) } diff --git a/explorer/src/entity/Session.ts b/explorer/src/entity/Session.ts index bdc764fa850..6da039d4416 100644 --- a/explorer/src/entity/Session.ts +++ b/explorer/src/entity/Session.ts @@ -33,11 +33,10 @@ export async function createSession( db: Connection, node: ChainlinkNode, ): Promise { - const now = new Date() await db.manager .createQueryBuilder() .update(Session) - .set({ finishedAt: now }) + .set({ finishedAt: () => 'now()' }) .where({ chainlinkNodeId: node.id, finishedAt: null }) .execute() const session = new Session() @@ -61,7 +60,7 @@ export async function closeSession( return db.manager .createQueryBuilder() .update(Session) - .set({ finishedAt: new Date() }) + .set({ finishedAt: () => 'now()' }) .where({ sessionId: session.id }) .execute() } diff --git a/explorer/src/index.ts b/explorer/src/index.ts index 634b4d1d4cc..a0d1e90984b 100644 --- a/explorer/src/index.ts +++ b/explorer/src/index.ts @@ -14,4 +14,9 @@ const start = async () => { server() } -start().catch(logger.error) +start().catch(e => { + 
logger.error({ + msg: `Exception during startup: ${e.message}`, + stack: e.stack, + }) +}) diff --git a/explorer/src/logging.ts b/explorer/src/logging.ts index 8ceb14982a0..9b9706ae732 100644 --- a/explorer/src/logging.ts +++ b/explorer/src/logging.ts @@ -1,46 +1,55 @@ -import { - requestWhitelist, - logger as loggerConfig, - errorLogger, -} from 'express-winston' -import * as winston from 'winston' import express from 'express' +import PinoHttp from 'express-pino-logger' +import pino from 'pino' +import { Logger } from 'typeorm' -const LOGGER_WHITELIST = [ - 'url', - 'method', - 'httpVersion', - 'originalUrl', - 'query', -] -requestWhitelist.splice(0, requestWhitelist.length, ...LOGGER_WHITELIST) +const options: Parameters[0] = { + name: 'Explorer', + level: 'info', + redact: { + paths: ['req.headers', 'res.headers'], + }, +} +if (process.env.EXPLORER_DEV) { + options.prettyPrint = { colorize: true } + options.level = 'debug' +} else if (process.env.NODE_ENV === 'test') { + options.level = 'silent' +} +export const logger = pino(options) export const addRequestLogging = (app: express.Express) => { - const consoleTransport = new winston.transports.Console() - - app.use( - loggerConfig({ - expressFormat: true, - meta: true, - msg: 'HTTP {{req.method}} {{req.url}}', - transports: [consoleTransport], - }), - ) - - app.use( - errorLogger({ - transports: [consoleTransport], - }), - ) + app.use(PinoHttp({ logger })) } -const transports = { - console: new winston.transports.Console({ - level: 'info', - silent: process.env.NODE_ENV === 'test', - }), -} +export class TypeOrmLogger implements Logger { + private logger = logger.child({ module: 'TypeORM' }) + + public logQuery(query: string, parameters?: any[]): any { + this.logger.trace(query, { parameters }) + } + + public logQueryError(error: string, query: string, parameters?: any[]): any { + this.logger.error('DB query failed', { error, query, parameters }) + } -export const logger = winston.createLogger({ - transports: 
[transports.console], -}) + public logQuerySlow(time: number, query: string, parameters?: any[]): any { + this.logger.warn('Slow DB query detected', { + duration: time, + query, + parameters, + }) + } + + public logSchemaBuild(message: string): any { + this.logger.trace(message) + } + + public logMigration(message: string): any { + this.logger.info(message) + } + + public log(level: 'log' | 'info' | 'warn', message: any): any { + this.logger[level](message) + } +} diff --git a/explorer/src/middleware/adminAuth.ts b/explorer/src/middleware/adminAuth.ts index 327703cb2cf..2c0bcf2a479 100644 --- a/explorer/src/middleware/adminAuth.ts +++ b/explorer/src/middleware/adminAuth.ts @@ -33,6 +33,7 @@ export default async function(req: Request, res: Response, next: NextFunction) { /* eslint-disable-next-line require-atomic-updates */ req.session.admin = null res.sendStatus(httpStatus.UNAUTHORIZED) + return } } diff --git a/explorer/src/server.ts b/explorer/src/server.ts index 40fddb51dd5..c05d4d39b38 100644 --- a/explorer/src/server.ts +++ b/explorer/src/server.ts @@ -1,19 +1,18 @@ +import cookieSession from 'cookie-session' import express from 'express' import helmet from 'helmet' import http from 'http' import mime from 'mime-types' -import cookieSession from 'cookie-session' -import adminAuth from './middleware/adminAuth' +import { getConfig } from './config' import * as controllers from './controllers' import { addRequestLogging, logger } from './logging' -import { bootstrapRealtime } from './server/realtime' +import adminAuth from './middleware/adminAuth' import seed from './seed' +import { bootstrapRealtime } from './server/realtime' -export const DEFAULT_PORT = parseInt(process.env.SERVER_PORT, 10) || 8080 -export const COOKIE_EXPIRATION_MS = 86400000 // 1 day in ms - -const server = (port: number = DEFAULT_PORT): http.Server => { - if (process.env.NODE_ENV === 'development') { +export default function server(): http.Server { + const conf = getConfig() + if (conf.dev) 
{ seed() } @@ -21,11 +20,26 @@ const server = (port: number = DEFAULT_PORT): http.Server => { addRequestLogging(app) app.use(helmet()) + if (conf.dev) { + // eslint-disable-next-line @typescript-eslint/no-var-requires + const cors: typeof import('cors') = require('cors') + + app.use( + cors({ + origin: [conf.clientOrigin], + methods: 'GET,HEAD,PUT,PATCH,POST,DELETE', + preflightContinue: false, + optionsSuccessStatus: 204, + credentials: true, + }), + ) + } + app.use( cookieSession({ name: 'explorer', - maxAge: COOKIE_EXPIRATION_MS, - keys: ['key1', 'key2'], + maxAge: conf.cookieExpirationMs, + secret: conf.cookieSecret, }), ) app.use(express.json()) @@ -57,9 +71,7 @@ const server = (port: number = DEFAULT_PORT): http.Server => { const httpServer = new http.Server(app) bootstrapRealtime(httpServer) - return httpServer.listen(port, () => { - logger.info(`server started, listening on port ${port}`) + return httpServer.listen(conf.port, () => { + logger.info(`Server started, listening on port ${conf.port}`) }) } - -export default server diff --git a/explorer/src/server/realtime.ts b/explorer/src/server/realtime.ts index 3c10e52a25d..94d6dd9e223 100644 --- a/explorer/src/server/realtime.ts +++ b/explorer/src/server/realtime.ts @@ -31,26 +31,39 @@ export const bootstrapRealtime = async (server: http.Server) => { headers?: http.OutgoingHttpHeaders, ) => void, ) => { - logger.debug('websocket connection attempt') + const remote = remoteDetails(info.req) + logger.debug({ msg: 'websocket connection attempt', remote }) const accessKey = info.req.headers[ACCESS_KEY_HEADER] const secret = info.req.headers[SECRET_HEADER] if (typeof accessKey !== 'string' || typeof secret !== 'string') { - logger.info('client rejected, invalid authentication request') + logger.info({ + msg: 'client rejected, invalid authentication request', + origin: info.origin, + ...remote, + }) return } authenticate(db, accessKey, secret).then((session: Session | null) => { if (session === null) { - 
logger.info('client rejected, failed authentication') + logger.info({ + msg: 'client rejected, failed authentication', + accessKey, + origin: info.origin, + ...remote, + }) callback(false, 401) return } - logger.debug( - `websocket client successfully authenticated, new session for node ${session.chainlinkNodeId}`, - ) + logger.debug({ + msg: `websocket client successfully authenticated`, + nodeID: session.chainlinkNodeId, + origin: info.origin, + ...remote, + }) sessions.set(accessKey, session) const existingConnection = connections.get(accessKey) if (existingConnection) { @@ -62,14 +75,18 @@ export const bootstrapRealtime = async (server: http.Server) => { }) wss.on('connection', (ws: WebSocket, request: http.IncomingMessage) => { + const remote = remoteDetails(request) + // accessKey type already validated in verifyClient() const accessKey = request.headers[ACCESS_KEY_HEADER].toString() connections.set(accessKey, ws) clnodeCount = clnodeCount + 1 - logger.info( - `websocket connected, total chainlink nodes connected: ${clnodeCount}`, - ) + logger.info({ + msg: 'websocket connected', + nodeCount: clnodeCount, + ...remote, + }) ws.on('message', async (message: WebSocket.Data) => { const session = sessions.get(accessKey) @@ -97,9 +114,20 @@ export const bootstrapRealtime = async (server: http.Server) => { connections.delete(accessKey) } clnodeCount = clnodeCount - 1 - logger.info( - `websocket disconnected, total chainlink nodes connected: ${clnodeCount}`, - ) + logger.info({ + msg: 'websocket disconnected', + nodeCount: clnodeCount, + ...remote, + }) }) }) } + +function remoteDetails( + req: http.IncomingMessage, +): Record { + return { + remotePort: req.socket.remotePort, + remoteAddress: req.socket.remoteAddress, + } +} diff --git a/explorer/src/support/server.ts b/explorer/src/support/server.ts index 0d49efc89ee..6e01c81cdc8 100644 --- a/explorer/src/support/server.ts +++ b/explorer/src/support/server.ts @@ -1,16 +1,22 @@ -import { getDb, closeDbConnection } 
from '../database' -import server from '../server' +import { randomBytes } from 'crypto' import http from 'http' +import { closeDbConnection, getDb } from '../database' +import server from '../server' export const DEFAULT_TEST_PORT = - parseInt(process.env.TEST_SERVER_PORT, 10) || 8081 + parseInt(process.env.EXPLORER_TEST_SERVER_PORT, 10) || 8081 /** * Start database then initialize the server on the specified port */ -export async function start(port: number = DEFAULT_TEST_PORT) { +export async function start() { + Object.assign(process.env, { + EXPLORER_SERVER_PORT: `${DEFAULT_TEST_PORT}`, + EXPLORER_COOKIE_SECRET: randomBytes(32).toString('hex'), + }) + await getDb() - return server(port) + return server() } /** diff --git a/feeds/@types/feeds.d.ts b/feeds/@types/feeds.d.ts new file mode 100644 index 00000000000..d8fdef059be --- /dev/null +++ b/feeds/@types/feeds.d.ts @@ -0,0 +1,20 @@ +declare module 'feeds' { + interface FeedConfig { + contractAddress: string + listing: boolean + contractVersion?: number + contractType: string + name: string + valuePrefix: string + pair: string[] + counter?: number + path: string + networkId: number + history: boolean + decimalPlaces?: number + multiply?: string + sponsored?: string[] + threshold: number + compareOffchain?: string + } +} diff --git a/feeds/README.md b/feeds/README.md index 611800ca99b..a4bc842a80f 100644 --- a/feeds/README.md +++ b/feeds/README.md @@ -66,3 +66,36 @@ Deploy the newly built image by releasing the container from the root of the mon ``` $ heroku container:release web -a the-app-name ``` + +## Hidden Features + +### Display Offchain Comparison Links + +On the landing page you can enable a link to an offchain price comparison resource with the `compare_offchain` query parameter. 
+ +``` +https://feeds.chain.link?compare_offchain=true +``` + +![display-offchain-comparison](./docs/feed-landing-compare-offchain.png) + +### Display Reference Contract Health + +On the landing page you can enable live health checks with: + +``` +https://feeds.chain.link?health=true +``` + +![reference-contract-health](./docs/reference-contract-health.png) + +#### Checks + +* Within price threshold +* Answer is 0 + +Color Codes + +* Red: A check has failed (hover for tooltip that includes failed checks) +* Yellow: Unknown status (when the price health check is not configured) +* Green: Ok diff --git a/feeds/docs/feed-landing-compare-offchain.png b/feeds/docs/feed-landing-compare-offchain.png new file mode 100644 index 00000000000..20d844ae006 Binary files /dev/null and b/feeds/docs/feed-landing-compare-offchain.png differ diff --git a/feeds/docs/reference-contract-health.png b/feeds/docs/reference-contract-health.png new file mode 100644 index 00000000000..e4c8b4c4271 Binary files /dev/null and b/feeds/docs/reference-contract-health.png differ diff --git a/feeds/jest.config.js b/feeds/jest.config.js new file mode 100644 index 00000000000..d4af4a8b8b7 --- /dev/null +++ b/feeds/jest.config.js @@ -0,0 +1,3 @@ +module.exports = { + preset: 'ts-jest/presets/js-with-ts', +} diff --git a/feeds/jsconfig.json b/feeds/jsconfig.json deleted file mode 100644 index ec2332eb49c..00000000000 --- a/feeds/jsconfig.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "compilerOptions": { - "baseUrl": "src" - } -} diff --git a/feeds/package.json b/feeds/package.json index 73209e09eda..367b704ef23 100644 --- a/feeds/package.json +++ b/feeds/package.json @@ -25,16 +25,20 @@ "build": "react-scripts build", "postbuild": "./postbuild.sh", "clean": "rimraf -rf build", - "test": "react-scripts test", + "test": "jest", "build-theme": "lessc --js src/theme/theme.less src/theme.css", "watch-theme": "less-watch-compiler --enable-js src/theme src theme.less", - "server": "node server" + "server": "node 
server", + "setup": "tsc -b" }, "dependencies": { + "@chainlink/redux": "0.0.1", + "@chainlink/ts-helpers": "0.0.1", "antd": "^3.23.3", + "classnames": "^2.2.6", "d3": "^5.11.0", "eslint": "^6.6.0", - "ethers": "^4.0.44", + "ethers": "^4.0.45", "express": "^4.17.1", "less": "^3.11.1", "lodash": "^4.17.15", @@ -49,7 +53,6 @@ "react-scripts": "^3.1.0", "recompose": "^0.30.0", "redux": "^4.0.4", - "redux-logger": "^3.0.6", "redux-persist": "^5.10.0", "redux-persist-transform-filter": "^0.0.18", "redux-thunk": "^2.3.0", @@ -61,9 +64,12 @@ "@testing-library/jest-dom": "^4.2.4", "@testing-library/react": "^9.4.1", "@types/jest": "^24.0.25", + "@types/react-router": "^5.1.4", + "@types/redux-thunk": "^2.1.0", "cross-env": "^6.0.3", "jest": "^24.9.0", "redux-mock-store": "^1.5.4", - "rimraf": "^3.0.1" + "rimraf": "^3.0.1", + "ts-jest": "^24.1.0" } } diff --git a/feeds/public/index.html b/feeds/public/index.html index 7618b24923f..0f1101b5f82 100644 --- a/feeds/public/index.html +++ b/feeds/public/index.html @@ -5,16 +5,44 @@ - - - - - - - - - - + + + + + + + + + + + + + + + + +