diff --git a/src/bridge/Bridge.sol b/src/bridge/Bridge.sol index 31b41554..fa9aae5b 100644 --- a/src/bridge/Bridge.sol +++ b/src/bridge/Bridge.sol @@ -63,6 +63,11 @@ contract Bridge is Initializable, DelegateCallAware, IBridge { rollup = rollup_; } + /// @notice Allows the proxy owner to set the rollup address + function updateRollupAddress(IOwnable _rollup) external onlyDelegated onlyProxyOwner { + rollup = _rollup; + } + modifier onlyRollupOrOwner() { if (msg.sender != address(rollup)) { address rollupOwner = rollup.owner(); diff --git a/src/bridge/IBridge.sol b/src/bridge/IBridge.sol index f1fff13c..b174d57e 100644 --- a/src/bridge/IBridge.sol +++ b/src/bridge/IBridge.sol @@ -112,4 +112,6 @@ interface IBridge { // ---------- initializer ---------- function initialize(IOwnable rollup_) external; + + function updateRollupAddress(IOwnable _rollup) external; } diff --git a/src/bridge/IOutbox.sol b/src/bridge/IOutbox.sol index 3a551ce6..bb83894c 100644 --- a/src/bridge/IOutbox.sol +++ b/src/bridge/IOutbox.sol @@ -28,6 +28,8 @@ interface IOutbox { function OUTBOX_VERSION() external view returns (uint128); // the outbox version function updateSendRoot(bytes32 sendRoot, bytes32 l2BlockHash) external; + + function updateRollupAddress() external; /// @notice When l2ToL1Sender returns a nonzero address, the message was originated by an L2 account /// When the return value is zero, that means this is a system message diff --git a/src/bridge/ISequencerInbox.sol b/src/bridge/ISequencerInbox.sol index 5f19471e..922fd584 100644 --- a/src/bridge/ISequencerInbox.sol +++ b/src/bridge/ISequencerInbox.sol @@ -175,4 +175,6 @@ interface ISequencerInbox is IDelayedMessageProvider { // ---------- initializer ---------- function initialize(IBridge bridge_, MaxTimeVariation calldata maxTimeVariation_) external; + + function updateRollupAddress() external; } diff --git a/src/bridge/Inbox.sol b/src/bridge/Inbox.sol index aa23d07a..445c7c5a 100644 --- a/src/bridge/Inbox.sol +++ b/src/bridge/Inbox.sol @@ -35,7 +35,7 @@ import { L2MessageType_unsignedEOATx, L2MessageType_unsignedContractTx } from "../libraries/MessageTypes.sol"; -import {MAX_DATA_SIZE, UNISWAP_L1_TIMELOCK, UNISWAP_L2_FACTORY} from "../libraries/Constants.sol"; +import {MAX_DATA_SIZE} from "../libraries/Constants.sol"; import "../precompiles/ArbSys.sol"; import "@openzeppelin/contracts-upgradeable/utils/AddressUpgradeable.sol"; @@ -527,83 +527,6 @@ contract Inbox is DelegateCallAware, PausableUpgradeable, IInbox { ); } - /// @notice This is an one-time-exception to resolve a misconfiguration of Uniswap Arbitrum deployment - /// Only the Uniswap L1 Timelock may call this function and it is allowed to create a crosschain - /// retryable ticket without address aliasing. 
More info here: - /// https://gov.uniswap.org/t/consensus-check-fix-the-cross-chain-messaging-bridge-on-arbitrum/18547 - /// @dev This function will be removed in future releases - function uniswapCreateRetryableTicket( - address to, - uint256 l2CallValue, - uint256 maxSubmissionCost, - address excessFeeRefundAddress, - address callValueRefundAddress, - uint256 gasLimit, - uint256 maxFeePerGas, - bytes calldata data - ) external payable whenNotPaused onlyAllowed returns (uint256) { - // this can only be called by UNISWAP_L1_TIMELOCK - require(msg.sender == UNISWAP_L1_TIMELOCK, "NOT_UNISWAP_L1_TIMELOCK"); - // the retryable can only call UNISWAP_L2_FACTORY - require(to == UNISWAP_L2_FACTORY, "NOT_TO_UNISWAP_L2_FACTORY"); - - // ensure the user's deposit alone will make submission succeed - if (msg.value < (maxSubmissionCost + l2CallValue + gasLimit * maxFeePerGas)) { - revert InsufficientValue( - maxSubmissionCost + l2CallValue + gasLimit * maxFeePerGas, - msg.value - ); - } - - // if a refund address is a contract, we apply the alias to it - // so that it can access its funds on the L2 - // since the beneficiary and other refund addresses don't get rewritten by arb-os - if (AddressUpgradeable.isContract(excessFeeRefundAddress)) { - excessFeeRefundAddress = AddressAliasHelper.applyL1ToL2Alias(excessFeeRefundAddress); - } - if (AddressUpgradeable.isContract(callValueRefundAddress)) { - // this is the beneficiary. be careful since this is the address that can cancel the retryable in the L2 - callValueRefundAddress = AddressAliasHelper.applyL1ToL2Alias(callValueRefundAddress); - } - - // gas price and limit of 1 should never be a valid input, so instead they are used as - // magic values to trigger a revert in eth calls that surface data without requiring a tx trace - if (gasLimit == 1 || maxFeePerGas == 1) - revert RetryableData( - msg.sender, - to, - l2CallValue, - msg.value, - maxSubmissionCost, - excessFeeRefundAddress, - callValueRefundAddress, - gasLimit, - maxFeePerGas, - data - ); - - uint256 submissionFee = calculateRetryableSubmissionFee(data.length, block.basefee); - if (maxSubmissionCost < submissionFee) - revert InsufficientSubmissionCost(submissionFee, maxSubmissionCost); - - return - _deliverMessage( - L1MessageType_submitRetryableTx, - AddressAliasHelper.undoL1ToL2Alias(msg.sender), - abi.encodePacked( - uint256(uint160(to)), - l2CallValue, - msg.value, - maxSubmissionCost, - uint256(uint160(excessFeeRefundAddress)), - uint256(uint160(callValueRefundAddress)), - gasLimit, - maxFeePerGas, - data.length, - data - ) - ); - } function _deliverMessage( uint8 _kind, diff --git a/src/bridge/Outbox.sol b/src/bridge/Outbox.sol index 9020e660..00009901 100644 --- a/src/bridge/Outbox.sol +++ b/src/bridge/Outbox.sol @@ -68,6 +68,11 @@ contract Outbox is DelegateCallAware, IOutbox { rollup = address(_bridge.rollup()); } + /// @notice Allows the proxy owner to set the rollup address + function updateRollupAddress() external onlyDelegated onlyProxyOwner { + rollup = address(bridge.rollup()); + } + function updateSendRoot(bytes32 root, bytes32 l2BlockHash) external { if (msg.sender != rollup) revert NotRollup(msg.sender, rollup); roots[root] = l2BlockHash; diff --git a/src/bridge/SequencerInbox.sol b/src/bridge/SequencerInbox.sol index d639a16c..ca74f429 100644 --- a/src/bridge/SequencerInbox.sol +++ b/src/bridge/SequencerInbox.sol @@ -27,10 +27,13 @@ import "./IInbox.sol"; import "./ISequencerInbox.sol"; import "../rollup/IRollupLogic.sol"; import "./Messages.sol"; +import 
"../precompiles/ArbGasInfo.sol"; +import "../precompiles/ArbSys.sol"; import {L1MessageType_batchPostingReport} from "../libraries/MessageTypes.sol"; import {GasRefundEnabled, IGasRefunder} from "../libraries/IGasRefunder.sol"; import "../libraries/DelegateCallAware.sol"; +import "../libraries/ArbitrumChecker.sol"; import {MAX_DATA_SIZE} from "../libraries/Constants.sol"; /** @@ -66,6 +69,9 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox mapping(address => bool) public isSequencer; + // If the chain this SequencerInbox is deployed on is an Arbitrum chain. + bool internal immutable hostChainIsArbitrum = ArbitrumChecker.runningOnArbitrum(); + function _chainIdChanged() internal view returns (bool) { return deployTimeChainId != block.chainid; } @@ -81,6 +87,11 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox maxTimeVariation = maxTimeVariation_; } + /// @notice Allows the proxy owner to set the rollup address + function updateRollupAddress() external onlyDelegated onlyProxyOwner { + rollup = bridge.rollup(); + } + function getTimeBounds() internal view virtual returns (TimeBounds memory) { TimeBounds memory bounds; if (block.timestamp > maxTimeVariation.delaySeconds) { @@ -387,13 +398,29 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox // this msg isn't included in the current sequencer batch, but instead added to // the delayed messages queue that is yet to be included address batchPoster = msg.sender; - bytes memory spendingReportMsg = abi.encodePacked( - block.timestamp, - batchPoster, - dataHash, - seqMessageIndex, - block.basefee - ); + bytes memory spendingReportMsg; + if (hostChainIsArbitrum) { + // Include extra gas for the host chain's L1 gas charging + uint256 l1Fees = ArbGasInfo(address(0x6c)).getCurrentTxL1GasFees(); + uint256 extraGas = l1Fees / block.basefee; + require(extraGas <= type(uint64).max, "L1_GAS_NOT_UINT64"); + spendingReportMsg = abi.encodePacked( + block.timestamp, + batchPoster, + dataHash, + seqMessageIndex, + block.basefee, + uint64(extraGas) + ); + } else { + spendingReportMsg = abi.encodePacked( + block.timestamp, + batchPoster, + dataHash, + seqMessageIndex, + block.basefee + ); + } uint256 msgNum = bridge.submitBatchSpendingReport( batchPoster, keccak256(spendingReportMsg) @@ -433,9 +460,13 @@ contract SequencerInbox is DelegateCallAware, GasRefundEnabled, ISequencerInbox require(keysetBytes.length < 64 * 1024, "keyset is too large"); if (dasKeySetInfo[ksHash].isValidKeyset) revert AlreadyValidDASKeyset(ksHash); + uint256 creationBlock = block.number; + if (hostChainIsArbitrum) { + creationBlock = ArbSys(address(100)).arbBlockNumber(); + } dasKeySetInfo[ksHash] = DasKeySetInfo({ isValidKeyset: true, - creationBlock: uint64(block.number) + creationBlock: uint64(creationBlock) }); emit SetValidKeyset(ksHash, keysetBytes); emit OwnerFunctionCalled(2); diff --git a/src/challenge/IChallengeManager.sol b/src/challenge/IOldChallengeManager.sol similarity index 90% rename from src/challenge/IChallengeManager.sol rename to src/challenge/IOldChallengeManager.sol index b6f63d67..1d6e53a0 100644 --- a/src/challenge/IChallengeManager.sol +++ b/src/challenge/IOldChallengeManager.sol @@ -9,11 +9,11 @@ import "../bridge/IBridge.sol"; import "../bridge/ISequencerInbox.sol"; import "../osp/IOneStepProofEntry.sol"; -import "./IChallengeResultReceiver.sol"; +import "./IOldChallengeResultReceiver.sol"; -import "./ChallengeLib.sol"; +import "./OldChallengeLib.sol"; -interface 
IChallengeManager { +interface IOldChallengeManager { enum ChallengeTerminationType { TIMEOUT, BLOCK_PROOF, @@ -41,7 +41,7 @@ interface IChallengeManager { event ChallengeEnded(uint64 indexed challengeIndex, ChallengeTerminationType kind); function initialize( - IChallengeResultReceiver resultReceiver_, + IOldChallengeResultReceiver resultReceiver_, ISequencerInbox sequencerInbox_, IBridge bridge_, IOneStepProofEntry osp_ @@ -61,7 +61,7 @@ interface IChallengeManager { function challengeInfo(uint64 challengeIndex_) external view - returns (ChallengeLib.Challenge memory); + returns (OldChallengeLib.Challenge memory); function currentResponder(uint64 challengeIndex) external view returns (address); diff --git a/src/challenge/IChallengeResultReceiver.sol b/src/challenge/IOldChallengeResultReceiver.sol similarity index 89% rename from src/challenge/IChallengeResultReceiver.sol rename to src/challenge/IOldChallengeResultReceiver.sol index 264a0ae2..29d996cd 100644 --- a/src/challenge/IChallengeResultReceiver.sol +++ b/src/challenge/IOldChallengeResultReceiver.sol @@ -4,7 +4,7 @@ pragma solidity ^0.8.0; -interface IChallengeResultReceiver { +interface IOldChallengeResultReceiver { function completeChallenge( uint256 challengeIndex, address winner, diff --git a/src/challenge/ChallengeLib.sol b/src/challenge/OldChallengeLib.sol similarity index 93% rename from src/challenge/ChallengeLib.sol rename to src/challenge/OldChallengeLib.sol index 25ff894d..97da4b05 100644 --- a/src/challenge/ChallengeLib.sol +++ b/src/challenge/OldChallengeLib.sol @@ -7,9 +7,9 @@ pragma solidity ^0.8.0; import "../state/Machine.sol"; import "../state/GlobalState.sol"; -library ChallengeLib { +library OldChallengeLib { using MachineLib for Machine; - using ChallengeLib for Challenge; + using OldChallengeLib for Challenge; /// @dev It's assumed that that uninitialzed challenges have mode NONE enum ChallengeMode { @@ -86,8 +86,6 @@ library ChallengeLib { return keccak256(abi.encodePacked("Machine finished:", globalStateHash)); } else if (status == MachineStatus.ERRORED) { return keccak256(abi.encodePacked("Machine errored:")); - } else if (status == MachineStatus.TOO_FAR) { - return keccak256(abi.encodePacked("Machine too far:")); } else { revert("BAD_BLOCK_STATUS"); } @@ -124,8 +122,6 @@ library ChallengeLib { return keccak256(abi.encodePacked("Block state:", globalStateHash)); } else if (status == MachineStatus.ERRORED) { return keccak256(abi.encodePacked("Block state, errored:", globalStateHash)); - } else if (status == MachineStatus.TOO_FAR) { - return keccak256(abi.encodePacked("Block state, too far:")); } else { revert("BAD_BLOCK_STATUS"); } diff --git a/src/challenge/ChallengeManager.sol b/src/challenge/OldChallengeManager.sol similarity index 77% rename from src/challenge/ChallengeManager.sol rename to src/challenge/OldChallengeManager.sol index 12cad085..93c18d70 100644 --- a/src/challenge/ChallengeManager.sol +++ b/src/challenge/OldChallengeManager.sol @@ -7,16 +7,16 @@ pragma solidity ^0.8.0; import "../libraries/DelegateCallAware.sol"; import "../osp/IOneStepProofEntry.sol"; import "../state/GlobalState.sol"; -import "./IChallengeResultReceiver.sol"; -import "./ChallengeLib.sol"; -import "./IChallengeManager.sol"; +import "./IOldChallengeResultReceiver.sol"; +import "./OldChallengeLib.sol"; +import "./IOldChallengeManager.sol"; import {NO_CHAL_INDEX} from "../libraries/Constants.sol"; -contract ChallengeManager is DelegateCallAware, IChallengeManager { +contract OldChallengeManager is DelegateCallAware, 
IOldChallengeManager { using GlobalStateLib for GlobalState; using MachineLib for Machine; - using ChallengeLib for ChallengeLib.Challenge; + using OldChallengeLib for OldChallengeLib.Challenge; enum ChallengeModeRequirement { ANY, @@ -28,9 +28,9 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { uint256 private constant MAX_CHALLENGE_DEGREE = 40; uint64 public totalChallengesCreated; - mapping(uint256 => ChallengeLib.Challenge) public challenges; + mapping(uint256 => OldChallengeLib.Challenge) public challenges; - IChallengeResultReceiver public resultReceiver; + IOldChallengeResultReceiver public resultReceiver; ISequencerInbox public sequencerInbox; IBridge public bridge; @@ -40,33 +40,33 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { external view override - returns (ChallengeLib.Challenge memory) + returns (OldChallengeLib.Challenge memory) { return challenges[challengeIndex]; } modifier takeTurn( uint64 challengeIndex, - ChallengeLib.SegmentSelection calldata selection, + OldChallengeLib.SegmentSelection calldata selection, ChallengeModeRequirement expectedMode ) { - ChallengeLib.Challenge storage challenge = challenges[challengeIndex]; + OldChallengeLib.Challenge storage challenge = challenges[challengeIndex]; require(msg.sender == currentResponder(challengeIndex), "CHAL_SENDER"); require(!isTimedOut(challengeIndex), "CHAL_DEADLINE"); if (expectedMode == ChallengeModeRequirement.ANY) { - require(challenge.mode != ChallengeLib.ChallengeMode.NONE, NO_CHAL); + require(challenge.mode != OldChallengeLib.ChallengeMode.NONE, NO_CHAL); } else if (expectedMode == ChallengeModeRequirement.BLOCK) { - require(challenge.mode == ChallengeLib.ChallengeMode.BLOCK, "CHAL_NOT_BLOCK"); + require(challenge.mode == OldChallengeLib.ChallengeMode.BLOCK, "CHAL_NOT_BLOCK"); } else if (expectedMode == ChallengeModeRequirement.EXECUTION) { - require(challenge.mode == ChallengeLib.ChallengeMode.EXECUTION, "CHAL_NOT_EXECUTION"); + require(challenge.mode == OldChallengeLib.ChallengeMode.EXECUTION, "CHAL_NOT_EXECUTION"); } else { assert(false); } require( challenge.challengeStateHash == - ChallengeLib.hashChallengeState( + OldChallengeLib.hashChallengeState( selection.oldSegmentsStart, selection.oldSegmentsLength, selection.oldSegments @@ -82,12 +82,12 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { _; - if (challenge.mode == ChallengeLib.ChallengeMode.NONE) { + if (challenge.mode == OldChallengeLib.ChallengeMode.NONE) { // Early return since challenge must have terminated return; } - ChallengeLib.Participant memory current = challenge.current; + OldChallengeLib.Participant memory current = challenge.current; current.timeLeft -= block.timestamp - challenge.lastMoveTimestamp; challenge.current = challenge.next; @@ -97,7 +97,7 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { } function initialize( - IChallengeResultReceiver resultReceiver_, + IOldChallengeResultReceiver resultReceiver_, ISequencerInbox sequencerInbox_, IBridge bridge_, IOneStepProofEntry osp_ @@ -122,11 +122,11 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { ) external override returns (uint64) { require(msg.sender == address(resultReceiver), "ONLY_ROLLUP_CHAL"); bytes32[] memory segments = new bytes32[](2); - segments[0] = ChallengeLib.blockStateHash( + segments[0] = OldChallengeLib.blockStateHash( startAndEndMachineStatuses_[0], startAndEndGlobalStates_[0].hash() ); - segments[1] = ChallengeLib.blockStateHash( + segments[1] = 
OldChallengeLib.blockStateHash( startAndEndMachineStatuses_[1], startAndEndGlobalStates_[1].hash() ); @@ -134,7 +134,7 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { uint64 challengeIndex = ++totalChallengesCreated; // The following is an assertion since it should never be possible, but it's an important invariant assert(challengeIndex != NO_CHAL_INDEX); - ChallengeLib.Challenge storage challenge = challenges[challengeIndex]; + OldChallengeLib.Challenge storage challenge = challenges[challengeIndex]; challenge.wasmModuleRoot = wasmModuleRoot_; // See validator/assertion.go ExecutionState RequiredBatches() for reasoning @@ -146,13 +146,13 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { maxInboxMessagesRead++; } challenge.maxInboxMessages = maxInboxMessagesRead; - challenge.next = ChallengeLib.Participant({addr: asserter_, timeLeft: asserterTimeLeft_}); - challenge.current = ChallengeLib.Participant({ + challenge.next = OldChallengeLib.Participant({addr: asserter_, timeLeft: asserterTimeLeft_}); + challenge.current = OldChallengeLib.Participant({ addr: challenger_, timeLeft: challengerTimeLeft_ }); challenge.lastMoveTimestamp = block.timestamp; - challenge.mode = ChallengeLib.ChallengeMode.BLOCK; + challenge.mode = OldChallengeLib.ChallengeMode.BLOCK; emit InitiatedChallenge( challengeIndex, @@ -170,10 +170,10 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { */ function bisectExecution( uint64 challengeIndex, - ChallengeLib.SegmentSelection calldata selection, + OldChallengeLib.SegmentSelection calldata selection, bytes32[] calldata newSegments ) external takeTurn(challengeIndex, selection, ChallengeModeRequirement.ANY) { - (uint256 challengeStart, uint256 challengeLength) = ChallengeLib.extractChallengeSegment( + (uint256 challengeStart, uint256 challengeLength) = OldChallengeLib.extractChallengeSegment( selection ); require(challengeLength > 1, "TOO_SHORT"); @@ -192,7 +192,7 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { function challengeExecution( uint64 challengeIndex, - ChallengeLib.SegmentSelection calldata selection, + OldChallengeLib.SegmentSelection calldata selection, MachineStatus[2] calldata machineStatuses, bytes32[2] calldata globalStateHashes, uint256 numSteps @@ -201,12 +201,12 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { require(numSteps <= OneStepProofEntryLib.MAX_STEPS, "CHALLENGE_TOO_LONG"); requireValidBisection( selection, - ChallengeLib.blockStateHash(machineStatuses[0], globalStateHashes[0]), - ChallengeLib.blockStateHash(machineStatuses[1], globalStateHashes[1]) + OldChallengeLib.blockStateHash(machineStatuses[0], globalStateHashes[0]), + OldChallengeLib.blockStateHash(machineStatuses[1], globalStateHashes[1]) ); - ChallengeLib.Challenge storage challenge = challenges[challengeIndex]; - (uint256 executionChallengeAtSteps, uint256 challengeLength) = ChallengeLib + OldChallengeLib.Challenge storage challenge = challenges[challengeIndex]; + (uint256 executionChallengeAtSteps, uint256 challengeLength) = OldChallengeLib .extractChallengeSegment(selection); require(challengeLength == 1, "TOO_LONG"); @@ -227,13 +227,13 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { } bytes32[] memory segments = new bytes32[](2); - segments[0] = ChallengeLib.getStartMachineHash( + segments[0] = OldChallengeLib.getStartMachineHash( globalStateHashes[0], challenge.wasmModuleRoot ); - segments[1] = ChallengeLib.getEndMachineHash(machineStatuses[1], 
globalStateHashes[1]); + segments[1] = OldChallengeLib.getEndMachineHash(machineStatuses[1], globalStateHashes[1]); - challenge.mode = ChallengeLib.ChallengeMode.EXECUTION; + challenge.mode = OldChallengeLib.ChallengeMode.EXECUTION; completeBisection(challengeIndex, 0, numSteps, segments); @@ -242,19 +242,19 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { function oneStepProveExecution( uint64 challengeIndex, - ChallengeLib.SegmentSelection calldata selection, + OldChallengeLib.SegmentSelection calldata selection, bytes calldata proof ) external takeTurn(challengeIndex, selection, ChallengeModeRequirement.EXECUTION) { - ChallengeLib.Challenge storage challenge = challenges[challengeIndex]; + OldChallengeLib.Challenge storage challenge = challenges[challengeIndex]; uint256 challengeStart; { uint256 challengeLength; - (challengeStart, challengeLength) = ChallengeLib.extractChallengeSegment(selection); + (challengeStart, challengeLength) = OldChallengeLib.extractChallengeSegment(selection); require(challengeLength == 1, "TOO_LONG"); } bytes32 afterHash = osp.proveOneStep( - ExecutionContext({maxInboxMessagesRead: challenge.maxInboxMessages, bridge: bridge}), + ExecutionContext({maxInboxMessagesRead: challenge.maxInboxMessages, bridge: bridge, initialWasmModuleRoot: challenge.wasmModuleRoot}), challengeStart, selection.oldSegments[selection.challengePosition], proof @@ -269,14 +269,14 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { } function timeout(uint64 challengeIndex) external override { - require(challenges[challengeIndex].mode != ChallengeLib.ChallengeMode.NONE, NO_CHAL); + require(challenges[challengeIndex].mode != OldChallengeLib.ChallengeMode.NONE, NO_CHAL); require(isTimedOut(challengeIndex), "TIMEOUT_DEADLINE"); _nextWin(challengeIndex, ChallengeTerminationType.TIMEOUT); } function clearChallenge(uint64 challengeIndex) external override { require(msg.sender == address(resultReceiver), "NOT_RES_RECEIVER"); - require(challenges[challengeIndex].mode != ChallengeLib.ChallengeMode.NONE, NO_CHAL); + require(challenges[challengeIndex].mode != OldChallengeLib.ChallengeMode.NONE, NO_CHAL); delete challenges[challengeIndex]; emit ChallengeEnded(challengeIndex, ChallengeTerminationType.CLEARED); } @@ -285,12 +285,12 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { return challenges[challengeIndex].current.addr; } - function isTimedOut(uint64 challengeIndex) public view virtual override returns (bool) { + function isTimedOut(uint64 challengeIndex) public view override returns (bool) { return challenges[challengeIndex].isTimedOut(); } function requireValidBisection( - ChallengeLib.SegmentSelection calldata selection, + OldChallengeLib.SegmentSelection calldata selection, bytes32 startHash, bytes32 endHash ) private pure { @@ -307,7 +307,7 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { assert(challengeLength >= 1); assert(newSegments.length >= 2); - bytes32 challengeStateHash = ChallengeLib.hashChallengeState( + bytes32 challengeStateHash = OldChallengeLib.hashChallengeState( challengeStart, challengeLength, newSegments @@ -325,7 +325,7 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { /// @dev This function causes the mode of the challenge to be set to NONE by deleting the challenge function _nextWin(uint64 challengeIndex, ChallengeTerminationType reason) private { - ChallengeLib.Challenge storage challenge = challenges[challengeIndex]; + OldChallengeLib.Challenge storage challenge = 
challenges[challengeIndex]; address next = challenge.next.addr; address current = challenge.current.addr; delete challenges[challengeIndex]; @@ -342,7 +342,7 @@ contract ChallengeManager is DelegateCallAware, IChallengeManager { uint64 challengeIndex, ChallengeTerminationType /* reason */ ) private { - ChallengeLib.Challenge storage challenge = challenges[challengeIndex]; + OldChallengeLib.Challenge storage challenge = challenges[challengeIndex]; challenge.challengeStateHash = bytes32(0); // address next = challenge.next.addr; diff --git a/src/challengeV2/EdgeChallengeManager.sol b/src/challengeV2/EdgeChallengeManager.sol new file mode 100644 index 00000000..2e0de25f --- /dev/null +++ b/src/challengeV2/EdgeChallengeManager.sol @@ -0,0 +1,625 @@ +// Copyright 2023, Offchain Labs, Inc. +// For license information, see https://github.com/offchainlabs/bold/blob/main/LICENSE +// SPDX-License-Identifier: BUSL-1.1 +// +pragma solidity ^0.8.17; + +import "../rollup/Assertion.sol"; +import "./libraries/UintUtilsLib.sol"; +import "./IAssertionChain.sol"; +import "./libraries/EdgeChallengeManagerLib.sol"; +import "../libraries/Constants.sol"; +import "../state/Machine.sol"; + +import "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; +import "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol"; + +/// @title EdgeChallengeManager interface +interface IEdgeChallengeManager { + /// @notice Initialize the EdgeChallengeManager. EdgeChallengeManagers are upgradeable + /// so use the initializer paradigm + /// @param _assertionChain The assertion chain contract + /// @param _challengePeriodBlocks The amount of cumulative time an edge must spend unrivaled before it can be confirmed + /// @param _oneStepProofEntry The one step proof logic + /// @param layerZeroBlockEdgeHeight The end height of layer zero edges of type Block + /// @param layerZeroBigStepEdgeHeight The end height of layer zero edges of type BigStep + /// @param layerZeroSmallStepEdgeHeight The end height of layer zero edges of type SmallStep + /// @param _stakeToken The token that stake will be provided in when creating zero layer block edges + /// @param _stakeAmount The amount of stake (in units of stake token) required to create a block edge + /// @param _excessStakeReceiver The address that excess stake will be sent to when 2nd+ block edge is created + function initialize( + IAssertionChain _assertionChain, + uint256 _challengePeriodBlocks, + IOneStepProofEntry _oneStepProofEntry, + uint256 layerZeroBlockEdgeHeight, + uint256 layerZeroBigStepEdgeHeight, + uint256 layerZeroSmallStepEdgeHeight, + IERC20 _stakeToken, + uint256 _stakeAmount, + address _excessStakeReceiver + ) external; + + function challengePeriodBlocks() external view returns (uint256); + + /// @notice The one step proof resolver used to decide between rival SmallStep edges of length 1 + function oneStepProofEntry() external view returns (IOneStepProofEntry); + + /// @notice Performs necessary checks and creates a new layer zero edge + /// @param args Edge creation args + function createLayerZeroEdge(CreateEdgeArgs calldata args) external returns (bytes32); + + /// @notice Bisect an edge. This creates two child edges: + /// lowerChild: has the same start root and height as this edge, but a different end root and height + /// upperChild: has the same end root and height as this edge, but a different start root and height + /// The lower child end root and height are equal to the upper child start root and height. 
This height + /// is the mandatoryBisectionHeight. + /// The lower child may already exist, however it's not possible for the upper child to exist as that would + /// mean that the edge has already been bisected + /// @param edgeId Edge to bisect + /// @param bisectionHistoryRoot The new history root to be used in the lower and upper children + /// @param prefixProof A proof to show that the bisectionHistoryRoot commits to a prefix of the current endHistoryRoot + /// @return lowerChildId The id of the newly created lower child edge + /// @return upperChildId The id of the newly created upper child edge + function bisectEdge(bytes32 edgeId, bytes32 bisectionHistoryRoot, bytes calldata prefixProof) + external + returns (bytes32, bytes32); + + /// @notice Confirm an edge if both its children are already confirmed + function confirmEdgeByChildren(bytes32 edgeId) external; + + /// @notice An edge can be confirmed if the total amount of time it and a single chain of its direct ancestors + /// has spent unrivaled is greater than the challenge period. + /// @dev Edges inherit time from their parents, so the sum of unrivaled timers is compared against the threshold. + /// Given that an edge cannot become unrivaled after becoming rivaled, once the threshold is passed + /// it will always remain passed. The direct ancestors of an edge are linked by parent-child links for edges + /// of the same edgeType, and claimId-edgeId links for zero layer edges that claim an edge in the level above. + /// This method also includes the amount of time the assertion being claimed spent without a sibling + /// @param edgeId The id of the edge to confirm + /// @param ancestorEdgeIds The ids of the direct ancestors of an edge. These are ordered from the parent first, then going to grand-parent, + /// great-grandparent etc. The chain can extend only as far as the zero layer edge of type Block. + function confirmEdgeByTime( + bytes32 edgeId, + bytes32[] calldata ancestorEdgeIds, + ExecutionStateData calldata claimStateData + ) external; + + /// @notice If a confirmed edge exists whose claim id is equal to this edge, then this edge can be confirmed + /// @dev When zero layer edges are created they reference an edge, or assertion, in the level above. 
If a zero layer + /// edge is confirmed, it becomes possible to also confirm the edge that it claims + /// @param edgeId The id of the edge to confirm + /// @param claimingEdgeId The id of the edge which has a claimId equal to edgeId + function confirmEdgeByClaim(bytes32 edgeId, bytes32 claimingEdgeId) external; + + /// @notice Confirm an edge by executing a one step proof + /// @dev One step proofs can only be executed against edges that have length one and of type SmallStep + /// @param edgeId The id of the edge to confirm + /// @param oneStepData Input data to the one step proof + /// @param prevConfig Data about the config set in prev + /// @param beforeHistoryInclusionProof Proof that the state which is the start of the edge is committed to by the startHistoryRoot + /// @param afterHistoryInclusionProof Proof that the state which is the end of the edge is committed to by the endHistoryRoot + function confirmEdgeByOneStepProof( + bytes32 edgeId, + OneStepData calldata oneStepData, + ConfigData calldata prevConfig, + bytes32[] calldata beforeHistoryInclusionProof, + bytes32[] calldata afterHistoryInclusionProof + ) external; + + /// @notice When zero layer block edges are created a stake is also provided + /// The stake on this edge can be refunded if the edge is confirme + function refundStake(bytes32 edgeId) external; + + /// @notice Zero layer edges have to be a fixed height. + /// This function returns the end height for a given edge type + function getLayerZeroEndHeight(EdgeType eType) external view returns (uint256); + + /// @notice Calculate the unique id of an edge + /// @param edgeType The type of edge + /// @param originId The origin id of the edge + /// @param startHeight The start height of the edge + /// @param startHistoryRoot The start history root of the edge + /// @param endHeight The end height of the edge + /// @param endHistoryRoot The end history root of the edge + function calculateEdgeId( + EdgeType edgeType, + bytes32 originId, + uint256 startHeight, + bytes32 startHistoryRoot, + uint256 endHeight, + bytes32 endHistoryRoot + ) external pure returns (bytes32); + + /// @notice Calculate the mutual id of the edge + /// Edges that are rivals share the same mutual id + /// @param edgeType The type of the edge + /// @param originId The origin id of the edge + /// @param startHeight The start height of the edge + /// @param startHistoryRoot The start history root of the edge + /// @param endHeight The end height of the edge + function calculateMutualId( + EdgeType edgeType, + bytes32 originId, + uint256 startHeight, + bytes32 startHistoryRoot, + uint256 endHeight + ) external pure returns (bytes32); + + /// @notice Has the edge already been stored in the manager + function edgeExists(bytes32 edgeId) external view returns (bool); + + /// @notice Get full edge data for an edge + function getEdge(bytes32 edgeId) external view returns (ChallengeEdge memory); + + /// @notice The length of the edge, from start height to end height + function edgeLength(bytes32 edgeId) external view returns (uint256); + + /// @notice Does this edge currently have one or more rivals + /// Rival edges share the same mutual id + function hasRival(bytes32 edgeId) external view returns (bool); + + /// @notice Does the edge have at least one rival, and it has length one + function hasLengthOneRival(bytes32 edgeId) external view returns (bool); + + /// @notice The amount of time this edge has spent without rivals + /// This value is increasing whilst an edge is unrivaled, once a rival is created + 
/// it is fixed. If an edge has rivals from the moment it is created then it will have + /// a zero time unrivaled + function timeUnrivaled(bytes32 edgeId) external view returns (uint256); + + /// @notice Get the id of the prev assertion that this edge is originates from + /// @dev Uses the parent chain to traverse upwards SmallStep->BigStep->Block->Assertion + /// until it gets to the origin assertion + function getPrevAssertionHash(bytes32 edgeId) external view returns (bytes32); + + /// @notice Fetch the raw first rival record for this edge + /// @dev Returns 0 if the edge does not exist + /// Returns a magic string if the edge exists but is unrivaled + /// Returns the id of the second edge created with the same mutual id as this edge, if a rival exists + function firstRival(bytes32 edgeId) external view returns (bytes32); +} + +/// @title A challenge manager that uses edge structures to decide between Assertions +/// @notice When two assertions are created that have the same predecessor the protocol needs to decide which of the two is correct +/// This challenge manager allows the staker who has created the valid assertion to enforce that it will be confirmed, and all +/// other rival assertions will be rejected. The challenge is all-vs-all in that all assertions with the same +/// predecessor will vie for succession against each other. Stakers compete by creating edges that reference the assertion they +/// believe in. These edges are then bisected, reducing the size of the disagreement with each bisection, and narrowing in on the +/// exact point of disagreement. Eventually, at step size 1, the step can be proved on-chain directly proving that the related assertion +/// must be invalid. +contract EdgeChallengeManager is IEdgeChallengeManager, Initializable { + using EdgeChallengeManagerLib for EdgeStore; + using ChallengeEdgeLib for ChallengeEdge; + using SafeERC20 for IERC20; + + /// @notice A new edge has been added to the challenge manager + /// @param edgeId The id of the newly added edge + /// @param mutualId The mutual id of the added edge - all rivals share the same mutual id + /// @param originId The origin id of the added edge - origin ids link an edge to the level above + /// @param hasRival Does the newly added edge have a rival upon creation + /// @param length The length of the new edge + /// @param eType The type of the new edge + /// @param isLayerZero Whether the new edge was added at layer zero - has a claim and a staker + event EdgeAdded( + bytes32 indexed edgeId, + bytes32 indexed mutualId, + bytes32 indexed originId, + bytes32 claimId, + uint256 length, + EdgeType eType, + bool hasRival, + bool isLayerZero + ); + + /// @notice An edge has been bisected + /// @param edgeId The id of the edge that was bisected + /// @param lowerChildId The id of the lower child created during bisection + /// @param upperChildId The id of the upper child created during bisection + /// @param lowerChildAlreadyExists When an edge is bisected the lower child may already exist - created by a rival. + event EdgeBisected( + bytes32 indexed edgeId, bytes32 indexed lowerChildId, bytes32 indexed upperChildId, bool lowerChildAlreadyExists + ); + + /// @notice An edge can be confirmed if both of its children were already confirmed. 
+ /// @param edgeId The edge that was confirmed + /// @param mutualId The mutual id of the confirmed edge + event EdgeConfirmedByChildren(bytes32 indexed edgeId, bytes32 indexed mutualId); + + /// @notice An edge can be confirmed if the cumulative time unrivaled of it and a direct chain of ancestors is greater than a threshold + /// @param edgeId The edge that was confirmed + /// @param mutualId The mutual id of the confirmed edge + /// @param totalTimeUnrivaled The cumulative amount of time this edge spent unrivaled + event EdgeConfirmedByTime(bytes32 indexed edgeId, bytes32 indexed mutualId, uint256 totalTimeUnrivaled); + + /// @notice An edge can be confirmed if a zero layer edge in the level below claims this edge + /// @param edgeId The edge that was confirmed + /// @param mutualId The mutual id of the confirmed edge + /// @param claimingEdgeId The id of the zero layer edge that claimed this edge + event EdgeConfirmedByClaim(bytes32 indexed edgeId, bytes32 indexed mutualId, bytes32 claimingEdgeId); + + /// @notice A SmallStep edge of length 1 can be confirmed via a one step proof + /// @param edgeId The edge that was confirmed + /// @param mutualId The mutual id of the confirmed edge + event EdgeConfirmedByOneStepProof(bytes32 indexed edgeId, bytes32 indexed mutualId); + + /// @notice A stake has been refunded for a confirmed layer zero block edge + /// @param edgeId The edge that was confirmed + /// @param mutualId The mutual id of the confirmed edge + /// @param stakeToken The ERC20 being refunded + /// @param stakeAmount The amount of tokens being refunded + event EdgeRefunded(bytes32 indexed edgeId, bytes32 indexed mutualId, address stakeToken, uint256 stakeAmount); + + /// @dev Store for all edges and rival data + /// All edges, including edges from different challenges, are stored together in the same store + /// Since edge ids include the origin id, which is unique for each challenge, we can be sure that + /// edges from different challenges cannot have the same id, and so can be stored in the same store + EdgeStore internal store; + + /// @notice When creating a zero layer block edge a stake must be supplied. However since we know that only + /// one edge in a group of rivals can ever be confirmed, we only need to keep one stake in this contract + /// to later refund for that edge. Other stakes can immediately be sent to an excess stake receiver. 
+ /// This excess stake receiver can then choose to refund the gas of participants who aided in the confirmation + /// of the winning edge + address public excessStakeReceiver; + + /// @notice The token to supply stake in + IERC20 public stakeToken; + + /// @notice The amount of stake token to be supplied when creating a zero layer block edge + uint256 public stakeAmount; + + /// @notice The number of blocks accumulated on an edge before it can be confirmed by time + uint256 public challengePeriodBlocks; + + /// @notice The assertion chain about which challenges are created + IAssertionChain public assertionChain; + + /// @inheritdoc IEdgeChallengeManager + IOneStepProofEntry public override oneStepProofEntry; + + /// @notice The end height of layer zero Block edges + uint256 public LAYERZERO_BLOCKEDGE_HEIGHT; + /// @notice The end height of layer zero BigStep edges + uint256 public LAYERZERO_BIGSTEPEDGE_HEIGHT; + /// @notice The end height of layer zero SmallStep edges + uint256 public LAYERZERO_SMALLSTEPEDGE_HEIGHT; + + constructor() { + _disableInitializers(); + } + + /// @inheritdoc IEdgeChallengeManager + function initialize( + IAssertionChain _assertionChain, + uint256 _challengePeriodBlocks, + IOneStepProofEntry _oneStepProofEntry, + uint256 layerZeroBlockEdgeHeight, + uint256 layerZeroBigStepEdgeHeight, + uint256 layerZeroSmallStepEdgeHeight, + IERC20 _stakeToken, + uint256 _stakeAmount, + address _excessStakeReceiver + ) public initializer { + if (address(_assertionChain) == address(0)) { + revert EmptyAssertionChain(); + } + assertionChain = _assertionChain; + if (address(_oneStepProofEntry) == address(0)) { + revert EmptyOneStepProofEntry(); + } + oneStepProofEntry = _oneStepProofEntry; + if (_challengePeriodBlocks == 0) { + revert EmptyChallengePeriod(); + } + challengePeriodBlocks = _challengePeriodBlocks; + + stakeToken = _stakeToken; + stakeAmount = _stakeAmount; + if (_excessStakeReceiver == address(0)) { + revert EmptyStakeReceiver(); + } + excessStakeReceiver = _excessStakeReceiver; + + if (!EdgeChallengeManagerLib.isPowerOfTwo(layerZeroBlockEdgeHeight)) { + revert NotPowerOfTwo(layerZeroBlockEdgeHeight); + } + LAYERZERO_BLOCKEDGE_HEIGHT = layerZeroBlockEdgeHeight; + if (!EdgeChallengeManagerLib.isPowerOfTwo(layerZeroBigStepEdgeHeight)) { + revert NotPowerOfTwo(layerZeroBigStepEdgeHeight); + } + LAYERZERO_BIGSTEPEDGE_HEIGHT = layerZeroBigStepEdgeHeight; + if (!EdgeChallengeManagerLib.isPowerOfTwo(layerZeroSmallStepEdgeHeight)) { + revert NotPowerOfTwo(layerZeroSmallStepEdgeHeight); + } + LAYERZERO_SMALLSTEPEDGE_HEIGHT = layerZeroSmallStepEdgeHeight; + } + + ///////////////////////////// + // STATE MUTATING SECTIION // + ///////////////////////////// + + /// @inheritdoc IEdgeChallengeManager + function createLayerZeroEdge(CreateEdgeArgs calldata args) external returns (bytes32) { + EdgeAddedData memory edgeAdded; + uint256 expectedEndHeight = getLayerZeroEndHeight(args.edgeType); + AssertionReferenceData memory ard; + if (args.edgeType == EdgeType.Block) { + // for block type edges we need to provide some extra assertion data context + if (args.proof.length == 0) { + revert EmptyEdgeSpecificProof(); + } + (, ExecutionStateData memory predecessorStateData, ExecutionStateData memory claimStateData) = + abi.decode(args.proof, (bytes32[], ExecutionStateData, ExecutionStateData)); + + assertionChain.validateAssertionHash( + args.claimId, claimStateData.executionState, claimStateData.prevAssertionHash, claimStateData.inboxAcc + ); + + assertionChain.validateAssertionHash( + 
claimStateData.prevAssertionHash, + predecessorStateData.executionState, + predecessorStateData.prevAssertionHash, + predecessorStateData.inboxAcc + ); + + ard = AssertionReferenceData( + args.claimId, + claimStateData.prevAssertionHash, + assertionChain.isPending(args.claimId), + assertionChain.getSecondChildCreationBlock(claimStateData.prevAssertionHash) > 0, + predecessorStateData.executionState, + claimStateData.executionState + ); + + edgeAdded = store.createLayerZeroEdge(args, ard, oneStepProofEntry, expectedEndHeight); + } else { + edgeAdded = store.createLayerZeroEdge(args, ard, oneStepProofEntry, expectedEndHeight); + } + + IERC20 st = stakeToken; + uint256 sa = stakeAmount; + // when a zero layer edge is created it must include stake amount. Each time a zero layer + // edge is created it forces the honest participants to do some work, so we want to disincentive + // their creation. The amount should also be enough to pay for the gas costs incurred by the honest + // participant. This can be arranged out of bound by the excess stake receiver. + // The contract initializer can disable staking by setting zeros for token or amount, to change + // this a new challenge manager needs to be deployed and its address updated in the assertion chain + if (address(st) != address(0) && sa != 0) { + // since only one edge in a group of rivals can ever be confirmed, we know that we + // will never need to refund more than one edge. Therefore we can immediately send + // all stakes provided after the first one to an excess stake receiver. + address receiver = edgeAdded.hasRival ? excessStakeReceiver : address(this); + st.safeTransferFrom(msg.sender, receiver, sa); + } + + emit EdgeAdded( + edgeAdded.edgeId, + edgeAdded.mutualId, + edgeAdded.originId, + edgeAdded.claimId, + edgeAdded.length, + edgeAdded.eType, + edgeAdded.hasRival, + edgeAdded.isLayerZero + ); + return edgeAdded.edgeId; + } + + /// @inheritdoc IEdgeChallengeManager + function bisectEdge(bytes32 edgeId, bytes32 bisectionHistoryRoot, bytes calldata prefixProof) + external + returns (bytes32, bytes32) + { + (bytes32 lowerChildId, EdgeAddedData memory lowerChildAdded, EdgeAddedData memory upperChildAdded) = + store.bisectEdge(edgeId, bisectionHistoryRoot, prefixProof); + + bool lowerChildAlreadyExists = lowerChildAdded.edgeId == 0; + // the lower child might already exist, if it didnt then a new + // edge was added + if (!lowerChildAlreadyExists) { + emit EdgeAdded( + lowerChildAdded.edgeId, + lowerChildAdded.mutualId, + lowerChildAdded.originId, + lowerChildAdded.claimId, + lowerChildAdded.length, + lowerChildAdded.eType, + lowerChildAdded.hasRival, + lowerChildAdded.isLayerZero + ); + } + // upper child is always added + emit EdgeAdded( + upperChildAdded.edgeId, + upperChildAdded.mutualId, + upperChildAdded.originId, + upperChildAdded.claimId, + upperChildAdded.length, + upperChildAdded.eType, + upperChildAdded.hasRival, + upperChildAdded.isLayerZero + ); + + emit EdgeBisected(edgeId, lowerChildId, upperChildAdded.edgeId, lowerChildAlreadyExists); + + return (lowerChildId, upperChildAdded.edgeId); + } + + /// @inheritdoc IEdgeChallengeManager + function confirmEdgeByChildren(bytes32 edgeId) public { + store.confirmEdgeByChildren(edgeId); + + emit EdgeConfirmedByChildren(edgeId, store.edges[edgeId].mutualId()); + } + + /// @inheritdoc IEdgeChallengeManager + function confirmEdgeByClaim(bytes32 edgeId, bytes32 claimingEdgeId) public { + store.confirmEdgeByClaim(edgeId, claimingEdgeId); + + emit EdgeConfirmedByClaim(edgeId, 
store.edges[edgeId].mutualId(), claimingEdgeId);
+    }
+
+    /// @inheritdoc IEdgeChallengeManager
+    function confirmEdgeByTime(
+        bytes32 edgeId,
+        bytes32[] memory ancestorEdges,
+        ExecutionStateData calldata claimStateData
+    ) public {
+        // if there are no ancestors provided, then the top edge is the edge we're confirming itself
+        bytes32 lastEdgeId = ancestorEdges.length > 0 ? ancestorEdges[ancestorEdges.length - 1] : edgeId;
+        ChallengeEdge storage topEdge = store.get(lastEdgeId);
+
+        if (topEdge.eType != EdgeType.Block) {
+            revert EdgeTypeNotBlock(topEdge.eType);
+        }
+        if (!topEdge.isLayerZero()) {
+            revert EdgeNotLayerZero(topEdge.id(), topEdge.staker, topEdge.claimId);
+        }
+
+        uint256 assertionBlocks;
+        // if the assertion being claimed against was the first child of its predecessor
+        // then we are able to count the time between the first and second child as time towards
+        // this edge
+        bool isFirstChild = assertionChain.isFirstChild(topEdge.claimId);
+        if (isFirstChild) {
+            assertionChain.validateAssertionHash(
+                topEdge.claimId,
+                claimStateData.executionState,
+                claimStateData.prevAssertionHash,
+                claimStateData.inboxAcc
+            );
+            assertionBlocks = assertionChain.getSecondChildCreationBlock(claimStateData.prevAssertionHash)
+                - assertionChain.getFirstChildCreationBlock(claimStateData.prevAssertionHash);
+        } else {
+            // if the assertion being claimed is not the first child, then it had siblings from the moment
+            // it was created, so it has no time unrivaled
+            assertionBlocks = 0;
+        }
+
+        uint256 totalTimeUnrivaled =
+            store.confirmEdgeByTime(edgeId, ancestorEdges, assertionBlocks, challengePeriodBlocks);
+
+        emit EdgeConfirmedByTime(edgeId, store.edges[edgeId].mutualId(), totalTimeUnrivaled);
+    }
+
+    /// @inheritdoc IEdgeChallengeManager
+    function confirmEdgeByOneStepProof(
+        bytes32 edgeId,
+        OneStepData calldata oneStepData,
+        ConfigData calldata prevConfig,
+        bytes32[] calldata beforeHistoryInclusionProof,
+        bytes32[] calldata afterHistoryInclusionProof
+    ) public {
+        bytes32 prevAssertionHash = store.getPrevAssertionHash(edgeId);
+
+        assertionChain.validateConfig(prevAssertionHash, prevConfig);
+
+        ExecutionContext memory execCtx = ExecutionContext({
+            maxInboxMessagesRead: prevConfig.nextInboxPosition,
+            bridge: assertionChain.bridge(),
+            initialWasmModuleRoot: prevConfig.wasmModuleRoot
+        });
+
+        store.confirmEdgeByOneStepProof(
+            edgeId, oneStepProofEntry, oneStepData, execCtx, beforeHistoryInclusionProof, afterHistoryInclusionProof
+        );
+
+        emit EdgeConfirmedByOneStepProof(edgeId, store.edges[edgeId].mutualId());
+    }
+
+    /// @inheritdoc IEdgeChallengeManager
+    function refundStake(bytes32 edgeId) public {
+        ChallengeEdge storage edge = store.get(edgeId);
+        // setting refunded also checks that the edge cannot be refunded twice
+        edge.setRefunded();
+
+        IERC20 st = stakeToken;
+        uint256 sa = stakeAmount;
+        // no need to refund if the token or amount were zeroed out
+        if (address(st) != address(0) && sa != 0) {
+            st.safeTransfer(edge.staker, sa);
+        }
+
+        emit EdgeRefunded(edgeId, store.edges[edgeId].mutualId(), address(st), sa);
+    }
+
+    ///////////////////////
+    // VIEW ONLY SECTION //
+    ///////////////////////
+
+    /// @inheritdoc IEdgeChallengeManager
+    function getLayerZeroEndHeight(EdgeType eType) public view returns (uint256) {
+        if (eType == EdgeType.Block) {
+            return LAYERZERO_BLOCKEDGE_HEIGHT;
+        } else if (eType == EdgeType.BigStep) {
+            return LAYERZERO_BIGSTEPEDGE_HEIGHT;
+        } else if (eType == EdgeType.SmallStep) {
+            return LAYERZERO_SMALLSTEPEDGE_HEIGHT;
+        }
else { + revert("Unrecognised edge type"); + } + } + + /// @inheritdoc IEdgeChallengeManager + function calculateEdgeId( + EdgeType edgeType, + bytes32 originId, + uint256 startHeight, + bytes32 startHistoryRoot, + uint256 endHeight, + bytes32 endHistoryRoot + ) public pure returns (bytes32) { + return + ChallengeEdgeLib.idComponent(edgeType, originId, startHeight, startHistoryRoot, endHeight, endHistoryRoot); + } + + /// @inheritdoc IEdgeChallengeManager + function calculateMutualId( + EdgeType edgeType, + bytes32 originId, + uint256 startHeight, + bytes32 startHistoryRoot, + uint256 endHeight + ) public pure returns (bytes32) { + return ChallengeEdgeLib.mutualIdComponent(edgeType, originId, startHeight, startHistoryRoot, endHeight); + } + + /// @inheritdoc IEdgeChallengeManager + function edgeExists(bytes32 edgeId) public view returns (bool) { + return store.edges[edgeId].exists(); + } + + /// @inheritdoc IEdgeChallengeManager + function getEdge(bytes32 edgeId) public view returns (ChallengeEdge memory) { + return store.get(edgeId); + } + + /// @inheritdoc IEdgeChallengeManager + function edgeLength(bytes32 edgeId) public view returns (uint256) { + return store.get(edgeId).length(); + } + + /// @inheritdoc IEdgeChallengeManager + function hasRival(bytes32 edgeId) public view returns (bool) { + return store.hasRival(edgeId); + } + + /// @inheritdoc IEdgeChallengeManager + function hasLengthOneRival(bytes32 edgeId) public view returns (bool) { + return store.hasLengthOneRival(edgeId); + } + + /// @inheritdoc IEdgeChallengeManager + function timeUnrivaled(bytes32 edgeId) public view returns (uint256) { + return store.timeUnrivaled(edgeId); + } + + /// @inheritdoc IEdgeChallengeManager + function getPrevAssertionHash(bytes32 edgeId) public view returns (bytes32) { + return store.getPrevAssertionHash(edgeId); + } + + /// @inheritdoc IEdgeChallengeManager + function firstRival(bytes32 edgeId) public view returns (bytes32) { + return store.firstRivals[edgeId]; + } +} diff --git a/src/challengeV2/IAssertionChain.sol b/src/challengeV2/IAssertionChain.sol new file mode 100644 index 00000000..fc172acc --- /dev/null +++ b/src/challengeV2/IAssertionChain.sol @@ -0,0 +1,26 @@ +// Copyright 2023, Offchain Labs, Inc. 
+// For license information, see https://github.com/offchainlabs/bold/blob/main/LICENSE
+// SPDX-License-Identifier: BUSL-1.1
+//
+pragma solidity ^0.8.17;
+
+import "../bridge/IBridge.sol";
+import "../osp/IOneStepProofEntry.sol";
+import "../rollup/Assertion.sol";
+
+/// @title Assertion chain interface
+/// @notice The interface required by the EdgeChallengeManager for requesting assertion data from the AssertionChain
+interface IAssertionChain {
+    function bridge() external view returns (IBridge);
+    function validateAssertionHash(
+        bytes32 assertionHash,
+        ExecutionState calldata state,
+        bytes32 prevAssertionHash,
+        bytes32 inboxAcc
+    ) external view;
+    function validateConfig(bytes32 assertionHash, ConfigData calldata configData) external view;
+    function getFirstChildCreationBlock(bytes32 assertionHash) external view returns (uint256);
+    function getSecondChildCreationBlock(bytes32 assertionHash) external view returns (uint256);
+    function isFirstChild(bytes32 assertionHash) external view returns (bool);
+    function isPending(bytes32 assertionHash) external view returns (bool);
+}
diff --git a/src/challengeV2/libraries/ArrayUtilsLib.sol b/src/challengeV2/libraries/ArrayUtilsLib.sol
new file mode 100644
index 00000000..957fe75b
--- /dev/null
+++ b/src/challengeV2/libraries/ArrayUtilsLib.sol
@@ -0,0 +1,55 @@
+// Copyright 2023, Offchain Labs, Inc.
+// For license information, see https://github.com/offchainlabs/bold/blob/main/LICENSE
+// SPDX-License-Identifier: BUSL-1.1
+//
+pragma solidity ^0.8.17;
+
+/// @title Array utils library
+/// @notice Utils for working with bytes32 arrays
+library ArrayUtilsLib {
+    /// @notice Append an item to the end of an array
+    /// @param arr The array to append to
+    /// @param newItem The item to append
+    function append(bytes32[] memory arr, bytes32 newItem) internal pure returns (bytes32[] memory) {
+        bytes32[] memory clone = new bytes32[](arr.length + 1);
+        for (uint256 i = 0; i < arr.length; i++) {
+            clone[i] = arr[i];
+        }
+        clone[clone.length - 1] = newItem;
+        return clone;
+    }
+
+    /// @notice Get a slice of an existing array
+    /// @dev End index exclusive, so slice(arr, 0, 5) gets the first 5 elements of arr
+    /// @param arr Array to slice
+    /// @param startIndex The start index of the slice in the original array - inclusive
+    /// @param endIndex The end index of the slice in the original array - exclusive
+    function slice(bytes32[] memory arr, uint256 startIndex, uint256 endIndex)
+        internal
+        pure
+        returns (bytes32[] memory)
+    {
+        require(startIndex < endIndex, "Start not less than end");
+        require(endIndex <= arr.length, "End not less or equal than length");
+
+        bytes32[] memory newArr = new bytes32[](endIndex - startIndex);
+        for (uint256 i = startIndex; i < endIndex; i++) {
+            newArr[i - startIndex] = arr[i];
+        }
+        return newArr;
+    }
+
+    /// @notice Concatenate two arrays
+    /// @param arr1 First array
+    /// @param arr2 Second array
+    function concat(bytes32[] memory arr1, bytes32[] memory arr2) internal pure returns (bytes32[] memory) {
+        bytes32[] memory full = new bytes32[](arr1.length + arr2.length);
+        for (uint256 i = 0; i < arr1.length; i++) {
+            full[i] = arr1[i];
+        }
+        for (uint256 i = 0; i < arr2.length; i++) {
+            full[arr1.length + i] = arr2[i];
+        }
+        return full;
+    }
+}
\ No newline at end of file
diff --git a/src/challengeV2/libraries/ChallengeEdgeLib.sol b/src/challengeV2/libraries/ChallengeEdgeLib.sol
new file mode 100644
index 00000000..693f4aca
--- /dev/null
+++ b/src/challengeV2/libraries/ChallengeEdgeLib.sol
@@ -0,0 +1,261 @@
+// Copyright 2023, Offchain Labs, Inc. +// For license information, see https://github.com/offchainlabs/bold/blob/main/LICENSE +// SPDX-License-Identifier: BUSL-1.1 +// +pragma solidity ^0.8.17; + +import "./Enums.sol"; +import "./ChallengeErrors.sol"; + +/// @notice An edge committing to a range of states. These edges will be bisected, slowly +/// reducing them in length until they reach length one. At that point new edges of a different +/// type will be added that claim the result of this edge, or a one step proof will be calculated +/// if the edge type is already SmallStep. +struct ChallengeEdge { + /// @notice The origin id is a link from the edge to an edge or assertion at a higher type. The types + /// of edge are Block, BigStep and SmallStep. + /// Intuitively all edges with the same origin id agree on the information committed to in the origin id + /// For a SmallStep edge the origin id is the 'mutual' id of the length one BigStep edge being claimed by the zero layer ancestors of this edge + /// For a BigStep edge the origin id is the 'mutual' id of the length one Block edge being claimed by the zero layer ancestors of this edge + /// For a Block edge the origin id is the assertion hash of the assertion that is the root of the challenge - all edges in this challenge agree + /// that that assertion hash is valid. + /// The purpose of the origin id is to ensure that only edges that agree on a common start position + /// are being compared against one another. + bytes32 originId; + /// @notice A root of all the states in the history up to the startHeight + bytes32 startHistoryRoot; + /// @notice The number of states (+1 for 0 index) that the startHistoryRoot commits to + uint256 startHeight; + /// @notice A root of all the states in the history up to the endHeight. Since endHeight > startHeight, the startHistoryRoot must + /// commit to a prefix of the states committed to by the endHistoryRoot + bytes32 endHistoryRoot; + /// @notice The number of states (+1 for 0 index) that the endHistoryRoot commits to + uint256 endHeight; + /// @notice Edges can be bisected into two children. If this edge has been bisected the id of the + /// lower child is populated here, until that time this value is 0. The lower child has startHistoryRoot and startHeight + /// equal to this edge, but endHistoryRoot and endHeight equal to some prefix of the endHistoryRoot of this edge + bytes32 lowerChildId; + /// @notice Edges can be bisected into two children. If this edge has been bisected the id of the + /// upper child is populated here, until that time this value is 0. The upper child has startHistoryRoot and startHeight + /// equal to some prefix of the endHistoryRoot of this edge, and endHistoryRoot and endHeight equal to this edge + bytes32 upperChildId; + /// @notice The block number when this edge was created + uint256 createdAtBlock; + /// @notice The edge or assertion in the upper level that this edge claims to be true. + /// Only populated on zero layer edges + bytes32 claimId; + /// @notice The entity that supplied a mini-stake accompanying this edge + /// Only populated on zero layer edges + address staker; + /// @notice Current status of this edge. All edges are created Pending, and may be updated to Confirmed + /// Once Confirmed they cannot transition back to Pending + EdgeStatus status; + /// @notice The type of edge Block, BigStep or SmallStep that this edge is. + EdgeType eType; + /// @notice Set to true when the staker has been refunded. 
Can only be set to true if the status is Confirmed + /// and the staker is non zero. + bool refunded; +} + +library ChallengeEdgeLib { + /// @notice Common checks to do when adding an edge + function newEdgeChecks( + bytes32 originId, + bytes32 startHistoryRoot, + uint256 startHeight, + bytes32 endHistoryRoot, + uint256 endHeight + ) internal pure { + if (originId == 0) { + revert EmptyOriginId(); + } + if (endHeight <= startHeight) { + revert InvalidHeights(startHeight, endHeight); + } + if (startHistoryRoot == 0) { + revert EmptyStartRoot(); + } + if (endHistoryRoot == 0) { + revert EmptyEndRoot(); + } + } + + /// @notice Create a new layer zero edge. These edges make claims about length one edges in the level + /// (edge type) above. Creating a layer zero edge also requires placing a mini stake, so information + /// about that staker is also stored on this edge. + function newLayerZeroEdge( + bytes32 originId, + bytes32 startHistoryRoot, + uint256 startHeight, + bytes32 endHistoryRoot, + uint256 endHeight, + bytes32 claimId, + address staker, + EdgeType eType + ) internal view returns (ChallengeEdge memory) { + if (staker == address(0)) { + revert EmptyStaker(); + } + if (claimId == 0) { + revert EmptyClaimId(); + } + + newEdgeChecks(originId, startHistoryRoot, startHeight, endHistoryRoot, endHeight); + + return ChallengeEdge({ + originId: originId, + startHeight: startHeight, + startHistoryRoot: startHistoryRoot, + endHeight: endHeight, + endHistoryRoot: endHistoryRoot, + lowerChildId: 0, + upperChildId: 0, + createdAtBlock: block.number, + claimId: claimId, + staker: staker, + status: EdgeStatus.Pending, + eType: eType, + refunded: false + }); + } + + /// @notice Creates a new child edge. All edges except layer zero edges are child edges. + /// These are edges that are created by bisection, and have parents rather than claims. + function newChildEdge( + bytes32 originId, + bytes32 startHistoryRoot, + uint256 startHeight, + bytes32 endHistoryRoot, + uint256 endHeight, + EdgeType eType + ) internal view returns (ChallengeEdge memory) { + newEdgeChecks(originId, startHistoryRoot, startHeight, endHistoryRoot, endHeight); + + return ChallengeEdge({ + originId: originId, + startHeight: startHeight, + startHistoryRoot: startHistoryRoot, + endHeight: endHeight, + endHistoryRoot: endHistoryRoot, + lowerChildId: 0, + upperChildId: 0, + createdAtBlock: block.number, + claimId: 0, + staker: address(0), + status: EdgeStatus.Pending, + eType: eType, + refunded: false + }); + } + + /// @notice The "mutualId" of an edge. A mutual id is a hash of all the data that is shared by rivals. + /// Rivals have the same start height, start history root and end height. They also have the same origin id and type. + /// The difference between rivals is that they have a different endHistoryRoot, so that information + /// is not included in this hash. + function mutualIdComponent( + EdgeType eType, + bytes32 originId, + uint256 startHeight, + bytes32 startHistoryRoot, + uint256 endHeight + ) internal pure returns (bytes32) { + return keccak256(abi.encodePacked(eType, originId, startHeight, startHistoryRoot, endHeight)); + } + + /// @notice The "mutualId" of an edge. A mutual id is a hash of all the data that is shared by rivals. + /// Rivals have the same start height, start history root and end height. They also have the same origin id and type. + /// The difference between rivals is that they have a different endHistoryRoot, so that information + /// is not included in this hash. 
+ function mutualId(ChallengeEdge storage ce) internal view returns (bytes32) { + return mutualIdComponent(ce.eType, ce.originId, ce.startHeight, ce.startHistoryRoot, ce.endHeight); + } + + /// @notice The id of an edge. Edges are uniquely identified by their id, and commit to the same information + function idComponent( + EdgeType eType, + bytes32 originId, + uint256 startHeight, + bytes32 startHistoryRoot, + uint256 endHeight, + bytes32 endHistoryRoot + ) internal pure returns (bytes32) { + return keccak256( + abi.encodePacked( + mutualIdComponent(eType, originId, startHeight, startHistoryRoot, endHeight), endHistoryRoot + ) + ); + } + + /// @notice The id of an edge. Edges are uniquely identified by their id, and commit to the same information + /// @dev This separate idMem method is to be explicit about when ChallengeEdges are copied into memory. It is + /// possible to pass a storage edge to this method and the id be computed correctly, but that would load + /// the whole struct into memory, so we're explicit here that this should be used for edges already in memory. + function idMem(ChallengeEdge memory edge) internal pure returns (bytes32) { + return idComponent( + edge.eType, edge.originId, edge.startHeight, edge.startHistoryRoot, edge.endHeight, edge.endHistoryRoot + ); + } + + /// @notice The id of an edge. Edges are uniquely identified by their id, and commit to the same information + function id(ChallengeEdge storage edge) internal view returns (bytes32) { + return idComponent( + edge.eType, edge.originId, edge.startHeight, edge.startHistoryRoot, edge.endHeight, edge.endHistoryRoot + ); + } + + /// @notice Does this edge exist in storage + function exists(ChallengeEdge storage edge) internal view returns (bool) { + // All edges have a createdAtBlock number + return edge.createdAtBlock != 0; + } + + /// @notice The length of this edge - difference between the start and end heights + function length(ChallengeEdge storage edge) internal view returns (uint256) { + uint256 len = edge.endHeight - edge.startHeight; + // It's impossible for a zero length edge to exist + if (len == 0) { + revert EdgeNotExists(ChallengeEdgeLib.id(edge)); + } + return len; + } + + /// @notice Set the children of an edge + /// @dev Children can only be set once + function setChildren(ChallengeEdge storage edge, bytes32 lowerChildId, bytes32 upperChildId) internal { + if (edge.lowerChildId != 0 || edge.upperChildId != 0) { + revert ChildrenAlreadySet(ChallengeEdgeLib.id(edge), edge.lowerChildId, edge.upperChildId); + } + edge.lowerChildId = lowerChildId; + edge.upperChildId = upperChildId; + } + + /// @notice Set the status of an edge to Confirmed + /// @dev Only Pending edges can be confirmed + function setConfirmed(ChallengeEdge storage edge) internal { + if (edge.status != EdgeStatus.Pending) { + revert EdgeNotPending(ChallengeEdgeLib.id(edge), edge.status); + } + edge.status = EdgeStatus.Confirmed; + } + + /// @notice Is the edge a layer zero edge. 
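// Illustrative aside (hypothetical values, not part of the diff): two rival edges agree on everything
// except their end history root, e.g. edgeA has endHistoryRoot Ra and edgeB has endHistoryRoot Rb
// with Ra != Rb. Then mutualId(edgeA) == mutualId(edgeB), since the mutual id only hashes the shared
// fields, while id(edgeA) != id(edgeB), since the id additionally hashes the endHistoryRoot.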
+ function isLayerZero(ChallengeEdge storage edge) internal view returns (bool) { + return edge.claimId != 0 && edge.staker != address(0); + } + + /// @notice Set the refunded flag of an edge + /// @dev Checks internally that edge is confirmed, layer zero edge and hasnt been refunded already + function setRefunded(ChallengeEdge storage edge) internal { + if (edge.status != EdgeStatus.Confirmed) { + revert EdgeNotConfirmed(ChallengeEdgeLib.id(edge), edge.status); + } + if (!isLayerZero(edge)) { + revert EdgeNotLayerZero(ChallengeEdgeLib.id(edge), edge.staker, edge.claimId); + } + if (edge.refunded == true) { + revert EdgeAlreadyRefunded(ChallengeEdgeLib.id(edge)); + } + + edge.refunded = true; + } +} diff --git a/src/challengeV2/libraries/ChallengeErrors.sol b/src/challengeV2/libraries/ChallengeErrors.sol new file mode 100644 index 00000000..2fd2e5ff --- /dev/null +++ b/src/challengeV2/libraries/ChallengeErrors.sol @@ -0,0 +1,92 @@ +// Copyright 2023, Offchain Labs, Inc. +// For license information, see https://github.com/offchainlabs/bold/blob/main/LICENSE +// SPDX-License-Identifier: BUSL-1.1 +// +pragma solidity ^0.8.17; + +import "./Enums.sol"; + +/// @dev The edge is not currently stored +error EdgeNotExists(bytes32 edgeId); +/// @dev The edge has already been stored +error EdgeAlreadyExists(bytes32 edgeId); +/// @dev The provided assertion hash was empty +error AssertionHashEmpty(); +/// @dev The assertion hashes are not the same, but should have been +error AssertionHashMismatch(bytes32 h1, bytes32 h2); +/// @dev The assertion is not currently pending +error AssertionNotPending(); +/// @dev The assertion has no sibling +error AssertionNoSibling(); +/// @dev The edge type specific proof data is empty +error EmptyEdgeSpecificProof(); +/// @dev The start machine status is empty +error EmptyStartMachineStatus(); +/// @dev The end machine status is empty +error EmptyEndMachineStatus(); +/// @dev The claim edge is not pending +error ClaimEdgeNotPending(); +/// @dev The claim edge does not have a length one rival +error ClaimEdgeNotLengthOneRival(bytes32 claimId); +/// @dev The claim edge has an invalid type +error ClaimEdgeInvalidType(EdgeType argType, EdgeType claimType); +/// @dev The val is not a power of two +error NotPowerOfTwo(uint256 val); +/// @dev The height has an unexpected value +error InvalidEndHeight(uint256 actualHeight, uint256 expectedHeight); +/// @dev The prefix proof is empty +error EmptyPrefixProof(); +/// @dev The edge is not of type Block +error EdgeTypeNotBlock(EdgeType eType); +/// @dev The edge is not of type SmallStep +error EdgeTypeNotSmallStep(EdgeType eType); +/// @dev The first rival record is empty +error EmptyFirstRival(); +/// @dev The difference between two heights is less than 2 +error HeightDiffLtTwo(uint256 h1, uint256 h2); +/// @dev The edge is not pending +error EdgeNotPending(bytes32 edgeId, EdgeStatus status); +/// @dev The edge is unrivaled +error EdgeUnrivaled(bytes32 edgeId); +/// @dev The edge is not confirmed +error EdgeNotConfirmed(bytes32 edgeId, EdgeStatus); +/// @dev The edge type is unexpected +error EdgeTypeInvalid(bytes32 edgeId1, bytes32 edgeId2, EdgeType type1, EdgeType type2); +/// @dev The claim id on the claimingEdge does not match the provided edge id +error EdgeClaimMismatch(bytes32 edgeId, bytes32 claimingEdgeId); +/// @dev The origin id is not equal to the mutual id +error OriginIdMutualIdMismatch(bytes32 mutualId, bytes32 originId); +/// @dev The edge does not have a valid ancestor link +error EdgeNotAncestor( + bytes32 
edgeId, bytes32 lowerChildId, bytes32 upperChildId, bytes32 ancestorEdgeId, bytes32 claimId +); +/// @dev The total number of blocks is not above the threshold +error InsufficientConfirmationBlocks(uint256 totalBlocks, uint256 thresholdBlocks); +/// @dev The edge is not of length one +error EdgeNotLengthOne(uint256 length); +/// @dev No origin id supplied when creating an edge +error EmptyOriginId(); +/// @dev Invalid heights supplied when creating an edge +error InvalidHeights(uint256 start, uint256 end); +/// @dev No start root supplied when creating an edge +error EmptyStartRoot(); +/// @dev No end root supplied when creating an edge +error EmptyEndRoot(); +/// @dev No staker supplied when creating a layer zero edge +error EmptyStaker(); +/// @dev No claim id supplied when creating a layer zero edge +error EmptyClaimId(); +/// @dev Children already set on edge +error ChildrenAlreadySet(bytes32 edgeId, bytes32 lowerChildId, bytes32 upperChildId); +/// @dev Edge is not a layer zero edge +error EdgeNotLayerZero(bytes32 edgeId, address staker, bytes32 claimId); +/// @dev The edge staker has already been refunded +error EdgeAlreadyRefunded(bytes32 edgeId); +/// @dev No assertion chain address supplied +error EmptyAssertionChain(); +/// @dev No one step proof entry address supplied +error EmptyOneStepProofEntry(); +/// @dev No challenge period supplied +error EmptyChallengePeriod(); +/// @dev No stake receiver address supplied +error EmptyStakeReceiver(); diff --git a/src/challengeV2/libraries/EdgeChallengeManagerLib.sol b/src/challengeV2/libraries/EdgeChallengeManagerLib.sol new file mode 100644 index 00000000..e7bf6de6 --- /dev/null +++ b/src/challengeV2/libraries/EdgeChallengeManagerLib.sol @@ -0,0 +1,796 @@ +// Copyright 2023, Offchain Labs, Inc. +// For license information, see https://github.com/offchainlabs/bold/blob/main/LICENSE +// SPDX-License-Identifier: BUSL-1.1 +// +pragma solidity ^0.8.17; + +import "./UintUtilsLib.sol"; +import "./MerkleTreeLib.sol"; +import "./ChallengeEdgeLib.sol"; +import "../../osp/IOneStepProofEntry.sol"; +import "../../libraries/Constants.sol"; +import "./ChallengeErrors.sol"; + +/// @notice An execution state and proof to show that it's valid +struct ExecutionStateData { + /// @notice An execution state + ExecutionState executionState; + /// @notice assertion Hash of the prev assertion + bytes32 prevAssertionHash; + /// @notice Inbox accumulator of the assertion + bytes32 inboxAcc; +} + +/// @notice Data for creating a layer zero edge +struct CreateEdgeArgs { + /// @notice The type of edge to be created + EdgeType edgeType; + /// @notice The end history root of the edge to be created + bytes32 endHistoryRoot; + /// @notice The end height of the edge to be created. + /// @dev End height is deterministic for different edge types but supplying it here gives the + /// caller a bit of extra security that they are supplying data for the correct type of edge + uint256 endHeight; + /// @notice The edge, or assertion, that is being claimed correct by the newly created edge. 
+ bytes32 claimId; + /// @notice Proof that the start history root commits to a prefix of the states that + /// end history root commits to + bytes prefixProof; + /// @notice Edge type specific data + /// For Block type edges this is the abi encoding of: + /// bytes32[]: Inclusion proof - proof to show that the end state is the last state in the end history root + /// ExecutionStateData: the before state of the edge + /// ExecutionStateData: the after state of the edge + /// bytes32 predecessorId: id of the prev assertion + /// bytes32 inboxAcc: the inbox accumulator of the assertion + /// For BigStep and SmallStep edges this is the abi encoding of: + /// bytes32: Start state - first state the edge commits to + /// bytes32: End state - last state the edge commits to + /// bytes32[]: Claim start inclusion proof - proof to show the start state is the first state in the claim edge + /// bytes32[]: Claim end inclusion proof - proof to show the end state is the last state in the claim edge + /// bytes32[]: Inclusion proof - proof to show that the end state is the last state in the end history root + bytes proof; +} + +/// @notice Data parsed raw proof data +struct ProofData { + /// @notice The first state being committed to by an edge + bytes32 startState; + /// @notice The last state being committed to by an edge + bytes32 endState; + /// @notice A proof that the end state is included in the edge + bytes32[] inclusionProof; +} + +/// @notice Stores all edges and their rival status +struct EdgeStore { + /// @notice A mapping of edge id to edges. Edges are never deleted, only created, and potentially confirmed. + mapping(bytes32 => ChallengeEdge) edges; + /// @notice A mapping of mutualId to edge id. Rivals share the same mutual id, and here we + /// store the edge id of the second edge that was created with the same mutual id - the first rival + /// When only one edge exists for a specific mutual id then a special magic string hash is stored instead + /// of the first rival id, to signify that a single edge does exist with this mutual id + mapping(bytes32 => bytes32) firstRivals; +} + +/// @notice Input data to a one step proof +struct OneStepData { + /// @notice The hash of the state that's being executed from + bytes32 beforeHash; + /// @notice Proof data to accompany the execution context + bytes proof; +} + +/// @notice Data about a recently added edge +struct EdgeAddedData { + bytes32 edgeId; + bytes32 mutualId; + bytes32 originId; + bytes32 claimId; + uint256 length; + EdgeType eType; + bool hasRival; + bool isLayerZero; +} + +/// @notice Data about an assertion that is being claimed by an edge +/// @dev This extra information that is needed in order to verify that a block edge can be created +struct AssertionReferenceData { + /// @notice The id of the assertion - will be used in a sanity check + bytes32 assertionHash; + /// @notice The predecessor of the assertion + bytes32 predecessorId; + /// @notice Is the assertion pending + bool isPending; + /// @notice Does the assertion have a sibling + bool hasSibling; + /// @notice The execution state of the predecessor assertion + ExecutionState startState; + /// @notice The execution state of the assertion being claimed + ExecutionState endState; +} + +/// @title Core functionality for the Edge Challenge Manager +/// @notice The edge manager library allows edges to be added and bisected, and keeps track of the amount +/// of time an edge remained unrivaled. 
+library EdgeChallengeManagerLib { + using ChallengeEdgeLib for ChallengeEdge; + using GlobalStateLib for GlobalState; + + /// @dev Magic string hash to represent that a edges with a given mutual id have no rivals + bytes32 public constant UNRIVALED = keccak256(abi.encodePacked("UNRIVALED")); + + /// @notice Get an edge from the store + /// @dev Throws if the edge does not exist in the store + /// @param store The edge store to fetch an id from + /// @param edgeId The id of the edge to fetch + function get(EdgeStore storage store, bytes32 edgeId) internal view returns (ChallengeEdge storage) { + if (!store.edges[edgeId].exists()) { + revert EdgeNotExists(edgeId); + } + return store.edges[edgeId]; + } + + /// @notice Gets an edge from the store without checking if it exists + /// @dev Useful where you already know the edge exists in the store - avoid a storage lookup + /// @param store The edge store to fetch an id from + /// @param edgeId The id of the edge to fetch + function getNoCheck(EdgeStore storage store, bytes32 edgeId) internal view returns (ChallengeEdge storage) { + return store.edges[edgeId]; + } + + /// @notice Adds a new edge to the store + /// @dev Updates first rival info for later use in calculating time unrivaled + /// @param store The store to add the edge to + /// @param edge The edge to add + function add(EdgeStore storage store, ChallengeEdge memory edge) internal returns (EdgeAddedData memory) { + bytes32 eId = edge.idMem(); + // add the edge if it doesnt exist already + if (store.edges[eId].exists()) { + revert EdgeAlreadyExists(eId); + } + store.edges[eId] = edge; + + // edges that are rivals share the same mutual id + // we use records of whether a mutual id has ever been added to decide if + // the new edge is a rival. This will later allow us to calculate time an edge + // stayed unrivaled + bytes32 mutualId = ChallengeEdgeLib.mutualIdComponent( + edge.eType, edge.originId, edge.startHeight, edge.startHistoryRoot, edge.endHeight + ); + bytes32 firstRival = store.firstRivals[mutualId]; + + // the first time we add a mutual id we store a magic string hash against it + // We do this to distinguish from there being no edges + // with this mutual. And to distinguish it from the first rival, where we + // will use an actual edge id so that we can look up the created when time + // of the first rival, and use it for calculating time unrivaled + if (firstRival == 0) { + store.firstRivals[mutualId] = UNRIVALED; + } else if (firstRival == UNRIVALED) { + store.firstRivals[mutualId] = eId; + } else { + // after we've stored the first rival we dont need to keep a record of any + // other rival edges - they will all have a zero time unrivaled + } + + return EdgeAddedData( + eId, + mutualId, + edge.originId, + edge.claimId, + store.edges[eId].length(), + edge.eType, + firstRival != 0, + edge.claimId != 0 + ); + } + + /// @notice Conduct checks that are specific to the edge type. + /// @dev Since different edge types also require different proofs, we also include the specific + /// proof parsing logic and return the common parts for later use. 
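// To make the rival bookkeeping in add(...) above concrete, here is how the firstRivals entry for a
// single mutual id evolves (hypothetical edge ids, for illustration only):
//   no edge added yet:              firstRivals[mutualId] == 0
//   first edge e1 added:            firstRivals[mutualId] == UNRIVALED (magic hash)
//   second edge e2 (a rival) added: firstRivals[mutualId] == id of e2, which fixes e1's unrivaled timer
//   any later rivals e3, e4, ...:   firstRivals[mutualId] unchanged - they are rivaled from creation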
+ /// @param store The store containing current edges + /// @param args The edge creation args + /// @param ard Data about the assertion that is also needed when creating a Block type edge + /// The created edge must be shown to be consistent with the states in the assertion chain + /// Empty for non block edge type edges + /// @param oneStepProofEntry The one step proof contract that defines how machine states are hashed + /// @return Data parsed from the proof, or fetched from elsewhere. Also the origin id for the edge to be created. + function layerZeroTypeSpecificChecks( + EdgeStore storage store, + CreateEdgeArgs calldata args, + AssertionReferenceData memory ard, + IOneStepProofEntry oneStepProofEntry + ) private view returns (ProofData memory, bytes32) { + if (args.edgeType == EdgeType.Block) { + // origin id is the assertion which is the root of challenge + // all rivals and their children share the same origin id - it is a link to the information + // they agree on + bytes32 originId = ard.predecessorId; + + // Sanity check: The assertion reference data should be related to the claim + // Of course the caller can provide whatever args they wish, so this is really just a helpful + // check to avoid mistakes + if (ard.assertionHash == 0) { + revert AssertionHashEmpty(); + } + if (ard.assertionHash != args.claimId) { + revert AssertionHashMismatch(ard.assertionHash, args.claimId); + } + + // if the assertion is already confirmed or rejected then it can't be referenced as a claim + if (!ard.isPending) { + revert AssertionNotPending(); + } + + // if the claim doesn't have a sibling then it is undisputed, there's no need + // to open challenge edges for it + if (!ard.hasSibling) { + revert AssertionNoSibling(); + } + + // parse the inclusion proof for later use + if (args.proof.length == 0) { + revert EmptyEdgeSpecificProof(); + } + (bytes32[] memory inclusionProof,,) = + abi.decode(args.proof, (bytes32[], ExecutionStateData, ExecutionStateData)); + + // check the start and end execution states exist, the block hash entry should be non zero + if (ard.startState.machineStatus == MachineStatus.RUNNING) { + revert EmptyStartMachineStatus(); + } + if (ard.endState.machineStatus == MachineStatus.RUNNING) { + revert EmptyEndMachineStatus(); + } + + // Create machine hashes out of the proven state + bytes32 startStateHash = oneStepProofEntry.getMachineHash(ard.startState); + bytes32 endStateHash = oneStepProofEntry.getMachineHash(ard.endState); + + return (ProofData(startStateHash, endStateHash, inclusionProof), originId); + } else { + // Claim must be length one.
If it is unrivaled then its unrivaled time is ticking up, so there's + // no need to create claims against it + if (!hasLengthOneRival(store, args.claimId)) { + revert ClaimEdgeNotLengthOneRival(args.claimId); + } + + // hasLengthOneRival checks existance, so we can use getNoCheck + ChallengeEdge storage claimEdge = getNoCheck(store, args.claimId); + + // origin id is the mutual id of the claim + // all rivals and their children share the same origin id - it is a link to the information + // they agree on + bytes32 originId = claimEdge.mutualId(); + + // once a claim is confirmed it's status can never become pending again, so there is no point + // opening a challenge that references it + if (claimEdge.status != EdgeStatus.Pending) { + revert ClaimEdgeNotPending(); + } + + // the edge must be a level down from the claim + if (args.edgeType != EdgeChallengeManagerLib.nextEdgeType(claimEdge.eType)) { + revert ClaimEdgeInvalidType(args.edgeType, claimEdge.eType); + } + + // parse the proofs + if (args.proof.length == 0) { + revert EmptyEdgeSpecificProof(); + } + ( + bytes32 startState, + bytes32 endState, + bytes32[] memory claimStartInclusionProof, + bytes32[] memory claimEndInclusionProof, + bytes32[] memory edgeInclusionProof + ) = abi.decode(args.proof, (bytes32, bytes32, bytes32[], bytes32[], bytes32[])); + + // if the start and end states are consistent with the claim edge + // this guarantees that the edge we're creating is a 'continuation' of the claim edge, it is + // a commitment to the states that between start and end states of the claim + MerkleTreeLib.verifyInclusionProof( + claimEdge.startHistoryRoot, startState, claimEdge.startHeight, claimStartInclusionProof + ); + + // it's doubly important to check the end state since if the end state since the claim id is + // not part of the edge id, so we need to ensure that it's not possible to create two edges of the + // same id, but with different claim id. Ensuring that the end state is linked to the claim, + // and later ensuring that the end state is part of the history commitment of the new edge ensures + // that the end history root of the new edge will be different for different claim ids, and therefore + // the edge ids will be different + MerkleTreeLib.verifyInclusionProof( + claimEdge.endHistoryRoot, endState, claimEdge.endHeight, claimEndInclusionProof + ); + + return (ProofData(startState, endState, edgeInclusionProof), originId); + } + } + + /// @notice Check that a uint is a power of 2 + function isPowerOfTwo(uint256 x) internal pure returns (bool) { + // zero is not a power of 2 + if (x == 0) { + return false; + } + + // if x is a power of 2, then this will be 0111111 + uint256 y = x - 1; + + // if x is a power of 2 then y will share no bits with x + return ((x & y) == 0); + } + + /// @notice Common checks that apply to all layer zero edges + /// @param proofData Data extracted from supplied proof + /// @param args The edge creation args + /// @param expectedEndHeight Edges have a deterministic end height dependent on their type + function layerZeroCommonChecks(ProofData memory proofData, CreateEdgeArgs calldata args, uint256 expectedEndHeight) + private + pure + returns (bytes32) + { + // since zero layer edges have a start height of zero, we know that they are a size + // one tree containing only the start state. 
We can then compute the history root directly + bytes32 startHistoryRoot = MerkleTreeLib.root(MerkleTreeLib.appendLeaf(new bytes32[](0), proofData.startState)); + + // all end heights are expected to be a power of 2, the specific power is defined by the + // edge challenge manager itself + if (!isPowerOfTwo(expectedEndHeight)) { + revert NotPowerOfTwo(expectedEndHeight); + } + + // It isnt strictly necessary to pass in the end height, we know what it + // should be so we could just use the end height that we get from getLayerZeroEndHeight + // However it's a nice sanity check for the calling code to check that their local edge + // will have the same height as the one created here + if (args.endHeight != expectedEndHeight) { + revert InvalidEndHeight(args.endHeight, expectedEndHeight); + } + + // the end state is checked/determined as part of the specific edge type + // We then ensure that that same end state is part of the end history root we're creating + // This ensures continuity of states between levels - the state is present in both this + // level and the one above + MerkleTreeLib.verifyInclusionProof( + args.endHistoryRoot, proofData.endState, args.endHeight, proofData.inclusionProof + ); + + // start root must always be a prefix of end root, we ensure that + // this new edge adheres to this. Future bisections will ensure that this + // property is conserved + if (args.prefixProof.length == 0) { + revert EmptyPrefixProof(); + } + (bytes32[] memory preExpansion, bytes32[] memory preProof) = + abi.decode(args.prefixProof, (bytes32[], bytes32[])); + MerkleTreeLib.verifyPrefixProof( + startHistoryRoot, 1, args.endHistoryRoot, args.endHeight + 1, preExpansion, preProof + ); + + return (startHistoryRoot); + } + + /// @notice Creates a new layer zero edges from edge creation args + function toLayerZeroEdge(bytes32 originId, bytes32 startHistoryRoot, CreateEdgeArgs calldata args) + private + view + returns (ChallengeEdge memory) + { + return ChallengeEdgeLib.newLayerZeroEdge( + originId, startHistoryRoot, 0, args.endHistoryRoot, args.endHeight, args.claimId, msg.sender, args.edgeType + ); + } + + /// @notice Performs necessary checks and creates a new layer zero edge + /// @param store The store containing existing edges + /// @param args Edge data + /// @param ard If the edge being added is of Block type then additional assertion data is required + /// to check if the edge can be added. Empty if edge is not of type Block. + /// The supplied assertion data must be related to the assertion that is being claimed + /// by the supplied edge args + /// @param oneStepProofEntry The one step proof contract that defines how machine states are hashed + /// @param expectedEndHeight The expected end height of an edge. Layer zero block edges have predefined heights. 
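// Two small worked notes on layerZeroCommonChecks above (illustrative only, not part of the diff):
//   - a layer zero edge starts at height 0, so its startHistoryRoot is the root of a single leaf
//     tree and reduces to keccak256(abi.encodePacked(proofData.startState));
//   - the power of two check on expectedEndHeight uses the x & (x - 1) trick: for x = 8 (0b1000),
//     y = 7 (0b0111) and x & y == 0, so 8 is accepted; for x = 6 (0b0110), y = 5 (0b0101) and
//     x & y == 4, so 6 is rejected.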
+ function createLayerZeroEdge( + EdgeStore storage store, + CreateEdgeArgs calldata args, + AssertionReferenceData memory ard, + IOneStepProofEntry oneStepProofEntry, + uint256 expectedEndHeight + ) internal returns (EdgeAddedData memory) { + // each edge type requires some specific checks + (ProofData memory proofData, bytes32 originId) = + layerZeroTypeSpecificChecks(store, args, ard, oneStepProofEntry); + // all edge types share some common checks + (bytes32 startHistoryRoot) = layerZeroCommonChecks(proofData, args, expectedEndHeight); + // we only wrap the struct creation in a function as doing so inline exceeds the stack limit + ChallengeEdge memory ce = toLayerZeroEdge(originId, startHistoryRoot, args); + return add(store, ce); + } + + /// @notice From any given edge, get the id of the previous assertion + /// @param edgeId The edge to get the prev assertion hash for + function getPrevAssertionHash(EdgeStore storage store, bytes32 edgeId) internal view returns (bytes32) { + ChallengeEdge storage edge = get(store, edgeId); + + // if the edge is small step, find a big step edge that it's linked to + if (edge.eType == EdgeType.SmallStep) { + bytes32 bigStepEdgeId = store.firstRivals[edge.originId]; + edge = get(store, bigStepEdgeId); + } + + // if the edge is big step, find a block edge that it's linked to + if (edge.eType == EdgeType.BigStep) { + bytes32 blockEdgeId = store.firstRivals[edge.originId]; + edge = get(store, blockEdgeId); + } + + // Sanity Check: should never be hit for validly constructed edges + if (edge.eType != EdgeType.Block) { + revert EdgeTypeNotBlock(edge.eType); + } + + // For Block type edges the origin id is the assertion hash of claim prev + return edge.originId; + } + + /// @notice Does this edge currently have one or more rivals + /// Rival edges share the same startHeight, startHistoryCommitment and the same endHeight, + /// but they have a different endHistoryRoot. Rival edges have the same mutualId + /// @param store The edge store containing the edge + /// @param edgeId The edge id to test if it is unrivaled + function hasRival(EdgeStore storage store, bytes32 edgeId) internal view returns (bool) { + if (!store.edges[edgeId].exists()) { + revert EdgeNotExists(edgeId); + } + + // rivals have the same mutual id + bytes32 mutualId = store.edges[edgeId].mutualId(); + bytes32 firstRival = store.firstRivals[mutualId]; + // Sanity check: it should never be possible to create an edge without having an entry in firstRivals + if (firstRival == 0) { + revert EmptyFirstRival(); + } + + // can only have no rival if the firstRival is the UNRIVALED magic hash + return firstRival != UNRIVALED; + } + + /// @notice Is the edge a single step in length, and does it have at least one rival. + /// @param store The edge store containing the edge + /// @param edgeId The edge id to test for single step and rivaled + function hasLengthOneRival(EdgeStore storage store, bytes32 edgeId) internal view returns (bool) { + // must be length 1 and have rivals - all rivals have the same length + return (hasRival(store, edgeId) && store.edges[edgeId].length() == 1); + } + + /// @notice The amount of time (in blocks) this edge has spent without rivals + /// This value is increasing whilst an edge is unrivaled, once a rival is created + /// it is fixed.
If an edge has rivals from the moment it is created then it will have + /// a zero time unrivaled + function timeUnrivaled(EdgeStore storage store, bytes32 edgeId) internal view returns (uint256) { + if (!store.edges[edgeId].exists()) { + revert EdgeNotExists(edgeId); + } + + bytes32 mutualId = store.edges[edgeId].mutualId(); + bytes32 firstRival = store.firstRivals[mutualId]; + // Sanity check: it's not possible to have a 0 first rival for an edge that exists + if (firstRival == 0) { + revert EmptyFirstRival(); + } + + // this edge has no rivals, the time is still going up + // we give the current amount of time unrivaled + if (firstRival == UNRIVALED) { + return block.number - store.edges[edgeId].createdAtBlock; + } else { + // Sanity check: it's not possible an edge does not exist for a first rival record + if (!store.edges[firstRival].exists()) { + revert EdgeNotExists(firstRival); + } + + // rivals exist for this edge + uint256 firstRivalCreatedAtBlock = store.edges[firstRival].createdAtBlock; + uint256 edgeCreatedAtBlock = store.edges[edgeId].createdAtBlock; + if (firstRivalCreatedAtBlock > edgeCreatedAtBlock) { + // if this edge was created before the first rival then we return the difference + // in createdAtBlock number + return firstRivalCreatedAtBlock - edgeCreatedAtBlock; + } else { + // if this was created at the same time as, or after the the first rival + // then we return 0 + return 0; + } + } + } + + /// @notice Given a start and an endpoint determine the bisection height + /// @dev Returns the highest power of 2 in the differing lower bits of start and end + function mandatoryBisectionHeight(uint256 start, uint256 end) internal pure returns (uint256) { + if (end - start < 2) { + revert HeightDiffLtTwo(start, end); + } + if (end - start == 2) { + return start + 1; + } + + uint256 diff = (end - 1) ^ start; + uint256 mostSignificantSharedBit = UintUtilsLib.mostSignificantBit(diff); + uint256 mask = type(uint256).max << mostSignificantSharedBit; + return ((end - 1) & mask); + } + + /// @notice Bisect and edge. This creates two child edges: + /// lowerChild: has the same start root and height as this edge, but a different end root and height + /// upperChild: has the same end root and height as this edge, but a different start root and height + /// The lower child end root and height are equal to the upper child start root and height. This height + /// is the mandatoryBisectionHeight. 
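// A worked example of the deterministic bisection heights (illustrative only, not part of the diff):
// an edge spanning heights 0..8 must bisect at mandatoryBisectionHeight(0, 8) = 4, producing children
// spanning 0..4 and 4..8; those bisect at mandatoryBisectionHeight(0, 4) = 2 and
// mandatoryBisectionHeight(4, 8) = 6, and a length two edge such as 0..2 returns start + 1 = 1.
// Rival edges therefore always bisect at the same heights, which is what allows them to share
// (merge into) an identical lower child when they agree on that sub-range.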
+ /// The lower child may already exist, however it's not possible for the upper child to exist as that would + /// mean that the edge has already been bisected + /// @param store The edge store containing the edge to bisect + /// @param edgeId Edge to bisect + /// @param bisectionHistoryRoot The new history root to be used in the lower and upper children + /// @param prefixProof A proof to show that the bisectionHistoryRoot commits to a prefix of the current endHistoryRoot + /// @return lowerChildId The id of the newly created lower child edge + /// @return lowerChildAdded Data about the lower child edge, empty if the lower child already existed + /// @return upperChildAdded Data about the upper child edge, never empty + function bisectEdge(EdgeStore storage store, bytes32 edgeId, bytes32 bisectionHistoryRoot, bytes memory prefixProof) + internal + returns (bytes32, EdgeAddedData memory, EdgeAddedData memory) + { + if (store.edges[edgeId].status != EdgeStatus.Pending) { + revert EdgeNotPending(edgeId, store.edges[edgeId].status); + } + if (!hasRival(store, edgeId)) { + revert EdgeUnrivaled(edgeId); + } + + // cannot bisect an edge twice + // hasRival above checks that the edge exists - so no need to check again + ChallengeEdge memory ce = getNoCheck(store, edgeId); + + // bisections occur at deterministic heights, this ensures that + // rival edges bisect at the same height, and create the same child if they agree + uint256 middleHeight = mandatoryBisectionHeight(ce.startHeight, ce.endHeight); + { + (bytes32[] memory preExpansion, bytes32[] memory proof) = abi.decode(prefixProof, (bytes32[], bytes32[])); + MerkleTreeLib.verifyPrefixProof( + bisectionHistoryRoot, middleHeight + 1, ce.endHistoryRoot, ce.endHeight + 1, preExpansion, proof + ); + } + + bytes32 lowerChildId; + EdgeAddedData memory lowerChildAdded; + { + // midpoint proof is valid, create and store the children + ChallengeEdge memory lowerChild = ChallengeEdgeLib.newChildEdge( + ce.originId, ce.startHistoryRoot, ce.startHeight, bisectionHistoryRoot, middleHeight, ce.eType + ); + lowerChildId = lowerChild.idMem(); + // it's possible that the store already has the lower child if it was created by a rival + // (aka a merge move) + if (!store.edges[lowerChildId].exists()) { + lowerChildAdded = add(store, lowerChild); + } + } + + EdgeAddedData memory upperChildAdded; + { + ChallengeEdge memory upperChild = ChallengeEdgeLib.newChildEdge( + ce.originId, bisectionHistoryRoot, middleHeight, ce.endHistoryRoot, ce.endHeight, ce.eType + ); + + // add checks existence and throws if the id already exists + upperChildAdded = add(store, upperChild); + } + + store.edges[edgeId].setChildren(lowerChildId, upperChildAdded.edgeId); + + return (lowerChildId, lowerChildAdded, upperChildAdded); + } + + /// @notice Confirm an edge if both its children are already confirmed + function confirmEdgeByChildren(EdgeStore storage store, bytes32 edgeId) internal { + if (!store.edges[edgeId].exists()) { + revert EdgeNotExists(edgeId); + } + + bytes32 lowerChildId = store.edges[edgeId].lowerChildId; + // Sanity check: bisect should already enforce that this child exists + if (!store.edges[lowerChildId].exists()) { + revert EdgeNotExists(lowerChildId); + } + if (store.edges[lowerChildId].status != EdgeStatus.Confirmed) { + revert EdgeNotConfirmed(lowerChildId, store.edges[lowerChildId].status); + } + + bytes32 upperChildId = store.edges[edgeId].upperChildId; + // Sanity check: bisect should already enforce that this child exists + if
(!store.edges[upperChildId].exists()) { + revert EdgeNotExists(upperChildId); + } + if (store.edges[upperChildId].status != EdgeStatus.Confirmed) { + revert EdgeNotConfirmed(upperChildId, store.edges[upperChildId].status); + } + + // we also check the edge is pending in setConfirmed() + store.edges[edgeId].setConfirmed(); + } + + /// @notice Returns the sub edge type of the provided edge type + function nextEdgeType(EdgeType eType) internal pure returns (EdgeType) { + if (eType == EdgeType.Block) { + return EdgeType.BigStep; + } else if (eType == EdgeType.BigStep) { + return EdgeType.SmallStep; + } else if (eType == EdgeType.SmallStep) { + revert("No next type after SmallStep"); + } else { + revert("Unexpected edge type"); + } + } + + /// @notice Check that the originId of a claiming edge matched the mutualId() of a supplied edge + /// @dev Does some additional sanity checks to ensure that the claim id link is valid + /// @param store The store containing all edges and rivals + /// @param edgeId The edge being claimed + /// @param claimingEdgeId The edge with a claim id equal to edge id + function checkClaimIdLink(EdgeStore storage store, bytes32 edgeId, bytes32 claimingEdgeId) private view { + // we do some extra checks that edge being claimed is eligible to be claimed by the claiming edge + // these shouldn't be necessary since it should be impossible to add layer zero edges that do not + // satisfy the checks below, but we conduct these checks anyway for double safety + + // the origin id of an edge should be the mutual id of the edge in the level above + if (store.edges[edgeId].mutualId() != store.edges[claimingEdgeId].originId) { + revert OriginIdMutualIdMismatch(store.edges[edgeId].mutualId(), store.edges[claimingEdgeId].originId); + } + // the claiming edge must be exactly one level below + if (nextEdgeType(store.edges[edgeId].eType) != store.edges[claimingEdgeId].eType) { + revert EdgeTypeInvalid( + edgeId, claimingEdgeId, nextEdgeType(store.edges[edgeId].eType), store.edges[claimingEdgeId].eType + ); + } + } + + /// @notice If a confirmed edge exists whose claim id is equal to this edge, then this edge can be confirmed + /// @dev When zero layer edges are created they reference an edge, or assertion, in the level above. If a zero layer + /// edge is confirmed, it becomes possible to also confirm the edge that it claims + /// @param store The store containing all edges and rivals data + /// @param edgeId The id of the edge to confirm + /// @param claimingEdgeId The id of the edge which has a claimId equal to edgeId + function confirmEdgeByClaim(EdgeStore storage store, bytes32 edgeId, bytes32 claimingEdgeId) internal { + if (!store.edges[edgeId].exists()) { + revert EdgeNotExists(edgeId); + } + + // the claiming edge is confirmed + if (!store.edges[claimingEdgeId].exists()) { + revert EdgeNotExists(edgeId); + } + if (store.edges[claimingEdgeId].status != EdgeStatus.Confirmed) { + revert EdgeNotConfirmed(claimingEdgeId, store.edges[claimingEdgeId].status); + } + + checkClaimIdLink(store, edgeId, claimingEdgeId); + if (edgeId != store.edges[claimingEdgeId].claimId) { + revert EdgeClaimMismatch(edgeId, store.edges[claimingEdgeId].claimId); + } + + // we also check the edge is pending in setConfirmed() + store.edges[edgeId].setConfirmed(); + } + + /// @notice An edge can be confirmed if the total amount of time (in blocks) it and a single chain of its direct ancestors + /// has spent unrivaled is greater than the challenge period. 
+ /// @dev Edges inherit time from their parents, so the sum of unrivaled timer is compared against the threshold. + /// Given that an edge cannot become unrivaled after becoming rivaled, once the threshold is passed + /// it will always remain passed. The direct ancestors of an edge are linked by parent-child links for edges + /// of the same edgeType, and claimId-edgeId links for zero layer edges that claim an edge in the level above. + /// @param store The edge store containing all edges and rival data + /// @param edgeId The id of the edge to confirm + /// @param ancestorEdgeIds The ids of the direct ancestors of an edge. These are ordered from the parent first, then going to grand-parent, + /// great-grandparent etc. The chain can extend only as far as the zero layer edge of type Block. + /// @param claimedAssertionUnrivaledBlocks The number of blocks that the assertion ultimately being claimed by this edge spent unrivaled + /// @param confirmationThresholdBlock The number of blocks that the total unrivaled time of an ancestor chain needs to exceed in + /// order to be confirmed + function confirmEdgeByTime( + EdgeStore storage store, + bytes32 edgeId, + bytes32[] memory ancestorEdgeIds, + uint256 claimedAssertionUnrivaledBlocks, + uint256 confirmationThresholdBlock + ) internal returns (uint256) { + if (!store.edges[edgeId].exists()) { + revert EdgeNotExists(edgeId); + } + + bytes32 currentEdgeId = edgeId; + uint256 totalTimeUnrivaled = timeUnrivaled(store, edgeId); + + // ancestors start from parent, then extend upwards + for (uint256 i = 0; i < ancestorEdgeIds.length; i++) { + ChallengeEdge storage e = get(store, ancestorEdgeIds[i]); + // the ancestor must either have a parent-child link + // or have a claim id-edge link when the ancestor is of a different edge type to its child + if (e.lowerChildId == currentEdgeId || e.upperChildId == currentEdgeId) { + totalTimeUnrivaled += timeUnrivaled(store, e.id()); + currentEdgeId = ancestorEdgeIds[i]; + } else if (ancestorEdgeIds[i] == store.edges[currentEdgeId].claimId) { + checkClaimIdLink(store, ancestorEdgeIds[i], currentEdgeId); + totalTimeUnrivaled += timeUnrivaled(store, e.id()); + currentEdgeId = ancestorEdgeIds[i]; + } else { + revert EdgeNotAncestor( + currentEdgeId, + e.lowerChildId, + e.upperChildId, + ancestorEdgeIds[i], + store.edges[currentEdgeId].claimId + ); + } + } + + // since sibling assertions have the same predecessor, they can be viewed as + // rival edges. Adding the assertion unrivaled time allows us to start the confirmation + // timer from the moment the first assertion is made, rather than having to wait until the + // second assertion is made. 
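// A worked example with hypothetical numbers (illustrative only): if this edge was unrivaled for
// 10 blocks, its parent for 25 blocks, the remaining ancestors for 0 blocks, and the claimed
// assertion for 40 blocks, then totalTimeUnrivaled = 10 + 25 + 0 + 40 = 75, and the edge is
// confirmable only if confirmationThresholdBlock <= 75.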
+ totalTimeUnrivaled += claimedAssertionUnrivaledBlocks; + + if (totalTimeUnrivaled < confirmationThresholdBlock) { + revert InsufficientConfirmationBlocks(totalTimeUnrivaled, confirmationThresholdBlock); + } + + // we also check the edge is pending in setConfirmed() + store.edges[edgeId].setConfirmed(); + + return totalTimeUnrivaled; + } + + /// @notice Confirm an edge by executing a one step proof + /// @dev One step proofs can only be executed against edges that have length one and of type SmallStep + /// @param store The edge store containing all edges and rival data + /// @param edgeId The id of the edge to confirm + /// @param oneStepProofEntry The one step proof contract + /// @param oneStepData Input data to the one step proof + /// @param beforeHistoryInclusionProof Proof that the state which is the start of the edge is committed to by the startHistoryRoot + /// @param afterHistoryInclusionProof Proof that the state which is the end of the edge is committed to by the endHistoryRoot + function confirmEdgeByOneStepProof( + EdgeStore storage store, + bytes32 edgeId, + IOneStepProofEntry oneStepProofEntry, + OneStepData calldata oneStepData, + ExecutionContext memory execCtx, + bytes32[] calldata beforeHistoryInclusionProof, + bytes32[] calldata afterHistoryInclusionProof + ) internal { + // get checks existence + uint256 machineStep = get(store, edgeId).startHeight; + + // edge must be length one and be of type SmallStep + if (store.edges[edgeId].eType != EdgeType.SmallStep) { + revert EdgeTypeNotSmallStep(store.edges[edgeId].eType); + } + if (store.edges[edgeId].length() != 1) { + revert EdgeNotLengthOne(store.edges[edgeId].length()); + } + + // the state in the onestep data must be committed to by the startHistoryRoot + MerkleTreeLib.verifyInclusionProof( + store.edges[edgeId].startHistoryRoot, oneStepData.beforeHash, machineStep, beforeHistoryInclusionProof + ); + + // execute the single step to produce the after state + bytes32 afterHash = + oneStepProofEntry.proveOneStep(execCtx, machineStep, oneStepData.beforeHash, oneStepData.proof); + + // check that the after state was indeed committed to by the endHistoryRoot + MerkleTreeLib.verifyInclusionProof( + store.edges[edgeId].endHistoryRoot, afterHash, machineStep + 1, afterHistoryInclusionProof + ); + + // we also check the edge is pending in setConfirmed() + store.edges[edgeId].setConfirmed(); + } +} diff --git a/src/challengeV2/libraries/Enums.sol b/src/challengeV2/libraries/Enums.sol new file mode 100644 index 00000000..b5df37fc --- /dev/null +++ b/src/challengeV2/libraries/Enums.sol @@ -0,0 +1,26 @@ +// Copyright 2023, Offchain Labs, Inc. +// For license information, see https://github.com/offchainlabs/bold/blob/main/LICENSE +// SPDX-License-Identifier: BUSL-1.1 +// +pragma solidity ^0.8.17; + +/// @notice The status of the edge +/// - Pending: Yet to be confirmed. Not all edges can be confirmed. +/// - Confirmed: Once confirmed it cannot transition back to pending +enum EdgeStatus { + Pending, + Confirmed +} + +/// @notice The type of the edge. Challenges are decomposed into 3 types of subchallenge +/// represented here by the edge type. Edges are initially created of type Block +/// and are then bisected until they have length one. After that new BigStep edges are +/// added that claim a Block type edge, and are then bisected until they have length one. +/// Then a SmallStep edge is added that claims a length one BigStep edge, and these +/// SmallStep edges are bisected until they reach length one. 
A length one small step edge +/// can then be directly executed using a one-step proof. +enum EdgeType { + Block, + BigStep, + SmallStep +} diff --git a/src/challengeV2/libraries/MerkleTreeLib.sol b/src/challengeV2/libraries/MerkleTreeLib.sol new file mode 100644 index 00000000..dfaa1260 --- /dev/null +++ b/src/challengeV2/libraries/MerkleTreeLib.sol @@ -0,0 +1,366 @@ +// Copyright 2023, Offchain Labs, Inc. +// For license information, see https://github.com/offchainlabs/bold/blob/main/LICENSE +// SPDX-License-Identifier: BUSL-1.1 +// +pragma solidity ^0.8.17; + +import "../../libraries/MerkleLib.sol"; +import "./ArrayUtilsLib.sol"; +import "./UintUtilsLib.sol"; + +/// @title Binary merkle tree utilities +/// @notice +/// Binary trees +/// -------------------------------------------------------------------------------------------- +/// A complete tree is a balanced binary tree - each node has two children except the leaf +/// Leaves have no children, they are a complete tree of size one +/// Any tree (can be incomplete) can be represented as a collection of complete sub trees. +/// Since the tree is binary only one or zero complete tree at each level is enough to define any size of tree. +/// The root of a tree (incomplete or otherwise) is defined as the cumulative hashing of all of the +/// roots of each of it's complete and empty subtrees. +/// --------- +/// eg. Below a tree of size 3 is represented as the composition of 2 complete subtrees, one of size +/// 2 (AB) and one of size one (C). +/// AB +/// / \ +/// A B C +/// +/// Merkle expansions and roots +/// -------------------------------------------------------------------------------------------- +/// The minimal amount of information we need to keep in order to compute the root of a tree +/// is the roots of each of its sub trees, and the levels of each of those trees +/// A "merkle expansion" (ME) is this information - it is a vector of roots of each complete subtree, +/// the level of the tree being the index in the vector, the subtree root being the value. +/// The root is calculated by hashing each of the levels of the subtree together, adding zero hashes +/// where relevant to make a balanced tree. +/// --------- +/// +/// ME Example 1 +/// +/// C => (C) +/// +/// ME of the C tree = (C), root=(C) +/// The merkle expansion of a tree consisting of a single leaf is vector of size one with the +/// zeroth index being the leaf C. The zeroth index of the vector represents the presence of a size +/// one complete subtree in the overall tree. So if a tree has a size one complete subtree as part +/// of its composition, the root of that size one tree will be present in the zeroth index. +/// +/// ME Example 2 +/// +/// AB +/// / \ +/// A B +/// +/// ME of the AB tree = (0, AB), root=AB +/// The merkle expansion of a tree consisting of a single size 2 complete subtree is a vector +/// of size 2, with the zeroth index value being 0, and the 1st index value being the root of the size +/// 2 subtree. The zero in the zeroth index indicated that there is not a size 1 subtree in the tree's +/// composition. If a tree has a size 2 subtree in its composition its root will be present in the +/// 1st index. +/// +/// ME Example 3 +/// +/// AB +/// / \ +/// A B C +/// +/// ME of the composed ABC tree = (C, AB), root=hash(AB, hash(C, 0)). +/// When a tree is not itself a complete subtree, but rather a composition, zeros are added when +/// calculating the root. 
To do this hash the first complete sub tree with zero, and from there +/// cumulatively hash the merkle expansion. +/// The merkle expansion of this composed tree is a vector of size two. Since it has a size one tree in +/// its composition the root of that goes in the zeroth index of the expansion - C, and since it has a +/// size two tree in its composition the root of that goes in the 1st index, to give (C, AB). +/// +/// Tree operations +/// -------------------------------------------------------------------------------------------- +/// Binary trees are modified by adding or subtracting complete subtrees, however this library +/// supports additive only trees since we dont have a specific use for subtraction at the moment. +/// We call adding a complete subtree to an existing tree "appending", appending has the following +/// rules: +/// 1. Only a complete sub trees can be appended +/// 2. Complete sub trees can only be appended at the level of the lowest complete subtree in the tree, or below +/// 3. If the existing tree is empty a sub tree can be appended at any level +/// When appending a sub tree we may increase the size of the merkle expansion vector, in the same +/// that adding 1 to a binary number may increase the index of its most significant bit +/// --------- +/// eg. A complete subtree can only be appended to the ABC tree at level 0, since the its lowest complete +/// subtree (C) is at level 0. Doing so would create a complete sub tree at level 1, which would in turn +/// cause the creation of new size 4 sub tree +/// +/// ABCD +/// / \ +/// AB AB CD +/// / \ + = / \ / \ +/// A B C D A B C D +/// +/// ME of ABCD = (0, AB) + (C) + (D) +/// = (C, AB) + (D) +/// = (0, 0, ABCD) +/// root of ABCD =hash(AB, CD) +/// -------------------------------------------------------------------------------------------- +library MerkleTreeLib { + // the go code uses uint64, so we ensure we never go above that here + uint256 public constant MAX_LEVEL = 64; + + /// @notice The root of the subtree. A collision free commitment to the contents of the tree. + /// @dev The root of a tree is defined as the cumulative hashing of the + /// roots of all of it's subtrees. Throws error for empty tree + /// @param me The merkle expansion to calculate the root of + function root(bytes32[] memory me) internal pure returns (bytes32) { + require(me.length > 0, "Empty merkle expansion"); + require(me.length <= MAX_LEVEL, "Merkle expansion too large"); + + bytes32 accum = 0; + for (uint256 i = 0; i < me.length; i++) { + bytes32 val = me[i]; + if (accum == 0) { + if (val != 0) { + accum = val; + + // the tree is balanced if the only non zero entry in the merkle extension + // is the last entry + // otherwise the lowest level entry needs to be combined with a zero to balance the bottom + // level, after which zeros in the merkle extension above that will balance the rest + if (i != me.length - 1) { + accum = keccak256(abi.encodePacked(accum, bytes32(0))); + } + } + } else if (val != 0) { + // accum represents the smaller sub trees, since it is earlier in the expansion + // we put the larger subtrees on the left + accum = keccak256(abi.encodePacked(val, accum)); + } else { + // by definition we always complete trees by appending zeros to the right + accum = keccak256(abi.encodePacked(accum, bytes32(0))); + } + } + + return accum; + } + + /// @notice Append a complete subtree to an existing tree + /// @dev See above description of trees for rules on how appending can occur. 
+ /// Briefly, appending works like binary addition only that the value being added be an + /// exact power of two (complete), and must equal to or less than the least significant bit + /// in the existing tree. + /// If the me is empty, will just append directly. + /// @param me The merkle expansion to append a complete sub tree to + /// @param level The level at which to append the complete subtree + /// @param subtreeRoot The root of the complete subtree to be appended + function appendCompleteSubTree(bytes32[] memory me, uint256 level, bytes32 subtreeRoot) + internal + pure + returns (bytes32[] memory) + { + // we use number representations of the levels elsewhere, so we need to ensure we're appending a leve + // that's too high to use in uint + require(level < MAX_LEVEL, "Level too high"); + require(subtreeRoot != 0, "Cannot append empty subtree"); + require(me.length <= MAX_LEVEL, "Merkle expansion too large"); + + if (me.length == 0) { + bytes32[] memory empty = new bytes32[](level + 1); + empty[level] = subtreeRoot; + return empty; + } + + // This technically isn't necessary since it would be caught by the i < level check + // on the last loop of the for-loop below, but we add it for a clearer error message + require(level < me.length, "Level greater than highest level of current expansion"); + + bytes32 accumHash = subtreeRoot; + uint256 meSize = treeSize(me); + uint256 postSize = meSize + 2 ** level; + + // if by appending the sub tree we increase the numbe of most sig bits of the size, that means + // we'll need more space in the expansion to describe the tree, so we enlarge by one + bytes32[] memory next = UintUtilsLib.mostSignificantBit(postSize) > UintUtilsLib.mostSignificantBit(meSize) + ? new bytes32[](me.length + 1) + : new bytes32[](me.length ); + + // ensure we're never creating an expansion that's too big + require(next.length <= MAX_LEVEL, "Append creates oversize tree"); + + // loop through all the levels in self and try to append the new subtree + // since each node has two children by appending a subtree we may complete another one + // in the level above. So we move through the levels updating the result at each level + for (uint256 i = 0; i < me.length; i++) { + // we can only append at the level of the smallest complete sub tree or below + // appending above this level would mean create "holes" in the tree + // we can find the smallest complete sub tree by looking for the first entry in the merkle expansion + if (i < level) { + // we're below the level we want to append - no complete sub trees allowed down here + // if the level is 0 there are no complete subtrees, and we therefore cannot be too low + require(me[i] == 0, "Append above least significant bit"); + } else { + // we're at or above the level + if (accumHash == 0) { + // no more changes to propagate upwards - just fill the tree + next[i] = me[i]; + } else { + // we have a change to propagate + if (me[i] == 0) { + // if the level is currently empty we can just add the change + next[i] = accumHash; + // and then there's nothing more to propagate + accumHash = 0; + } else { + // if the level is not currently empty then we combine it with propagation + // change, and propagate that to the level above. 
+                        next[i] = 0;
+                        accumHash = keccak256(abi.encodePacked(me[i], accumHash));
+                    }
+                }
+            }
+        }
+
+        // we had a final change to propagate above the existing highest complete sub tree
+        // so we have a new highest complete sub tree in the level above - this was why we
+        // increased the storage above
+        if (accumHash != 0) {
+            next[next.length - 1] = accumHash;
+        }
+
+        // it should never be possible to reach this state if we sized the array correctly above,
+        // so this is just a sanity check
+        require(next[next.length - 1] != 0, "Last entry zero");
+
+        return next;
+    }
+
+    /// @notice Append a leaf to a subtree
+    /// @dev Leaves are just complete subtrees at level 0, however we hash the leaf before putting it
+    /// into the tree to avoid root collisions.
+    /// @param me The merkle expansion to append a leaf to
+    /// @param leaf The leaf to append - will be hashed in here before appending
+    function appendLeaf(bytes32[] memory me, bytes32 leaf) internal pure returns (bytes32[] memory) {
+        // it's important that we hash the leaf; this ensures that a leaf cannot collide with any non-leaf
+        // or root node, since those are always the hash of 64 bytes of data, whereas here we hash 32 bytes
+        return appendCompleteSubTree(me, 0, keccak256(abi.encodePacked(leaf)));
+    }
+
+    /// @notice Find the highest level which can be appended to a tree of size startSize without
+    /// creating a tree with size greater than endSize (inclusive)
+    /// @dev Subtrees can only be appended according to certain rules, see the tree description at the top of this
+    /// file for details. A subtree can only be appended if it is at the same level, or below, the current lowest
+    /// subtree in the expansion
+    /// @param startSize The size of the start tree to find the maximum append to
+    /// @param endSize The size of the end tree to find a maximum append under
+    function maximumAppendBetween(uint256 startSize, uint256 endSize) internal pure returns (uint256) {
+        // Since the tree is binary we can represent it using the binary representation of a number
+        // As described above, subtrees can only be appended to a tree if they are at the same level, or below,
+        // the current lowest subtree.
+        // In this function we want to find the level of the highest tree that can be appended to the current
+        // tree, without the resulting tree surpassing the end point. We do this by looking at the difference
+        // between the start and end size, and iteratively reducing it in the maximal way.
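+        // For example (illustrative numbers): growing a tree of size 2 up to size 7 uses maximal appends at
+        // levels 1, 1 and then 0, taking the size 2 -> 4 -> 6 -> 7; a call with (2, 7) returns the first of
+        // these levels, 1.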
+ + // The start and end size will share some higher order bits, below that they differ, and it is this + // difference that we need to fill in the minimum number of appends + // startSize looks like: xxxxxxyyyy + // endSize looks like: xxxxxxzzzz + // where x are the complete sub trees they share, and y and z are the subtrees they dont + + require(startSize < endSize, "Start not less than end"); + + // remove the high order bits that are shared + uint256 msb = UintUtilsLib.mostSignificantBit(startSize ^ endSize); + uint256 mask = (1 << (msb) + 1) - 1; + uint256 y = startSize & mask; + uint256 z = endSize & mask; + + // Since in the verification we will be appending at start size, the highest level at which we + // can append is the lowest complete subtree - the least significant bit + if (y != 0) { + return UintUtilsLib.leastSignificantBit(y); + } + + // y == 0, therefore we can append at any of levels where start and end differ + // The highest level that we can append at without surpassing the end, is the most significant + // bit of the end + if (z != 0) { + return UintUtilsLib.mostSignificantBit(z); + } + + // since we enforce that start < end, we know that y and z cannot both be 0 + revert("Both y and z cannot be zero"); + } + + /// @notice Calculate the full tree size represented by a merkle expansion + /// @param me The merkle expansion to calculate the tree size of + function treeSize(bytes32[] memory me) internal pure returns (uint256) { + uint256 sum = 0; + for (uint256 i = 0; i < me.length; i++) { + if (me[i] != 0) { + sum += 2 ** i; + } + } + return sum; + } + + /// @notice Verify that a pre-root commits to a prefix of the leaves committed by a post-root + /// @dev Verifies by appending sub trees to the pre tree until we get to the size of the post tree + /// and then checking that the root of the calculated post tree is equal to the supplied one + /// @param preRoot The root of the tree which is a prefix of the post tree + /// @param preSize The size of the pre-tree + /// @param postRoot The root the post-tree - the tree which we're proving pre is a prefix of + /// @param postSize The size of the post-tree + /// @param preExpansion The merkle expansion of the pre-tree + /// @param proof The proof is the minimum set of complete sub-tree hashes that can be appended to + /// the pre-tree in order to form the post tree + function verifyPrefixProof( + bytes32 preRoot, + uint256 preSize, + bytes32 postRoot, + uint256 postSize, + bytes32[] memory preExpansion, + bytes32[] memory proof + ) internal pure { + require(preSize > 0, "Pre-size cannot be 0"); + require(root(preExpansion) == preRoot, "Pre expansion root mismatch"); + require(treeSize(preExpansion) == preSize, "Pre size does not match expansion"); + require(preSize < postSize, "Pre size not less than post size"); + + uint256 size = preSize; + uint256 proofIndex = 0; + // we clone here to avoid mutating the input arguments + // which could be unexpected for callers + bytes32[] memory exp = ArrayUtilsLib.slice(preExpansion, 0, preExpansion.length); + + // Iteratively append a tree at the maximum possible level until we get to the post size + while (size < postSize) { + uint256 level = maximumAppendBetween(size, postSize); + + require(proofIndex < proof.length, "Index out of range"); + exp = appendCompleteSubTree(exp, level, proof[proofIndex]); + + uint256 numLeaves = 1 << level; + size += numLeaves; + assert(size <= postSize); + proofIndex++; + } + + // Check that the calculated root is equal to the provided post root + 
require(root(exp) == postRoot, "Post expansion root not equal post"); + + // ensure that we consumed the full proof + // this is just a safety check to guard against mistakenly supplied args + require(proofIndex == proof.length, "Incomplete proof usage"); + } + + /// @notice Using the provided proof verify that the provided leaf is included in the roothash at + /// the specified index. Note that here we use a 0-indexed value for the leaf number, whereas + /// elsewhere we use size. + /// @param rootHash The root hash to prove inclusion in + /// @param leaf The leaf preimage to prove inclusion - will be hashed in here before checking inclusion + /// @param index The index of the leaf in the tree + /// @param proof The path from the leaf to the root + function verifyInclusionProof(bytes32 rootHash, bytes32 leaf, uint256 index, bytes32[] memory proof) + internal + pure + { + bytes32 calculatedRoot = MerkleLib.calculateRoot(proof, index, keccak256(abi.encodePacked(leaf))); + require(rootHash == calculatedRoot, "Invalid inclusion proof"); + } +} diff --git a/src/challengeV2/libraries/UintUtilsLib.sol b/src/challengeV2/libraries/UintUtilsLib.sol new file mode 100644 index 00000000..82cd5423 --- /dev/null +++ b/src/challengeV2/libraries/UintUtilsLib.sol @@ -0,0 +1,70 @@ +// Copyright 2023, Offchain Labs, Inc. +// For license information, see https://github.com/offchainlabs/bold/blob/main/LICENSE +// SPDX-License-Identifier: BUSL-1.1 +// +pragma solidity ^0.8.17; + +/// @title Uint utils library +/// @notice Some additional bit inspection tools +library UintUtilsLib { + /// @notice The least significant bit in the bit representation of a uint + /// @dev Zero indexed from the least sig bit. Eg 1010 => 1, 1100 => 2, 1001 => 0 + /// Finds lsb in linear (uint size) time + /// @param x Cannot be zero, since zero that has no signficant bits + function leastSignificantBit(uint256 x) internal pure returns (uint256 msb) { + require(x > 0, "Zero has no significant bits"); + + // isolate the least sig bit + uint256 isolated = ((x - 1) & x) ^ x; + + // since we removed all higher bits, least sig == most sig + return mostSignificantBit(isolated); + } + + /// @notice The most significant bit in the bit representation of a uint + /// @dev Zero indexed from the least sig bit. Eg 1010 => 3, 110 => 2, 1 => 0 + /// Taken from https://solidity-by-example.org/bitwise/ + /// Finds msb in log (uint size) time + /// @param x Cannot be zero, since zero has no sigificant bits + function mostSignificantBit(uint256 x) internal pure returns (uint256 msb) { + require(x != 0, "Zero has no significant bits"); + + // x >= 2 ** 128 + if (x >= 0x100000000000000000000000000000000) { + x >>= 128; + msb += 128; + } + // x >= 2 ** 64 + if (x >= 0x10000000000000000) { + x >>= 64; + msb += 64; + } + // x >= 2 ** 32 + if (x >= 0x100000000) { + x >>= 32; + msb += 32; + } + // x >= 2 ** 16 + if (x >= 0x10000) { + x >>= 16; + msb += 16; + } + // x >= 2 ** 8 + if (x >= 0x100) { + x >>= 8; + msb += 8; + } + // x >= 2 ** 4 + if (x >= 0x10) { + x >>= 4; + msb += 4; + } + // x >= 2 ** 2 + if (x >= 0x4) { + x >>= 2; + msb += 2; + } + // x >= 2 ** 1 + if (x >= 0x2) msb += 1; + } +} \ No newline at end of file diff --git a/src/libraries/ArbitrumChecker.sol b/src/libraries/ArbitrumChecker.sol new file mode 100644 index 00000000..ad04615d --- /dev/null +++ b/src/libraries/ArbitrumChecker.sol @@ -0,0 +1,16 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro-contracts/blob/main/LICENSE +// SPDX-License-Identifier: BUSL-1.1 + +pragma solidity ^0.8.0; + +import "../precompiles/ArbSys.sol"; + +library ArbitrumChecker { + function runningOnArbitrum() internal view returns (bool) { + (bool ok, bytes memory data) = address(100).staticcall( + abi.encodeWithSelector(ArbSys.arbOSVersion.selector) + ); + return ok && data.length == 32; + } +} diff --git a/src/libraries/Constants.sol b/src/libraries/Constants.sol index 57dfec6f..d15bdf16 100644 --- a/src/libraries/Constants.sol +++ b/src/libraries/Constants.sol @@ -11,6 +11,3 @@ uint64 constant NO_CHAL_INDEX = 0; // Expected seconds per block in Ethereum PoS uint256 constant ETH_POS_BLOCK_TIME = 12; - -address constant UNISWAP_L1_TIMELOCK = 0x1a9C8182C09F50C8318d769245beA52c32BE35BC; -address constant UNISWAP_L2_FACTORY = 0x1F98431c8aD98523631AE4a59f267346ea31F984; diff --git a/src/libraries/IGasRefunder.sol b/src/libraries/IGasRefunder.sol index e7b08656..6b87b724 100644 --- a/src/libraries/IGasRefunder.sol +++ b/src/libraries/IGasRefunder.sol @@ -24,6 +24,9 @@ abstract contract GasRefundEnabled { uint256 calldataSize = msg.data.length; uint256 calldataWords = (calldataSize + 31) / 32; // account for the CALLDATACOPY cost of the proxy contract, including the memory expansion cost + // memory_expansion_cost = (memory_size_word ** 2) / 512 + (3 * memory_size_word) + // CALLDATACOPY_cost = 3 * memory_size_word + memory_expansion_cost + // CALLDATACOPY_cost = 6 * memory_size_word + (memory_size_word ** 2) / 512 startGasLeft += calldataWords * 6 + (calldataWords**2) / 512; // if triggered in a contract call, the spender may be overrefunded by appending dummy data to the call // so we check if it is a top level call, which would mean the sender paid calldata as part of tx.input diff --git a/src/mocks/BridgeStub.sol b/src/mocks/BridgeStub.sol index d0f9e8cc..24c5bd79 100644 --- a/src/mocks/BridgeStub.sol +++ b/src/mocks/BridgeStub.sol @@ -18,6 +18,7 @@ contract BridgeStub is IBridge { mapping(address => InOutInfo) private allowedDelayedInboxesMap; //mapping(address => InOutInfo) private allowedOutboxesMap; + IOwnable rollupItem; address[] public allowedDelayedInboxList; address[] public allowedOutboxList; @@ -41,6 +42,10 @@ contract BridgeStub is IBridge { } function allowedOutboxes(address) external pure override returns (bool) { + return true; + } + + function updateRollupAddress(IOwnable) external pure { revert("NOT_IMPLEMENTED"); } @@ -132,7 +137,7 @@ contract BridgeStub is IBridge { uint256, bytes calldata ) external pure override returns (bool, bytes memory) { - revert("NOT_IMPLEMENTED"); + revert("NOT_IMPLEMENTED_EXECUTE_CALL"); } function setDelayedInbox(address inbox, bool enabled) external override { @@ -159,7 +164,6 @@ contract BridgeStub is IBridge { address, /* outbox */ bool /* enabled*/ ) external pure override { - revert("NOT_IMPLEMENTED"); } function delayedMessageCount() external view override returns (uint256) { @@ -170,13 +174,13 @@ contract BridgeStub is IBridge { return sequencerInboxAccs.length; } - function rollup() external pure override returns (IOwnable) { - revert("NOT_IMPLEMENTED"); + function rollup() external view override returns (IOwnable) { + return rollupItem; } function acceptFundsFromOldBridge() external payable {} - function initialize(IOwnable) external pure { - revert("NOT_IMPLEMENTED"); + function initialize(IOwnable rollup_) external { + rollupItem = rollup_; } } diff --git 
a/src/mocks/SingleExecutionChallenge.sol b/src/mocks/ExecutionManager.sol similarity index 66% rename from src/mocks/SingleExecutionChallenge.sol rename to src/mocks/ExecutionManager.sol index cd44ce1e..782e03c5 100644 --- a/src/mocks/SingleExecutionChallenge.sol +++ b/src/mocks/ExecutionManager.sol @@ -4,12 +4,12 @@ pragma solidity ^0.8.0; -import "../challenge/ChallengeManager.sol"; +import "../challenge/OldChallengeManager.sol"; -contract SingleExecutionChallenge is ChallengeManager { +contract SingleExecutionChallenge is OldChallengeManager { constructor( IOneStepProofEntry osp_, - IChallengeResultReceiver resultReceiver_, + IOldChallengeResultReceiver resultReceiver_, uint64 maxInboxMessagesRead_, bytes32[2] memory startAndEndHashes, uint256 numSteps_, @@ -21,20 +21,20 @@ contract SingleExecutionChallenge is ChallengeManager { osp = osp_; resultReceiver = resultReceiver_; uint64 challengeIndex = ++totalChallengesCreated; - ChallengeLib.Challenge storage challenge = challenges[challengeIndex]; + OldChallengeLib.Challenge storage challenge = challenges[challengeIndex]; challenge.maxInboxMessages = maxInboxMessagesRead_; bytes32[] memory segments = new bytes32[](2); segments[0] = startAndEndHashes[0]; segments[1] = startAndEndHashes[1]; - bytes32 challengeStateHash = ChallengeLib.hashChallengeState(0, numSteps_, segments); + bytes32 challengeStateHash = OldChallengeLib.hashChallengeState(0, numSteps_, segments); challenge.challengeStateHash = challengeStateHash; - challenge.next = ChallengeLib.Participant({addr: asserter_, timeLeft: asserterTimeLeft_}); - challenge.current = ChallengeLib.Participant({ + challenge.next = OldChallengeLib.Participant({addr: asserter_, timeLeft: asserterTimeLeft_}); + challenge.current = OldChallengeLib.Participant({ addr: challenger_, timeLeft: challengerTimeLeft_ }); challenge.lastMoveTimestamp = block.timestamp; - challenge.mode = ChallengeLib.ChallengeMode.EXECUTION; + challenge.mode = OldChallengeLib.ChallengeMode.EXECUTION; emit Bisected(challengeIndex, challengeStateHash, 0, numSteps_, segments); } diff --git a/src/mocks/InboxStub.sol b/src/mocks/InboxStub.sol index a31f33ef..23da741d 100644 --- a/src/mocks/InboxStub.sol +++ b/src/mocks/InboxStub.sol @@ -151,7 +151,7 @@ contract InboxStub is IInbox { address, uint256, bytes calldata - ) external pure returns (uint256) { + ) external returns (uint256) { revert("NOT_IMPLEMENTED"); } @@ -161,7 +161,7 @@ contract InboxStub is IInbox { uint256, uint256, address - ) external pure returns (uint256) { + ) external returns (uint256) { revert("NOT_IMPLEMENTED"); } diff --git a/src/mocks/MerkleTreeAccess.sol b/src/mocks/MerkleTreeAccess.sol new file mode 100644 index 00000000..b885fc8f --- /dev/null +++ b/src/mocks/MerkleTreeAccess.sol @@ -0,0 +1,56 @@ +// Copyright 2023, Offchain Labs, Inc. 
+// For license information, see https://github.com/offchainlabs/bold/blob/main/LICENSE +// SPDX-License-Identifier: BUSL-1.1 +// +pragma solidity ^0.8.17; + +import "../challengeV2/libraries/MerkleTreeLib.sol"; +import "../challengeV2/libraries/UintUtilsLib.sol"; + +contract MerkleTreeAccess { + function mostSignificantBit(uint256 x) external pure returns (uint256) { + return UintUtilsLib.mostSignificantBit(x); + } + + function leastSignificantBit(uint256 x) external pure returns (uint256) { + return UintUtilsLib.leastSignificantBit(x); + } + + function root(bytes32[] memory me) external pure returns (bytes32) { + return MerkleTreeLib.root(me); + } + + function appendCompleteSubTree(bytes32[] memory me, uint256 level, bytes32 subtreeRoot) + external + pure + returns (bytes32[] memory) + { + return MerkleTreeLib.appendCompleteSubTree(me, level, subtreeRoot); + } + + function appendLeaf(bytes32[] memory me, bytes32 leaf) external pure returns (bytes32[] memory) { + return MerkleTreeLib.appendLeaf(me, leaf); + } + + function maximumAppendBetween(uint256 startSize, uint256 endSize) external pure returns (uint256) { + return MerkleTreeLib.maximumAppendBetween(startSize, endSize); + } + + function verifyPrefixProof( + bytes32 preRoot, + uint256 preSize, + bytes32 postRoot, + uint256 postSize, + bytes32[] memory preExpansion, + bytes32[] memory proof + ) external pure { + return MerkleTreeLib.verifyPrefixProof(preRoot, preSize, postRoot, postSize, preExpansion, proof); + } + + function verifyInclusionProof(bytes32 rootHash, bytes32 leaf, uint256 index, bytes32[] memory proof) + external + pure + { + MerkleTreeLib.verifyInclusionProof(rootHash, leaf, index, proof); + } +} diff --git a/src/mocks/MockResultReceiver.sol b/src/mocks/MockResultReceiver.sol index 46a4dd11..3a9c9aa4 100644 --- a/src/mocks/MockResultReceiver.sol +++ b/src/mocks/MockResultReceiver.sol @@ -4,11 +4,11 @@ pragma solidity ^0.8.0; -import "../challenge/IChallengeResultReceiver.sol"; -import "../challenge/IChallengeManager.sol"; +import "../challenge/IOldChallengeResultReceiver.sol"; +import "../challenge/IOldChallengeManager.sol"; -contract MockResultReceiver is IChallengeResultReceiver { - IChallengeManager public manager; +contract MockResultReceiver is IOldChallengeResultReceiver { + IOldChallengeManager public manager; address public winner; address public loser; uint256 public challengeIndex; @@ -19,7 +19,7 @@ contract MockResultReceiver is IChallengeResultReceiver { address indexed loser ); - constructor(IChallengeManager manager_) { + constructor(IOldChallengeManager manager_) { manager = manager_; } diff --git a/src/mocks/ProxyAdminForBinding.sol b/src/mocks/ProxyAdminForBinding.sol deleted file mode 100644 index 4d770ab3..00000000 --- a/src/mocks/ProxyAdminForBinding.sol +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2021-2022, Offchain Labs, Inc. 
-// For license information, see https://github.com/OffchainLabs/nitro-contracts/blob/main/LICENSE -// SPDX-License-Identifier: BUSL-1.1 - -pragma solidity ^0.8.0; - -import "@openzeppelin/contracts/proxy/transparent/ProxyAdmin.sol"; - -contract ProxyAdminForBinding is ProxyAdmin {} diff --git a/src/mocks/Simple.sol b/src/mocks/Simple.sol index 4ce8bc3c..c57369fb 100644 --- a/src/mocks/Simple.sol +++ b/src/mocks/Simple.sol @@ -13,23 +13,19 @@ contract Simple { event CounterEvent(uint64 count); event RedeemedEvent(address caller, address redeemer); event NullEvent(); - event LogAndIncrementCalled(uint256 expected, uint256 have); function increment() external { counter++; } - function logAndIncrement(uint256 expected) external { - emit LogAndIncrementCalled(expected, counter); - counter++; - } - function incrementEmit() external { counter++; emit CounterEvent(counter); } function incrementRedeem() external { + require(msg.sender == tx.origin, "SENDER_NOT_ORIGIN"); + require(ArbSys(address(0x64)).wasMyCallersAddressAliased(), "NOT_ALIASED"); counter++; emit RedeemedEvent(msg.sender, ArbRetryableTx(address(110)).getCurrentRedeemer()); } @@ -114,12 +110,4 @@ contract Simple { (success, ) = address(this).call(data); require(success, "CALL_FAILED"); } - - function checkGasUsed(address to, bytes calldata input) external view returns (uint256) { - uint256 before = gasleft(); - // The inner call may revert, but we still want to return the amount of gas used, - // so we ignore the result of this call. - (to.staticcall{gas: before - 10000}(input)); - return before - gasleft(); - } } diff --git a/src/mocks/SimpleOneStepProofEntry.sol b/src/mocks/SimpleOneStepProofEntry.sol new file mode 100644 index 00000000..9b9d5365 --- /dev/null +++ b/src/mocks/SimpleOneStepProofEntry.sol @@ -0,0 +1,51 @@ +// Copyright 2023, Offchain Labs, Inc. +// For license information, see https://github.com/offchainlabs/bold/blob/main/LICENSE +// SPDX-License-Identifier: BUSL-1.1 +// +pragma solidity ^0.8.17; + +import "../challengeV2/EdgeChallengeManager.sol"; +import "../state/Deserialize.sol"; + +contract SimpleOneStepProofEntry is IOneStepProofEntry { + using GlobalStateLib for GlobalState; + + // End the batch after 2000 steps. This results in 11 blocks for an honest validator. 
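+    // (Illustrative: in proveOneStep below, once the position-in-message reaches a multiple of
+    // STEPS_PER_BATCH the inbox position is incremented and the position-in-message is reset to zero.)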
+ // This constant must be synchronized with the one in execution/engine.go + uint64 public constant STEPS_PER_BATCH = 2000; + + function proveOneStep( + ExecutionContext calldata execCtx, + uint256 step, + bytes32 beforeHash, + bytes calldata proof + ) external view returns (bytes32 afterHash) { + if (proof.length == 0) { + revert("EMPTY_PROOF"); + } + GlobalState memory globalState; + uint256 offset; + (globalState.u64Vals[0], offset) = Deserialize.u64(proof, offset); + (globalState.u64Vals[1], offset) = Deserialize.u64(proof, offset); + if (step > 0 && (beforeHash[0] == 0 || globalState.getPositionInMessage() == 0)) { + // We end the block when the first byte of the hash hits 0 or we advance a batch + return beforeHash; + } + if (globalState.getInboxPosition() >= execCtx.maxInboxMessagesRead) { + // We can't continue further because we've hit the max inbox messages read + return beforeHash; + } + require(globalState.hash() == beforeHash, "BAD_PROOF"); + globalState.u64Vals[1]++; + if (globalState.u64Vals[1] % STEPS_PER_BATCH == 0) { + globalState.u64Vals[0]++; + globalState.u64Vals[1] = 0; + } + return globalState.hash(); + } + + function getMachineHash(ExecutionState calldata execState) external pure override returns (bytes32) { + require(execState.machineStatus == MachineStatus.FINISHED, "BAD_MACHINE_STATUS"); + return execState.globalState.hash(); + } +} diff --git a/src/mocks/TestWETH9.sol b/src/mocks/TestWETH9.sol new file mode 100644 index 00000000..3c52e430 --- /dev/null +++ b/src/mocks/TestWETH9.sol @@ -0,0 +1,26 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro-contracts/blob/main/LICENSE +// SPDX-License-Identifier: BUSL-1.1 + +pragma solidity ^0.8.0; + +import "@openzeppelin/contracts/token/ERC20/ERC20.sol"; + +interface IWETH9 { + function deposit() external payable; + + function withdraw(uint256 _amount) external; +} + +contract TestWETH9 is ERC20, IWETH9 { + constructor(string memory name_, string memory symbol_) ERC20(name_, symbol_) {} + + function deposit() external payable override { + _mint(msg.sender, msg.value); + } + + function withdraw(uint256 _amount) external override { + _burn(msg.sender, _amount); + payable(address(msg.sender)).transfer(_amount); + } +} diff --git a/src/mocks/TimedOutChallengeManager.sol b/src/mocks/TimedOutChallengeManager.sol deleted file mode 100644 index 83e0457f..00000000 --- a/src/mocks/TimedOutChallengeManager.sol +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2021-2022, Offchain Labs, Inc. 
-// For license information, see https://github.com/OffchainLabs/nitro-contracts/blob/main/LICENSE -// SPDX-License-Identifier: BUSL-1.1 - -pragma solidity ^0.8.0; - -import "../challenge/ChallengeManager.sol"; - -contract TimedOutChallengeManager is ChallengeManager { - function isTimedOut(uint64) public pure override returns (bool) { - return true; - } -} diff --git a/src/osp/IOneStepProofEntry.sol b/src/osp/IOneStepProofEntry.sol index fb00b74f..d8105970 100644 --- a/src/osp/IOneStepProofEntry.sol +++ b/src/osp/IOneStepProofEntry.sol @@ -5,11 +5,17 @@ pragma solidity ^0.8.0; import "./IOneStepProver.sol"; +import "../state/Machine.sol"; library OneStepProofEntryLib { uint256 internal constant MAX_STEPS = 1 << 43; } +struct ExecutionState { + GlobalState globalState; + MachineStatus machineStatus; +} + interface IOneStepProofEntry { function proveOneStep( ExecutionContext calldata execCtx, @@ -17,4 +23,6 @@ interface IOneStepProofEntry { bytes32 beforeHash, bytes calldata proof ) external view returns (bytes32 afterHash); + + function getMachineHash(ExecutionState calldata execState) external pure returns (bytes32); } diff --git a/src/osp/IOneStepProver.sol b/src/osp/IOneStepProver.sol index 2e11e657..6fbc7422 100644 --- a/src/osp/IOneStepProver.sol +++ b/src/osp/IOneStepProver.sol @@ -14,6 +14,7 @@ import "../bridge/IBridge.sol"; struct ExecutionContext { uint256 maxInboxMessagesRead; IBridge bridge; + bytes32 initialWasmModuleRoot; } abstract contract IOneStepProver { diff --git a/src/osp/OneStepProofEntry.sol b/src/osp/OneStepProofEntry.sol index 390727c3..7b46bf80 100644 --- a/src/osp/OneStepProofEntry.sol +++ b/src/osp/OneStepProofEntry.sol @@ -13,6 +13,7 @@ import "./IOneStepProofEntry.sol"; contract OneStepProofEntry is IOneStepProofEntry { using MerkleProofLib for MerkleProof; using MachineLib for Machine; + using GlobalStateLib for GlobalState; IOneStepProver public prover0; IOneStepProver public proverMem; @@ -31,6 +32,46 @@ contract OneStepProofEntry is IOneStepProofEntry { proverHostIo = proverHostIo_; } + // Copied from OldChallengeLib.sol + function getStartMachineHash(bytes32 globalStateHash, bytes32 wasmModuleRoot) + internal + pure + returns (bytes32) + { + // Start the value stack with the function call ABI for the entrypoint + Value[] memory startingValues = new Value[](3); + startingValues[0] = ValueLib.newRefNull(); + startingValues[1] = ValueLib.newI32(0); + startingValues[2] = ValueLib.newI32(0); + ValueArray memory valuesArray = ValueArray({inner: startingValues}); + ValueStack memory values = ValueStack({proved: valuesArray, remainingHash: 0}); + ValueStack memory internalStack; + StackFrameWindow memory frameStack; + + Machine memory mach = Machine({ + status: MachineStatus.RUNNING, + valueStack: values, + internalStack: internalStack, + frameStack: frameStack, + globalStateHash: globalStateHash, + moduleIdx: 0, + functionIdx: 0, + functionPc: 0, + modulesRoot: wasmModuleRoot + }); + return mach.hash(); + } + + function getMachineHash(ExecutionState calldata execState) external pure override returns (bytes32) { + if (execState.machineStatus == MachineStatus.FINISHED) { + return keccak256(abi.encodePacked("Machine finished:", execState.globalState.hash())); + } else if (execState.machineStatus == MachineStatus.ERRORED) { + return keccak256(abi.encodePacked("Machine errored:", execState.globalState.hash())); + } else { + revert("BAD_MACHINE_STATUS"); + } + } + function proveOneStep( ExecutionContext calldata execCtx, uint256 machineStep, @@ -49,6 +90,13 @@ 
contract OneStepProofEntry is IOneStepProofEntry { if (mach.status != MachineStatus.RUNNING) { // Machine is halted. // WARNING: at this point, most machine fields are unconstrained. + GlobalState memory globalState; + (globalState, offset) = Deserialize.globalState(proof, offset); + require(globalState.hash() == mach.globalStateHash, "BAD_GLOBAL_STATE"); + if (mach.status == MachineStatus.FINISHED && machineStep == 0 && globalState.getInboxPosition() < execCtx.maxInboxMessagesRead) { + // Kickstart the machine + return getStartMachineHash(mach.globalStateHash, execCtx.initialWasmModuleRoot); + } return mach.hash(); } diff --git a/src/osp/OneStepProverHostIo.sol b/src/osp/OneStepProverHostIo.sol index 260ab206..0b059af0 100644 --- a/src/osp/OneStepProverHostIo.sol +++ b/src/osp/OneStepProverHostIo.sol @@ -216,7 +216,7 @@ contract OneStepProverHostIo is IOneStepProver { inst.argumentData == Instructions.INBOX_INDEX_SEQUENCER && msgIndex >= execCtx.maxInboxMessagesRead ) { - mach.status = MachineStatus.TOO_FAR; + mach.status = MachineStatus.ERRORED; return; } diff --git a/src/precompiles/ArbDebug.sol b/src/precompiles/ArbDebug.sol index 9924eded..bfbe6da3 100644 --- a/src/precompiles/ArbDebug.sol +++ b/src/precompiles/ArbDebug.sol @@ -1,4 +1,4 @@ -// Copyright 2021-2023, Offchain Labs, Inc. +// Copyright 2021-2022, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro-contracts/blob/main/LICENSE // SPDX-License-Identifier: BUSL-1.1 @@ -15,9 +15,6 @@ interface ArbDebug { /// @notice Emit events with values based on the args provided function events(bool flag, bytes32 value) external payable returns (address, uint256); - /// @notice Tries (and fails) to emit logs in a view context - function eventsView() external view; - // Events that exist for testing log creation and pricing event Basic(bool flag, bytes32 indexed value); event Mixed( @@ -37,8 +34,6 @@ interface ArbDebug { function customRevert(uint64 number) external pure; - function legacyError() external pure; - error Custom(uint64, string, bool); error Unused(); } diff --git a/src/precompiles/ArbGasInfo.sol b/src/precompiles/ArbGasInfo.sol index a7106883..31dd70ea 100644 --- a/src/precompiles/ArbGasInfo.sol +++ b/src/precompiles/ArbGasInfo.sol @@ -95,6 +95,14 @@ interface ArbGasInfo { /// @notice Get how slowly ArbOS updates its estimate of the L1 basefee function getL1BaseFeeEstimateInertia() external view returns (uint64); + /// @notice Get the L1 pricer reward rate, in wei per unit + /// Available in ArbOS version 11 + function getL1RewardRate() external view returns (uint64); + + /// @notice Get the L1 pricer reward recipient + /// Available in ArbOS version 11 + function getL1RewardRecipient() external view returns (address); + /// @notice Deprecated -- Same as getL1BaseFeeEstimate() function getL1GasPriceEstimate() external view returns (uint256); diff --git a/src/precompiles/ArbOwnerPublic.sol b/src/precompiles/ArbOwnerPublic.sol index 270f5f24..1fbfdc03 100644 --- a/src/precompiles/ArbOwnerPublic.sol +++ b/src/precompiles/ArbOwnerPublic.sol @@ -10,6 +10,13 @@ interface ArbOwnerPublic { /// @notice See if the user is a chain owner function isChainOwner(address addr) external view returns (bool); + /** + * @notice Rectify the list of chain owners + * If successful, emits ChainOwnerRectified event + * Available in ArbOS version 11 + */ + function rectifyChainOwner(address ownerToRectify) external; + /// @notice Retrieves the list of chain owners function getAllChainOwners() external view returns 
(address[] memory); @@ -18,4 +25,6 @@ interface ArbOwnerPublic { /// @notice Get the infrastructure fee collector function getInfraFeeAccount() external view returns (address); + + event ChainOwnerRectified(address rectifiedOwner); } diff --git a/src/precompiles/ArbSys.sol b/src/precompiles/ArbSys.sol index 4e7a8f41..8214aa58 100644 --- a/src/precompiles/ArbSys.sol +++ b/src/precompiles/ArbSys.sol @@ -147,6 +147,4 @@ interface ArbSys { bytes32 indexed hash, uint256 indexed position ); - - error InvalidBlockNumber(uint256 requested, uint256 current); } diff --git a/src/rollup/Assertion.sol b/src/rollup/Assertion.sol new file mode 100644 index 00000000..f7d626dd --- /dev/null +++ b/src/rollup/Assertion.sol @@ -0,0 +1,102 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro-contracts/blob/main/LICENSE +// SPDX-License-Identifier: BUSL-1.1 + +pragma solidity ^0.8.0; + +import "../state/GlobalState.sol"; +import "../state/Machine.sol"; +import "../osp/IOneStepProofEntry.sol"; + +enum AssertionStatus { + // No assertion at this index + NoAssertion, + // Assertion is being computed + Pending, + // Assertion is confirmed + Confirmed +} + +struct AssertionNode { + // This value starts at zero and is set to a value when the first child is created. After that it is constant until the assertion is destroyed or the owner destroys pending assertions + uint64 firstChildBlock; + // This value starts at zero and is set to a value when the second child is created. After that it is constant until the assertion is destroyed or the owner destroys pending assertions + uint64 secondChildBlock; + // The block number when this assertion was created + uint64 createdAtBlock; + // True if this assertion is the first child of its prev + bool isFirstChild; + // Status of the Assertion + AssertionStatus status; + // A hash of the context available at the time of this assertions creation. It should contain information that is not specific + // to this assertion, but instead to the environment at the time of creation. This is necessary to store on the assertion + // as this environment can change and we need to know what it was like at the time this assertion was created. An example + // of this is the wasm module root which determines the state transition function on the L2. If the wasm module root + // changes we need to know that previous assertions were made under a different root, so that we can understand that they + // were valid at the time. So when resolving a challenge by one step, the edge challenge manager finds the wasm module root + // that was recorded on the prev of the assertions being disputed and uses it to resolve the one step proof. 
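+    // (For reference: the fields that feed into this hash are those of the ConfigData struct below -
+    // wasmModuleRoot, requiredStake, challengeManager, confirmPeriodBlocks and nextInboxPosition; the exact
+    // encoding is computed by the rollup logic and is not shown here.)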
+ bytes32 configHash; +} + +struct BeforeStateData { + // The assertion hash of the prev of the beforeState(prev) + bytes32 prevPrevAssertionHash; + // The sequencer inbox accumulator asserted by the beforeState(prev) + bytes32 sequencerBatchAcc; + // below are the components of config hash + ConfigData configData; +} + +struct AssertionInputs { + // Additional data used to validate the before state + BeforeStateData beforeStateData; + ExecutionState beforeState; + ExecutionState afterState; +} + +struct ConfigData { + bytes32 wasmModuleRoot; + uint256 requiredStake; + address challengeManager; + uint64 confirmPeriodBlocks; + uint64 nextInboxPosition; +} + +/** + * @notice Utility functions for Assertion + */ +library AssertionNodeLib { + /** + * @notice Initialize a Assertion + */ + function createAssertion( + bool _isFirstChild, + bytes32 _configHash + ) internal view returns (AssertionNode memory) { + AssertionNode memory assertion; + assertion.createdAtBlock = uint64(block.number); + assertion.isFirstChild = _isFirstChild; + assertion.configHash = _configHash; + assertion.status = AssertionStatus.Pending; + return assertion; + } + + /** + * @notice Update child properties + */ + function childCreated(AssertionNode storage self) internal { + if (self.firstChildBlock == 0) { + self.firstChildBlock = uint64(block.number); + } else if (self.secondChildBlock == 0) { + self.secondChildBlock = uint64(block.number); + } + } + + function requireMoreThanOneChild(AssertionNode memory self) internal pure { + require(self.secondChildBlock > 0, "TOO_FEW_CHILD"); + } + + function requireExists(AssertionNode memory self) internal pure { + require(self.status != AssertionStatus.NoAssertion, "ASSERTION_NOT_EXIST"); + } +} diff --git a/src/rollup/BOLDUpgradeAction.sol b/src/rollup/BOLDUpgradeAction.sol new file mode 100644 index 00000000..a279f419 --- /dev/null +++ b/src/rollup/BOLDUpgradeAction.sol @@ -0,0 +1,458 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro-contracts/blob/main/LICENSE +// SPDX-License-Identifier: BUSL-1.1 + +pragma solidity ^0.8.0; + +import "@openzeppelin/contracts-upgradeable/utils/Create2Upgradeable.sol"; +import "@openzeppelin/contracts/proxy/transparent/ProxyAdmin.sol"; +import "./RollupProxy.sol"; +import "./RollupLib.sol"; +import "./RollupAdminLogic.sol"; + +struct Node { + // Hash of the state of the chain as of this node + bytes32 stateHash; + // Hash of the data that can be challenged + bytes32 challengeHash; + // Hash of the data that will be committed if this node is confirmed + bytes32 confirmData; + // Index of the node previous to this one + uint64 prevNum; + // Deadline at which this node can be confirmed + uint64 deadlineBlock; + // Deadline at which a child of this node can be confirmed + uint64 noChildConfirmedBeforeBlock; + // Number of stakers staked on this node. This includes real stakers and zombies + uint64 stakerCount; + // Number of stakers staked on a child node. This includes real stakers and zombies + uint64 childStakerCount; + // This value starts at zero and is set to a value when the first child is created. 
After that it is constant until the node is destroyed or the owner destroys pending nodes + uint64 firstChildBlock; + // The number of the latest child of this node to be created + uint64 latestChildNumber; + // The block number when this node was created + uint64 createdAtBlock; + // A hash of all the data needed to determine this node's validity, to protect against reorgs + bytes32 nodeHash; +} + +struct OldStaker { + uint256 amountStaked; + uint64 index; + uint64 latestStakedNode; + // currentChallenge is 0 if staker is not in a challenge + uint64 currentChallenge; // 1. cannot have current challenge + bool isStaked; // 2. must be staked +} + +interface IOldRollup { + struct Assertion { + ExecutionState beforeState; + ExecutionState afterState; + uint64 numBlocks; + } + + event NodeCreated( + uint64 indexed nodeNum, + bytes32 indexed parentNodeHash, + bytes32 indexed nodeHash, + bytes32 executionHash, + Assertion assertion, + bytes32 afterInboxBatchAcc, + bytes32 wasmModuleRoot, + uint256 inboxMaxCount + ); + + function wasmModuleRoot() external view returns (bytes32); + function latestConfirmed() external view returns (uint64); + function getNode(uint64 nodeNum) external view returns (Node memory); + function getStakerAddress(uint64 stakerNum) external view returns (address); + function stakerCount() external view returns (uint64); + function getStaker(address staker) external view returns (OldStaker memory); + function isValidator(address validator) external view returns (bool); + function validatorWalletCreator() external view returns (address); +} + +interface IOldRollupAdmin { + function forceRefundStaker(address[] memory stacker) external; + function pause() external; + function resume() external; +} + +/// @title Provides pre-images to a state hash +/// @notice We need to use the execution state of the latest confirmed node as the genesis +/// in the new rollup. However the this full state is not available on chain, only +/// the state hash is, which commits to this. This lookup contract should be deployed +/// before the upgrade, and just before the upgrade is executed the pre-image of the +/// latest confirmed state hash should be populated here. The upgrade contact can then +/// fetch this information and verify it before using it. +contract StateHashPreImageLookup { + using GlobalStateLib for GlobalState; + + event HashSet(bytes32 h, ExecutionState execState, uint256 inboxMaxCount); + + mapping(bytes32 => bytes) internal preImages; + + function stateHash(ExecutionState calldata execState, uint256 inboxMaxCount) public pure returns (bytes32) { + return keccak256(abi.encodePacked(execState.globalState.hash(), inboxMaxCount, execState.machineStatus)); + } + + function set(bytes32 h, ExecutionState calldata execState, uint256 inboxMaxCount) public { + require(h == stateHash(execState, inboxMaxCount), "Invalid hash"); + preImages[h] = abi.encode(execState, inboxMaxCount); + emit HashSet(h, execState, inboxMaxCount); + } + + function get(bytes32 h) public view returns (ExecutionState memory execState, uint256 inboxMaxCount) { + (execState, inboxMaxCount) = abi.decode(preImages[h], (ExecutionState, uint256)); + require(inboxMaxCount != 0, "Hash not yet set"); + } +} + +/// @title Forwards calls to the rollup so that they can be interpreted as a user +/// @notice In the upgrade executor we need to access functions on the rollup +/// but since the upgrade executor is the admin it will always be forwarded to the +/// rollup admin logic. 
We create a separate forwarder contract here that just relays +/// information, since it's not the admin it can access rollup user logic. +contract RollupReader is IOldRollup { + IOldRollup public immutable rollup; + + constructor(IOldRollup _rollup) { + rollup = _rollup; + } + + function wasmModuleRoot() external view returns (bytes32) { + return rollup.wasmModuleRoot(); + } + + function latestConfirmed() external view returns (uint64) { + return rollup.latestConfirmed(); + } + + function getNode(uint64 nodeNum) external view returns (Node memory) { + return rollup.getNode(nodeNum); + } + + function getStakerAddress(uint64 stakerNum) external view returns (address) { + return rollup.getStakerAddress(stakerNum); + } + + function stakerCount() external view returns (uint64) { + return rollup.stakerCount(); + } + + function getStaker(address staker) external view returns (OldStaker memory) { + return rollup.getStaker(staker); + } + + function isValidator(address validator) external view returns (bool) { + return rollup.isValidator(validator); + } + + function validatorWalletCreator() external view returns (address) { + return rollup.validatorWalletCreator(); + } +} + +/// @title Upgrades an Arbitrum rollup to the new challenge protocol +/// @notice Requires implementation contracts to be pre-deployed and provided in the constructor +/// Also requires a lookup contract to be provided that contains the pre-image of the state hash +/// that is in the latest confirmed assertion in the current rollup. +contract BOLDUpgradeAction { + uint256 public constant BLOCK_LEAF_SIZE = 2 ** 26; + uint256 public constant BIGSTEP_LEAF_SIZE = 2 ** 23; + uint256 public constant SMALLSTEP_LEAF_SIZE = 2 ** 20; + + address public immutable L1_TIMELOCK; + IOldRollup public immutable OLD_ROLLUP; + address public immutable BRIDGE; + address public immutable SEQ_INBOX; + address public immutable REI; + address public immutable OUTBOX; + address public immutable INBOX; + + uint64 public immutable CONFIRM_PERIOD_BLOCKS; + address public immutable STAKE_TOKEN; + uint256 public immutable STAKE_AMOUNT; + uint256 public immutable MINI_STAKE_AMOUNT; + uint256 public immutable CHAIN_ID; + address public immutable ANY_TRUST_FAST_CONFIRMER; + bool public immutable DISABLE_VALIDATOR_WHITELIST; + + IOneStepProofEntry public immutable OSP; + // proxy admins of the contracts to be upgraded + ProxyAdmin public immutable PROXY_ADMIN_OUTBOX; + ProxyAdmin public immutable PROXY_ADMIN_BRIDGE; + ProxyAdmin public immutable PROXY_ADMIN_REI; + ProxyAdmin public immutable PROXY_ADMIN_SEQUENCER_INBOX; + StateHashPreImageLookup public immutable PREIMAGE_LOOKUP; + RollupReader public immutable ROLLUP_READER; + + // new contract implementations + address public immutable IMPL_BRIDGE; + address public immutable IMPL_SEQUENCER_INBOX; + address public immutable IMPL_REI; + address public immutable IMPL_OUTBOX; + // the old rollup, but with whenNotPaused protection removed from stake withdrawal functions + address public immutable IMPL_PATCHED_OLD_ROLLUP_USER; + address public immutable IMPL_NEW_ROLLUP_USER; + address public immutable IMPL_NEW_ROLLUP_ADMIN; + address public immutable IMPL_CHALLENGE_MANAGER; + + event RollupMigrated(address rollup, address challengeManager); + + struct Settings { + uint64 confirmPeriodBlocks; + address stakeToken; + uint256 stakeAmt; + uint256 miniStakeAmt; + uint256 chainId; + address anyTrustFastConfirmer; + bool disableValidatorWhitelist; + } + + // Unfortunately these are not discoverable on-chain, so we need to 
supply them + struct ProxyAdmins { + address outbox; + address bridge; + address rei; + address seqInbox; + } + + struct Implementations { + address bridge; + address seqInbox; + address rei; + address outbox; + address oldRollupUser; + address newRollupUser; + address newRollupAdmin; + address challengeManager; + } + + struct Contracts { + address l1Timelock; + IOldRollup rollup; + address bridge; + address sequencerInbox; + address rollupEventInbox; + address outbox; + address inbox; + IOneStepProofEntry osp; + } + + constructor( + Contracts memory contracts, + ProxyAdmins memory proxyAdmins, + Implementations memory implementations, + Settings memory settings + ) { + L1_TIMELOCK = contracts.l1Timelock; + OLD_ROLLUP = contracts.rollup; + BRIDGE = contracts.bridge; + SEQ_INBOX = contracts.sequencerInbox; + REI = contracts.rollupEventInbox; + OUTBOX = contracts.outbox; + INBOX = contracts.inbox; + OSP = contracts.osp; + + PROXY_ADMIN_OUTBOX = ProxyAdmin(proxyAdmins.outbox); + PROXY_ADMIN_BRIDGE = ProxyAdmin(proxyAdmins.bridge); + PROXY_ADMIN_REI = ProxyAdmin(proxyAdmins.rei); + PROXY_ADMIN_SEQUENCER_INBOX = ProxyAdmin(proxyAdmins.seqInbox); + PREIMAGE_LOOKUP = new StateHashPreImageLookup(); + ROLLUP_READER = new RollupReader(contracts.rollup); + + IMPL_BRIDGE = implementations.bridge; + IMPL_SEQUENCER_INBOX = implementations.seqInbox; + IMPL_REI = implementations.rei; + IMPL_OUTBOX = implementations.outbox; + IMPL_PATCHED_OLD_ROLLUP_USER = implementations.oldRollupUser; + IMPL_NEW_ROLLUP_USER = implementations.newRollupUser; + IMPL_NEW_ROLLUP_ADMIN = implementations.newRollupAdmin; + IMPL_CHALLENGE_MANAGER = implementations.challengeManager; + + CHAIN_ID = settings.chainId; + CONFIRM_PERIOD_BLOCKS = settings.confirmPeriodBlocks; + STAKE_TOKEN = settings.stakeToken; + STAKE_AMOUNT = settings.stakeAmt; + MINI_STAKE_AMOUNT = settings.miniStakeAmt; + ANY_TRUST_FAST_CONFIRMER = settings.anyTrustFastConfirmer; + DISABLE_VALIDATOR_WHITELIST = settings.disableValidatorWhitelist; + } + + /// @dev Refund the existing stakers, pause and upgrade the current rollup to + /// allow them to withdraw after pausing + function cleanupOldRollup() private { + IOldRollupAdmin(address(OLD_ROLLUP)).pause(); + + uint64 stakerCount = ROLLUP_READER.stakerCount(); + // since we for-loop these stakers we set an arbitrary limit - we dont + // expect any instances to have close to this number of stakers + if (stakerCount > 50) { + stakerCount = 50; + } + for (uint64 i = 0; i < stakerCount; i++) { + address stakerAddr = ROLLUP_READER.getStakerAddress(i); + OldStaker memory staker = ROLLUP_READER.getStaker(stakerAddr); + if (staker.isStaked && staker.currentChallenge == 0) { + address[] memory stakersToRefund = new address[](1); + stakersToRefund[0] = stakerAddr; + + IOldRollupAdmin(address(OLD_ROLLUP)).forceRefundStaker(stakersToRefund); + } + } + + // upgrade the rollup to one that allows validators to withdraw even whilst paused + DoubleLogicUUPSUpgradeable(address(OLD_ROLLUP)).upgradeSecondaryTo(IMPL_PATCHED_OLD_ROLLUP_USER); + } + + /// @dev Create a config for the new rollup - fetches the latest confirmed + /// assertion from the old rollup and uses it as genesis + function createConfig() private view returns (Config memory) { + // fetch the assertion associated with the latest confirmed state + bytes32 latestConfirmedStateHash = ROLLUP_READER.getNode(ROLLUP_READER.latestConfirmed()).stateHash; + (ExecutionState memory genesisExecState, uint256 inboxMaxCount) = PREIMAGE_LOOKUP.get(latestConfirmedStateHash); + // 
double check the hash + require( + RollupLib.stateHashMem(genesisExecState, inboxMaxCount) == latestConfirmedStateHash, + "Invalid latest execution hash" + ); + + // this isnt used during rollup creation, so we can pass in empty + ISequencerInbox.MaxTimeVariation memory maxTimeVariation; + + return Config({ + confirmPeriodBlocks: CONFIRM_PERIOD_BLOCKS, + stakeToken: STAKE_TOKEN, + baseStake: STAKE_AMOUNT, + wasmModuleRoot: ROLLUP_READER.wasmModuleRoot(), + owner: address(this), // upgrade executor is the owner + loserStakeEscrow: L1_TIMELOCK, // additional funds get sent to the l1 timelock + chainId: CHAIN_ID, + chainConfig: "", // we can use an empty chain config it wont be used in the rollup initialization because we check if the rei is already connected there + miniStakeValue: MINI_STAKE_AMOUNT, + sequencerInboxMaxTimeVariation: maxTimeVariation, + layerZeroBlockEdgeHeight: BLOCK_LEAF_SIZE, + layerZeroBigStepEdgeHeight: BIGSTEP_LEAF_SIZE, + layerZeroSmallStepEdgeHeight: SMALLSTEP_LEAF_SIZE, + genesisExecutionState: genesisExecState, + genesisInboxCount: inboxMaxCount, + anyTrustFastConfirmer: ANY_TRUST_FAST_CONFIRMER + }); + } + + function upgradeSurroundingContracts(address newRollupAddress) private { + // now we upgrade each of the contracts that a reference to the rollup address + // first we upgrade to an implementation which allows setting, then set the rollup address + // then we revert to the previous implementation since we dont require this functionality going forward + + TransparentUpgradeableProxy bridge = TransparentUpgradeableProxy(payable(BRIDGE)); + address currentBridgeImpl = PROXY_ADMIN_BRIDGE.getProxyImplementation(bridge); + PROXY_ADMIN_BRIDGE.upgradeAndCall( + bridge, IMPL_BRIDGE, abi.encodeWithSelector(IBridge.updateRollupAddress.selector, newRollupAddress) + ); + PROXY_ADMIN_BRIDGE.upgrade(bridge, currentBridgeImpl); + + TransparentUpgradeableProxy sequencerInbox = TransparentUpgradeableProxy(payable(SEQ_INBOX)); + address currentSequencerInboxImpl = PROXY_ADMIN_BRIDGE.getProxyImplementation(sequencerInbox); + PROXY_ADMIN_SEQUENCER_INBOX.upgradeAndCall( + sequencerInbox, IMPL_SEQUENCER_INBOX, abi.encodeWithSelector(IOutbox.updateRollupAddress.selector) + ); + PROXY_ADMIN_SEQUENCER_INBOX.upgrade(sequencerInbox, currentSequencerInboxImpl); + + TransparentUpgradeableProxy rollupEventInbox = TransparentUpgradeableProxy(payable(REI)); + address currentRollupEventInboxImpl = PROXY_ADMIN_REI.getProxyImplementation(rollupEventInbox); + PROXY_ADMIN_REI.upgradeAndCall( + rollupEventInbox, IMPL_REI, abi.encodeWithSelector(IOutbox.updateRollupAddress.selector) + ); + PROXY_ADMIN_REI.upgrade(rollupEventInbox, currentRollupEventInboxImpl); + + TransparentUpgradeableProxy outbox = TransparentUpgradeableProxy(payable(OUTBOX)); + address currentOutboxImpl = PROXY_ADMIN_REI.getProxyImplementation(outbox); + PROXY_ADMIN_OUTBOX.upgradeAndCall( + outbox, IMPL_OUTBOX, abi.encodeWithSelector(IOutbox.updateRollupAddress.selector) + ); + PROXY_ADMIN_OUTBOX.upgrade(outbox, currentOutboxImpl); + } + + function perform(address[] memory validators) external { + // tidy up the old rollup - pause it and refund stakes + cleanupOldRollup(); + + // create the config, we do this now so that we compute the expected rollup address + Config memory config = createConfig(); + + // deploy the new challenge manager + IEdgeChallengeManager challengeManager = IEdgeChallengeManager( + address( + new TransparentUpgradeableProxy( + address(IMPL_CHALLENGE_MANAGER), + address(PROXY_ADMIN_BRIDGE), // use the same 
proxy admin as the bridge + "" + ) + ) + ); + + // now that all the dependent contracts are pointed at the new address we can + // deploy and init the new rollup + ContractDependencies memory connectedContracts = ContractDependencies({ + bridge: IBridge(BRIDGE), + sequencerInbox: ISequencerInbox(SEQ_INBOX), + inbox: IInbox(INBOX), + outbox: IOutbox(OUTBOX), + rollupEventInbox: IRollupEventInbox(REI), + challengeManager: challengeManager, + rollupAdminLogic: IMPL_NEW_ROLLUP_ADMIN, + rollupUserLogic: IRollupUser(IMPL_NEW_ROLLUP_USER), + validatorWalletCreator: ROLLUP_READER.validatorWalletCreator() + }); + + // upgrade the surrounding contracts eg bridge, outbox, seq inbox, rollup event inbox + // to set of the new rollup address + bytes32 rollupSalt = keccak256(abi.encode(config)); + address expectedRollupAddress = + Create2Upgradeable.computeAddress(rollupSalt, keccak256(type(RollupProxy).creationCode)); + upgradeSurroundingContracts(expectedRollupAddress); + + challengeManager.initialize({ + _assertionChain: IAssertionChain(expectedRollupAddress), + // confirm period and challenge period are the same atm + _challengePeriodBlocks: config.confirmPeriodBlocks, + _oneStepProofEntry: OSP, + layerZeroBlockEdgeHeight: config.layerZeroBlockEdgeHeight, + layerZeroBigStepEdgeHeight: config.layerZeroBigStepEdgeHeight, + layerZeroSmallStepEdgeHeight: config.layerZeroSmallStepEdgeHeight, + _stakeToken: IERC20(config.stakeToken), + _stakeAmount: config.miniStakeValue, + _excessStakeReceiver: L1_TIMELOCK + }); + + RollupProxy rollup = new RollupProxy{ salt: rollupSalt}(); + require(address(rollup) == expectedRollupAddress, "UNEXPCTED_ROLLUP_ADDR"); + + // initialize the rollup with this contract as owner to set batch poster and validators + // it will transfer the ownership back to the actual owner later + address actualOwner = config.owner; + config.owner = address(this); + + rollup.initializeProxy(config, connectedContracts); + + if (validators.length != 0) { + bool[] memory _vals = new bool[](validators.length); + for (uint256 i = 0; i < validators.length; i++) { + require(ROLLUP_READER.isValidator(validators[i]), "UNEXPECTED_NEW_VALIDATOR"); + _vals[i] = true; + } + IRollupAdmin(address(rollup)).setValidator(validators, _vals); + } + if (DISABLE_VALIDATOR_WHITELIST) { + IRollupAdmin(address(rollup)).setValidatorWhitelistDisabled(DISABLE_VALIDATOR_WHITELIST); + } + + IRollupAdmin(address(rollup)).setOwner(actualOwner); + + emit RollupMigrated(expectedRollupAddress, address(challengeManager)); + } +} diff --git a/src/rollup/Config.sol b/src/rollup/Config.sol index 269ecb5a..321397ef 100644 --- a/src/rollup/Config.sol +++ b/src/rollup/Config.sol @@ -12,11 +12,10 @@ import "../bridge/IOutbox.sol"; import "../bridge/IInbox.sol"; import "./IRollupEventInbox.sol"; import "./IRollupLogic.sol"; -import "../challenge/IChallengeManager.sol"; +import "../challengeV2/EdgeChallengeManager.sol"; struct Config { uint64 confirmPeriodBlocks; - uint64 extraChallengeTimeBlocks; address stakeToken; uint256 baseStake; bytes32 wasmModuleRoot; @@ -24,8 +23,16 @@ struct Config { address loserStakeEscrow; uint256 chainId; string chainConfig; - uint64 genesisBlockNum; + uint256 miniStakeValue; ISequencerInbox.MaxTimeVariation sequencerInboxMaxTimeVariation; + uint256 layerZeroBlockEdgeHeight; + uint256 layerZeroBigStepEdgeHeight; + uint256 layerZeroSmallStepEdgeHeight; + /// @notice The execution state to be used in the genesis assertion + ExecutionState genesisExecutionState; + /// @notice The inbox size at the time the 
genesis execution state was created + uint256 genesisInboxCount; + address anyTrustFastConfirmer; } struct ContractDependencies { @@ -34,10 +41,8 @@ struct ContractDependencies { IInbox inbox; IOutbox outbox; IRollupEventInbox rollupEventInbox; - IChallengeManager challengeManager; - address rollupAdminLogic; + IEdgeChallengeManager challengeManager; + address rollupAdminLogic; // this cannot be IRollupAdmin because of circular dependencies IRollupUser rollupUserLogic; - // misc contracts that are useful when interacting with the rollup - address validatorUtils; address validatorWalletCreator; } diff --git a/src/rollup/IRollupAdmin.sol b/src/rollup/IRollupAdmin.sol index bdad07b3..a75c967e 100644 --- a/src/rollup/IRollupAdmin.sol +++ b/src/rollup/IRollupAdmin.sol @@ -13,8 +13,7 @@ import "./Config.sol"; interface IRollupAdmin { event OwnerFunctionCalled(uint256 indexed id); - function initialize(Config calldata config, ContractDependencies calldata connectedContracts) - external; + function initialize(Config calldata config, ContractDependencies calldata connectedContracts) external; /** * @notice Add a contract authorized to put messages into this rollup's inbox @@ -67,53 +66,27 @@ interface IRollupAdmin { function setMinimumAssertionPeriod(uint256 newPeriod) external; /** - * @notice Set number of blocks until a node is considered confirmed - * @param newConfirmPeriod new number of blocks until a node is confirmed + * @notice Set number of blocks until a assertion is considered confirmed + * @param newConfirmPeriod new number of blocks until a assertion is confirmed */ function setConfirmPeriodBlocks(uint64 newConfirmPeriod) external; - /** - * @notice Set number of extra blocks after a challenge - * @param newExtraTimeBlocks new number of blocks - */ - function setExtraChallengeTimeBlocks(uint64 newExtraTimeBlocks) external; - /** * @notice Set base stake required for an assertion * @param newBaseStake maximum avmgas to be used per block */ function setBaseStake(uint256 newBaseStake) external; - /** - * @notice Set the token used for stake, where address(0) == eth - * @dev Before changing the base stake token, you might need to change the - * implementation of the Rollup User logic! - * @param newStakeToken address of token used for staking - */ - function setStakeToken(address newStakeToken) external; - - /** - * @notice Upgrades the implementation of a beacon controlled by the rollup - * @param beacon address of beacon to be upgraded - * @param newImplementation new address of implementation - */ - function upgradeBeacon(address beacon, address newImplementation) external; - - function forceResolveChallenge(address[] memory stackerA, address[] memory stackerB) external; - function forceRefundStaker(address[] memory stacker) external; - function forceCreateNode( - uint64 prevNode, - uint256 prevNodeInboxMaxCount, - Assertion memory assertion, - bytes32 expectedNodeHash - ) external; + function forceCreateAssertion(bytes32 prevAssertionHash, AssertionInputs calldata assertion, bytes32 expectedAssertionHash) + external; - function forceConfirmNode( - uint64 nodeNum, - bytes32 blockHash, - bytes32 sendRoot + function forceConfirmAssertion( + bytes32 assertionHash, + bytes32 parentAssertionHash, + ExecutionState calldata confirmState, + bytes32 inboxAcc ) external; function setLoserStakeEscrow(address newLoserStakerEscrow) external; @@ -135,4 +108,10 @@ interface IRollupAdmin { * @param _validatorWhitelistDisabled new value of validatorWhitelistDisabled, i.e. 
true = disabled */ function setValidatorWhitelistDisabled(bool _validatorWhitelistDisabled) external; + + /** + * @notice set a new challengeManager contract + * @param _challengeManager new value of challengeManager + */ + function setChallengeManager(address _challengeManager) external; } diff --git a/src/rollup/IRollupCore.sol b/src/rollup/IRollupCore.sol index e50f7a4a..6138f2f7 100644 --- a/src/rollup/IRollupCore.sol +++ b/src/rollup/IRollupCore.sol @@ -4,59 +4,47 @@ pragma solidity ^0.8.0; -import "./Node.sol"; +import "./Assertion.sol"; import "../bridge/IBridge.sol"; import "../bridge/IOutbox.sol"; import "../bridge/IInbox.sol"; import "./IRollupEventInbox.sol"; -import "../challenge/IChallengeManager.sol"; +import "../challengeV2/EdgeChallengeManager.sol"; -interface IRollupCore { +interface IRollupCore is IAssertionChain { struct Staker { uint256 amountStaked; + bytes32 latestStakedAssertion; uint64 index; - uint64 latestStakedNode; - // currentChallenge is 0 if staker is not in a challenge - uint64 currentChallenge; bool isStaked; } event RollupInitialized(bytes32 machineHash, uint256 chainId); - event NodeCreated( - uint64 indexed nodeNum, - bytes32 indexed parentNodeHash, - bytes32 indexed nodeHash, - bytes32 executionHash, - Assertion assertion, + event AssertionCreated( + bytes32 indexed assertionHash, + bytes32 indexed parentAssertionHash, + AssertionInputs assertion, bytes32 afterInboxBatchAcc, + uint256 inboxMaxCount, bytes32 wasmModuleRoot, - uint256 inboxMaxCount + uint256 requiredStake, + address challengeManager, + uint64 confirmPeriodBlocks ); - event NodeConfirmed(uint64 indexed nodeNum, bytes32 blockHash, bytes32 sendRoot); - - event NodeRejected(uint64 indexed nodeNum); + event AssertionConfirmed(bytes32 indexed assertionHash, bytes32 blockHash, bytes32 sendRoot); event RollupChallengeStarted( - uint64 indexed challengeIndex, - address asserter, - address challenger, - uint64 challengedNode + uint64 indexed challengeIndex, address asserter, address challenger, uint64 challengedAssertion ); event UserStakeUpdated(address indexed user, uint256 initialBalance, uint256 finalBalance); - event UserWithdrawableFundsUpdated( - address indexed user, - uint256 initialBalance, - uint256 finalBalance - ); + event UserWithdrawableFundsUpdated(address indexed user, uint256 initialBalance, uint256 finalBalance); function confirmPeriodBlocks() external view returns (uint64); - function extraChallengeTimeBlocks() external view returns (uint64); - function chainId() external view returns (uint256); function baseStake() external view returns (uint256); @@ -71,8 +59,6 @@ interface IRollupCore { function rollupEventInbox() external view returns (IRollupEventInbox); - function challengeManager() external view returns (IChallengeManager); - function loserStakeEscrow() external view returns (address); function stakeToken() external view returns (address); @@ -83,25 +69,21 @@ interface IRollupCore { function validatorWhitelistDisabled() external view returns (bool); + function genesisAssertionHash() external pure returns (bytes32); + /** - * @notice Get the Node for the given index. + * @notice Get the Assertion for the given id. */ - function getNode(uint64 nodeNum) external view returns (Node memory); + function getAssertion(bytes32 assertionHash) external view returns (AssertionNode memory); /** - * @notice Returns the block in which the given node was created for looking up its creation event. 
- * Unlike the Node's createdAtBlock field, this will be the ArbSys blockNumber if the host chain is an Arbitrum chain. + * @notice Returns the block in which the given assertion was created for looking up its creation event. + * Unlike the assertion's createdAtBlock field, this will be the ArbSys blockNumber if the host chain is an Arbitrum chain. * That means that the block number returned for this is usable for event queries. - * This function will revert if the given node number does not exist. + * This function will revert if the given assertion hash does not exist. * @dev This function is meant for internal use only and has no stability guarantees. */ - function getNodeCreationBlockForLogLookup(uint64 nodeNum) external view returns (uint256); - - /** - * @notice Check if the specified node has been staked on by the provided staker. - * Only accurate at the latest confirmed node and afterwards. - */ - function nodeHasStaker(uint64 nodeNum, address staker) external view returns (bool); + function getAssertionCreationBlockForLogLookup(bytes32 assertionHash) external view returns (uint256); /** * @notice Get the address of the staker at the given index @@ -118,18 +100,11 @@ interface IRollupCore { function isStaked(address staker) external view returns (bool); /** - * @notice Get the latest staked node of the given staker - * @param staker Staker address to lookup - * @return Latest node staked of the staker - */ - function latestStakedNode(address staker) external view returns (uint64); - - /** - * @notice Get the current challenge of the given staker + * @notice Get the latest staked assertion of the given staker * @param staker Staker address to lookup - * @return Current challenge of the staker + * @return Latest assertion staked of the staker */ - function currentChallenge(address staker) external view returns (uint64); + function latestStakedAssertion(address staker) external view returns (bytes32); /** * @notice Get the amount staked of the given staker @@ -145,46 +120,14 @@ interface IRollupCore { */ function getStaker(address staker) external view returns (Staker memory); - /** - * @notice Get the original staker address of the zombie at the given index - * @param zombieNum Index of the zombie to lookup - * @return Original staker address of the zombie - */ - function zombieAddress(uint256 zombieNum) external view returns (address); - - /** - * @notice Get Latest node that the given zombie at the given index is staked on - * @param zombieNum Index of the zombie to lookup - * @return Latest node that the given zombie is staked on - */ - function zombieLatestStakedNode(uint256 zombieNum) external view returns (uint64); - - /// @return Current number of un-removed zombies - function zombieCount() external view returns (uint256); - - function isZombie(address staker) external view returns (bool); - /** * @notice Get the amount of funds withdrawable by the given address * @param owner Address to check the funds of * @return Amount of funds withdrawable by owner */ function withdrawableFunds(address owner) external view returns (uint256); - - /** - * @return Index of the first unresolved node - * @dev If all nodes have been resolved, this will be latestNodeCreated + 1 - */ - function firstUnresolvedNode() external view returns (uint64); - - /// @return Index of the latest confirmed node - function latestConfirmed() external view returns (uint64); - - /// @return Index of the latest rollup node created - function latestNodeCreated() external view returns (uint64); - - /// @return 
Ethereum block that the most recent stake was created - function lastStakeBlock() external view returns (uint64); + /// @return Index of the latest confirmed assertion + function latestConfirmed() external view returns (bytes32); /// @return Number of active stakers currently staked function stakerCount() external view returns (uint64); diff --git a/src/rollup/IRollupEventInbox.sol b/src/rollup/IRollupEventInbox.sol index beb1b4ed..2e79f7e6 100644 --- a/src/rollup/IRollupEventInbox.sol +++ b/src/rollup/IRollupEventInbox.sol @@ -13,5 +13,7 @@ interface IRollupEventInbox { function rollup() external view returns (address); + function updateRollupAddress() external; + function rollupInitialized(uint256 chainId, string calldata chainConfig) external; } diff --git a/src/rollup/IRollupLogic.sol b/src/rollup/IRollupLogic.sol index bc1520be..445744fb 100644 --- a/src/rollup/IRollupLogic.sol +++ b/src/rollup/IRollupLogic.sol @@ -9,7 +9,7 @@ import "../bridge/ISequencerInbox.sol"; import "../bridge/IOutbox.sol"; import "../bridge/IOwnable.sol"; -interface IRollupUserAbs is IRollupCore, IOwnable { +interface IRollupUser is IRollupCore, IOwnable { /// @dev the user logic just validated configuration and shouldn't write to state during init /// this allows the admin logic to ensure consistency on parameters. function initialize(address stakeToken) external view; @@ -18,82 +18,27 @@ interface IRollupUserAbs is IRollupCore, IOwnable { function removeWhitelistAfterValidatorAfk() external; - function isERC20Enabled() external view returns (bool); - - function rejectNextNode(address stakerAddress) external; - - function confirmNextNode(bytes32 blockHash, bytes32 sendRoot) external; - - function stakeOnExistingNode(uint64 nodeNum, bytes32 nodeHash) external; - - function stakeOnNewNode( - Assertion memory assertion, - bytes32 expectedNodeHash, - uint256 prevNodeInboxMaxCount + function confirmAssertion( + bytes32 assertionHash, + bytes32 prevAssertionHash, + ExecutionState calldata confirmState, + bytes32 winningEdgeId, + ConfigData calldata prevConfig, + bytes32 inboxAcc ) external; - function returnOldDeposit(address stakerAddress) external; - - function reduceDeposit(uint256 target) external; - - function removeZombie(uint256 zombieNum, uint256 maxNodes) external; - - function removeOldZombies(uint256 startIndex) external; - - function requiredStake( - uint256 blockNumber, - uint64 firstUnresolvedNodeNum, - uint64 latestCreatedNode - ) external view returns (uint256); - - function currentRequiredStake() external view returns (uint256); - - function countStakedZombies(uint64 nodeNum) external view returns (uint256); - - function countZombiesStakedOnChildren(uint64 nodeNum) external view returns (uint256); + function stakeOnNewAssertion(AssertionInputs calldata assertion, bytes32 expectedAssertionHash) external; - function requireUnresolvedExists() external view; + function returnOldDeposit() external; - function requireUnresolved(uint256 nodeNum) external view; + function reduceDeposit(uint256 target) external; function withdrawStakerFunds() external returns (uint256); - function createChallenge( - address[2] calldata stakers, - uint64[2] calldata nodeNums, - MachineStatus[2] calldata machineStatuses, - GlobalState[2] calldata globalStates, - uint64 numBlocks, - bytes32 secondExecutionHash, - uint256[2] calldata proposedTimes, - bytes32[2] calldata wasmModuleRoots - ) external; -} - -interface IRollupUser is IRollupUserAbs { - function newStakeOnExistingNode(uint64 nodeNum, bytes32 nodeHash) external 
payable; - - function newStakeOnNewNode( - Assertion calldata assertion, - bytes32 expectedNodeHash, - uint256 prevNodeInboxMaxCount - ) external payable; - - function addToDeposit(address stakerAddress) external payable; -} - -interface IRollupUserERC20 is IRollupUserAbs { - function newStakeOnExistingNode( - uint256 tokenAmount, - uint64 nodeNum, - bytes32 nodeHash - ) external; - - function newStakeOnNewNode( + function newStakeOnNewAssertion( uint256 tokenAmount, - Assertion calldata assertion, - bytes32 expectedNodeHash, - uint256 prevNodeInboxMaxCount + AssertionInputs calldata assertion, + bytes32 expectedAssertionHash ) external; function addToDeposit(address stakerAddress, uint256 tokenAmount) external; diff --git a/src/rollup/Node.sol b/src/rollup/Node.sol deleted file mode 100644 index 6961168a..00000000 --- a/src/rollup/Node.sol +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2021-2022, Offchain Labs, Inc. -// For license information, see https://github.com/OffchainLabs/nitro-contracts/blob/main/LICENSE -// SPDX-License-Identifier: BUSL-1.1 - -pragma solidity ^0.8.0; - -import "../state/GlobalState.sol"; -import "../state/Machine.sol"; - -struct ExecutionState { - GlobalState globalState; - MachineStatus machineStatus; -} - -struct Assertion { - ExecutionState beforeState; - ExecutionState afterState; - uint64 numBlocks; -} - -struct Node { - // Hash of the state of the chain as of this node - bytes32 stateHash; - // Hash of the data that can be challenged - bytes32 challengeHash; - // Hash of the data that will be committed if this node is confirmed - bytes32 confirmData; - // Index of the node previous to this one - uint64 prevNum; - // Deadline at which this node can be confirmed - uint64 deadlineBlock; - // Deadline at which a child of this node can be confirmed - uint64 noChildConfirmedBeforeBlock; - // Number of stakers staked on this node. This includes real stakers and zombies - uint64 stakerCount; - // Number of stakers staked on a child node. This includes real stakers and zombies - uint64 childStakerCount; - // This value starts at zero and is set to a value when the first child is created. 
After that it is constant until the node is destroyed or the owner destroys pending nodes - uint64 firstChildBlock; - // The number of the latest child of this node to be created - uint64 latestChildNumber; - // The block number when this node was created - uint64 createdAtBlock; - // A hash of all the data needed to determine this node's validity, to protect against reorgs - bytes32 nodeHash; -} - -/** - * @notice Utility functions for Node - */ -library NodeLib { - /** - * @notice Initialize a Node - * @param _stateHash Initial value of stateHash - * @param _challengeHash Initial value of challengeHash - * @param _confirmData Initial value of confirmData - * @param _prevNum Initial value of prevNum - * @param _deadlineBlock Initial value of deadlineBlock - * @param _nodeHash Initial value of nodeHash - */ - function createNode( - bytes32 _stateHash, - bytes32 _challengeHash, - bytes32 _confirmData, - uint64 _prevNum, - uint64 _deadlineBlock, - bytes32 _nodeHash - ) internal view returns (Node memory) { - Node memory node; - node.stateHash = _stateHash; - node.challengeHash = _challengeHash; - node.confirmData = _confirmData; - node.prevNum = _prevNum; - node.deadlineBlock = _deadlineBlock; - node.noChildConfirmedBeforeBlock = _deadlineBlock; - node.createdAtBlock = uint64(block.number); - node.nodeHash = _nodeHash; - return node; - } - - /** - * @notice Update child properties - * @param number The child number to set - */ - function childCreated(Node storage self, uint64 number) internal { - if (self.firstChildBlock == 0) { - self.firstChildBlock = uint64(block.number); - } - self.latestChildNumber = number; - } - - /** - * @notice Update the child confirmed deadline - * @param deadline The new deadline to set - */ - function newChildConfirmDeadline(Node storage self, uint64 deadline) internal { - self.noChildConfirmedBeforeBlock = deadline; - } - - /** - * @notice Check whether the current block number has met or passed the node's deadline - */ - function requirePastDeadline(Node memory self) internal view { - require(block.number >= self.deadlineBlock, "BEFORE_DEADLINE"); - } - - /** - * @notice Check whether the current block number has met or passed deadline for children of this node to be confirmed - */ - function requirePastChildConfirmDeadline(Node memory self) internal view { - require(block.number >= self.noChildConfirmedBeforeBlock, "CHILD_TOO_RECENT"); - } -} diff --git a/src/rollup/RollupAdminLogic.sol b/src/rollup/RollupAdminLogic.sol index b6c3771f..568fc28f 100644 --- a/src/rollup/RollupAdminLogic.sol +++ b/src/rollup/RollupAdminLogic.sol @@ -9,12 +9,9 @@ import "./IRollupLogic.sol"; import "./RollupCore.sol"; import "../bridge/IOutbox.sol"; import "../bridge/ISequencerInbox.sol"; -import "../challenge/IChallengeManager.sol"; import "../libraries/DoubleLogicUUPSUpgradeable.sol"; import "@openzeppelin/contracts/proxy/beacon/UpgradeableBeacon.sol"; -import {NO_CHAL_INDEX} from "../libraries/Constants.sol"; - contract RollupAdminLogic is RollupCore, IRollupAdmin, DoubleLogicUUPSUpgradeable { function initialize(Config calldata config, ContractDependencies calldata connectedContracts) external @@ -24,7 +21,6 @@ contract RollupAdminLogic is RollupCore, IRollupAdmin, DoubleLogicUUPSUpgradeabl { rollupDeploymentBlock = block.number; bridge = connectedContracts.bridge; - sequencerInbox = connectedContracts.sequencerInbox; connectedContracts.bridge.setDelayedInbox(address(connectedContracts.inbox), true); 
connectedContracts.bridge.setSequencerInbox(address(connectedContracts.sequencerInbox)); @@ -32,30 +28,21 @@ contract RollupAdminLogic is RollupCore, IRollupAdmin, DoubleLogicUUPSUpgradeabl outbox = connectedContracts.outbox; connectedContracts.bridge.setOutbox(address(connectedContracts.outbox), true); rollupEventInbox = connectedContracts.rollupEventInbox; - connectedContracts.bridge.setDelayedInbox( - address(connectedContracts.rollupEventInbox), - true - ); - connectedContracts.rollupEventInbox.rollupInitialized(config.chainId, config.chainConfig); - connectedContracts.sequencerInbox.addSequencerL2Batch( - 0, - "", - 1, - IGasRefunder(address(0)), - 0, - 1 - ); + // dont need to connect and initialize the event inbox if it's already been initialized + if (!bridge.allowedDelayedInboxes(address(connectedContracts.rollupEventInbox))) { + connectedContracts.bridge.setDelayedInbox(address(connectedContracts.rollupEventInbox), true); + connectedContracts.rollupEventInbox.rollupInitialized(config.chainId, config.chainConfig); + } + + if (connectedContracts.sequencerInbox.totalDelayedMessagesRead() == 0) { + connectedContracts.sequencerInbox.addSequencerL2Batch(0, "", 1, IGasRefunder(address(0)), 0, 1); + } - validatorUtils = connectedContracts.validatorUtils; validatorWalletCreator = connectedContracts.validatorWalletCreator; challengeManager = connectedContracts.challengeManager; - Node memory node = createInitialNode(); - initializeCore(node); - confirmPeriodBlocks = config.confirmPeriodBlocks; - extraChallengeTimeBlocks = config.extraChallengeTimeBlocks; chainId = config.chainId; baseStake = config.baseStake; wasmModuleRoot = config.wasmModuleRoot; @@ -70,27 +57,54 @@ contract RollupAdminLogic is RollupCore, IRollupAdmin, DoubleLogicUUPSUpgradeabl loserStakeEscrow = config.loserStakeEscrow; stakeToken = config.stakeToken; + anyTrustFastConfirmer = config.anyTrustFastConfirmer; + + bytes32 genesisExecutionHash = RollupLib.executionStateHash(config.genesisExecutionState); + bytes32 parentAssertionHash = bytes32(0); + bytes32 inboxAcc = bytes32(0); + bytes32 genesisHash = RollupLib.assertionHash({ + parentAssertionHash: parentAssertionHash, + afterStateHash: genesisExecutionHash, + inboxAcc: inboxAcc + }); + + uint256 currentInboxCount = bridge.sequencerMessageCount(); + // ensure to move the inbox forward by at least one message + if (currentInboxCount == config.genesisInboxCount) { + currentInboxCount += 1; + } + AssertionNode memory initialAssertion = AssertionNodeLib.createAssertion( + true, + RollupLib.configHash({ + wasmModuleRoot: wasmModuleRoot, + requiredStake: baseStake, + challengeManager: address(challengeManager), + confirmPeriodBlocks: confirmPeriodBlocks, + nextInboxPosition: uint64(currentInboxCount) + }) + ); + initializeCore(initialAssertion, genesisHash); + + AssertionInputs memory assertionInputs; + assertionInputs.afterState = config.genesisExecutionState; + emit AssertionCreated( + genesisHash, + parentAssertionHash, + assertionInputs, + inboxAcc, + currentInboxCount, + wasmModuleRoot, + baseStake, + address(challengeManager), + confirmPeriodBlocks + ); + if (_hostChainIsArbitrum) { + _assertionCreatedAtArbSysBlock[genesisHash] = ArbSys(address(100)).arbBlockNumber(); + } emit RollupInitialized(config.wasmModuleRoot, config.chainId); } - function createInitialNode() private view returns (Node memory) { - GlobalState memory emptyGlobalState; - bytes32 state = RollupLib.stateHashMem( - ExecutionState(emptyGlobalState, MachineStatus.FINISHED), - 1 // inboxMaxCount - force 
the first assertion to read a message - ); - return - NodeLib.createNode( - state, - 0, // challenge hash (not challengeable) - 0, // confirm data - 0, // prev node - uint64(block.number), // deadline block (not challengeable) - 0 // initial node has a node hash of 0 - ); - } - /** * Functions are only to reach this logic contract if the caller is the owner * so there is no need for a redundant onlyOwner check @@ -128,10 +142,10 @@ contract RollupAdminLogic is RollupCore, IRollupAdmin, DoubleLogicUUPSUpgradeabl /** * @notice Pause interaction with the rollup contract. - * The time spent paused is not incremented in the rollup's timing for node validation. - * @dev this function may be frontrun by a validator (ie to create a node before the system is paused). + * The time spent paused is not incremented in the rollup's timing for assertion validation. + * @dev this function may be frontrun by a validator (ie to create a assertion before the system is paused). * The pause should be called atomically with required checks to be sure the system is paused in a consistent state. - * The RollupAdmin may execute a check against the Rollup's latest node num or the ChallengeManager, then execute this function atomically with it. + * The RollupAdmin may execute a check against the Rollup's latest assertion num or the OldChallengeManager, then execute this function atomically with it. */ function pause() external override { _pause(); @@ -193,7 +207,7 @@ contract RollupAdminLogic is RollupCore, IRollupAdmin, DoubleLogicUUPSUpgradeabl } /** - * @notice Set number of blocks until a node is considered confirmed + * @notice Set number of blocks until a assertion is considered confirmed * @param newConfirmPeriod new number of blocks */ function setConfirmPeriodBlocks(uint64 newConfirmPeriod) external override { @@ -202,15 +216,6 @@ contract RollupAdminLogic is RollupCore, IRollupAdmin, DoubleLogicUUPSUpgradeabl emit OwnerFunctionCalled(9); } - /** - * @notice Set number of extra blocks after a challenge - * @param newExtraTimeBlocks new number of blocks - */ - function setExtraChallengeTimeBlocks(uint64 newExtraTimeBlocks) external override { - extraChallengeTimeBlocks = newExtraTimeBlocks; - emit OwnerFunctionCalled(10); - } - /** * @notice Set base stake required for an assertion * @param newBaseStake minimum amount of stake required @@ -220,89 +225,44 @@ contract RollupAdminLogic is RollupCore, IRollupAdmin, DoubleLogicUUPSUpgradeabl emit OwnerFunctionCalled(12); } - /** - * @notice Set the token used for stake, where address(0) == eth - * @dev Before changing the base stake token, you might need to change the - * implementation of the Rollup User facet! - * @param newStakeToken address of token used for staking - */ - function setStakeToken(address newStakeToken) external override whenPaused { - /* - * To change the stake token without breaking consistency one would need to: - * Pause the system, have all stakers remove their funds, - * update the user logic to handle ERC20s, change the stake token, then resume. - * - * Note: To avoid loss of funds stakers must remove their funds and claim all the - * available withdrawable funds before the system is paused. - */ - bool expectERC20Support = newStakeToken != address(0); - // this assumes the rollup isn't its own admin. if needed, instead use a ProxyAdmin by OZ! 
- bool actualERC20Support = IRollupUser(address(this)).isERC20Enabled(); - require(actualERC20Support == expectERC20Support, "NO_USER_LOGIC_SUPPORT"); - require(stakerCount() == 0, "NO_ACTIVE_STAKERS"); - require(totalWithdrawableFunds == 0, "NO_PENDING_WITHDRAW"); - stakeToken = newStakeToken; - emit OwnerFunctionCalled(13); - } - - /** - * @notice Upgrades the implementation of a beacon controlled by the rollup - * @param beacon address of beacon to be upgraded - * @param newImplementation new address of implementation - */ - function upgradeBeacon(address beacon, address newImplementation) external override { - UpgradeableBeacon(beacon).upgradeTo(newImplementation); - emit OwnerFunctionCalled(20); - } - - function forceResolveChallenge(address[] calldata stakerA, address[] calldata stakerB) - external - override - whenPaused - { - require(stakerA.length > 0, "EMPTY_ARRAY"); - require(stakerA.length == stakerB.length, "WRONG_LENGTH"); - for (uint256 i = 0; i < stakerA.length; i++) { - uint64 chall = inChallenge(stakerA[i], stakerB[i]); - - require(chall != NO_CHAL_INDEX, "NOT_IN_CHALL"); - clearChallenge(stakerA[i]); - clearChallenge(stakerB[i]); - challengeManager.clearChallenge(chall); - } - emit OwnerFunctionCalled(21); - } - function forceRefundStaker(address[] calldata staker) external override whenPaused { require(staker.length > 0, "EMPTY_ARRAY"); for (uint256 i = 0; i < staker.length; i++) { - require(_stakerMap[staker[i]].currentChallenge == NO_CHAL_INDEX, "STAKER_IN_CHALL"); + requireInactiveStaker(staker[i]); reduceStakeTo(staker[i], 0); - turnIntoZombie(staker[i]); } emit OwnerFunctionCalled(22); } - function forceCreateNode( - uint64 prevNode, - uint256 prevNodeInboxMaxCount, - Assertion calldata assertion, - bytes32 expectedNodeHash + function forceCreateAssertion( + bytes32 prevAssertionHash, + AssertionInputs calldata assertion, + bytes32 expectedAssertionHash ) external override whenPaused { - require(prevNode == latestConfirmed(), "ONLY_LATEST_CONFIRMED"); - - createNewNode(assertion, prevNode, prevNodeInboxMaxCount, expectedNodeHash); + // To update the wasm module root in the case of a bug: + // 0. pause the contract + // 1. update the wasm module root in the contract + // 2. update the config hash of the assertion after which you wish to use the new wasm module root (functionality not written yet) + // 3. force refund the stake of the current leaf assertion(s) + // 4. create a new assertion using the assertion with the updated config has as a prev + // 5. force confirm it - this is necessary to set latestConfirmed on the correct line + // 6. 
unpause the contract + + // Normally, a new assertion is created using its prev's confirmPeriodBlocks + // in the case of a force create, we use the rollup's current confirmPeriodBlocks + createNewAssertion(assertion, prevAssertionHash, expectedAssertionHash); emit OwnerFunctionCalled(23); } - function forceConfirmNode( - uint64 nodeNum, - bytes32 blockHash, - bytes32 sendRoot + function forceConfirmAssertion( + bytes32 assertionHash, + bytes32 parentAssertionHash, + ExecutionState calldata confirmState, + bytes32 inboxAcc ) external override whenPaused { - // this skips deadline, staker and zombie validation - confirmNode(nodeNum, blockHash, sendRoot); + // this skip deadline, prev, challenge validations + confirmAssertionInternal(assertionHash, parentAssertionHash, confirmState, inboxAcc); emit OwnerFunctionCalled(24); } @@ -341,36 +301,6 @@ contract RollupAdminLogic is RollupCore, IRollupAdmin, DoubleLogicUUPSUpgradeabl emit OwnerFunctionCalled(28); } - function createNitroMigrationGenesis(Assertion calldata assertion) external whenPaused { - bytes32 expectedSendRoot = bytes32(0); - uint64 expectedInboxCount = 1; - - require(latestNodeCreated() == 0, "NON_GENESIS_NODES_EXIST"); - require(GlobalStateLib.isEmpty(assertion.beforeState.globalState), "NOT_EMPTY_BEFORE"); - require( - assertion.beforeState.machineStatus == MachineStatus.FINISHED, - "BEFORE_MACHINE_NOT_FINISHED" - ); - // accessors such as state.getSendRoot not available for calldata structs, only memory - require( - assertion.afterState.globalState.bytes32Vals[1] == expectedSendRoot, - "NOT_ZERO_SENDROOT" - ); - require( - assertion.afterState.globalState.u64Vals[0] == expectedInboxCount, - "INBOX_NOT_AT_ONE" - ); - require(assertion.afterState.globalState.u64Vals[1] == 0, "POSITION_IN_MESSAGE_NOT_ZERO"); - require( - assertion.afterState.machineStatus == MachineStatus.FINISHED, - "AFTER_MACHINE_NOT_FINISHED" - ); - bytes32 genesisBlockHash = assertion.afterState.globalState.bytes32Vals[0]; - createNewNode(assertion, 0, expectedInboxCount, bytes32(0)); - confirmNode(1, genesisBlockHash, expectedSendRoot); - emit OwnerFunctionCalled(29); - } - /** * @notice set the validatorWhitelistDisabled flag * @param _validatorWhitelistDisabled new value of validatorWhitelistDisabled, i.e. 
true = disabled @@ -379,4 +309,22 @@ contract RollupAdminLogic is RollupCore, IRollupAdmin, DoubleLogicUUPSUpgradeabl validatorWhitelistDisabled = _validatorWhitelistDisabled; emit OwnerFunctionCalled(30); } + + /** + * @notice set the anyTrustFastConfirmer address + * @param _anyTrustFastConfirmer new value of anyTrustFastConfirmer + */ + function setAnyTrustFastConfirmer(address _anyTrustFastConfirmer) external { + anyTrustFastConfirmer = _anyTrustFastConfirmer; + emit OwnerFunctionCalled(31); + } + + /** + * @notice set a new challengeManager contract + * @param _challengeManager new value of challengeManager + */ + function setChallengeManager(address _challengeManager) external { + challengeManager = IEdgeChallengeManager(_challengeManager); + emit OwnerFunctionCalled(32); + } } diff --git a/src/rollup/RollupCore.sol b/src/rollup/RollupCore.sol index fabe83fb..6101a682 100644 --- a/src/rollup/RollupCore.sol +++ b/src/rollup/RollupCore.sol @@ -6,137 +6,150 @@ pragma solidity ^0.8.0; import "@openzeppelin/contracts-upgradeable/security/PausableUpgradeable.sol"; -import "./Node.sol"; +import "./Assertion.sol"; import "./RollupLib.sol"; import "./IRollupEventInbox.sol"; import "./IRollupCore.sol"; -import "../challenge/IChallengeManager.sol"; +import "../state/Machine.sol"; import "../bridge/ISequencerInbox.sol"; import "../bridge/IBridge.sol"; import "../bridge/IOutbox.sol"; - -import "../precompiles/ArbSys.sol"; - -import {NO_CHAL_INDEX} from "../libraries/Constants.sol"; +import "../challengeV2/EdgeChallengeManager.sol"; +import "../libraries/ArbitrumChecker.sol"; abstract contract RollupCore is IRollupCore, PausableUpgradeable { - using NodeLib for Node; + using AssertionNodeLib for AssertionNode; using GlobalStateLib for GlobalState; // Rollup Config - uint64 public confirmPeriodBlocks; - uint64 public extraChallengeTimeBlocks; uint256 public chainId; + + // These 4 config should be stored into the prev and not used directly + // An assertion can be confirmed after confirmPeriodBlocks when it is unchallenged + uint64 public confirmPeriodBlocks; + + // ------------------------------ + // STAKING + // ------------------------------ + + // Overall + // ------------------------------ + // In order to create a new assertion the validator creating it must be staked. Only one stake + // is needed per consistent lineage of assertions, so additional stakes must be placed when + // lineages diverge. + // As an example, for the following chain only one stake would be locked up in the C assertion + // A -- B -- C + // However for the following chain 2 stakes would be locked up, in C and in D + // A -- B -- C + // \-- D + // Since we know that only one assertion chain can be correct, we only need one stake available + // to be refunded at any one time, and any more than one stake can be immediately confiscated. + // So in the above situation although 2 stakes are not available to be withdrawn as they are locked + // by C and D, only 1 stake needs to remain in the contract since one of the stakes will eventually + // be confiscated anyway. + // In practice, what we do here is increase the withdrawable amount of an escrow address that is + // expected to be controlled by the rollup owner, whenever the lineage forks. + + // Moving stake + // ------------------------------ + // Since we only need one stake per lineage we can lock the stake of the validator that last extended that + // lineage. All other stakes within that lineage are then free to be moved to other lineages, or be withdrawn. 
+ // Additionally, it's inconsistent for a validator to stake on two different lineages, and as a validator + // should only need to have one stake in the system at any one time. + // In order to create a new assertion a validator needs to have free stake. Since stake is freed from an assertion + // when another assertion builds on it, we know that if the assertion that was last staked on by a validator + // has children, then that validator has free stake. Likewise, if the last staked assertion does not have children + // but it is the parent of the assertion the validator is trying to create, then we know that by the time the assertion + // is created it will have children, so we can allow this condition as well. + + // Updating stake amount + // ------------------------------ + // The stake required to create an assertion can be updated by the rollup owner. A required stake value is stored on each + // assertion, and shows how much stake is required to create the next assertion. Since we only store the last + // assertion made by a validator, we don't know if it has previously staked on lower/higher amounts and + // therefore offer partial withdrawals due to this difference. Instead we enforce that either all of the + // validators stake is locked, or none of it. uint256 public baseStake; + bytes32 public wasmModuleRoot; + // When there is a challenge, we trust the challenge manager to determine the winner + IEdgeChallengeManager public challengeManager; IInbox public inbox; IBridge public bridge; IOutbox public outbox; - ISequencerInbox public sequencerInbox; IRollupEventInbox public rollupEventInbox; - IChallengeManager public override challengeManager; - // misc useful contracts when interacting with the rollup - address public validatorUtils; address public validatorWalletCreator; - // when a staker loses a challenge, half of their funds get escrowed in this address + // only 1 child can be confirmed, the excess/loser stake will be sent to this address address public loserStakeEscrow; address public stakeToken; uint256 public minimumAssertionPeriod; mapping(address => bool) public isValidator; - // Stakers become Zombies after losing a challenge - struct Zombie { - address stakerAddress; - uint64 latestStakedNode; - } - - uint64 private _latestConfirmed; - uint64 private _firstUnresolvedNode; - uint64 private _latestNodeCreated; - uint64 private _lastStakeBlock; - mapping(uint64 => Node) private _nodes; - mapping(uint64 => mapping(address => bool)) private _nodeStakers; + bytes32 private _latestConfirmed; + mapping(bytes32 => AssertionNode) private _assertions; address[] private _stakerList; mapping(address => Staker) public _stakerMap; - Zombie[] private _zombies; - mapping(address => uint256) private _withdrawableFunds; uint256 public totalWithdrawableFunds; uint256 public rollupDeploymentBlock; - // The node number of the initial node - uint64 internal constant GENESIS_NODE = 0; - bool public validatorWhitelistDisabled; + address public anyTrustFastConfirmer; // If the chain this RollupCore is deployed on is an Arbitrum chain. - bool internal immutable _hostChainIsArbitrum; + bool internal immutable _hostChainIsArbitrum = ArbitrumChecker.runningOnArbitrum(); // If the chain RollupCore is deployed on, this will contain the ArbSys.blockNumber() at each node's creation. 
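// The staking comments above describe when a validator's stake is "free" to be reused:
// either the assertion they last staked on already has a child, or it is the parent of the
// assertion they are about to create. Below is a minimal, self-contained sketch of that
// rule; the contract name, struct fields and mappings are simplified assumptions made for
// illustration only and do not mirror RollupCore's actual storage layout.

pragma solidity ^0.8.0;

contract FreeStakeSketch {
    struct AssertionMeta {
        // zero until the first child of this assertion is created
        uint64 firstChildBlock;
    }

    mapping(bytes32 => AssertionMeta) internal assertions;
    mapping(address => bytes32) internal latestStakedAssertion;

    // A validator's stake is free if the assertion they last staked on already has a child
    // (someone extended that lineage, so the extender now holds the lineage's locked stake),
    // or if the new assertion being created builds directly on it, in which case it will
    // gain a child the moment the new assertion is created.
    function hasFreeStake(address validator, bytes32 prevOfNewAssertion) public view returns (bool) {
        bytes32 lastStaked = latestStakedAssertion[validator];
        bool lastStakedHasChild = assertions[lastStaked].firstChildBlock > 0;
        return lastStakedHasChild || lastStaked == prevOfNewAssertion;
    }
}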
- mapping(uint64 => uint256) internal _nodeCreatedAtArbSysBlock; + mapping(bytes32 => uint256) internal _assertionCreatedAtArbSysBlock; - constructor() { - (bool ok, bytes memory data) = address(100).staticcall( - abi.encodeWithSelector(ArbSys.arbOSVersion.selector) - ); - _hostChainIsArbitrum = ok && data.length == 32; + function sequencerInbox() public view virtual returns (ISequencerInbox) { + return ISequencerInbox(bridge.sequencerInbox()); } /** - * @notice Get a storage reference to the Node for the given node index - * @param nodeNum Index of the node - * @return Node struct + * @notice Get a storage reference to the Assertion for the given assertion hash + * @dev The assertion may not exists + * @param assertionHash Id of the assertion + * @return Assertion struct */ - function getNodeStorage(uint64 nodeNum) internal view returns (Node storage) { - return _nodes[nodeNum]; + function getAssertionStorage(bytes32 assertionHash) internal view returns (AssertionNode storage) { + require(assertionHash != bytes32(0), "ASSERTION_ID_CANNOT_BE_ZERO"); + return _assertions[assertionHash]; } /** - * @notice Get the Node for the given index. + * @notice Get the Assertion for the given index. */ - function getNode(uint64 nodeNum) public view override returns (Node memory) { - return getNodeStorage(nodeNum); + function getAssertion(bytes32 assertionHash) public view override returns (AssertionNode memory) { + return getAssertionStorage(assertionHash); } /** - * @notice Returns the block in which the given node was created for looking up its creation event. - * Unlike the Node's createdAtBlock field, this will be the ArbSys blockNumber if the host chain is an Arbitrum chain. + * @notice Returns the block in which the given assertion was created for looking up its creation event. + * Unlike the assertion's createdAtBlock field, this will be the ArbSys blockNumber if the host chain is an Arbitrum chain. * That means that the block number returned for this is usable for event queries. - * This function will revert if the given node number does not exist. + * This function will revert if the given assertion hash does not exist. * @dev This function is meant for internal use only and has no stability guarantees. */ - function getNodeCreationBlockForLogLookup(uint64 nodeNum) - external - view - override - returns (uint256) - { + function getAssertionCreationBlockForLogLookup(bytes32 assertionHash) external view override returns (uint256) { if (_hostChainIsArbitrum) { - uint256 blockNum = _nodeCreatedAtArbSysBlock[nodeNum]; - require(blockNum > 0, "NO_NODE"); + uint256 blockNum = _assertionCreatedAtArbSysBlock[assertionHash]; + require(blockNum > 0, "NO_ASSERTION"); return blockNum; } else { - Node storage node = getNodeStorage(nodeNum); - require(node.deadlineBlock != 0, "NO_NODE"); - return node.createdAtBlock; + AssertionNode storage assertion = getAssertionStorage(assertionHash); + assertion.requireExists(); + return assertion.createdAtBlock; } } - /** - * @notice Check if the specified node has been staked on by the provided staker. - * Only accurate at the latest confirmed node and afterwards. 
- */ - function nodeHasStaker(uint64 nodeNum, address staker) public view override returns (bool) { - return _nodeStakers[nodeNum][staker]; - } - /** * @notice Get the address of the staker at the given index * @param stakerNum Index of the staker @@ -156,31 +169,12 @@ abstract contract RollupCore is IRollupCore, PausableUpgradeable { } /** - * @notice Check whether the given staker is staked on the latest confirmed node, - * which includes if the staker is staked on a descendent of the latest confirmed node. - * @param staker Staker address to check - * @return True or False for whether the staker was staked - */ - function isStakedOnLatestConfirmed(address staker) public view returns (bool) { - return _stakerMap[staker].isStaked && nodeHasStaker(_latestConfirmed, staker); - } - - /** - * @notice Get the latest staked node of the given staker - * @param staker Staker address to lookup - * @return Latest node staked of the staker - */ - function latestStakedNode(address staker) public view override returns (uint64) { - return _stakerMap[staker].latestStakedNode; - } - - /** - * @notice Get the current challenge of the given staker + * @notice Get the latest staked assertion of the given staker * @param staker Staker address to lookup - * @return Current challenge of the staker + * @return Latest assertion staked of the staker */ - function currentChallenge(address staker) public view override returns (uint64) { - return _stakerMap[staker].currentChallenge; + function latestStakedAssertion(address staker) public view override returns (bytes32) { + return _stakerMap[staker].latestStakedAssertion; } /** @@ -201,47 +195,6 @@ abstract contract RollupCore is IRollupCore, PausableUpgradeable { return _stakerMap[staker]; } - /** - * @notice Get the original staker address of the zombie at the given index - * @param zombieNum Index of the zombie to lookup - * @return Original staker address of the zombie - */ - function zombieAddress(uint256 zombieNum) public view override returns (address) { - return _zombies[zombieNum].stakerAddress; - } - - /** - * @notice Get Latest node that the given zombie at the given index is staked on - * @param zombieNum Index of the zombie to lookup - * @return Latest node that the given zombie is staked on - */ - function zombieLatestStakedNode(uint256 zombieNum) public view override returns (uint64) { - return _zombies[zombieNum].latestStakedNode; - } - - /** - * @notice Retrieves stored information about a requested zombie - * @param zombieNum Index of the zombie to lookup - * @return A structure with information about the requested staker - */ - function getZombieStorage(uint256 zombieNum) internal view returns (Zombie storage) { - return _zombies[zombieNum]; - } - - /// @return Current number of un-removed zombies - function zombieCount() public view override returns (uint256) { - return _zombies.length; - } - - function isZombie(address staker) public view override returns (bool) { - for (uint256 i = 0; i < _zombies.length; i++) { - if (staker == _zombies[i].stakerAddress) { - return true; - } - } - return false; - } - /** * @notice Get the amount of funds withdrawable by the given address * @param user Address to check the funds of @@ -251,145 +204,76 @@ abstract contract RollupCore is IRollupCore, PausableUpgradeable { return _withdrawableFunds[user]; } - /** - * @return Index of the first unresolved node - * @dev If all nodes have been resolved, this will be latestNodeCreated + 1 - */ - function firstUnresolvedNode() public view override returns (uint64) { - 
return _firstUnresolvedNode; - } - - /// @return Index of the latest confirmed node - function latestConfirmed() public view override returns (uint64) { + /// @return Index of the latest confirmed assertion + function latestConfirmed() public view override returns (bytes32) { return _latestConfirmed; } - /// @return Index of the latest rollup node created - function latestNodeCreated() public view override returns (uint64) { - return _latestNodeCreated; - } - - /// @return Ethereum block that the most recent stake was created - function lastStakeBlock() external view override returns (uint64) { - return _lastStakeBlock; - } - /// @return Number of active stakers currently staked function stakerCount() public view override returns (uint64) { return uint64(_stakerList.length); } /** - * @notice Initialize the core with an initial node - * @param initialNode Initial node to start the chain with + * @notice Initialize the core with an initial assertion + * @param initialAssertion Initial assertion to start the chain with */ - function initializeCore(Node memory initialNode) internal { + function initializeCore(AssertionNode memory initialAssertion, bytes32 assertionHash) internal { __Pausable_init(); - _nodes[GENESIS_NODE] = initialNode; - _firstUnresolvedNode = GENESIS_NODE + 1; - if (_hostChainIsArbitrum) { - _nodeCreatedAtArbSysBlock[GENESIS_NODE] = ArbSys(address(100)).arbBlockNumber(); - } + initialAssertion.status = AssertionStatus.Confirmed; + _assertions[assertionHash] = initialAssertion; + _latestConfirmed = assertionHash; } /** - * @notice React to a new node being created by storing it an incrementing the latest node counter - * @param node Node that was newly created + * @dev This function will validate the parentAssertionHash, confirmState and inboxAcc against the assertionHash + * and check if the assertionHash is currently pending. If all checks pass, the assertion will be confirmed. 
*/ - function nodeCreated(Node memory node) internal { - _latestNodeCreated++; - _nodes[_latestNodeCreated] = node; - if (_hostChainIsArbitrum) { - _nodeCreatedAtArbSysBlock[_latestNodeCreated] = ArbSys(address(100)).arbBlockNumber(); - } - } + function confirmAssertionInternal( + bytes32 assertionHash, + bytes32 parentAssertionHash, + ExecutionState calldata confirmState, + bytes32 inboxAcc + ) internal { + AssertionNode storage assertion = getAssertionStorage(assertionHash); + // Check that assertion is pending, this also checks that assertion exists + require(assertion.status == AssertionStatus.Pending, "NOT_PENDING"); - /// @notice Reject the next unresolved node - function _rejectNextNode() internal { - _firstUnresolvedNode++; - } + // Authenticate data against assertionHash pre-image + require( + assertionHash + == RollupLib.assertionHash({ + parentAssertionHash: parentAssertionHash, + afterState: confirmState, + inboxAcc: inboxAcc + }), + "CONFIRM_DATA" + ); - function confirmNode( - uint64 nodeNum, - bytes32 blockHash, - bytes32 sendRoot - ) internal { - Node storage node = getNodeStorage(nodeNum); - // Authenticate data against node's confirm data pre-image - require(node.confirmData == RollupLib.confirmHash(blockHash, sendRoot), "CONFIRM_DATA"); + bytes32 blockHash = confirmState.globalState.bytes32Vals[0]; + bytes32 sendRoot = confirmState.globalState.bytes32Vals[1]; // trusted external call to outbox outbox.updateSendRoot(sendRoot, blockHash); - _latestConfirmed = nodeNum; - _firstUnresolvedNode = nodeNum + 1; + _latestConfirmed = assertionHash; + assertion.status = AssertionStatus.Confirmed; - emit NodeConfirmed(nodeNum, blockHash, sendRoot); + emit AssertionConfirmed(assertionHash, blockHash, sendRoot); } /** - * @notice Create a new stake at latest confirmed node + * @notice Create a new stake at latest confirmed assertion * @param stakerAddress Address of the new staker * @param depositAmount Stake amount of the new staker */ function createNewStake(address stakerAddress, uint256 depositAmount) internal { uint64 stakerIndex = uint64(_stakerList.length); _stakerList.push(stakerAddress); - _stakerMap[stakerAddress] = Staker( - depositAmount, - stakerIndex, - _latestConfirmed, - NO_CHAL_INDEX, // new staker is not in challenge - true - ); - _nodeStakers[_latestConfirmed][stakerAddress] = true; - _lastStakeBlock = uint64(block.number); + _stakerMap[stakerAddress] = Staker(depositAmount, _latestConfirmed, stakerIndex, true); emit UserStakeUpdated(stakerAddress, 0, depositAmount); } - /** - * @notice Check to see whether the two stakers are in the same challenge - * @param stakerAddress1 Address of the first staker - * @param stakerAddress2 Address of the second staker - * @return Address of the challenge that the two stakers are in - */ - function inChallenge(address stakerAddress1, address stakerAddress2) - internal - view - returns (uint64) - { - Staker storage staker1 = _stakerMap[stakerAddress1]; - Staker storage staker2 = _stakerMap[stakerAddress2]; - uint64 challenge = staker1.currentChallenge; - require(challenge != NO_CHAL_INDEX, "NO_CHAL"); - require(challenge == staker2.currentChallenge, "DIFF_IN_CHAL"); - return challenge; - } - - /** - * @notice Make the given staker as not being in a challenge - * @param stakerAddress Address of the staker to remove from a challenge - */ - function clearChallenge(address stakerAddress) internal { - Staker storage staker = _stakerMap[stakerAddress]; - staker.currentChallenge = NO_CHAL_INDEX; - } - - /** - * @notice Mark both the 
given stakers as engaged in the challenge - * @param staker1 Address of the first staker - * @param staker2 Address of the second staker - * @param challenge Address of the challenge both stakers are now in - */ - function challengeStarted( - address staker1, - address staker2, - uint64 challenge - ) internal { - _stakerMap[staker1].currentChallenge = challenge; - _stakerMap[staker2].currentChallenge = challenge; - } - /** * @notice Add to the stake of the given staker by the given amount * @param stakerAddress Address of the staker to increase the stake of @@ -420,102 +304,19 @@ abstract contract RollupCore is IRollupCore, PausableUpgradeable { return amountWithdrawn; } - /** - * @notice Remove the given staker and turn them into a zombie - * @param stakerAddress Address of the staker to remove - */ - function turnIntoZombie(address stakerAddress) internal { - Staker storage staker = _stakerMap[stakerAddress]; - _zombies.push(Zombie(stakerAddress, staker.latestStakedNode)); - deleteStaker(stakerAddress); - } - - /** - * @notice Update the latest staked node of the zombie at the given index - * @param zombieNum Index of the zombie to move - * @param latest New latest node the zombie is staked on - */ - function zombieUpdateLatestStakedNode(uint256 zombieNum, uint64 latest) internal { - _zombies[zombieNum].latestStakedNode = latest; - } - - /** - * @notice Remove the zombie at the given index - * @param zombieNum Index of the zombie to remove - */ - function removeZombie(uint256 zombieNum) internal { - _zombies[zombieNum] = _zombies[_zombies.length - 1]; - _zombies.pop(); - } - - /** - * @notice Mark the given staker as staked on this node - * @param staker Address of the staker to mark - */ - function addStaker(uint64 nodeNum, address staker) internal { - require(!_nodeStakers[nodeNum][staker], "ALREADY_STAKED"); - _nodeStakers[nodeNum][staker] = true; - Node storage node = getNodeStorage(nodeNum); - require(node.deadlineBlock != 0, "NO_NODE"); - - uint64 prevCount = node.stakerCount; - node.stakerCount = prevCount + 1; - - if (nodeNum > GENESIS_NODE) { - Node storage parent = getNodeStorage(node.prevNum); - parent.childStakerCount++; - if (prevCount == 0) { - parent.newChildConfirmDeadline(uint64(block.number) + confirmPeriodBlocks); - } - } - } - - /** - * @notice Remove the given staker from this node - * @param staker Address of the staker to remove - */ - function removeStaker(uint64 nodeNum, address staker) internal { - require(_nodeStakers[nodeNum][staker], "NOT_STAKED"); - _nodeStakers[nodeNum][staker] = false; - - Node storage node = getNodeStorage(nodeNum); - node.stakerCount--; - - if (nodeNum > GENESIS_NODE) { - getNodeStorage(node.prevNum).childStakerCount--; - } - } - /** * @notice Remove the given staker and return their stake - * This should not be called if the staker is staked on a descendent of the latest confirmed node + * This should only be called when the staker is inactive * @param stakerAddress Address of the staker withdrawing their stake */ function withdrawStaker(address stakerAddress) internal { Staker storage staker = _stakerMap[stakerAddress]; - uint64 latestConfirmedNum = latestConfirmed(); - if (nodeHasStaker(latestConfirmedNum, stakerAddress)) { - // Withdrawing a staker whose latest staked node isn't resolved should be impossible - assert(staker.latestStakedNode == latestConfirmedNum); - removeStaker(latestConfirmedNum, stakerAddress); - } uint256 initialStaked = staker.amountStaked; increaseWithdrawableFunds(stakerAddress, initialStaked); 
deleteStaker(stakerAddress); emit UserStakeUpdated(stakerAddress, initialStaked, 0); } - /** - * @notice Advance the given staker to the given node - * @param stakerAddress Address of the staker adding their stake - * @param nodeNum Index of the node to stake on - */ - function stakeOnNode(address stakerAddress, uint64 nodeNum) internal { - Staker storage staker = _stakerMap[stakerAddress]; - addStaker(nodeNum, stakerAddress); - staker.latestStakedNode = nodeNum; - } - /** * @notice Clear the withdrawable funds for the given address * @param account Address of the account to remove funds from @@ -555,130 +356,200 @@ abstract contract RollupCore is IRollupCore, PausableUpgradeable { delete _stakerMap[stakerAddress]; } - struct StakeOnNewNodeFrame { - uint256 currentInboxSize; - Node node; - bytes32 executionHash; - Node prevNode; - bytes32 lastHash; - bool hasSibling; - uint64 deadlineBlock; - bytes32 sequencerBatchAcc; - } + function createNewAssertion( + AssertionInputs calldata assertion, + bytes32 prevAssertionHash, + bytes32 expectedAssertionHash + ) internal returns (bytes32) { + // Validate the config hash + RollupLib.validateConfigHash( + assertion.beforeStateData.configData, getAssertionStorage(prevAssertionHash).configHash + ); - function createNewNode( - Assertion calldata assertion, - uint64 prevNodeNum, - uint256 prevNodeInboxMaxCount, - bytes32 expectedNodeHash - ) internal returns (bytes32 newNodeHash) { + // reading inbox messages always terminates in either a finished or errored state + // although the challenge protocol that any invalid terminal state will be proven incorrect + // we can do a quick sanity check here require( - assertion.afterState.machineStatus == MachineStatus.FINISHED || - assertion.afterState.machineStatus == MachineStatus.ERRORED, + assertion.afterState.machineStatus == MachineStatus.FINISHED + || assertion.afterState.machineStatus == MachineStatus.ERRORED, "BAD_AFTER_STATUS" ); - StakeOnNewNodeFrame memory memoryFrame; + // validate the provided before state is correct by checking that it's part of the prev assertion hash + require( + RollupLib.assertionHash( + assertion.beforeStateData.prevPrevAssertionHash, + assertion.beforeState, + assertion.beforeStateData.sequencerBatchAcc + ) == prevAssertionHash, + "INVALID_BEFORE_STATE" + ); + + // The rollup cannot advance from an errored state + // If it reaches an errored state it must be corrected by an administrator + // This will involve updating the wasm root and creating an alternative assertion + // that consumes the correct number of inbox messages, and correctly transitions to the + // FINISHED state so that normal progress can continue + require(assertion.beforeState.machineStatus == MachineStatus.FINISHED, "BAD_PREV_STATUS"); + + AssertionNode storage prevAssertion = getAssertionStorage(prevAssertionHash); + uint256 nextInboxPosition; + bytes32 sequencerBatchAcc; { - // validate data - memoryFrame.prevNode = getNode(prevNodeNum); - memoryFrame.currentInboxSize = bridge.sequencerMessageCount(); - - // Make sure the previous state is correct against the node being built on - require( - RollupLib.stateHash(assertion.beforeState, prevNodeInboxMaxCount) == - memoryFrame.prevNode.stateHash, - "PREV_STATE_HASH" - ); - - // Ensure that the assertion doesn't read past the end of the current inbox - uint64 afterInboxCount = assertion.afterState.globalState.getInboxPosition(); + uint64 afterInboxPosition = assertion.afterState.globalState.getInboxPosition(); uint64 prevInboxPosition = 
assertion.beforeState.globalState.getInboxPosition(); - require(afterInboxCount >= prevInboxPosition, "INBOX_BACKWARDS"); - if (afterInboxCount == prevInboxPosition) { + require(afterInboxPosition >= prevInboxPosition, "INBOX_BACKWARDS"); + if (assertion.afterState.machineStatus == MachineStatus.ERRORED) { + // the errored position must still be within the correct message bounds require( - assertion.afterState.globalState.getPositionInMessage() >= - assertion.beforeState.globalState.getPositionInMessage(), - "INBOX_POS_IN_MSG_BACKWARDS" + afterInboxPosition <= assertion.beforeStateData.configData.nextInboxPosition, + "ERRORED_INBOX_TOO_FAR" ); - } - // See validator/assertion.go ExecutionState RequiredBatches() for reasoning - if ( - assertion.afterState.machineStatus == MachineStatus.ERRORED || - assertion.afterState.globalState.getPositionInMessage() > 0 - ) { - // The current inbox message was read - afterInboxCount++; - } - require(afterInboxCount <= memoryFrame.currentInboxSize, "INBOX_PAST_END"); - // This gives replay protection against the state of the inbox - if (afterInboxCount > 0) { - memoryFrame.sequencerBatchAcc = bridge.sequencerInboxAccs(afterInboxCount - 1); - } - } - { - memoryFrame.executionHash = RollupLib.executionHash(assertion); + // and cannot go backwards + require(afterInboxPosition >= prevInboxPosition, "ERRORED_INBOX_TOO_FEW"); + } else if (assertion.afterState.machineStatus == MachineStatus.FINISHED) { + // Assertions must consume exactly all inbox messages + // that were in the inbox at the time the previous assertion was created + require( + afterInboxPosition == assertion.beforeStateData.configData.nextInboxPosition, "INCORRECT_INBOX_POS" + ); + // Assertions that finish correctly completely consume the message + // Therefore their position in the message is 0 + require(assertion.afterState.globalState.getPositionInMessage() == 0, "FINISHED_NON_ZERO_POS"); - memoryFrame.deadlineBlock = uint64(block.number) + confirmPeriodBlocks; + // We enforce that at least one inbox message is always consumed + // so the after inbox position is always strictly greater than previous + require(afterInboxPosition > prevInboxPosition, "INBOX_BACKWARDS"); + } - memoryFrame.hasSibling = memoryFrame.prevNode.latestChildNumber > 0; - // here we don't use ternacy operator to remain compatible with slither - if (memoryFrame.hasSibling) { - memoryFrame.lastHash = getNodeStorage(memoryFrame.prevNode.latestChildNumber) - .nodeHash; + uint256 currentInboxPosition = bridge.sequencerMessageCount(); + // Cannot read more messages than currently exist in the inbox + require(afterInboxPosition <= currentInboxPosition, "INBOX_PAST_END"); + + // The next assertion must consume all the messages that are currently found in the inbox + if (afterInboxPosition == currentInboxPosition) { + // No new messages have been added to the inbox since the last assertion + // In this case if we set the next inbox position to the current one we would be insisting that + // the next assertion process no messages. 
So instead we increment the next inbox position to current + // plus one, so that the next assertion will process exactly one message + nextInboxPosition = currentInboxPosition + 1; } else { - memoryFrame.lastHash = memoryFrame.prevNode.nodeHash; + nextInboxPosition = currentInboxPosition; } - newNodeHash = RollupLib.nodeHash( - memoryFrame.hasSibling, - memoryFrame.lastHash, - memoryFrame.executionHash, - memoryFrame.sequencerBatchAcc, - wasmModuleRoot - ); - require( - newNodeHash == expectedNodeHash || expectedNodeHash == bytes32(0), - "UNEXPECTED_NODE_HASH" - ); - - memoryFrame.node = NodeLib.createNode( - RollupLib.stateHash(assertion.afterState, memoryFrame.currentInboxSize), - RollupLib.challengeRootHash( - memoryFrame.executionHash, - block.number, - wasmModuleRoot - ), - RollupLib.confirmHash(assertion), - prevNodeNum, - memoryFrame.deadlineBlock, - newNodeHash - ); + // only the genesis assertion processes no messages, and that assertion is created + // when we initialize this contract. Therefore, all assertions created here should have a non + // zero inbox position. + require(afterInboxPosition != 0, "EMPTY_INBOX_COUNT"); + + // Fetch the inbox accumulator for this message count. Fetching this and checking against it + // allows the assertion creator to ensure they're creating an assertion against the expected + // inbox messages + sequencerBatchAcc = bridge.sequencerInboxAccs(afterInboxPosition - 1); } - { - uint64 nodeNum = latestNodeCreated() + 1; + bytes32 newAssertionHash = RollupLib.assertionHash(prevAssertionHash, assertion.afterState, sequencerBatchAcc); + + // allow an assertion creator to ensure that they're creating their assertion against the expected state + require( + newAssertionHash == expectedAssertionHash || expectedAssertionHash == bytes32(0), + "UNEXPECTED_ASSERTION_HASH" + ); - // Fetch a storage reference to prevNode since we copied our other one into memory - // and we don't have enough stack available to keep to keep the previous storage reference around - Node storage prevNode = getNodeStorage(prevNodeNum); - prevNode.childCreated(nodeNum); + // the assertion hash is unique - it's only possible to have one correct assertion hash + // per assertion. 
Therefore we can check if this assertion has already been made, and if so + // we can revert + require(getAssertionStorage(newAssertionHash).status == AssertionStatus.NoAssertion, "ASSERTION_SEEN"); + + // state updates + AssertionNode memory newAssertion = AssertionNodeLib.createAssertion( + prevAssertion.firstChildBlock == 0, // assumes block 0 is impossible + RollupLib.configHash({ + wasmModuleRoot: wasmModuleRoot, + requiredStake: baseStake, + challengeManager: address(challengeManager), + confirmPeriodBlocks: confirmPeriodBlocks, + nextInboxPosition: uint64(nextInboxPosition) + }) + ); - nodeCreated(memoryFrame.node); - } + // Fetch a storage reference to prevAssertion since we copied our other one into memory + // and we don't have enough stack available to keep to keep the previous storage reference around + prevAssertion.childCreated(); + _assertions[newAssertionHash] = newAssertion; - emit NodeCreated( - latestNodeCreated(), - memoryFrame.prevNode.nodeHash, - newNodeHash, - memoryFrame.executionHash, + emit AssertionCreated( + newAssertionHash, + prevAssertionHash, assertion, - memoryFrame.sequencerBatchAcc, + sequencerBatchAcc, + nextInboxPosition, wasmModuleRoot, - memoryFrame.currentInboxSize + baseStake, + address(challengeManager), + confirmPeriodBlocks ); + if (_hostChainIsArbitrum) { + _assertionCreatedAtArbSysBlock[newAssertionHash] = ArbSys(address(100)).arbBlockNumber(); + } + + return newAssertionHash; + } + + function genesisAssertionHash() external pure returns (bytes32) { + GlobalState memory emptyGlobalState; + ExecutionState memory emptyExecutionState = ExecutionState(emptyGlobalState, MachineStatus.FINISHED); + bytes32 parentAssertionHash = bytes32(0); + bytes32 inboxAcc = bytes32(0); + return RollupLib.assertionHash({ + parentAssertionHash: parentAssertionHash, + afterState: emptyExecutionState, + inboxAcc: inboxAcc + }); + } - return newNodeHash; + function getFirstChildCreationBlock(bytes32 assertionHash) external view returns (uint256) { + return getAssertionStorage(assertionHash).firstChildBlock; + } + + function getSecondChildCreationBlock(bytes32 assertionHash) external view returns (uint256) { + return getAssertionStorage(assertionHash).secondChildBlock; + } + + function validateAssertionHash( + bytes32 assertionHash, + ExecutionState calldata state, + bytes32 prevAssertionHash, + bytes32 inboxAcc + ) external pure { + require(assertionHash == RollupLib.assertionHash(prevAssertionHash, state, inboxAcc), "INVALID_ASSERTION_HASH"); + } + + function validateConfig(bytes32 assertionHash, ConfigData calldata configData) external view { + RollupLib.validateConfigHash(configData, getAssertionStorage(assertionHash).configHash); + } + + function isFirstChild(bytes32 assertionHash) external view returns (bool) { + return getAssertionStorage(assertionHash).isFirstChild; + } + + function isPending(bytes32 assertionHash) external view returns (bool) { + return getAssertionStorage(assertionHash).status == AssertionStatus.Pending; + } + + /** + * @notice Verify that the given staker is not active + * @param stakerAddress Address to check + */ + function requireInactiveStaker(address stakerAddress) internal view { + require(isStaked(stakerAddress), "NOT_STAKED"); + // A staker is inactive if + // a) their last staked assertion is the latest confirmed assertion + // b) their last staked assertion have a child + bytes32 lastestAssertion = latestStakedAssertion(stakerAddress); + bool isLatestConfirmed = lastestAssertion == latestConfirmed(); + bool haveChild = 
getAssertionStorage(lastestAssertion).firstChildBlock > 0; + require(isLatestConfirmed || haveChild, "STAKE_ACTIVE"); } } diff --git a/src/rollup/RollupCreator.sol b/src/rollup/RollupCreator.sol index 0d469a05..891cacdb 100644 --- a/src/rollup/RollupCreator.sol +++ b/src/rollup/RollupCreator.sol @@ -11,24 +11,20 @@ import "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.so import "@openzeppelin/contracts/access/Ownable.sol"; import "./RollupProxy.sol"; +import "./IRollupAdmin.sol"; contract RollupCreator is Ownable { event RollupCreated( - address indexed rollupAddress, - address inboxAddress, - address adminProxy, - address sequencerInbox, - address bridge + address indexed rollupAddress, address inboxAddress, address adminProxy, address sequencerInbox, address bridge ); event TemplatesUpdated(); BridgeCreator public bridgeCreator; IOneStepProofEntry public osp; - IChallengeManager public challengeManagerTemplate; + IEdgeChallengeManager public challengeManagerTemplate; IRollupAdmin public rollupAdminLogic; IRollupUser public rollupUserLogic; - address public validatorUtils; address public validatorWalletCreator; constructor() Ownable() {} @@ -36,10 +32,9 @@ contract RollupCreator is Ownable { function setTemplates( BridgeCreator _bridgeCreator, IOneStepProofEntry _osp, - IChallengeManager _challengeManagerLogic, + IEdgeChallengeManager _challengeManagerLogic, IRollupAdmin _rollupAdminLogic, IRollupUser _rollupUserLogic, - address _validatorUtils, address _validatorWalletCreator ) external onlyOwner { bridgeCreator = _bridgeCreator; @@ -47,18 +42,65 @@ contract RollupCreator is Ownable { challengeManagerTemplate = _challengeManagerLogic; rollupAdminLogic = _rollupAdminLogic; rollupUserLogic = _rollupUserLogic; - validatorUtils = _validatorUtils; validatorWalletCreator = _validatorWalletCreator; emit TemplatesUpdated(); } - // After this setup: - // Rollup should be the owner of bridge - // RollupOwner should be the owner of Rollup's ProxyAdmin - // RollupOwner should be the owner of Rollup - // Bridge should have a single inbox and outbox + // internal function to workaround stack limit + function createChallengeManager(address rollupAddr, address proxyAdminAddr, Config memory config) + internal + returns (IEdgeChallengeManager) + { + IEdgeChallengeManager challengeManager = IEdgeChallengeManager( + address( + new TransparentUpgradeableProxy( + address(challengeManagerTemplate), + proxyAdminAddr, + "" + ) + ) + ); + + challengeManager.initialize({ + _assertionChain: IAssertionChain(rollupAddr), + _challengePeriodBlocks: config.confirmPeriodBlocks, + _oneStepProofEntry: osp, + layerZeroBlockEdgeHeight: config.layerZeroBlockEdgeHeight, + layerZeroBigStepEdgeHeight: config.layerZeroBigStepEdgeHeight, + layerZeroSmallStepEdgeHeight: config.layerZeroSmallStepEdgeHeight, + _stakeToken: IERC20(config.stakeToken), + _stakeAmount: config.miniStakeValue, + _excessStakeReceiver: config.owner + }); + + return challengeManager; + } + function createRollup(Config memory config) external returns (address) { + return createRollup(config, address(0), new address[](0), false); + } + + /** + * @notice Create a new rollup + * @dev After this setup: + * @dev - Rollup should be the owner of bridge + * @dev - RollupOwner should be the owner of Rollup's ProxyAdmin + * @dev - RollupOwner should be the owner of Rollup + * @dev - Bridge should have a single inbox and outbox + * @dev - Validators and batch poster should be set if provided + * @param config The configuration for the rollup + * 
@param _batchPoster The address of the batch poster, not used when set to zero address + * @param _validators The list of validator addresses, not used when set to empty list + * @return The address of the newly created rollup + */ + function createRollup( + Config memory config, + address _batchPoster, + address[] memory _validators, + bool disableValidatorWhitelist + ) public returns (address) { ProxyAdmin proxyAdmin = new ProxyAdmin(); + proxyAdmin.transferOwnership(config.owner); // Create the rollup proxy to figure out the address and initialize it later RollupProxy rollup = new RollupProxy{salt: keccak256(abi.encode(config))}(); @@ -69,29 +111,14 @@ contract RollupCreator is Ownable { IInbox inbox, IRollupEventInbox rollupEventInbox, IOutbox outbox - ) = bridgeCreator.createBridge( - address(proxyAdmin), - address(rollup), - config.sequencerInboxMaxTimeVariation - ); + ) = bridgeCreator.createBridge(address(proxyAdmin), address(rollup), config.sequencerInboxMaxTimeVariation); - proxyAdmin.transferOwnership(config.owner); + IEdgeChallengeManager challengeManager = createChallengeManager(address(rollup), address(proxyAdmin), config); - IChallengeManager challengeManager = IChallengeManager( - address( - new TransparentUpgradeableProxy( - address(challengeManagerTemplate), - address(proxyAdmin), - "" - ) - ) - ); - challengeManager.initialize( - IChallengeResultReceiver(address(rollup)), - sequencerInbox, - bridge, - osp - ); + // initialize the rollup with this contract as owner to set batch poster and validators + // it will transfer the ownership back to the actual owner later + address actualOwner = config.owner; + config.owner = address(this); rollup.initializeProxy( config, @@ -104,17 +131,29 @@ contract RollupCreator is Ownable { challengeManager: challengeManager, rollupAdminLogic: address(rollupAdminLogic), rollupUserLogic: rollupUserLogic, - validatorUtils: validatorUtils, validatorWalletCreator: validatorWalletCreator }) ); + // setting batch poster, if the address provided is not zero address + if (_batchPoster != address(0)) { + sequencerInbox.setIsBatchPoster(_batchPoster, true); + } + // Call setValidator on the newly created rollup contract just if validator set is not empty + if (_validators.length != 0) { + bool[] memory _vals = new bool[](_validators.length); + for (uint256 i = 0; i < _validators.length; i++) { + _vals[i] = true; + } + IRollupAdmin(address(rollup)).setValidator(_validators, _vals); + } + if(disableValidatorWhitelist == true) { + IRollupAdmin(address(rollup)).setValidatorWhitelistDisabled(disableValidatorWhitelist); + } + IRollupAdmin(address(rollup)).setOwner(actualOwner); + emit RollupCreated( - address(rollup), - address(inbox), - address(proxyAdmin), - address(sequencerInbox), - address(bridge) + address(rollup), address(inbox), address(proxyAdmin), address(sequencerInbox), address(bridge) ); return address(rollup); } diff --git a/src/rollup/RollupEventInbox.sol b/src/rollup/RollupEventInbox.sol index 4641930a..784840e4 100644 --- a/src/rollup/RollupEventInbox.sol +++ b/src/rollup/RollupEventInbox.sol @@ -7,7 +7,9 @@ pragma solidity ^0.8.0; import "./IRollupEventInbox.sol"; import "../bridge/IBridge.sol"; import "../bridge/IDelayedMessageProvider.sol"; +import "../precompiles/ArbGasInfo.sol"; import "../libraries/DelegateCallAware.sol"; +import "../libraries/ArbitrumChecker.sol"; import {INITIALIZATION_MSG_TYPE} from "../libraries/MessageTypes.sol"; import {AlreadyInit, HadZeroInit} from "../libraries/Error.sol"; @@ -30,13 +32,28 @@ contract 
RollupEventInbox is IRollupEventInbox, IDelayedMessageProvider, Delegat rollup = address(_bridge.rollup()); } + /// @notice Allows the proxy owner to set the rollup address + function updateRollupAddress() external onlyDelegated onlyProxyOwner { + rollup = address(bridge.rollup()); + } + function rollupInitialized(uint256 chainId, string calldata chainConfig) external override onlyRollup { require(bytes(chainConfig).length > 0, "EMPTY_CHAIN_CONFIG"); - bytes memory initMsg = abi.encodePacked(chainId, uint8(0), chainConfig); + uint8 initMsgVersion = 1; + uint256 currentDataCost = block.basefee; + if (ArbitrumChecker.runningOnArbitrum()) { + currentDataCost += ArbGasInfo(address(0x6c)).getL1BaseFeeEstimate(); + } + bytes memory initMsg = abi.encodePacked( + chainId, + initMsgVersion, + currentDataCost, + chainConfig + ); uint256 num = bridge.enqueueDelayedMessage( INITIALIZATION_MSG_TYPE, address(0), diff --git a/src/rollup/RollupLib.sol b/src/rollup/RollupLib.sol index 82b9571d..7eac4a9a 100644 --- a/src/rollup/RollupLib.sol +++ b/src/rollup/RollupLib.sol @@ -4,16 +4,16 @@ pragma solidity ^0.8.0; -import "../challenge/IChallengeManager.sol"; -import "../challenge/ChallengeLib.sol"; +import "../challenge/OldChallengeLib.sol"; import "../state/GlobalState.sol"; import "../bridge/ISequencerInbox.sol"; import "../bridge/IBridge.sol"; import "../bridge/IOutbox.sol"; import "../bridge/IInbox.sol"; -import "./Node.sol"; +import "./Assertion.sol"; import "./IRollupEventInbox.sol"; +import "../challengeV2/EdgeChallengeManager.sol"; library RollupLib { using GlobalStateLib for GlobalState; @@ -49,65 +49,77 @@ library RollupLib { ); } - function executionHash(Assertion memory assertion) internal pure returns (bytes32) { - MachineStatus[2] memory statuses; - statuses[0] = assertion.beforeState.machineStatus; - statuses[1] = assertion.afterState.machineStatus; - GlobalState[2] memory globalStates; - globalStates[0] = assertion.beforeState.globalState; - globalStates[1] = assertion.afterState.globalState; - // TODO: benchmark how much this abstraction adds of gas overhead - return executionHash(statuses, globalStates, assertion.numBlocks); + // Not the same as a machine hash for a given execution state + function executionStateHash(ExecutionState memory state) internal pure returns (bytes32) { + return keccak256(abi.encodePacked(state.machineStatus, state.globalState.hash())); } - function executionHash( - MachineStatus[2] memory statuses, - GlobalState[2] memory globalStates, - uint64 numBlocks + // The `assertionHash` contains all the information needed to determine an assertion's validity. + // This helps protect validators against reorgs by letting them bind their assertion to the current chain state. 
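The two comment lines above state the core property of the new hashing scheme: an assertion hash commits to its parent's hash, to a hash of the after state, and to the inbox accumulator, so each assertion implicitly commits to its entire ancestry. Below is a minimal stand-alone sketch of that chaining for reference; it only mirrors the keccak256/abi.encodePacked encodings visible in this hunk, and the names AssertionHashSketch, machineStatus and globalStateHash are the sketch's own, not part of the changeset.

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

/// Illustrative only: rebuilds the chained assertion hash with the same packed
/// encodings as RollupLib.executionStateHash / RollupLib.assertionHash shown above.
library AssertionHashSketch {
    // keccak256 over (machineStatus, globalStateHash) packed: the "after state hash" leg
    function afterStateHash(uint8 machineStatus, bytes32 globalStateHash) internal pure returns (bytes32) {
        return keccak256(abi.encodePacked(machineStatus, globalStateHash));
    }

    // keccak256 over (parentAssertionHash, afterStateHash, inboxAcc) packed: since the parent
    // hash was built the same way, every assertion commits to its whole chain of ancestors
    function chainedAssertionHash(bytes32 parentAssertionHash, bytes32 afterStateHash_, bytes32 inboxAcc)
        internal
        pure
        returns (bytes32)
    {
        return keccak256(abi.encodePacked(parentAssertionHash, afterStateHash_, inboxAcc));
    }
}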
+ function assertionHash( + bytes32 parentAssertionHash, + ExecutionState memory afterState, + bytes32 inboxAcc ) internal pure returns (bytes32) { - bytes32[] memory segments = new bytes32[](2); - segments[0] = ChallengeLib.blockStateHash(statuses[0], globalStates[0].hash()); - segments[1] = ChallengeLib.blockStateHash(statuses[1], globalStates[1].hash()); - return ChallengeLib.hashChallengeState(0, numBlocks, segments); + // we can no longer have `hasSibling` in the assertion hash as it would allow identical assertions + return assertionHash( + parentAssertionHash, + executionStateHash(afterState), + inboxAcc + ); } - function challengeRootHash( - bytes32 execution, - uint256 proposedTime, - bytes32 wasmModuleRoot + // Takes in a hash of the afterState instead of the afterState itself + function assertionHash( + bytes32 parentAssertionHash, + bytes32 afterStateHash, + bytes32 inboxAcc ) internal pure returns (bytes32) { - return keccak256(abi.encodePacked(execution, proposedTime, wasmModuleRoot)); - } - - function confirmHash(Assertion memory assertion) internal pure returns (bytes32) { + // we can no longer have `hasSibling` in the assertion hash as it would allow identical assertions return - confirmHash( - assertion.afterState.globalState.getBlockHash(), - assertion.afterState.globalState.getSendRoot() + keccak256( + abi.encodePacked( + parentAssertionHash, + afterStateHash, + inboxAcc + ) ); } - function confirmHash(bytes32 blockHash, bytes32 sendRoot) internal pure returns (bytes32) { - return keccak256(abi.encodePacked(blockHash, sendRoot)); - } - - function nodeHash( - bool hasSibling, - bytes32 lastHash, - bytes32 assertionExecHash, - bytes32 inboxAcc, - bytes32 wasmModuleRoot + // All these should be emited in AssertionCreated event + function configHash( + bytes32 wasmModuleRoot, + uint256 requiredStake, + address challengeManager, + uint64 confirmPeriodBlocks, + uint64 nextInboxPosition ) internal pure returns (bytes32) { - uint8 hasSiblingInt = hasSibling ? 
1 : 0; return keccak256( abi.encodePacked( - hasSiblingInt, - lastHash, - assertionExecHash, - inboxAcc, - wasmModuleRoot + wasmModuleRoot, + requiredStake, + challengeManager, + confirmPeriodBlocks, + nextInboxPosition ) ); } + + function validateConfigHash( + ConfigData calldata configData, + bytes32 _configHash + ) internal pure { + require( + _configHash + == configHash( + configData.wasmModuleRoot, + configData.requiredStake, + configData.challengeManager, + configData.confirmPeriodBlocks, + configData.nextInboxPosition + ), + "CONFIG_HASH_MISMATCH" + ); + } } diff --git a/src/rollup/RollupProxy.sol b/src/rollup/RollupProxy.sol index 2938a585..3483e596 100644 --- a/src/rollup/RollupProxy.sol +++ b/src/rollup/RollupProxy.sol @@ -19,13 +19,13 @@ contract RollupProxy is AdminFallbackProxy { ) { _initialize( address(connectedContracts.rollupAdminLogic), - abi.encodeWithSelector( - IRollupAdmin.initialize.selector, - config, - connectedContracts + abi.encodeCall( + IRollupAdmin.initialize, + (config, + connectedContracts) ), address(connectedContracts.rollupUserLogic), - abi.encodeWithSelector(IRollupUserAbs.initialize.selector, config.stakeToken), + abi.encodeCall(IRollupUser.initialize, (config.stakeToken)), config.owner ); } else { diff --git a/src/rollup/RollupUserLogic.sol b/src/rollup/RollupUserLogic.sol index bd16ad5e..c284bd27 100644 --- a/src/rollup/RollupUserLogic.sol +++ b/src/rollup/RollupUserLogic.sol @@ -4,29 +4,30 @@ pragma solidity ^0.8.0; -import "@openzeppelin/contracts-upgradeable/token/ERC20/IERC20Upgradeable.sol"; +import "@openzeppelin/contracts/token/ERC20/IERC20.sol"; import {IRollupUser} from "./IRollupLogic.sol"; import "../libraries/UUPSNotUpgradeable.sol"; import "./RollupCore.sol"; import "./IRollupLogic.sol"; - import {ETH_POS_BLOCK_TIME} from "../libraries/Constants.sol"; -abstract contract AbsRollupUserLogic is - RollupCore, - UUPSNotUpgradeable, - IRollupUserAbs, - IChallengeResultReceiver -{ - using NodeLib for Node; +contract RollupUserLogic is RollupCore, UUPSNotUpgradeable, IRollupUser { + using AssertionNodeLib for AssertionNode; using GlobalStateLib for GlobalState; + using SafeERC20 for IERC20; modifier onlyValidator() { require(isValidator[msg.sender] || validatorWhitelistDisabled, "NOT_VALIDATOR"); _; } + /// @dev the user logic just validated configuration and shouldn't write to state during init + /// this allows the admin logic to ensure consistency on parameters. + function initialize(address _stakeToken) external view override onlyProxy { + require(_stakeToken != address(0), "NEED_STAKE_TOKEN"); + } + uint256 internal immutable deployTimeChainId = block.chainid; function _chainIdChanged() internal view returns (bool) { @@ -34,18 +35,28 @@ abstract contract AbsRollupUserLogic is } /** - * @notice Extra number of blocks the validator can remain inactive before considered inactive - * This is 7 days assuming a 13.2 seconds block time + * @notice Number of blocks since the last confirmed assertion before the validator whitelist is removed + * This is 28 days assuming a 12 seconds block time. Since it can take 14 days under normal + * circumstances to confirm an assertion, this means that validators will have been inactive for + * a further 14 days before the validator whitelist is removed. + * + * It's important that this time is greater than the max amount of time it can take to + * to confirm an assertion via the normal method. Therefore we need it to be greater + * than max(2* confirmPeriod, 2 * challengePeriod). 
With some additional margin + * It's expected that initially confirm and challenge periods are set to 1 week, so 4 weeks + * should give a two weeks of margin before the validators are considered afk. */ - uint256 public constant VALIDATOR_AFK_BLOCKS = 45818; + uint256 public constant VALIDATOR_AFK_BLOCKS = 201600; function _validatorIsAfk() internal view returns (bool) { - Node memory latestNode = getNodeStorage(latestNodeCreated()); - if (latestNode.createdAtBlock == 0) return false; - if (latestNode.createdAtBlock + confirmPeriodBlocks + VALIDATOR_AFK_BLOCKS < block.number) { - return true; + AssertionNode memory latestConfirmedAssertion = getAssertionStorage(latestConfirmed()); + if (latestConfirmedAssertion.createdAtBlock == 0) return false; + // We consider the validator is gone if the last known assertion is older than VALIDATOR_AFK_BLOCKS + // Which is either the latest confirmed assertion or the first child of the latest confirmed assertion + if (latestConfirmedAssertion.firstChildBlock > 0) { + return latestConfirmedAssertion.firstChildBlock + VALIDATOR_AFK_BLOCKS < block.number; } - return false; + return latestConfirmedAssertion.createdAtBlock + VALIDATOR_AFK_BLOCKS < block.number; } function removeWhitelistAfterFork() external { @@ -54,100 +65,62 @@ abstract contract AbsRollupUserLogic is validatorWhitelistDisabled = true; } + /** + * @notice Remove the whitelist after the validator has been inactive for too long + */ function removeWhitelistAfterValidatorAfk() external { require(!validatorWhitelistDisabled, "WHITELIST_DISABLED"); require(_validatorIsAfk(), "VALIDATOR_NOT_AFK"); validatorWhitelistDisabled = true; } - function isERC20Enabled() public view override returns (bool) { - return stakeToken != address(0); - } - - /** - * @notice Reject the next unresolved node - * @param stakerAddress Example staker staked on sibling, used to prove a node is on an unconfirmable branch and can be rejected - */ - function rejectNextNode(address stakerAddress) external onlyValidator whenNotPaused { - requireUnresolvedExists(); - uint64 latestConfirmedNodeNum = latestConfirmed(); - uint64 firstUnresolvedNodeNum = firstUnresolvedNode(); - Node storage firstUnresolvedNode_ = getNodeStorage(firstUnresolvedNodeNum); - - if (firstUnresolvedNode_.prevNum == latestConfirmedNodeNum) { - /**If the first unresolved node is a child of the latest confirmed node, to prove it can be rejected, we show: - * a) Its deadline has expired - * b) *Some* staker is staked on a sibling - - * The following three checks are sufficient to prove b: - */ - - // 1. StakerAddress is indeed a staker - require(isStakedOnLatestConfirmed(stakerAddress), "NOT_STAKED"); - - // 2. Staker's latest staked node hasn't been resolved; this proves that staker's latest staked node can't be a parent of firstUnresolvedNode - requireUnresolved(latestStakedNode(stakerAddress)); - - // 3. 
staker isn't staked on first unresolved node; this proves staker's latest staked can't be a child of firstUnresolvedNode (recall staking on node requires staking on all of its parents) - require(!nodeHasStaker(firstUnresolvedNodeNum, stakerAddress), "STAKED_ON_TARGET"); - // If a staker is staked on a node that is neither a child nor a parent of firstUnresolvedNode, it must be a sibling, QED - - // Verify the block's deadline has passed - firstUnresolvedNode_.requirePastDeadline(); - - getNodeStorage(latestConfirmedNodeNum).requirePastChildConfirmDeadline(); - - removeOldZombies(0); - - // Verify that no staker is staked on this node - require( - firstUnresolvedNode_.stakerCount == countStakedZombies(firstUnresolvedNodeNum), - "HAS_STAKERS" - ); - } - // Simpler case: if the first unreseolved node doesn't point to the last confirmed node, another branch was confirmed and can simply reject it outright - _rejectNextNode(); - - emit NodeRejected(firstUnresolvedNodeNum); - } - /** - * @notice Confirm the next unresolved node - * @param blockHash The block hash at the end of the assertion - * @param sendRoot The send root at the end of the assertion + * @notice Confirm a unresolved assertion + * @param confirmState The state to confirm + * @param winningEdgeId The winning edge if a challenge is started */ - function confirmNextNode(bytes32 blockHash, bytes32 sendRoot) - external - onlyValidator - whenNotPaused - { - requireUnresolvedExists(); - - uint64 nodeNum = firstUnresolvedNode(); - Node storage node = getNodeStorage(nodeNum); - - // Verify the block's deadline has passed - node.requirePastDeadline(); + function confirmAssertion( + bytes32 assertionHash, + bytes32 prevAssertionHash, + ExecutionState calldata confirmState, + bytes32 winningEdgeId, + ConfigData calldata prevConfig, + bytes32 inboxAcc + ) external onlyValidator whenNotPaused { + /* + * To confirm an assertion, the following must be true: + * 1. The assertion must be pending + * 2. The assertion's deadline must have passed + * 3. The assertion's prev must be latest confirmed + * 4. The assertion's prev's child confirm deadline must have passed + * 5. 
If the assertion's prev has more than 1 child, the assertion must be the winner of the challenge + * + * Note that we do not ever need to reject invalid assertions because they can never confirm + * and the stake on them is swept to the loserStakeEscrow as soon as the leaf is created + */ + + // The assertion must exist and be pending, and will be validated in RollupCore.confirmAssertionInternal + AssertionNode storage assertion = getAssertionStorage(assertionHash); + + // prevAssertionHash is user supplied, but will be validated in RollupCore.confirmAssertionInternal + AssertionNode storage prevAssertion = getAssertionStorage(prevAssertionHash); + RollupLib.validateConfigHash(prevConfig, prevAssertion.configHash); + + // Check that deadline has passed + require(block.number >= assertion.createdAtBlock + prevConfig.confirmPeriodBlocks, "BEFORE_DEADLINE"); // Check that prev is latest confirmed - assert(node.prevNum == latestConfirmed()); - - Node storage prevNode = getNodeStorage(node.prevNum); - prevNode.requirePastChildConfirmDeadline(); - - removeOldZombies(0); - - // Require only zombies are staked on siblings to this node, and there's at least one non-zombie staked on this node - uint256 stakedZombies = countStakedZombies(nodeNum); - uint256 zombiesStakedOnOtherChildren = countZombiesStakedOnChildren(node.prevNum) - - stakedZombies; - require(node.stakerCount > stakedZombies, "NO_STAKERS"); - require( - prevNode.childStakerCount == node.stakerCount + zombiesStakedOnOtherChildren, - "NOT_ALL_STAKED" - ); + require(prevAssertionHash == latestConfirmed(), "PREV_NOT_LATEST_CONFIRMED"); + + if (prevAssertion.secondChildBlock > 0) { + // if the prev has more than 1 child, check if this assertion is the challenge winner + RollupLib.validateConfigHash(prevConfig, prevAssertion.configHash); + ChallengeEdge memory winningEdge = challengeManager.getEdge(winningEdgeId); + require(winningEdge.claimId == assertionHash, "NOT_WINNER"); + require(winningEdge.status == EdgeStatus.Confirmed, "EDGE_NOT_CONFIRMED"); } - confirmNode(nodeNum, blockHash, sendRoot); + confirmAssertionInternal(assertionHash, prevAssertionHash, confirmState, inboxAcc); } /** @@ -157,558 +130,189 @@ abstract contract AbsRollupUserLogic is function _newStake(uint256 depositAmount) internal onlyValidator whenNotPaused { // Verify that sender is not already a staker require(!isStaked(msg.sender), "ALREADY_STAKED"); - require(!isZombie(msg.sender), "STAKER_IS_ZOMBIE"); - require(depositAmount >= currentRequiredStake(), "NOT_ENOUGH_STAKE"); - + // amount will be checked when creating an assertion createNewStake(msg.sender, depositAmount); } /** - * @notice Move stake onto existing child node - * @param nodeNum Index of the node to move stake to. 
This must by a child of the node the staker is currently staked on - * @param nodeHash Node hash of nodeNum (protects against reorgs) + * @notice Computes the hash of an assertion + * @param state The execution state for the assertion + * @param prevAssertionHash The hash of the assertion's parent + * @param inboxAcc The inbox batch accumulator */ - function stakeOnExistingNode(uint64 nodeNum, bytes32 nodeHash) + function computeAssertionHash(bytes32 prevAssertionHash, ExecutionState calldata state, bytes32 inboxAcc) public - onlyValidator - whenNotPaused + pure + returns (bytes32) { - require(isStakedOnLatestConfirmed(msg.sender), "NOT_STAKED"); - - require( - nodeNum >= firstUnresolvedNode() && nodeNum <= latestNodeCreated(), - "NODE_NUM_OUT_OF_RANGE" - ); - Node storage node = getNodeStorage(nodeNum); - require(node.nodeHash == nodeHash, "NODE_REORG"); - require(latestStakedNode(msg.sender) == node.prevNum, "NOT_STAKED_PREV"); - stakeOnNode(msg.sender, nodeNum); + return RollupLib.assertionHash(prevAssertionHash, state, inboxAcc); } /** - * @notice Create a new node and move stake onto it + * @notice Create a new assertion and move stake onto it * @param assertion The assertion data - * @param expectedNodeHash The hash of the node being created (protects against reorgs) + * @param expectedAssertionHash The hash of the assertion being created (protects against reorgs) */ - function stakeOnNewNode( - Assertion calldata assertion, - bytes32 expectedNodeHash, - uint256 prevNodeInboxMaxCount - ) public onlyValidator whenNotPaused { - require(isStakedOnLatestConfirmed(msg.sender), "NOT_STAKED"); - // Ensure staker is staked on the previous node - uint64 prevNode = latestStakedNode(msg.sender); - - { - uint256 timeSinceLastNode = block.number - getNode(prevNode).createdAtBlock; - // Verify that assertion meets the minimum Delta time requirement - require(timeSinceLastNode >= minimumAssertionPeriod, "TIME_DELTA"); - - // Minimum size requirement: any assertion must consume at least all inbox messages - // put into L1 inbox before the prev node’s L1 blocknum. - // We make an exception if the machine enters the errored state, - // as it can't consume future batches. - require( - assertion.afterState.machineStatus == MachineStatus.ERRORED || - assertion.afterState.globalState.getInboxPosition() >= prevNodeInboxMaxCount, - "TOO_SMALL" - ); - // Minimum size requirement: any assertion must contain at least one block - require(assertion.numBlocks > 0, "EMPTY_ASSERTION"); - - // The rollup cannot advance normally from an errored state - require( - assertion.beforeState.machineStatus == MachineStatus.FINISHED, - "BAD_PREV_STATUS" - ); - } - createNewNode(assertion, prevNode, prevNodeInboxMaxCount, expectedNodeHash); - - stakeOnNode(msg.sender, latestNodeCreated()); - } - - /** - * @notice Refund a staker that is currently staked on or before the latest confirmed node - * @dev Since a staker is initially placed in the latest confirmed node, if they don't move it - * a griefer can remove their stake. It is recomended to batch together the txs to place a stake - * and move it to the desired node. 
- * @param stakerAddress Address of the staker whose stake is refunded - */ - function returnOldDeposit(address stakerAddress) external override onlyValidator whenNotPaused { - require(latestStakedNode(stakerAddress) <= latestConfirmed(), "TOO_RECENT"); - requireUnchallengedStaker(stakerAddress); - withdrawStaker(stakerAddress); - } - - /** - * @notice Increase the amount staked for the given staker - * @param stakerAddress Address of the staker whose stake is increased - * @param depositAmount The amount of either eth or tokens deposited - */ - function _addToDeposit(address stakerAddress, uint256 depositAmount) - internal + function stakeOnNewAssertion(AssertionInputs calldata assertion, bytes32 expectedAssertionHash) + public onlyValidator whenNotPaused { - requireUnchallengedStaker(stakerAddress); - increaseStakeBy(stakerAddress, depositAmount); - } - - /** - * @notice Reduce the amount staked for the sender (difference between initial amount staked and target is creditted back to the sender). - * @param target Target amount of stake for the staker. If this is below the current minimum, it will be set to minimum instead - */ - function reduceDeposit(uint256 target) external onlyValidator whenNotPaused { - requireUnchallengedStaker(msg.sender); - uint256 currentRequired = currentRequiredStake(); - if (target < currentRequired) { - target = currentRequired; - } - reduceStakeTo(msg.sender, target); - } - - /** - * @notice Start a challenge between the given stakers over the node created by the first staker assuming that the two are staked on conflicting nodes. N.B.: challenge creator does not necessarily need to be one of the two asserters. - * @param stakers Stakers engaged in the challenge. The first staker should be staked on the first node - * @param nodeNums Nodes of the stakers engaged in the challenge. 
The first node should be the earliest and is the one challenged - * @param machineStatuses The before and after machine status for the first assertion - * @param globalStates The before and after global state for the first assertion - * @param numBlocks The number of L2 blocks contained in the first assertion - * @param secondExecutionHash The execution hash of the second assertion - * @param proposedBlocks L1 block numbers that the two nodes were proposed at - * @param wasmModuleRoots The wasm module roots at the time of the creation of each assertion - */ - function createChallenge( - address[2] calldata stakers, - uint64[2] calldata nodeNums, - MachineStatus[2] calldata machineStatuses, - GlobalState[2] calldata globalStates, - uint64 numBlocks, - bytes32 secondExecutionHash, - uint256[2] calldata proposedBlocks, - bytes32[2] calldata wasmModuleRoots - ) external onlyValidator whenNotPaused { - require(nodeNums[0] < nodeNums[1], "WRONG_ORDER"); - require(nodeNums[1] <= latestNodeCreated(), "NOT_PROPOSED"); - require(latestConfirmed() < nodeNums[0], "ALREADY_CONFIRMED"); - - Node storage node1 = getNodeStorage(nodeNums[0]); - Node storage node2 = getNodeStorage(nodeNums[1]); - - // ensure nodes staked on the same parent (and thus in conflict) - require(node1.prevNum == node2.prevNum, "DIFF_PREV"); + // Early revert on duplicated assertion if expectedAssertionHash is set + require( + expectedAssertionHash == bytes32(0) + || getAssertionStorage(expectedAssertionHash).status == AssertionStatus.NoAssertion, + "EXPECTED_ASSERTION_SEEN" + ); - // ensure both stakers aren't currently in challenge - requireUnchallengedStaker(stakers[0]); - requireUnchallengedStaker(stakers[1]); + require(isStaked(msg.sender), "NOT_STAKED"); - require(nodeHasStaker(nodeNums[0], stakers[0]), "STAKER1_NOT_STAKED"); - require(nodeHasStaker(nodeNums[1], stakers[1]), "STAKER2_NOT_STAKED"); + // requiredStake is user supplied, will be verified against configHash later + // the prev's requiredStake is used to make sure all children have the same stake + // the staker may have more than enough stake, and the entire stake will be locked + // we cannot do a refund here because the staker may be staker on an unconfirmed ancestor that requires more stake + // excess stake can be removed by calling reduceDeposit when the staker is inactive + require(amountStaked(msg.sender) >= assertion.beforeStateData.configData.requiredStake, "INSUFFICIENT_STAKE"); - // Check param data against challenge hash - require( - node1.challengeHash == - RollupLib.challengeRootHash( - RollupLib.executionHash(machineStatuses, globalStates, numBlocks), - proposedBlocks[0], - wasmModuleRoots[0] - ), - "CHAL_HASH1" + bytes32 prevAssertion = RollupLib.assertionHash( + assertion.beforeStateData.prevPrevAssertionHash, + assertion.beforeState, + assertion.beforeStateData.sequencerBatchAcc ); + getAssertionStorage(prevAssertion).requireExists(); + // Staker can create new assertion only if + // a) its last staked assertion is the prev; or + // b) its last staked assertion have a child + bytes32 lastAssertion = latestStakedAssertion(msg.sender); require( - node2.challengeHash == - RollupLib.challengeRootHash( - secondExecutionHash, - proposedBlocks[1], - wasmModuleRoots[1] - ), - "CHAL_HASH2" + lastAssertion == prevAssertion || getAssertionStorage(lastAssertion).firstChildBlock > 0, + "STAKED_ON_ANOTHER_BRANCH" ); - // Calculate upper limit for allowed node proposal time: - uint256 commonEndBlock = getNodeStorage(node1.prevNum).firstChildBlock + - // Dispute 
start: dispute timer for a node starts when its first child is created - (node1.deadlineBlock - proposedBlocks[0]) + - extraChallengeTimeBlocks; // add dispute window to dispute start time - if (commonEndBlock < proposedBlocks[1]) { - // The 2nd node was created too late; loses challenge automatically. - completeChallengeImpl(stakers[0], stakers[1]); - return; - } - // Start a challenge between staker1 and staker2. Staker1 will defend the correctness of node1, and staker2 will challenge it. - uint64 challengeIndex = createChallengeHelper( - stakers, - machineStatuses, - globalStates, - numBlocks, - wasmModuleRoots, - // convert from block counts to real second based timestamps - (commonEndBlock - proposedBlocks[0]) * ETH_POS_BLOCK_TIME, - (commonEndBlock - proposedBlocks[1]) * ETH_POS_BLOCK_TIME - ); // trusted external call - - challengeStarted(stakers[0], stakers[1], challengeIndex); - - emit RollupChallengeStarted(challengeIndex, stakers[0], stakers[1], nodeNums[0]); - } + // We assume assertion.beforeStateData is valid here as it will be validated in createNewAssertion - function createChallengeHelper( - address[2] calldata stakers, - MachineStatus[2] calldata machineStatuses, - GlobalState[2] calldata globalStates, - uint64 numBlocks, - bytes32[2] calldata wasmModuleRoots, - uint256 asserterTimeLeft, - uint256 challengerTimeLeft - ) internal returns (uint64) { - return - challengeManager.createChallenge( - wasmModuleRoots[0], - machineStatuses, - globalStates, - numBlocks, - stakers[0], - stakers[1], - asserterTimeLeft, - challengerTimeLeft - ); - } + uint256 timeSincePrev = block.number - getAssertionStorage(prevAssertion).createdAtBlock; + // Verify that assertion meets the minimum Delta time requirement + require(timeSincePrev >= minimumAssertionPeriod, "TIME_DELTA"); - /** - * @notice Inform the rollup that the challenge between the given stakers is completed - * @param winningStaker Address of the winning staker - * @param losingStaker Address of the losing staker - */ - function completeChallenge( - uint256 challengeIndex, - address winningStaker, - address losingStaker - ) external override whenNotPaused { - // Only the challenge manager contract can call this to declare the winner and loser - require(msg.sender == address(challengeManager), "WRONG_SENDER"); - require(challengeIndex == inChallenge(winningStaker, losingStaker), "NOT_IN_CHAL"); - completeChallengeImpl(winningStaker, losingStaker); - } + bytes32 newAssertionHash = createNewAssertion(assertion, prevAssertion, expectedAssertionHash); + _stakerMap[msg.sender].latestStakedAssertion = newAssertionHash; - function completeChallengeImpl(address winningStaker, address losingStaker) private { - uint256 remainingLoserStake = amountStaked(losingStaker); - uint256 winnerStake = amountStaked(winningStaker); - if (remainingLoserStake > winnerStake) { - // If loser has a higher stake than the winner, refund the difference - remainingLoserStake -= reduceStakeTo(losingStaker, winnerStake); + if (!getAssertionStorage(newAssertionHash).isFirstChild) { + // only 1 of the children can be confirmed and get their stake refunded + // so we send the other children's stake to the loserStakeEscrow + // NOTE: if the losing staker have staked more than requiredStake, the excess stake will be stuck + increaseWithdrawableFunds(loserStakeEscrow, assertion.beforeStateData.configData.requiredStake); } - - // Reward the winner with half the remaining stake - uint256 amountWon = remainingLoserStake / 2; - increaseStakeBy(winningStaker, amountWon); - 
remainingLoserStake -= amountWon; - // We deliberately leave loser in challenge state to prevent them from - // doing certain thing that are allowed only to parties not in a challenge - clearChallenge(winningStaker); - // Credit the other half to the loserStakeEscrow address - increaseWithdrawableFunds(loserStakeEscrow, remainingLoserStake); - // Turning loser into zombie renders the loser's remaining stake inaccessible - turnIntoZombie(losingStaker); } /** - * @notice Remove the given zombie from nodes it is staked on, moving backwords from the latest node it is staked on - * @param zombieNum Index of the zombie to remove - * @param maxNodes Maximum number of nodes to remove the zombie from (to limit the cost of this transaction) + * @notice Refund a staker that is currently staked on an assertion that either has a chlid assertion or is the latest confirmed assertion. */ - function removeZombie(uint256 zombieNum, uint256 maxNodes) - external - onlyValidator - whenNotPaused - { - require(zombieNum < zombieCount(), "NO_SUCH_ZOMBIE"); - address zombieStakerAddress = zombieAddress(zombieNum); - uint64 latestNodeStaked = zombieLatestStakedNode(zombieNum); - uint256 nodesRemoved = 0; - uint256 latestConfirmedNum = latestConfirmed(); - while (latestNodeStaked >= latestConfirmedNum && nodesRemoved < maxNodes) { - Node storage node = getNodeStorage(latestNodeStaked); - removeStaker(latestNodeStaked, zombieStakerAddress); - latestNodeStaked = node.prevNum; - nodesRemoved++; - } - if (latestNodeStaked < latestConfirmedNum) { - removeZombie(zombieNum); - } else { - zombieUpdateLatestStakedNode(zombieNum, latestNodeStaked); - } + function returnOldDeposit() external override onlyValidator whenNotPaused { + requireInactiveStaker(msg.sender); + withdrawStaker(msg.sender); } /** - * @notice Remove any zombies whose latest stake is earlier than the latest confirmed node - * @param startIndex Index in the zombie list to start removing zombies from (to limit the cost of this transaction) - */ - function removeOldZombies(uint256 startIndex) public onlyValidator whenNotPaused { - uint256 currentZombieCount = zombieCount(); - uint256 latestConfirmedNum = latestConfirmed(); - for (uint256 i = startIndex; i < currentZombieCount; i++) { - while (zombieLatestStakedNode(i) < latestConfirmedNum) { - removeZombie(i); - currentZombieCount--; - if (i >= currentZombieCount) { - return; - } - } - } - } - - /** - * @notice Calculate the current amount of funds required to place a new stake in the rollup - * @dev If the stake requirement get's too high, this function may start reverting due to overflow, but - * that only blocks operations that should be blocked anyway - * @return The current minimum stake requirement - */ - function currentRequiredStake( - uint256 _blockNumber, - uint64 _firstUnresolvedNodeNum, - uint256 _latestCreatedNode - ) internal view returns (uint256) { - // If there are no unresolved nodes, then you can use the base stake - if (_firstUnresolvedNodeNum - 1 == _latestCreatedNode) { - return baseStake; - } - uint256 firstUnresolvedDeadline = getNodeStorage(_firstUnresolvedNodeNum).deadlineBlock; - if (_blockNumber < firstUnresolvedDeadline) { - return baseStake; - } - uint24[10] memory numerators = [ - 1, - 122971, - 128977, - 80017, - 207329, - 114243, - 314252, - 129988, - 224562, - 162163 - ]; - uint24[10] memory denominators = [ - 1, - 114736, - 112281, - 64994, - 157126, - 80782, - 207329, - 80017, - 128977, - 86901 - ]; - uint256 firstUnresolvedAge = _blockNumber - firstUnresolvedDeadline; - 
uint256 periodsPassed = (firstUnresolvedAge * 10) / confirmPeriodBlocks; - uint256 baseMultiplier = 2**(periodsPassed / 10); - uint256 withNumerator = baseMultiplier * numerators[periodsPassed % 10]; - uint256 multiplier = withNumerator / denominators[periodsPassed % 10]; - if (multiplier == 0) { - multiplier = 1; - } - return baseStake * multiplier; - } - - /** - * @notice Calculate the current amount of funds required to place a new stake in the rollup - * @dev If the stake requirement get's too high, this function may start reverting due to overflow, but - * that only blocks operations that should be blocked anyway - * @return The current minimum stake requirement - */ - function requiredStake( - uint256 blockNumber, - uint64 firstUnresolvedNodeNum, - uint64 latestCreatedNode - ) external view returns (uint256) { - return currentRequiredStake(blockNumber, firstUnresolvedNodeNum, latestCreatedNode); - } - - function owner() external view returns (address) { - return _getAdmin(); - } - - function currentRequiredStake() public view returns (uint256) { - uint64 firstUnresolvedNodeNum = firstUnresolvedNode(); - - return currentRequiredStake(block.number, firstUnresolvedNodeNum, latestNodeCreated()); - } - - /** - * @notice Calculate the number of zombies staked on the given node - * - * @dev This function could be uncallable if there are too many zombies. However, - * removeZombie and removeOldZombies can be used to remove any zombies that exist - * so that this will then be callable - * - * @param nodeNum The node on which to count staked zombies - * @return The number of zombies staked on the node - */ - function countStakedZombies(uint64 nodeNum) public view override returns (uint256) { - uint256 currentZombieCount = zombieCount(); - uint256 stakedZombieCount = 0; - for (uint256 i = 0; i < currentZombieCount; i++) { - if (nodeHasStaker(nodeNum, zombieAddress(i))) { - stakedZombieCount++; - } - } - return stakedZombieCount; - } - - /** - * @notice Calculate the number of zombies staked on a child of the given node - * - * @dev This function could be uncallable if there are too many zombies. However, - * removeZombie and removeOldZombies can be used to remove any zombies that exist - * so that this will then be callable - * - * @param nodeNum The parent node on which to count zombies staked on children - * @return The number of zombies staked on children of the node - */ - function countZombiesStakedOnChildren(uint64 nodeNum) public view override returns (uint256) { - uint256 currentZombieCount = zombieCount(); - uint256 stakedZombieCount = 0; - for (uint256 i = 0; i < currentZombieCount; i++) { - Zombie storage zombie = getZombieStorage(i); - // If this zombie is staked on this node, but its _latest_ staked node isn't this node, - // then it must be staked on a child of this node. 
- if ( - zombie.latestStakedNode != nodeNum && nodeHasStaker(nodeNum, zombie.stakerAddress) - ) { - stakedZombieCount++; - } - } - return stakedZombieCount; - } - - /** - * @notice Verify that there are some number of nodes still unresolved - */ - function requireUnresolvedExists() public view override { - uint256 firstUnresolved = firstUnresolvedNode(); - require( - firstUnresolved > latestConfirmed() && firstUnresolved <= latestNodeCreated(), - "NO_UNRESOLVED" - ); - } - - function requireUnresolved(uint256 nodeNum) public view override { - require(nodeNum >= firstUnresolvedNode(), "ALREADY_DECIDED"); - require(nodeNum <= latestNodeCreated(), "DOESNT_EXIST"); - } - - /** - * @notice Verify that the given address is staked and not actively in a challenge - * @param stakerAddress Address to check + * @notice Increase the amount staked for the given staker + * @param stakerAddress Address of the staker whose stake is increased + * @param depositAmount The amount of either eth or tokens deposited */ - function requireUnchallengedStaker(address stakerAddress) private view { + function _addToDeposit(address stakerAddress, uint256 depositAmount) internal onlyValidator whenNotPaused { require(isStaked(stakerAddress), "NOT_STAKED"); - require(currentChallenge(stakerAddress) == NO_CHAL_INDEX, "IN_CHAL"); - } -} - -contract RollupUserLogic is AbsRollupUserLogic, IRollupUser { - /// @dev the user logic just validated configuration and shouldn't write to state during init - /// this allows the admin logic to ensure consistency on parameters. - function initialize(address _stakeToken) external view override onlyProxy { - require(_stakeToken == address(0), "NO_TOKEN_ALLOWED"); - require(!isERC20Enabled(), "FACET_NOT_ERC20"); + increaseStakeBy(stakerAddress, depositAmount); } /** - * @notice Create a new stake on an existing node - * @param nodeNum Number of the node your stake will be place one - * @param nodeHash Node hash of the node with the given nodeNum + * @notice Reduce the amount staked for the sender (difference between initial amount staked and target is creditted back to the sender). + * @param target Target amount of stake for the staker. 
*/ - function newStakeOnExistingNode(uint64 nodeNum, bytes32 nodeHash) external payable override { - _newStake(msg.value); - stakeOnExistingNode(nodeNum, nodeHash); + function reduceDeposit(uint256 target) external onlyValidator whenNotPaused { + requireInactiveStaker(msg.sender); + // amount will be checked when creating an assertion + reduceStakeTo(msg.sender, target); } /** - * @notice Create a new stake on a new node - * @param assertion Assertion describing the state change between the old node and the new one - * @param expectedNodeHash Node hash of the node that will be created - * @param prevNodeInboxMaxCount Total of messages in the inbox as of the previous node + * @notice This allows the anyTrustFastConfirmer to force confirm any pending assertion + * the anyTrustFastConfirmer is supposed to be set only on an AnyTrust chain to + * a contract that can call this function when it has received sufficient signatures */ - function newStakeOnNewNode( - Assertion calldata assertion, - bytes32 expectedNodeHash, - uint256 prevNodeInboxMaxCount - ) external payable override { - _newStake(msg.value); - stakeOnNewNode(assertion, expectedNodeHash, prevNodeInboxMaxCount); + function fastConfirmAssertion( + bytes32 assertionHash, + bytes32 parentAssertionHash, + ExecutionState calldata confirmState, + bytes32 inboxAcc + ) public whenNotPaused { + require(msg.sender == anyTrustFastConfirmer, "NOT_FAST_CONFIRMER"); + // this skips the deadline, prev, and challenge validations + confirmAssertionInternal(assertionHash, parentAssertionHash, confirmState, inboxAcc); } /** - * @notice Increase the amount staked eth for the given staker - * @param stakerAddress Address of the staker whose stake is increased + * @notice This allows the anyTrustFastConfirmer to immediately create and confirm an assertion + * the anyTrustFastConfirmer is supposed to be set only on an AnyTrust chain to + * a contract that can call this function when it has received sufficient signatures + * The logic in this function is similar to stakeOnNewAssertion, but without staker checks + * + * We trust the anyTrustFastConfirmer to not call this function multiple times on the same prev, + * as doing so would result in incorrect accounting of withdrawable funds in the loserStakeEscrow. + * This is because the protocol assumes there is only 1 unique confirmable child assertion. 
*/ - function addToDeposit(address stakerAddress) + function fastConfirmNewAssertion(AssertionInputs calldata assertion, bytes32 expectedAssertionHash) external - payable - override - onlyValidator whenNotPaused { - _addToDeposit(stakerAddress, msg.value); - } - - /** - * @notice Withdraw uncommitted funds owned by sender from the rollup chain - */ - function withdrawStakerFunds() external override onlyValidator whenNotPaused returns (uint256) { - uint256 amount = withdrawFunds(msg.sender); - // This is safe because it occurs after all checks and effects - // solhint-disable-next-line avoid-low-level-calls - (bool success, ) = msg.sender.call{value: amount}(""); - require(success, "TRANSFER_FAILED"); - return amount; - } -} + // Must supply expectedAssertionHash to fastConfirmNewAssertion + require(expectedAssertionHash != bytes32(0), "EXPECTED_ASSERTION_HASH"); + AssertionStatus status = getAssertionStorage(expectedAssertionHash).status; + + bytes32 prevAssertion = RollupLib.assertionHash( + assertion.beforeStateData.prevPrevAssertionHash, + assertion.beforeState, + assertion.beforeStateData.sequencerBatchAcc + ); + getAssertionStorage(prevAssertion).requireExists(); + + if (status == AssertionStatus.NoAssertion) { + // If not exists, we create the new assertion + bytes32 newAssertionHash = createNewAssertion(assertion, prevAssertion, expectedAssertionHash); + if (!getAssertionStorage(newAssertionHash).isFirstChild) { + // only 1 of the children can be confirmed and get their stake refunded + // so we send the other children's stake to the loserStakeEscrow + // NOTE: if the losing staker have staked more than requiredStake, the excess stake will be stuck + increaseWithdrawableFunds(loserStakeEscrow, assertion.beforeStateData.configData.requiredStake); + } + } -contract ERC20RollupUserLogic is AbsRollupUserLogic, IRollupUserERC20 { - /// @dev the user logic just validated configuration and shouldn't write to state during init - /// this allows the admin logic to ensure consistency on parameters. 
- function initialize(address _stakeToken) external view override onlyProxy { - require(_stakeToken != address(0), "NEED_STAKE_TOKEN"); - require(isERC20Enabled(), "FACET_NOT_ERC20"); + // This would revert if the assertion is already confirmed + fastConfirmAssertion( + expectedAssertionHash, + prevAssertion, + assertion.afterState, + bridge.sequencerInboxAccs(assertion.afterState.globalState.getInboxPosition() - 1) + ); } - /** - * @notice Create a new stake on an existing node - * @param tokenAmount Amount of the rollups staking token to stake - * @param nodeNum Number of the node your stake will be place one - * @param nodeHash Node hash of the node with the given nodeNum - */ - function newStakeOnExistingNode( - uint256 tokenAmount, - uint64 nodeNum, - bytes32 nodeHash - ) external override { - _newStake(tokenAmount); - stakeOnExistingNode(nodeNum, nodeHash); - /// @dev This is an external call, safe because it's at the end of the function - receiveTokens(tokenAmount); + function owner() external view returns (address) { + return _getAdmin(); } /** - * @notice Create a new stake on a new node + * @notice Create a new stake on a new assertion * @param tokenAmount Amount of the rollups staking token to stake - * @param assertion Assertion describing the state change between the old node and the new one - * @param expectedNodeHash Node hash of the node that will be created - * @param prevNodeInboxMaxCount Total of messages in the inbox as of the previous node + * @param assertion Assertion describing the state change between the old assertion and the new one + * @param expectedAssertionHash Assertion hash of the assertion that will be created */ - function newStakeOnNewNode( + function newStakeOnNewAssertion( uint256 tokenAmount, - Assertion calldata assertion, - bytes32 expectedNodeHash, - uint256 prevNodeInboxMaxCount - ) external override { + AssertionInputs calldata assertion, + bytes32 expectedAssertionHash + ) external { _newStake(tokenAmount); - stakeOnNewNode(assertion, expectedNodeHash, prevNodeInboxMaxCount); + stakeOnNewAssertion(assertion, expectedAssertionHash); /// @dev This is an external call, safe because it's at the end of the function receiveTokens(tokenAmount); } @@ -718,11 +322,7 @@ contract ERC20RollupUserLogic is AbsRollupUserLogic, IRollupUserERC20 { * @param stakerAddress Address of the staker whose stake is increased * @param tokenAmount the amount of tokens staked */ - function addToDeposit(address stakerAddress, uint256 tokenAmount) - external - onlyValidator - whenNotPaused - { + function addToDeposit(address stakerAddress, uint256 tokenAmount) external onlyValidator whenNotPaused { _addToDeposit(stakerAddress, tokenAmount); /// @dev This is an external call, safe because it's at the end of the function receiveTokens(tokenAmount); @@ -731,17 +331,15 @@ contract ERC20RollupUserLogic is AbsRollupUserLogic, IRollupUserERC20 { /** * @notice Withdraw uncommitted funds owned by sender from the rollup chain */ - function withdrawStakerFunds() external override onlyValidator whenNotPaused returns (uint256) { + function withdrawStakerFunds() external override whenNotPaused returns (uint256) { uint256 amount = withdrawFunds(msg.sender); + require(amount > 0, "NO_FUNDS_TO_WITHDRAW"); // This is safe because it occurs after all checks and effects - require(IERC20Upgradeable(stakeToken).transfer(msg.sender, amount), "TRANSFER_FAILED"); + IERC20(stakeToken).safeTransfer(msg.sender, amount); return amount; } function receiveTokens(uint256 tokenAmount) private { - require( - 
IERC20Upgradeable(stakeToken).transferFrom(msg.sender, address(this), tokenAmount), - "TRANSFER_FAIL" - ); + IERC20(stakeToken).safeTransferFrom(msg.sender, address(this), tokenAmount); } } diff --git a/src/rollup/ValidatorUtils.sol b/src/rollup/ValidatorUtils.sol index 75d554bb..409e99ea 100644 --- a/src/rollup/ValidatorUtils.sol +++ b/src/rollup/ValidatorUtils.sol @@ -1,243 +1,245 @@ -// Copyright 2021-2022, Offchain Labs, Inc. -// For license information, see https://github.com/OffchainLabs/nitro-contracts/blob/main/LICENSE -// SPDX-License-Identifier: BUSL-1.1 - -pragma solidity ^0.8.0; - -pragma experimental ABIEncoderV2; - -import "../rollup/IRollupCore.sol"; -import "../challenge/IChallengeManager.sol"; -import "./IRollupLogic.sol"; - -import {NO_CHAL_INDEX} from "../libraries/Constants.sol"; - -contract ValidatorUtils { - using NodeLib for Node; - - enum ConfirmType { - NONE, - VALID, - INVALID - } - - enum NodeConflictType { - NONE, - FOUND, - INDETERMINATE, - INCOMPLETE - } - - struct NodeConflict { - NodeConflictType ty; - uint64 node1; - uint64 node2; - } - - function findStakerConflict( - IRollupCore rollup, - address staker1, - address staker2, - uint256 maxDepth - ) external view returns (NodeConflict memory) { - uint64 staker1NodeNum = rollup.latestStakedNode(staker1); - uint64 staker2NodeNum = rollup.latestStakedNode(staker2); - return findNodeConflict(rollup, staker1NodeNum, staker2NodeNum, maxDepth); - } - - function checkDecidableNextNode(IRollupUserAbs rollup) external view returns (ConfirmType) { - try ValidatorUtils(address(this)).requireConfirmable(rollup) { - return ConfirmType.VALID; - } catch {} - - try ValidatorUtils(address(this)).requireRejectable(rollup) { - return ConfirmType.INVALID; - } catch { - return ConfirmType.NONE; - } - } - - function requireRejectable(IRollupCore rollup) external view { - IRollupUser(address(rollup)).requireUnresolvedExists(); - uint64 firstUnresolvedNode = rollup.firstUnresolvedNode(); - Node memory node = rollup.getNode(firstUnresolvedNode); - if (node.prevNum == rollup.latestConfirmed()) { - // Verify the block's deadline has passed - require(block.number >= node.deadlineBlock, "BEFORE_DEADLINE"); - rollup.getNode(node.prevNum).requirePastChildConfirmDeadline(); - - // Verify that no staker is staked on this node - require( - node.stakerCount == - IRollupUser(address(rollup)).countStakedZombies(firstUnresolvedNode), - "HAS_STAKERS" - ); - } - } - - function requireConfirmable(IRollupUserAbs rollup) external view { - rollup.requireUnresolvedExists(); - - uint256 stakerCount = rollup.stakerCount(); - // There is at least one non-zombie staker - require(stakerCount > 0, "NO_STAKERS"); - - uint64 firstUnresolved = rollup.firstUnresolvedNode(); - Node memory node = rollup.getNode(firstUnresolved); - - // Verify the block's deadline has passed - node.requirePastDeadline(); - - // Check that prev is latest confirmed - assert(node.prevNum == rollup.latestConfirmed()); - - Node memory prevNode = rollup.getNode(node.prevNum); - prevNode.requirePastChildConfirmDeadline(); - - uint256 zombiesStakedOnOtherChildren = rollup.countZombiesStakedOnChildren(node.prevNum) - - rollup.countStakedZombies(firstUnresolved); - require( - prevNode.childStakerCount == node.stakerCount + zombiesStakedOnOtherChildren, - "NOT_ALL_STAKED" - ); - } - - function refundableStakers(IRollupCore rollup) external view returns (address[] memory) { - uint256 stakerCount = rollup.stakerCount(); - address[] memory stakers = new address[](stakerCount); - uint256 
latestConfirmed = rollup.latestConfirmed(); - uint256 index = 0; - for (uint64 i = 0; i < stakerCount; i++) { - address staker = rollup.getStakerAddress(i); - uint256 latestStakedNode = rollup.latestStakedNode(staker); - if (latestStakedNode <= latestConfirmed && rollup.currentChallenge(staker) == 0) { - stakers[index] = staker; - index++; - } - } - assembly { - mstore(stakers, index) - } - return stakers; - } - - function latestStaked(IRollupCore rollup, address staker) - external - view - returns (uint64, Node memory) - { - uint64 num = rollup.latestStakedNode(staker); - if (num == 0) { - num = rollup.latestConfirmed(); - } - Node memory node = rollup.getNode(num); - return (num, node); - } - - function stakedNodes(IRollupCore rollup, address staker) - external - view - returns (uint64[] memory) - { - uint64[] memory nodes = new uint64[](100000); - uint256 index = 0; - for (uint64 i = rollup.latestConfirmed(); i <= rollup.latestNodeCreated(); i++) { - if (rollup.nodeHasStaker(i, staker)) { - nodes[index] = i; - index++; - } - } - // Shrink array down to real size - assembly { - mstore(nodes, index) - } - return nodes; - } - - function findNodeConflict( - IRollupCore rollup, - uint64 node1, - uint64 node2, - uint256 maxDepth - ) public view returns (NodeConflict memory) { - uint64 firstUnresolvedNode = rollup.firstUnresolvedNode(); - uint64 node1Prev = rollup.getNode(node1).prevNum; - uint64 node2Prev = rollup.getNode(node2).prevNum; - - for (uint256 i = 0; i < maxDepth; i++) { - if (node1 == node2) { - return NodeConflict(NodeConflictType.NONE, node1, node2); - } - if (node1Prev == node2Prev) { - return NodeConflict(NodeConflictType.FOUND, node1, node2); - } - if (node1Prev < firstUnresolvedNode && node2Prev < firstUnresolvedNode) { - return NodeConflict(NodeConflictType.INDETERMINATE, 0, 0); - } - if (node1Prev < node2Prev) { - node2 = node2Prev; - node2Prev = rollup.getNode(node2).prevNum; - } else { - node1 = node1Prev; - node1Prev = rollup.getNode(node1).prevNum; - } - } - return NodeConflict(NodeConflictType.INCOMPLETE, 0, 0); - } - - function getStakers( - IRollupCore rollup, - uint64 startIndex, - uint64 max - ) public view returns (address[] memory, bool hasMore) { - uint256 maxStakers = rollup.stakerCount(); - if (startIndex + max <= maxStakers) { - maxStakers = startIndex + max; - hasMore = true; - } - - address[] memory stakers = new address[](maxStakers); - for (uint64 i = 0; i < maxStakers; i++) { - stakers[i] = rollup.getStakerAddress(startIndex + i); - } - return (stakers, hasMore); - } - - function timedOutChallenges( - IRollupCore rollup, - uint64 startIndex, - uint64 max - ) external view returns (uint64[] memory, bool hasMore) { - (address[] memory stakers, bool hasMoreStakers) = getStakers(rollup, startIndex, max); - uint64[] memory challenges = new uint64[](stakers.length); - uint256 index = 0; - IChallengeManager challengeManager = rollup.challengeManager(); - for (uint256 i = 0; i < stakers.length; i++) { - address staker = stakers[i]; - uint64 challengeIndex = rollup.currentChallenge(staker); - if ( - challengeIndex != NO_CHAL_INDEX && - challengeManager.isTimedOut(challengeIndex) && - challengeManager.currentResponder(challengeIndex) == staker - ) { - challenges[index++] = challengeIndex; - } - } - // Shrink array down to real size - assembly { - mstore(challenges, index) - } - return (challenges, hasMoreStakers); - } - - // Worst case runtime of O(depth), as it terminates if it switches paths. 
- function areUnresolvedNodesLinear(IRollupCore rollup) external view returns (bool) { - uint256 end = rollup.latestNodeCreated(); - for (uint64 i = rollup.firstUnresolvedNode(); i <= end; i++) { - if (i > 0 && rollup.getNode(i).prevNum != i - 1) { - return false; - } - } - return true; - } -} +// // Copyright 2021-2022, Offchain Labs, Inc. +// // For license information, see https://github.com/OffchainLabs/nitro-contracts/blob/main/LICENSE +// // SPDX-License-Identifier: BUSL-1.1 + +// pragma solidity ^0.8.0; + +// pragma experimental ABIEncoderV2; + +// import "../rollup/IRollupCore.sol"; +// import "../challenge/IOldChallengeManager.sol"; +// import "./IRollupLogic.sol"; + +// import {NO_CHAL_INDEX} from "../libraries/Constants.sol"; + +// contract ValidatorUtils { +// using AssertionNodeLib for AssertionNode; + +// enum ConfirmType { +// NONE, +// VALID, +// INVALID +// } + +// enum AssertionConflictType { +// NONE, +// FOUND, +// INDETERMINATE, +// INCOMPLETE +// } + +// struct AssertionConflict { +// AssertionConflictType ty; +// uint64 assertion1; +// uint64 assertion2; +// } + +// function findStakerConflict( +// IRollupCore rollup, +// address staker1, +// address staker2, +// uint256 maxDepth +// ) external view returns (AssertionConflict memory) { +// uint64 staker1AssertionNum = rollup.latestStakedAssertion(staker1); +// uint64 staker2AssertionNum = rollup.latestStakedAssertion(staker2); +// return findAssertionConflict(rollup, staker1AssertionNum, staker2AssertionNum, maxDepth); +// } + +// function checkDecidableNextAssertion(IRollupUserAbs rollup) external view returns (ConfirmType) { +// try ValidatorUtils(address(this)).requireConfirmable(rollup) { +// return ConfirmType.VALID; +// } catch {} + +// try ValidatorUtils(address(this)).requireRejectable(rollup) { +// return ConfirmType.INVALID; +// } catch { +// return ConfirmType.NONE; +// } +// } + +// function requireRejectable(IRollupCore rollup) external view { +// IRollupUser(address(rollup)).requireUnresolvedExists(); +// uint64 firstUnresolvedAssertion = rollup.firstUnresolvedAssertion(); +// AssertionNode memory assertion = rollup.getAssertion(firstUnresolvedAssertion); +// if (assertion.prevNum == rollup.latestConfirmed()) { +// // Verify the block's deadline has passed +// require(block.number >= assertion.deadlineBlock, "BEFORE_DEADLINE"); +// rollup.getAssertion(assertion.prevNum).requirePastChildConfirmDeadline(); + +// // Verify that no staker is staked on this assertion +// // TODO: HN: review this +// // require( +// // assertion.stakerCount == +// // IRollupUser(address(rollup)).countStakedZombies(firstUnresolvedAssertion), +// // "HAS_STAKERS" +// // ); +// } +// } + +// function requireConfirmable(IRollupUserAbs rollup) external view { +// rollup.requireUnresolvedExists(); + +// uint256 stakerCount = rollup.stakerCount(); +// // There is at least one non-zombie staker +// require(stakerCount > 0, "NO_STAKERS"); + +// uint64 firstUnresolved = rollup.firstUnresolvedAssertion(); +// AssertionNode memory assertion = rollup.getAssertion(firstUnresolved); + +// // Verify the block's deadline has passed +// assertion.requirePastDeadline(); + +// // Check that prev is latest confirmed +// assert(assertion.prevNum == rollup.latestConfirmed()); + +// AssertionNode memory prevAssertion = rollup.getAssertion(assertion.prevNum); +// prevAssertion.requirePastChildConfirmDeadline(); + +// // TODO: HN: review this +// // uint256 zombiesStakedOnOtherChildren = rollup.countZombiesStakedOnChildren(assertion.prevNum) - +// // 
rollup.countStakedZombies(firstUnresolved); +// // require( +// // prevAssertion.childStakerCount == assertion.stakerCount + zombiesStakedOnOtherChildren, +// // "NOT_ALL_STAKED" +// // ); +// } + +// function refundableStakers(IRollupCore rollup) external view returns (address[] memory) { +// uint256 stakerCount = rollup.stakerCount(); +// address[] memory stakers = new address[](stakerCount); +// uint256 latestConfirmed = rollup.latestConfirmed(); +// uint256 index = 0; +// for (uint64 i = 0; i < stakerCount; i++) { +// address staker = rollup.getStakerAddress(i); +// uint256 latestStakedAssertion = rollup.latestStakedAssertion(staker); +// if (latestStakedAssertion <= latestConfirmed && rollup.currentChallenge(staker) == 0) { +// stakers[index] = staker; +// index++; +// } +// } +// assembly { +// mstore(stakers, index) +// } +// return stakers; +// } + +// function latestStaked(IRollupCore rollup, address staker) +// external +// view +// returns (uint64, AssertionNode memory) +// { +// uint64 num = rollup.latestStakedAssertion(staker); +// if (num == 0) { +// num = rollup.latestConfirmed(); +// } +// AssertionNode memory assertion = rollup.getAssertion(num); +// return (num, assertion); +// } + +// function stakedAssertions(IRollupCore rollup, address staker) +// external +// view +// returns (uint64[] memory) +// { +// uint64[] memory assertions = new uint64[](100000); +// uint256 index = 0; +// for (uint64 i = rollup.latestConfirmed(); i <= rollup.latestAssertionCreated(); i++) { +// if (rollup.assertionHasStaker(i, staker)) { +// assertions[index] = i; +// index++; +// } +// } +// // Shrink array down to real size +// assembly { +// mstore(assertions, index) +// } +// return assertions; +// } + +// function findAssertionConflict( +// IRollupCore rollup, +// uint64 assertion1, +// uint64 assertion2, +// uint256 maxDepth +// ) public view returns (AssertionConflict memory) { +// uint64 firstUnresolvedAssertion = rollup.firstUnresolvedAssertion(); +// uint64 assertion1Prev = rollup.getAssertion(assertion1).prevNum; +// uint64 assertion2Prev = rollup.getAssertion(assertion2).prevNum; + +// for (uint256 i = 0; i < maxDepth; i++) { +// if (assertion1 == assertion2) { +// return AssertionConflict(AssertionConflictType.NONE, assertion1, assertion2); +// } +// if (assertion1Prev == assertion2Prev) { +// return AssertionConflict(AssertionConflictType.FOUND, assertion1, assertion2); +// } +// if (assertion1Prev < firstUnresolvedAssertion && assertion2Prev < firstUnresolvedAssertion) { +// return AssertionConflict(AssertionConflictType.INDETERMINATE, 0, 0); +// } +// if (assertion1Prev < assertion2Prev) { +// assertion2 = assertion2Prev; +// assertion2Prev = rollup.getAssertion(assertion2).prevNum; +// } else { +// assertion1 = assertion1Prev; +// assertion1Prev = rollup.getAssertion(assertion1).prevNum; +// } +// } +// return AssertionConflict(AssertionConflictType.INCOMPLETE, 0, 0); +// } + +// function getStakers( +// IRollupCore rollup, +// uint64 startIndex, +// uint64 max +// ) public view returns (address[] memory, bool hasMore) { +// uint256 maxStakers = rollup.stakerCount(); +// if (startIndex + max <= maxStakers) { +// maxStakers = startIndex + max; +// hasMore = true; +// } + +// address[] memory stakers = new address[](maxStakers); +// for (uint64 i = 0; i < maxStakers; i++) { +// stakers[i] = rollup.getStakerAddress(startIndex + i); +// } +// return (stakers, hasMore); +// } + +// function timedOutChallenges( +// IRollupCore rollup, +// uint64 startIndex, +// uint64 max +// ) 
external view returns (uint64[] memory, bool hasMore) { +// (address[] memory stakers, bool hasMoreStakers) = getStakers(rollup, startIndex, max); +// uint64[] memory challenges = new uint64[](stakers.length); +// uint256 index = 0; +// IOldChallengeManager oldChallengeManager = rollup.oldChallengeManager(); +// for (uint256 i = 0; i < stakers.length; i++) { +// address staker = stakers[i]; +// uint64 challengeIndex = rollup.currentChallenge(staker); +// if ( +// challengeIndex != NO_CHAL_INDEX && +// oldChallengeManager.isTimedOut(challengeIndex) && +// oldChallengeManager.currentResponder(challengeIndex) == staker +// ) { +// challenges[index++] = challengeIndex; +// } +// } +// // Shrink array down to real size +// assembly { +// mstore(challenges, index) +// } +// return (challenges, hasMoreStakers); +// } + +// // Worst case runtime of O(depth), as it terminates if it switches paths. +// function areUnresolvedAssertionsLinear(IRollupCore rollup) external view returns (bool) { +// uint256 end = rollup.latestAssertionCreated(); +// for (uint64 i = rollup.firstUnresolvedAssertion(); i <= end; i++) { +// if (i > 0 && rollup.getAssertion(i).prevNum != i - 1) { +// return false; +// } +// } +// return true; +// } +// } diff --git a/src/rollup/ValidatorWallet.sol b/src/rollup/ValidatorWallet.sol index 9b8c2296..6f0a64c8 100644 --- a/src/rollup/ValidatorWallet.sol +++ b/src/rollup/ValidatorWallet.sol @@ -4,7 +4,7 @@ pragma solidity ^0.8.0; -import "../challenge/IChallengeManager.sol"; +import "../challenge/IOldChallengeManager.sol"; import "../libraries/DelegateCallAware.sol"; import "../libraries/IGasRefunder.sol"; import "@openzeppelin/contracts/utils/Address.sol"; @@ -160,13 +160,13 @@ contract ValidatorWallet is OwnableUpgradeable, DelegateCallAware, GasRefundEnab } } - function timeoutChallenges(IChallengeManager manager, uint64[] calldata challenges) external { + function timeoutChallenges(IOldChallengeManager manager, uint64[] calldata challenges) external { timeoutChallengesWithGasRefunder(IGasRefunder(address(0)), manager, challenges); } function timeoutChallengesWithGasRefunder( IGasRefunder gasRefunder, - IChallengeManager manager, + IOldChallengeManager manager, uint64[] calldata challenges ) public onlyExecutorOrOwner refundsGas(gasRefunder) { uint256 challengesCount = challenges.length; diff --git a/src/state/Deserialize.sol b/src/state/Deserialize.sol index 75bf62bf..2339a05d 100644 --- a/src/state/Deserialize.sol +++ b/src/state/Deserialize.sol @@ -250,8 +250,6 @@ library Deserialize { status = MachineStatus.FINISHED; } else if (statusU8 == 2) { status = MachineStatus.ERRORED; - } else if (statusU8 == 3) { - status = MachineStatus.TOO_FAR; } else { revert("UNKNOWN_MACH_STATUS"); } diff --git a/src/state/Machine.sol b/src/state/Machine.sol index 9d80f45c..2ae9b690 100644 --- a/src/state/Machine.sol +++ b/src/state/Machine.sol @@ -11,8 +11,7 @@ import "./StackFrame.sol"; enum MachineStatus { RUNNING, FINISHED, - ERRORED, - TOO_FAR + ERRORED } struct Machine { @@ -51,9 +50,7 @@ library MachineLib { } else if (mach.status == MachineStatus.FINISHED) { return keccak256(abi.encodePacked("Machine finished:", mach.globalStateHash)); } else if (mach.status == MachineStatus.ERRORED) { - return keccak256(abi.encodePacked("Machine errored:")); - } else if (mach.status == MachineStatus.TOO_FAR) { - return keccak256(abi.encodePacked("Machine too far:")); + return keccak256(abi.encodePacked("Machine errored:", mach.globalStateHash)); } else { revert("BAD_MACH_STATUS"); } diff --git 
a/src/test-helpers/BridgeTester.sol b/src/test-helpers/BridgeTester.sol index b355c51c..374a89ca 100644 --- a/src/test-helpers/BridgeTester.sol +++ b/src/test-helpers/BridgeTester.sol @@ -73,6 +73,10 @@ contract BridgeTester is Initializable, DelegateCallAware, IBridge { rollup = rollup_; } + function updateRollupAddress(IOwnable _rollup) external onlyDelegated onlyProxyOwner { + rollup = _rollup; + } + function activeOutbox() public view returns (address) { if (_activeOutbox == EMPTY_ACTIVEOUTBOX) return address(uint160(0)); return _activeOutbox; @@ -191,7 +195,7 @@ contract BridgeTester is Initializable, DelegateCallAware, IBridge { InOutInfo storage info = allowedInboxesMap[inbox]; bool alreadyEnabled = info.allowed; emit InboxToggle(inbox, enabled); - if ((alreadyEnabled && enabled) || (!alreadyEnabled && !enabled)) { + if (alreadyEnabled == enabled) { return; } if (enabled) { @@ -211,7 +215,7 @@ contract BridgeTester is Initializable, DelegateCallAware, IBridge { InOutInfo storage info = allowedOutboxesMap[outbox]; bool alreadyEnabled = info.allowed; emit OutboxToggle(outbox, enabled); - if ((alreadyEnabled && enabled) || (!alreadyEnabled && !enabled)) { + if (alreadyEnabled == enabled) { return; } if (enabled) { diff --git a/src/test-helpers/OutboxWithoutOptTester.sol b/src/test-helpers/OutboxWithoutOptTester.sol index 50f378ac..957e7d84 100644 --- a/src/test-helpers/OutboxWithoutOptTester.sol +++ b/src/test-helpers/OutboxWithoutOptTester.sol @@ -54,6 +54,10 @@ contract OutboxWithoutOptTester is DelegateCallAware, IOutbox { emit SendRootUpdated(root, l2BlockHash); } + function updateRollupAddress() external onlyDelegated onlyProxyOwner { + rollup = address(bridge.rollup()); + } + /// @notice When l2ToL1Sender returns a nonzero address, the message was originated by an L2 account /// When the return value is zero, that means this is a system message /// @dev the l2ToL1Sender behaves as the tx.origin, the msg.sender should be validated to protect against reentrancies diff --git a/src/test-helpers/RollupMock.sol b/src/test-helpers/RollupMock.sol index 085b9c00..73831f66 100644 --- a/src/test-helpers/RollupMock.sol +++ b/src/test-helpers/RollupMock.sol @@ -12,10 +12,4 @@ contract RollupMock { emit WithdrawTriggered(); return 0; } - - function removeOldZombies( - uint256 /* startIndex */ - ) external { - emit ZombieTriggered(); - } }
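Note on the SafeERC20 change in ERC20RollupUserLogic above: require(IERC20Upgradeable(stakeToken).transfer(...)) only protects against tokens that actually return a boolean, while OpenZeppelin's SafeERC20 wrappers also revert for tokens that return nothing or return false. The following is a minimal, self-contained sketch of the intent behind the rewritten receiveTokens and withdrawStakerFunds; it is not part of this diff, and the contract name, storage layout, and deposit function are hypothetical simplifications.

// SPDX-License-Identifier: BUSL-1.1
pragma solidity ^0.8.0;

import "@openzeppelin/contracts/token/ERC20/IERC20.sol";
import "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol";

// Hypothetical illustration of the SafeERC20 pattern adopted in
// ERC20RollupUserLogic.withdrawStakerFunds / receiveTokens above.
contract StakeTokenSketch {
    using SafeERC20 for IERC20;

    IERC20 public immutable stakeToken;
    mapping(address => uint256) private withdrawable;

    constructor(IERC20 _stakeToken) {
        stakeToken = _stakeToken;
    }

    // Pull tokens in and credit the sender; safeTransferFrom reverts for tokens
    // that return false or return no value, so no explicit require() is needed.
    function deposit(uint256 tokenAmount) external {
        withdrawable[msg.sender] += tokenAmount;
        // External call last, mirroring the ordering kept in the diff.
        stakeToken.safeTransferFrom(msg.sender, address(this), tokenAmount);
    }

    // Push tokens out only after all checks and state changes.
    function withdrawStakerFunds() external returns (uint256) {
        uint256 amount = withdrawable[msg.sender];
        require(amount > 0, "NO_FUNDS_TO_WITHDRAW");
        withdrawable[msg.sender] = 0;
        stakeToken.safeTransfer(msg.sender, amount);
        return amount;
    }
}

As in the diff, the token transfers stay at the end of each function so the checks-effects-interactions ordering is preserved.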
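Separately, the (now commented-out) ValidatorUtils helpers above shrink over-allocated memory arrays by rewriting the array's length word in inline assembly (assembly { mstore(stakers, index) }). A minimal, hypothetical sketch of that pattern, outside the context of this diff and with an invented filter for illustration:

// SPDX-License-Identifier: BUSL-1.1
pragma solidity ^0.8.0;

// Hypothetical helper showing the in-place memory-array shrink used by the old
// ValidatorUtils functions (refundableStakers, stakedNodes, timedOutChallenges).
library ArrayShrinkSketch {
    function collectEven(uint64[] memory input) internal pure returns (uint64[] memory) {
        // Over-allocate to the maximum possible size, then track actual usage.
        uint64[] memory result = new uint64[](input.length);
        uint256 index = 0;
        for (uint256 i = 0; i < input.length; i++) {
            if (input[i] % 2 == 0) {
                result[index] = input[i];
                index++;
            }
        }
        // A memory array stores its length in the first word it points to;
        // rewriting that word "shrinks" the array in place without copying.
        // Sound only for shrinking (index <= input.length), never for growing.
        assembly {
            mstore(result, index)
        }
        return result;
    }
}

The trick is safe here only because the number of kept elements can never exceed the original allocation, so no memory past the array's end is ever exposed to callers.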