diff --git a/.github/workflows/da-tests.yml b/.github/workflows/da-tests.yml new file mode 100644 index 0000000000..9a0b3a2d0f --- /dev/null +++ b/.github/workflows/da-tests.yml @@ -0,0 +1,53 @@ +--- +name: Task - DA Tests + +on: + workflow_dispatch: + workflow_call: + +jobs: + rpc-tests: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + da-layer: + - ethereum + - celestia + - avail + env: + BINARY_PATH: ../target/release/madara + steps: + - uses: actions/checkout@v3 + - uses: Swatinem/rust-cache@v2 + with: + shared-key: "cache" + save-if: false + - uses: actions/cache@v3 + with: + path: target/release/madara + key: + ${{ runner.os }}-cargo-${{ hashFiles('Cargo.lock') }}-${{ + github.run_id }} + fail-on-cache-miss: true + - name: Setup build deps + run: | + sudo apt-get update + sudo apt-get install -y clang llvm libudev-dev protobuf-compiler + - name: Setup dev chain + run: | + ./target/release/madara setup --chain=dev --from-local=configs + - name: Run DA Layer + run: | + bash ./scripts/da_devnet.sh ${{ matrix.da_layer }} + - name: Run DA tests + run: |- + ./target/release/madara --dev --da-layer ${{ matrix.da_layer }} --da-conf examples/da-confs/${{ matrix.da_layer }}.json & + MADARA_RUN_PID=$! + while ! 
echo exit | nc localhost 9944; do sleep 1; done + cd da-test + DA_LAYER=${{ matrix.da_layer }} cargo test + kill $MADARA_RUN_PID + - name: Stop DA Layer + run: | + bash ./scripts/stop_da_devnet.sh ${{ matrix.da_layer }} diff --git a/.gitmodules b/.gitmodules index 32a1c93e3b..6949b7d838 100644 --- a/.gitmodules +++ b/.gitmodules @@ -7,3 +7,6 @@ [submodule "madara-tsukuyomi"] path = madara-tsukuyomi url = https://github.com/keep-starknet-strange/madara-tsukuyomi +[submodule "zaun"] + path = zaun + url = https://github.com/keep-starknet-strange/zaun diff --git a/.prettierignore b/.prettierignore index 036998c861..3380681c80 100644 --- a/.prettierignore +++ b/.prettierignore @@ -1,6 +1,7 @@ target cairo-contracts/build -madara-app +madara-tsukuyomi madara-dev-explorer madara-docs madara-infra +zaun diff --git a/CHANGELOG.md b/CHANGELOG.md index c08ce1df32..9cb80fad26 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,8 @@ - fix: Change serialization of bitvec to &[u8] in merkle tree to avoid memory uninitialized - chore: change SCARB config version for foundry CI +- feat(da): update da calldata encoding to v0.11.0 spec, da conf examples, da + conf flag, da-tests in CI ## v0.6.0 diff --git a/Cargo.lock b/Cargo.lock index 66aa3771d6..9ee99728db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2516,6 +2516,30 @@ dependencies = [ "syn 2.0.39", ] +[[package]] +name = "da-test" +version = "0.1.0" +dependencies = [ + "anyhow", + "assert_matches", + "async-lock 3.2.0", + "clap 4.4.11", + "ethers", + "flate2", + "lazy_static", + "mc-data-availability", + "reqwest", + "rstest", + "serde", + "serde_json", + "starknet-ff", + "starknet-providers", + "starknet-rpc-test", + "thiserror", + "tokio", + "url", +] + [[package]] name = "darling" version = "0.14.4" @@ -6206,15 +6230,21 @@ dependencies = [ "anyhow", "async-trait", "avail-subxt", + "blockifier", "celestia-rpc", "celestia-types", "clap 4.4.10", "ethers", "futures", + "indexmap 2.0.0-pre", "jsonrpsee 0.20.3", "log", + 
"mc-commitment-state-diff", "mc-db", + "mp-digest-log", + "mp-hashers", "mp-storage", + "pallet-starknet-runtime-api", "reqwest", "sc-client-api", "serde", @@ -6222,6 +6252,7 @@ dependencies = [ "sp-api", "sp-blockchain", "sp-core 21.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.3.0)", + "sp-io 23.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.3.0)", "sp-keyring", "sp-runtime 24.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.3.0)", "starknet_api", @@ -6245,6 +6276,7 @@ dependencies = [ "sp-core 21.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.3.0)", "sp-database", "sp-runtime 24.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=release-polkadot-v1.3.0)", + "starknet_api", "uuid 1.6.1", ] diff --git a/Cargo.toml b/Cargo.toml index 440d6aefa4..1462bed647 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,8 +25,9 @@ members = [ "crates/client/storage", "crates/client/commitment-state-diff", "starknet-rpc-test", + "da-test", ] -# All previous except for `starknet-rpc-test` +# All previous except for `starknet-rpc-test` and `da-test` # We don't want `cargo test` to trigger its tests default-members = [ "crates/node", @@ -246,6 +247,10 @@ url = "2.4.1" hashbrown = "0.14.2" tokio = "1.34.0" openssl = { version = "0.10", features = ["vendored"] } +ethers = "2.0.7" +subxt = "0.29" +assert_matches = "1.5.0" +async-lock = "3.1.0" [patch."https://github.com/w3f/ring-vrf"] bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf?rev=3ddc20", version = "0.0.4", rev = "3ddc20" } diff --git a/crates/client/commitment-state-diff/Cargo.toml b/crates/client/commitment-state-diff/Cargo.toml index aebe99b095..640b5a187a 100644 --- a/crates/client/commitment-state-diff/Cargo.toml +++ b/crates/client/commitment-state-diff/Cargo.toml @@ -16,7 +16,7 @@ sp-runtime = { workspace = true, default-features = true } mp-digest-log = { workspace = true, 
default-features = true } mp-hashers = { workspace = true, default-features = true } mp-storage = { workspace = true, default-features = true } -pallet-starknet = { workspace = true } +pallet-starknet = { workspace = true, default-features = true } pallet-starknet-runtime-api = { workspace = true, default-features = true } # Starknet diff --git a/crates/client/commitment-state-diff/src/lib.rs b/crates/client/commitment-state-diff/src/lib.rs index 4f74cbaf97..24111d4fff 100644 --- a/crates/client/commitment-state-diff/src/lib.rs +++ b/crates/client/commitment-state-diff/src/lib.rs @@ -3,10 +3,9 @@ use std::pin::Pin; use std::sync::Arc; use std::task::Poll; -use blockifier::state::cached_state::CommitmentStateDiff; use futures::channel::mpsc; -use futures::{Stream, StreamExt}; -use indexmap::IndexMap; +use futures::Stream; +use indexmap::{IndexMap, IndexSet}; use mp_hashers::HasherT; use mp_storage::{SN_COMPILED_CLASS_HASH_PREFIX, SN_CONTRACT_CLASS_HASH_PREFIX, SN_NONCE_PREFIX, SN_STORAGE_PREFIX}; use pallet_starknet_runtime_api::StarknetRuntimeApi; @@ -18,14 +17,16 @@ use sp_runtime::traits::{Block as BlockT, Header}; use starknet_api::api_core::{ClassHash, CompiledClassHash, ContractAddress, Nonce, PatriciaKey}; use starknet_api::block::BlockHash; use starknet_api::hash::StarkFelt; -use starknet_api::state::StorageKey as StarknetStorageKey; +use starknet_api::state::{StorageKey as StarknetStorageKey, ThinStateDiff}; use thiserror::Error; +pub struct BlockDAData(pub BlockHash, pub ThinStateDiff, pub usize); + pub struct CommitmentStateDiffWorker { client: Arc, storage_event_stream: StorageEventStream, - tx: mpsc::Sender<(BlockHash, CommitmentStateDiff)>, - msg: Option<(BlockHash, CommitmentStateDiff)>, + tx: mpsc::Sender, + msg: Option, phantom: PhantomData, } @@ -33,7 +34,7 @@ impl CommitmentStateDiffWorker where C: BlockchainEvents, { - pub fn new(client: Arc, tx: mpsc::Sender<(BlockHash, CommitmentStateDiff)>) -> Self { + pub fn new(client: Arc, tx: mpsc::Sender) 
-> Self { let storage_event_stream = client .storage_changes_notification_stream(None, None) .expect("the node storage changes notification stream should be up and running"); @@ -55,7 +56,6 @@ where // state 2: waiting for the channel to be ready, `commitment_state_diff` field is `Some` fn poll_next(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll> { let self_as_mut = self.get_mut(); - if self_as_mut.msg.is_none() { // State 1 match Stream::poll_next(Pin::new(&mut self_as_mut.storage_event_stream), cx) { @@ -101,10 +101,10 @@ where // Channel is full, we wait Poll::Pending => Poll::Pending, - // Channel receiver have been drop, we close. + // Channel receiver has been dropped, we close. // This should not happen tho Poll::Ready(Err(e)) => { - log::error!("CommitmentStateDiff channel reciever have been droped: {e}"); + log::error!("CommitmentStateDiff channel receiver has been dropped: {e}"); Poll::Ready(None) } } @@ -124,7 +124,7 @@ enum BuildCommitmentStateDiffError { fn build_commitment_state_diff( client: Arc, storage_notification: StorageNotification, -) -> Result<(BlockHash, CommitmentStateDiff), BuildCommitmentStateDiffError> +) -> Result where C: ProvideRuntimeApi, C::Api: StarknetRuntimeApi, @@ -138,11 +138,14 @@ where block.header().hash::().into() }; - let mut commitment_state_diff = CommitmentStateDiff { - address_to_class_hash: Default::default(), - address_to_nonce: Default::default(), - storage_updates: Default::default(), - class_hash_to_compiled_class_hash: Default::default(), + let mut accessed_addrs: IndexSet = IndexSet::new(); + let mut commitment_state_diff = ThinStateDiff { + declared_classes: IndexMap::new(), + storage_diffs: IndexMap::new(), + nonces: IndexMap::new(), + deployed_contracts: IndexMap::new(), + deprecated_declared_classes: Vec::new(), + replaced_classes: IndexMap::new(), }; for (_prefix, full_storage_key, change) in storage_notification.changes.iter() { @@ -162,7 +165,8 @@ where 
ContractAddress(PatriciaKey(StarkFelt(full_storage_key.0[32..].try_into().unwrap()))); // `change` is safe to unwrap as `Nonces` storage is `ValueQuery` let nonce = Nonce(StarkFelt(change.unwrap().0.clone().try_into().unwrap())); - commitment_state_diff.address_to_nonce.insert(contract_address, nonce); + commitment_state_diff.nonces.insert(contract_address, nonce); + accessed_addrs.insert(contract_address); } else if prefix == *SN_STORAGE_PREFIX { let contract_address = ContractAddress(PatriciaKey(StarkFelt(full_storage_key.0[32..64].try_into().unwrap()))); @@ -170,7 +174,7 @@ where // `change` is safe to unwrap as `StorageView` storage is `ValueQuery` let value = StarkFelt(change.unwrap().0.clone().try_into().unwrap()); - match commitment_state_diff.storage_updates.get_mut(&contract_address) { + match commitment_state_diff.storage_diffs.get_mut(&contract_address) { Some(contract_storage) => { contract_storage.insert(storage_key, value); } @@ -178,16 +182,28 @@ where let mut contract_storage: IndexMap<_, _, _> = Default::default(); contract_storage.insert(storage_key, value); - commitment_state_diff.storage_updates.insert(contract_address, contract_storage); + commitment_state_diff.storage_diffs.insert(contract_address, contract_storage); } } + accessed_addrs.insert(contract_address); } else if prefix == *SN_CONTRACT_CLASS_HASH_PREFIX { let contract_address = ContractAddress(PatriciaKey(StarkFelt(full_storage_key.0[32..].try_into().unwrap()))); // `change` is safe to unwrap as `ContractClassHashes` storage is `ValueQuery` let class_hash = ClassHash(StarkFelt(change.unwrap().0.clone().try_into().unwrap())); - commitment_state_diff.address_to_class_hash.insert(contract_address, class_hash); + // check if contract already exists + let runtime_api = client.runtime_api(); + let current_block_hash = client.info().best_hash; + + let contract_exists = runtime_api.contract_class_by_class_hash(current_block_hash, class_hash).is_ok(); + + if contract_exists { + 
commitment_state_diff.replaced_classes.insert(contract_address, class_hash); + } else { + commitment_state_diff.deployed_contracts.insert(contract_address, class_hash); + } + accessed_addrs.insert(contract_address); } else if prefix == *SN_COMPILED_CLASS_HASH_PREFIX { let class_hash = ClassHash(StarkFelt(full_storage_key.0[32..].try_into().unwrap())); // In the current state of starknet protocol, a compiled class hash can not be erased, so we should @@ -196,15 +212,9 @@ where let compiled_class_hash = CompiledClassHash(change.map(|data| StarkFelt(data.0.clone().try_into().unwrap())).unwrap_or_default()); - commitment_state_diff.class_hash_to_compiled_class_hash.insert(class_hash, compiled_class_hash); + commitment_state_diff.declared_classes.insert(class_hash, compiled_class_hash); } } - Ok((starknet_block_hash, commitment_state_diff)) -} - -pub async fn log_commitment_state_diff(mut rx: mpsc::Receiver<(BlockHash, CommitmentStateDiff)>) { - while let Some((block_hash, csd)) = rx.next().await { - log::info!("received state diff for block {block_hash}: {csd:?}"); - } + Ok(BlockDAData(starknet_block_hash, commitment_state_diff, accessed_addrs.len())) } diff --git a/crates/client/data-availability/Cargo.toml b/crates/client/data-availability/Cargo.toml index f80f7f0a3f..41e564318c 100644 --- a/crates/client/data-availability/Cargo.toml +++ b/crates/client/data-availability/Cargo.toml @@ -14,8 +14,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] anyhow = { workspace = true } async-trait = { workspace = true } -clap = { workspace = true, features = ["derive"] } +clap = { workspace = true, features = ["derive"], optional = true } futures = "0.3.21" +indexmap = { workspace = true } jsonrpsee = { version = "0.20.0", features = [ "http-client", "ws-client", @@ -23,35 +24,45 @@ jsonrpsee = { version = "0.20.0", features = [ ] } log = "0.4.19" reqwest = { version = "0.11.18", features = ["blocking", "json"] } -serde = { workspace = true } -serde_json = { workspace = 
true } +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } thiserror = { workspace = true } tokio = { version = "1", features = ["full"] } -url = "2.4.0" +url = { workspace = true } uuid = { version = "1.4.0", features = ["v4", "serde"] } # Substrate sc-client-api = { workspace = true } -sp-api = { workspace = true } +sp-api = { workspace = true, features = ["std"] } sp-blockchain = { workspace = true } -sp-core = { workspace = true } -sp-runtime = { workspace = true } +sp-core = { workspace = true, features = ["std"] } +sp-io = { workspace = true, features = ["std"] } +sp-runtime = { workspace = true, features = ["std"] } # Starknet +blockifier = { workspace = true, default-features = true } +mc-commitment-state-diff = { workspace = true, default-features = true } mc-db = { workspace = true, default-features = true } +pallet-starknet-runtime-api = { workspace = true, features = ["std"] } starknet_api = { workspace = true, default-features = true } # Ethereum -ethers = "2.0.7" +ethers = { workspace = true } # Avail subxt dependency avail-subxt = { git = "https://github.com/availproject/avail", version = "0.4.0", tag = "v1.8.0.0" } sp-keyring = { workspace = true } -subxt = "0.29" +subxt = { workspace = true } # Celestia celestia-rpc = { git = "https://github.com/eigerco/celestia-node-rs", rev = "bd6394b66b11065c543ab3f19fd66000a72b6236" } celestia-types = { git = "https://github.com/eigerco/celestia-node-rs", rev = "bd6394b66b11065c543ab3f19fd66000a72b6236" } # Madara +mp-digest-log = { workspace = true, default-features = true } +mp-hashers = { workspace = true, default-features = true } mp-storage = { workspace = true, default-features = true } + +[features] +default = [] +clap = ["dep:clap"] diff --git a/crates/client/data-availability/src/lib.rs b/crates/client/data-availability/src/lib.rs index 33c92be893..ce77caf449 100644 --- a/crates/client/data-availability/src/lib.rs +++ 
b/crates/client/data-availability/src/lib.rs @@ -4,25 +4,29 @@ pub mod ethereum; mod sharp; pub mod utils; -use std::collections::HashMap; use std::marker::PhantomData; use std::sync::Arc; use anyhow::Result; use async_trait::async_trait; use ethers::types::{I256, U256}; +use futures::channel::mpsc; use futures::StreamExt; -use mp_storage::{SN_NONCE_PREFIX, SN_STORAGE_PREFIX}; +use mc_commitment_state_diff::BlockDAData; +use mp_hashers::HasherT; +use pallet_starknet_runtime_api::StarknetRuntimeApi; use sc_client_api::client::BlockchainEvents; use serde::Deserialize; use sp_api::ProvideRuntimeApi; -use sp_runtime::traits::Block as BlockT; +use sp_blockchain::HeaderBackend; +use sp_runtime::traits::{Block as BlockT, Header}; +use utils::state_diff_to_calldata; -pub type StorageWrites<'a> = Vec<(&'a [u8], &'a [u8])>; +pub struct DataAvailabilityWorker(PhantomData<(B, C, H)>); +pub struct DataAvailabilityWorkerProving(PhantomData); -pub struct DataAvailabilityWorker(PhantomData<(B, C)>); - -#[derive(Debug, Copy, Clone, PartialEq, clap::ValueEnum)] +#[derive(Debug, Copy, Clone, PartialEq)] +#[cfg_attr(feature = "clap", derive(clap::ValueEnum))] pub enum DaLayer { Celestia, Ethereum, @@ -31,7 +35,7 @@ pub enum DaLayer { /// Data availability modes in which Madara can be initialized. /// -/// Default only mode currently implemented is Validium. +/// Default only mode currently implemented is Sovereign. #[derive(Debug, Copy, Clone, PartialEq, Deserialize, Default)] pub enum DaMode { /// Full Validity Rollup @@ -51,15 +55,15 @@ pub enum DaMode { /// will be necessary. #[serde(rename = "volition")] Volition, - /// Sovereign Validium + /// Sovereign Rollup /// - /// Validium state diffs are untethered to an accompanying validity proof therefore + /// Sovereign state diffs are untethered to an accompanying validity proof therefore /// they can simply be published to any da solution available. 
As this solution does not /// require an execution trace to be proved we can simply parse the state diff from the /// storage changes of the block. - #[serde(rename = "validium")] + #[serde(rename = "sovereign")] #[default] - Validium, + Sovereign, } #[async_trait] @@ -69,55 +73,22 @@ pub trait DaClient: Send + Sync { async fn publish_state_diff(&self, state_diff: Vec) -> Result<()>; } -impl DataAvailabilityWorker +impl DataAvailabilityWorkerProving where B: BlockT, - C: ProvideRuntimeApi, - C: BlockchainEvents + 'static, { - pub async fn prove_current_block(da_mode: DaMode, client: Arc, madara_backend: Arc>) { - let mut storage_event_st = client - .storage_changes_notification_stream(None, None) - .expect("node has been initialized to prove state change, but can't read from notification stream"); - - while let Some(storage_event) = storage_event_st.next().await { - // Locate and encode the storage change - let mut nonces: HashMap<&[u8], &[u8]> = HashMap::new(); - let mut storage_diffs: HashMap<&[u8], StorageWrites> = HashMap::new(); - - // Locate and encode the storage change - for event in storage_event.changes.iter() { - let mut prefix = event.1.0.as_slice(); - let mut key: &[u8] = &[]; - if prefix.len() > 32 { - let raw_split = prefix.split_at(32); - prefix = raw_split.0; - key = raw_split.1; - } - - if prefix == *SN_NONCE_PREFIX { - if let Some(data) = event.2 { - nonces.insert(key, data.0.as_slice()); - } - } - - if prefix == *SN_STORAGE_PREFIX { - if let Some(data) = event.2 { - // first 32 bytes = contract address, second 32 bytes = storage variable - let write_split = key.split_at(32); - - storage_diffs - .entry(write_split.0) - .and_modify(|v| v.push((write_split.1, data.0.as_slice()))) - .or_insert(vec![(write_split.1, data.0.as_slice())]); - } - } - } - - let state_diff = utils::pre_0_11_0_state_diff(storage_diffs, nonces); + pub async fn prove_current_block( + da_mode: DaMode, + mut state_diffs_rx: mpsc::Receiver, + madara_backend: Arc>, + ) { + while 
let Some(BlockDAData(block_hash, csd, num_addr_accessed)) = state_diffs_rx.next().await { + log::info!("received state diff for block {block_hash}: {csd:?}. {num_addr_accessed} addresses accessed."); - // Store the DA output from the SN OS - if let Err(db_err) = madara_backend.da().store_state_diff(&storage_event.block, state_diff) { + // store the da encoded calldata for the state update worker + if let Err(db_err) = + madara_backend.da().store_state_diff(&block_hash, state_diff_to_calldata(csd, num_addr_accessed)) + { log::error!("db err: {db_err}"); }; @@ -131,9 +102,7 @@ where if let Ok(job_resp) = sharp::submit_pie("TODO") { log::info!("Job Submitted: {}", job_resp.cairo_job_key); // Store the cairo job key - if let Err(db_err) = - madara_backend.da().update_cairo_job(&storage_event.block, job_resp.cairo_job_key) - { + if let Err(db_err) = madara_backend.da().update_cairo_job(&block_hash, job_resp.cairo_job_key) { log::error!("db err: {db_err}"); }; } @@ -146,11 +115,14 @@ where } } -impl DataAvailabilityWorker +impl DataAvailabilityWorker where B: BlockT, C: ProvideRuntimeApi, + C::Api: StarknetRuntimeApi, C: BlockchainEvents + 'static, + C: HeaderBackend, + H: HasherT + Unpin, { pub async fn update_state( da_client: Box, @@ -170,13 +142,19 @@ where } }; + let starknet_block_hash = { + let digest = notification.header.digest(); + let block = mp_digest_log::find_starknet_block(digest).expect("starknet block not found"); + block.header().hash::().into() + }; + match da_client.get_mode() { DaMode::Validity => { // Check the SHARP status of last_proved + 1 // Write the publish state diff of last_proved + 1 log::info!("validity da mode not implemented"); } - DaMode::Validium => match madara_backend.da().state_diff(¬ification.hash) { + DaMode::Sovereign => match madara_backend.da().state_diff(&starknet_block_hash) { Ok(state_diff) => { if let Err(e) = da_client.publish_state_diff(state_diff).await { log::error!("DA PUBLISH ERROR: {}", e); diff --git 
a/crates/client/data-availability/src/utils.rs b/crates/client/data-availability/src/utils.rs index ed0a0e743a..4d4c833d49 100644 --- a/crates/client/data-availability/src/utils.rs +++ b/crates/client/data-availability/src/utils.rs @@ -1,32 +1,84 @@ -use std::collections::HashMap; - use ethers::types::U256; +use starknet_api::api_core::{Nonce, PatriciaKey}; +use starknet_api::hash::StarkFelt; +use starknet_api::state::ThinStateDiff; use url::{ParseError, Url}; -// encode calldata: -// - https://docs.starknet.io/documentation/architecture_and_concepts/Data_Availability/on-chain-data/#pre_v0.11.0_example -pub fn pre_0_11_0_state_diff( - storage_diffs: HashMap<&[u8], crate::StorageWrites>, - nonces: HashMap<&[u8], &[u8]>, -) -> Vec { - let mut state_diff: Vec = Vec::new(); - - state_diff.push(U256::from(storage_diffs.len())); - - for (addr, writes) in storage_diffs { - state_diff.push(U256::from_big_endian(addr)); - state_diff.push(U256::from(writes.len())); - for write in writes { - state_diff.push(U256::from_big_endian(write.0)); - state_diff.push(U256::from_big_endian(write.1)); +const CLASS_FLAG_TRUE: &str = "0x100000000000000000000000000000001"; // 2 ^ 128 + 1 +const NONCE_BASE: &str = "0x10000000000000000"; // 2 ^ 64 + +/// DA calldata encoding: +/// - https://docs.starknet.io/documentation/architecture_and_concepts/Network_Architecture/on-chain-data +pub fn state_diff_to_calldata(mut state_diff: ThinStateDiff, num_addrs_accessed: usize) -> Vec { + let mut calldata: Vec = Vec::new(); + + calldata.push(U256::from(num_addrs_accessed)); + + // Loop over storage diffs + for (addr, writes) in state_diff.storage_diffs { + calldata.push(U256::from_big_endian(&addr.0.key().0)); + + let class_flag = state_diff.deployed_contracts.get(&addr).or_else(|| state_diff.replaced_classes.get(&addr)); + + let nonce = state_diff.nonces.remove(&addr); + calldata.push(da_word(class_flag.is_some(), nonce, writes.len() as u64)); + + if let Some(class_hash) = class_flag { + 
calldata.push(U256::from_big_endian(class_hash.0.bytes())); + } + + for (key, val) in &writes { + calldata.push(U256::from_big_endian(key.0.key().bytes())); + calldata.push(U256::from_big_endian(val.bytes())); } } - for (addr, nonce) in nonces { - state_diff.push(U256::from_big_endian(addr)); - state_diff.push(U256::from_big_endian(nonce)); + // Handle nonces + for (addr, nonce) in state_diff.nonces { + calldata.push(U256::from_big_endian(&addr.0.key().0)); + + let class_flag = state_diff.deployed_contracts.get(&addr).or_else(|| state_diff.replaced_classes.get(&addr)); + + calldata.push(da_word(class_flag.is_some(), Some(nonce), 0_u64)); + if let Some(class_hash) = class_flag { + calldata.push(U256::from_big_endian(class_hash.0.bytes())); + } + } + + // Handle deployed contracts + for (addr, class_hash) in state_diff.deployed_contracts { + calldata.push(U256::from_big_endian(&addr.0.key().0)); + + calldata.push(da_word(true, None, 0_u64)); + calldata.push(U256::from_big_endian(class_hash.0.bytes())); + } + + // Handle replaced classes + calldata.push(U256::from(state_diff.declared_classes.len())); + + for (class_hash, compiled_class_hash) in &state_diff.declared_classes { + calldata.push(U256::from_big_endian(class_hash.0.bytes())); + calldata.push(U256::from_big_endian(compiled_class_hash.0.bytes())); } - state_diff + + calldata +} + +/// DA word encoding: +/// |---padding---|---class flag---|---new nonce---|---num changes---| +/// 127 bits 1 bit 64 bits 64 bits +pub fn da_word(class_flag: bool, nonce_change: Option, num_changes: u64) -> U256 { + let mut word = U256::from(0); + + if class_flag { + word += U256::from_str_radix(CLASS_FLAG_TRUE, 16).unwrap(); + } + if let Some(new_nonce) = nonce_change { + word += U256::from_big_endian(new_nonce.0.bytes()) + U256::from_str_radix(NONCE_BASE, 16).unwrap(); + } + word += U256::from(num_changes); + + word } pub fn get_bytes_from_state_diff(state_diff: &[U256]) -> Vec { @@ -53,3 +105,30 @@ pub fn 
is_valid_ws_endpoint(endpoint: &str) -> bool { pub fn is_valid_http_endpoint(endpoint: &str) -> bool { if let Ok(url) = get_valid_url(endpoint) { matches!(url.scheme(), "http" | "https") } else { false } } + +pub fn safe_split(key: &[u8]) -> ([u8; 16], Option>) { + let length = key.len(); + let (mut child, mut rest) = ([0_u8; 16], None); + if length > 16 && key.len() <= 32 { + child[..(length - 16)].copy_from_slice(&key[16..]); + } else if length > 32 { + child.copy_from_slice(&key[16..32]); + rest = Some(Vec::from(&key[32..])) + } + + (child, rest) +} + +pub fn bytes_to_felt(raw: &[u8]) -> StarkFelt { + let mut buf = [0_u8; 32]; + if raw.len() < 32 { + buf[32 - raw.len()..].copy_from_slice(raw); + } else { + buf.copy_from_slice(&raw[..32]); + } + StarkFelt::new(buf).unwrap() +} + +pub fn bytes_to_key(raw: &[u8]) -> PatriciaKey { + PatriciaKey(bytes_to_felt(raw)) +} diff --git a/crates/client/db/Cargo.toml b/crates/client/db/Cargo.toml index 29568e2748..55dd4d65d9 100644 --- a/crates/client/db/Cargo.toml +++ b/crates/client/db/Cargo.toml @@ -16,7 +16,7 @@ repository = "https://github.com/keep-starknet-strange/madara" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ethers = "2.0.10" +ethers = { workspace = true } kvdb-rocksdb = { version = "0.19.0", optional = true } log = { workspace = true, default-features = true } parity-db = { version = "0.4.12", optional = true } @@ -27,6 +27,7 @@ scale-codec = { workspace = true, default-features = true, features = [ sp-core = { workspace = true, default-features = true } sp-database = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +starknet_api = { workspace = true, default-features = true } uuid = "1.4.1" [features] diff --git a/crates/client/db/src/da_db.rs b/crates/client/db/src/da_db.rs index 3bb68046f9..132fd098a1 100644 --- a/crates/client/db/src/da_db.rs +++ b/crates/client/db/src/da_db.rs @@ -6,6 +6,9 @@ use ethers::types::U256; use 
scale_codec::{Decode, Encode}; use sp_database::Database; use sp_runtime::traits::Block as BlockT; +// Starknet +use starknet_api::block::BlockHash; +use starknet_api::hash::StarkFelt; use uuid::Uuid; use crate::DbHash; @@ -18,51 +21,54 @@ pub struct DaDb { // TODO: purge old cairo job keys impl DaDb { - pub fn state_diff(&self, block_hash: &B::Hash) -> Result, String> { - match self.db.get(crate::columns::DA, &block_hash.encode()) { + pub fn state_diff(&self, block_hash: &BlockHash) -> Result, String> { + match self.db.get(crate::columns::DA, block_hash.0.bytes()) { Some(raw) => Ok(Vec::::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?), - None => Ok(Vec::new()), + None => Err(String::from("can't write state diff")), } } - pub fn store_state_diff(&self, block_hash: &B::Hash, diffs: Vec) -> Result<(), String> { + pub fn store_state_diff(&self, block_hash: &BlockHash, diff: Vec) -> Result<(), String> { let mut transaction = sp_database::Transaction::new(); - transaction.set(crate::columns::DA, &block_hash.encode(), &diffs.encode()); + transaction.set(crate::columns::DA, block_hash.0.bytes(), &diff.encode()); self.db.commit(transaction).map_err(|e| format!("{:?}", e))?; Ok(()) } - pub fn cairo_job(&self, block_hash: &B::Hash) -> Result { - match self.db.get(crate::columns::DA, &block_hash.encode()) { + pub fn cairo_job(&self, block_hash: &BlockHash) -> Result { + match self.db.get(crate::columns::DA, block_hash.0.bytes()) { Some(raw) => Ok(Uuid::from_slice(&raw[..]).map_err(|e| format!("{:?}", e))?), None => Err(String::from("can't locate cairo job")), } } - pub fn update_cairo_job(&self, block_hash: &B::Hash, job_id: Uuid) -> Result<(), String> { + pub fn update_cairo_job(&self, block_hash: &BlockHash, job_id: Uuid) -> Result<(), String> { let mut transaction = sp_database::Transaction::new(); - transaction.set(crate::columns::DA, &block_hash.encode(), &job_id.into_bytes()); + transaction.set(crate::columns::DA, block_hash.0.bytes(), &job_id.into_bytes()); 
self.db.commit(transaction).map_err(|e| format!("{:?}", e))?; Ok(()) } - pub fn last_proved_block(&self) -> Result { + pub fn last_proved_block(&self) -> Result { match self.db.get(crate::columns::DA, crate::static_keys::LAST_PROVED_BLOCK) { - Some(raw) => Ok(B::Hash::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?), + Some(raw) => { + let felt = StarkFelt::deserialize(&raw[..]).ok_or("Failed to deserialize block hash")?; + Ok(BlockHash(felt)) + } None => Err(String::from("can't locate last proved block")), } } - pub fn update_last_proved_block(&self, block_hash: &B::Hash) -> Result<(), String> { + pub fn update_last_proved_block(&self, block_hash: &BlockHash) -> Result<(), String> { let mut transaction = sp_database::Transaction::new(); - transaction.set(crate::columns::DA, crate::static_keys::LAST_PROVED_BLOCK, &block_hash.encode()); + transaction.set(crate::columns::DA, crate::static_keys::LAST_PROVED_BLOCK, block_hash.0.bytes()); self.db.commit(transaction).map_err(|e| format!("{:?}", e))?; diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index 8fde9f6f7f..5cd43cf9d0 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -80,7 +80,7 @@ blockifier = { workspace = true } hex = { workspace = true } madara-runtime = { workspace = true } mc-commitment-state-diff = { workspace = true } -mc-data-availability = { workspace = true } +mc-data-availability = { workspace = true, features = ["clap"] } mc-db = { workspace = true } mc-mapping-sync = { workspace = true } mc-rpc = { workspace = true } diff --git a/crates/node/src/cli.rs b/crates/node/src/cli.rs index 6bda3c781b..2c6674258e 100644 --- a/crates/node/src/cli.rs +++ b/crates/node/src/cli.rs @@ -53,5 +53,5 @@ pub enum Subcommand { /// Try some command against runtime state. Note: `try-runtime` feature must be enabled. 
#[cfg(not(feature = "try-runtime"))] - TryRuntime, + TryRuntimeDisabled, } diff --git a/crates/node/src/command.rs b/crates/node/src/command.rs index 900189844c..f1984ecc1c 100644 --- a/crates/node/src/command.rs +++ b/crates/node/src/command.rs @@ -170,8 +170,8 @@ pub fn run() -> sc_cli::Result<()> { }) } #[cfg(not(feature = "try-runtime"))] - Some(Subcommand::TryRuntime) => Err("TryRuntime wasn't enabled when building the node. You can enable it \ - with `--features try-runtime`." + Some(Subcommand::TryRuntimeDisabled) => Err("TryRuntime wasn't enabled when building the node. You can \ + enable it with `--features try-runtime`." .into()), Some(Subcommand::ChainInfo(ref cmd)) => { let runner = cli.create_runner(cmd)?; diff --git a/crates/node/src/commands/run.rs b/crates/node/src/commands/run.rs index f4549ceba7..4074912d9d 100644 --- a/crates/node/src/commands/run.rs +++ b/crates/node/src/commands/run.rs @@ -42,9 +42,12 @@ pub struct ExtendedRunCmd { pub sealing: Option, /// Choose a supported DA Layer - #[clap(long)] + #[clap(long, requires = "da_conf")] pub da_layer: Option, + #[clap(long, requires = "da_layer")] + pub da_conf: Option, + /// When enabled, more information about the blocks and their transaction is cached and stored /// in the database. 
/// @@ -69,17 +72,16 @@ pub fn run_node(mut cli: Cli) -> Result<()> { override_dev_environment(&mut cli.run); } let runner = cli.create_runner(&cli.run.base)?; - let data_path = &runner.config().data_path; let da_config: Option<(DaLayer, PathBuf)> = match cli.run.da_layer { Some(da_layer) => { - let da_path = data_path.join("da-config.json"); - if !da_path.exists() { - log::info!("{} does not contain DA config", da_path.display()); + let da_conf = PathBuf::from(cli.run.da_conf.expect("clap requires da_conf when da_layer is present")); + if !da_conf.exists() { + log::info!("{} does not contain DA config", da_conf.display()); return Err("DA config not available".into()); } - Some((da_layer, da_path)) + Some((da_layer, da_conf)) } None => { log::info!("Madara initialized w/o DA layer"); diff --git a/crates/node/src/service.rs b/crates/node/src/service.rs index d1aa02bbdc..0b045145a1 100644 --- a/crates/node/src/service.rs +++ b/crates/node/src/service.rs @@ -11,14 +11,14 @@ use futures::future::BoxFuture; use futures::prelude::*; use madara_runtime::opaque::Block; use madara_runtime::{self, Hash, RuntimeApi, SealingMode, StarknetHasher}; -use mc_commitment_state_diff::{log_commitment_state_diff, CommitmentStateDiffWorker}; +use mc_commitment_state_diff::CommitmentStateDiffWorker; use mc_data_availability::avail::config::AvailConfig; use mc_data_availability::avail::AvailClient; use mc_data_availability::celestia::config::CelestiaConfig; use mc_data_availability::celestia::CelestiaClient; use mc_data_availability::ethereum::config::EthereumConfig; use mc_data_availability::ethereum::EthereumClient; -use mc_data_availability::{DaClient, DaLayer, DataAvailabilityWorker}; +use mc_data_availability::{DaClient, DaLayer, DataAvailabilityWorker, DataAvailabilityWorkerProving}; use mc_genesis_data_provider::OnDiskGenesisConfig; use mc_mapping_sync::MappingSyncWorker; use mc_storage::overrides_handle; @@ -413,21 +413,15 @@ pub fn new_full( let (commitment_state_diff_tx, 
commitment_state_diff_rx) = mpsc::channel(5); - task_manager.spawn_essential_handle().spawn( - "commitment-state-diff", - Some("madara"), - CommitmentStateDiffWorker::<_, _, StarknetHasher>::new(client.clone(), commitment_state_diff_tx) - .for_each(|()| future::ready(())), - ); - - task_manager.spawn_essential_handle().spawn( - "commitment-state-logger", - Some("madara"), - log_commitment_state_diff(commitment_state_diff_rx), - ); - // initialize data availability worker if let Some((da_layer, da_path)) = da_layer { + task_manager.spawn_essential_handle().spawn( + "commitment-state-diff", + Some("madara"), + CommitmentStateDiffWorker::<_, _, StarknetHasher>::new(client.clone(), commitment_state_diff_tx) + .for_each(|()| future::ready(())), + ); + let da_client: Box = match da_layer { DaLayer::Celestia => { let celestia_conf = CelestiaConfig::try_from(&da_path)?; @@ -446,12 +440,17 @@ pub fn new_full( task_manager.spawn_essential_handle().spawn( "da-worker-prove", Some("madara"), - DataAvailabilityWorker::prove_current_block(da_client.get_mode(), client.clone(), madara_backend.clone()), + DataAvailabilityWorkerProving::prove_current_block( + da_client.get_mode(), + commitment_state_diff_rx, + madara_backend.clone(), + ), ); + task_manager.spawn_essential_handle().spawn( "da-worker-update", Some("madara"), - DataAvailabilityWorker::update_state(da_client, client.clone(), madara_backend), + DataAvailabilityWorker::<_, _, StarknetHasher>::update_state(da_client, client.clone(), madara_backend), ); }; diff --git a/crates/primitives/storage/src/lib.rs b/crates/primitives/storage/src/lib.rs index 4ba17e4a31..c82e478f16 100644 --- a/crates/primitives/storage/src/lib.rs +++ b/crates/primitives/storage/src/lib.rs @@ -36,8 +36,6 @@ lazy_static! 
{ pub static ref SN_NONCE_PREFIX: Vec = [twox_128(PALLET_STARKNET), twox_128(STARKNET_NONCE)].concat(); pub static ref SN_CONTRACT_CLASS_HASH_PREFIX: Vec = [twox_128(PALLET_STARKNET), twox_128(STARKNET_CONTRACT_CLASS_HASH)].concat(); - pub static ref SN_CONTRACT_CLASS_PREFIX: Vec = - [twox_128(PALLET_STARKNET), twox_128(STARKNET_CONTRACT_CLASS)].concat(); pub static ref SN_STORAGE_PREFIX: Vec = [twox_128(PALLET_STARKNET), twox_128(STARKNET_STORAGE)].concat(); pub static ref SN_COMPILED_CLASS_HASH_PREFIX: Vec = [twox_128(PALLET_STARKNET), twox_128(STARKNET_COMPILED_CLASS_HASH)].concat(); diff --git a/da-test/Cargo.toml b/da-test/Cargo.toml new file mode 100644 index 0000000000..12750f4ecc --- /dev/null +++ b/da-test/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "da-test" +version = "0.1.0" +edition = "2021" + + +[dependencies] + +anyhow = { workspace = true, default-features = true } +assert_matches = { workspace = true, default-features = true } +async-lock = { workspace = true, default-features = true } +clap = { workspace = true, features = ["std", "derive"] } +ethers = { workspace = true, default-features = true } +flate2 = { workspace = true, default-features = true } +lazy_static = { workspace = true, default-features = true } +reqwest = { workspace = true, default-features = true } +rstest = { workspace = true, default-features = true } +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +starknet-ff = { workspace = true, default-features = true } +starknet-providers = { workspace = true, default-features = true } +starknet-rpc-test = { path = "../starknet-rpc-test" } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["rt", "macros", "parking_lot"] } +url = { workspace = true } + +# madara +mc-data-availability = { workspace = true, features = ["clap"] } + +[[test]] +name = "da_state_diffs" +path = "state_diffs.rs" diff --git a/da-test/src/constants.rs 
b/da-test/src/constants.rs new file mode 100644 index 0000000000..dce80b7368 --- /dev/null +++ b/da-test/src/constants.rs @@ -0,0 +1,3 @@ +pub const ETHEREUM_DA_CONFIG: &str = include_str!("../../examples/da-confs/ethereum.json"); +pub const CELESTIA_DA_CONFIG: &str = include_str!("../../examples/da-confs/celestia.json"); +pub const AVAIL_DA_CONFIG: &str = include_str!("../../examples/da-confs/avail.json"); diff --git a/da-test/src/fixtures.rs b/da-test/src/fixtures.rs new file mode 100644 index 0000000000..a7351b8af3 --- /dev/null +++ b/da-test/src/fixtures.rs @@ -0,0 +1,13 @@ +use clap::ValueEnum; +use mc_data_availability::{DaClient, DaLayer}; +use rstest::fixture; + +use crate::utils::get_da_client; + +#[fixture] +pub fn da_client() -> Box<dyn DaClient> { + let da_layer_str = std::env::var("DA_LAYER").expect("DA_LAYER env var not set"); + let da_layer = DaLayer::from_str(&da_layer_str, true).expect("Invalid value for DA_LAYER"); + + get_da_client(da_layer) +} diff --git a/da-test/src/lib.rs b/da-test/src/lib.rs new file mode 100644 index 0000000000..cf1ded826f --- /dev/null +++ b/da-test/src/lib.rs @@ -0,0 +1,10 @@ +#![feature(assert_matches)] + +/// Utilities for connecting to DA layers.
+pub mod utils; + +/// Fixtures +pub mod fixtures; + +/// Constants +pub mod constants; diff --git a/da-test/src/utils.rs b/da-test/src/utils.rs new file mode 100644 index 0000000000..de3ee97a78 --- /dev/null +++ b/da-test/src/utils.rs @@ -0,0 +1,40 @@ +use std::path::PathBuf; + +use mc_data_availability::avail::config::AvailConfig; +use mc_data_availability::avail::AvailClient; +use mc_data_availability::celestia::config::CelestiaConfig; +use mc_data_availability::celestia::CelestiaClient; +use mc_data_availability::ethereum::config::EthereumConfig; +use mc_data_availability::ethereum::EthereumClient; +use mc_data_availability::{DaClient, DaLayer}; + +use crate::constants::{AVAIL_DA_CONFIG, CELESTIA_DA_CONFIG, ETHEREUM_DA_CONFIG}; + +pub fn get_da_client(da_layer: DaLayer) -> Box<dyn DaClient> { + let da_path = get_da_path(da_layer); + + let da_client: Box<dyn DaClient> = match da_layer { + DaLayer::Celestia => { + let celestia_conf = CelestiaConfig::try_from(&da_path).expect("Failed to read Celestia config"); + Box::new(CelestiaClient::try_from(celestia_conf).expect("Failed to create Celestia client")) + } + DaLayer::Ethereum => { + let ethereum_conf = EthereumConfig::try_from(&da_path).expect("Failed to read Ethereum config"); + Box::new(EthereumClient::try_from(ethereum_conf).expect("Failed to create Ethereum client")) + } + DaLayer::Avail => { + let avail_conf = AvailConfig::try_from(&da_path).expect("Failed to read Avail config"); + Box::new(AvailClient::try_from(avail_conf).expect("Failed to create Avail client")) + } + }; + + da_client +} + +pub(crate) fn get_da_path(da_layer: DaLayer) -> PathBuf { + match da_layer { + DaLayer::Celestia => CELESTIA_DA_CONFIG.into(), + DaLayer::Ethereum => ETHEREUM_DA_CONFIG.into(), + DaLayer::Avail => AVAIL_DA_CONFIG.into(), + } +} diff --git a/da-test/state_diffs.rs b/da-test/state_diffs.rs new file mode 100644 index 0000000000..889d5668da --- /dev/null +++ b/da-test/state_diffs.rs @@ -0,0 +1,51 @@ +extern crate da_test; + +use std::vec; + +use
da_test::fixtures::da_client; +use ethers::types::I256; +use mc_data_availability::DaClient; +use rstest::rstest; +use starknet_ff::FieldElement; +use starknet_providers::Provider; +use starknet_rpc_test::constants::{ARGENT_CONTRACT_ADDRESS, SIGNER_PRIVATE}; +use starknet_rpc_test::fixtures::{madara, ThreadSafeMadaraClient}; +use starknet_rpc_test::utils::{build_single_owner_account, AccountActions}; +use starknet_rpc_test::Transaction; + +#[rstest] +#[tokio::test] +async fn publish_to_da_layer( + madara: &ThreadSafeMadaraClient, + da_client: Box, +) -> Result<(), anyhow::Error> { + let rpc = madara.get_starknet_client().await; + + let (txs, block_number) = { + let mut madara_write_lock = madara.write().await; + // using incorrect private key to generate the wrong signature + let account = build_single_owner_account(&rpc, SIGNER_PRIVATE, ARGENT_CONTRACT_ADDRESS, true); + + let txs = madara_write_lock + .create_block_with_txs(vec![Transaction::Execution(account.transfer_tokens( + FieldElement::from_hex_be("0x123").unwrap(), + FieldElement::ONE, + None, + ))]) + .await?; + let block_number = rpc.block_number().await?; + + (txs, block_number) + }; + + assert_eq!(txs.len(), 1); + + let _tx = &txs[0]; + + // Check the state diff that has been published to the DA layer + let published_block_number = da_client.last_published_state().await?; + + assert_eq!(published_block_number, I256::from(block_number)); + + Ok(()) +} diff --git a/docs/da-contribution.md b/docs/da-contribution.md new file mode 100644 index 0000000000..784ff2c7f2 --- /dev/null +++ b/docs/da-contribution.md @@ -0,0 +1,30 @@ +# Data Availability Testing + +To contribute to the DA related crates, you will need to run the tests locally. + +## Run tests + +First you will need to run locally the DA devnet node. + +```bash +bash scripts/da_devnet.sh +``` + +Once it's up and running, you can run madara with the same DA layer. 
+ +```bash +./target/release/madara --dev --da-layer <da_layer> --da-conf examples/da-confs/<da_layer>.json +``` + +Now you can run the tests inside the `da-test` crate. + +```bash +cd da-test +DA_LAYER=<da_layer> cargo test +``` + +Finally make sure to stop the DA devnet node. + +```bash +bash scripts/stop_da_devnet.sh <da_layer> +``` diff --git a/examples/da-confs/avail.json b/examples/da-confs/avail.json new file mode 100644 index 0000000000..462e83bdef --- /dev/null +++ b/examples/da-confs/avail.json @@ -0,0 +1,6 @@ +{ + "ws_provider": "ws://127.0.0.1:9945", + "app_id": 0, + "validate_codegen": true, + "seed": "//Alice" +} diff --git a/examples/da-confs/celestia.json b/examples/da-confs/celestia.json new file mode 100644 index 0000000000..30cb461fa0 --- /dev/null +++ b/examples/da-confs/celestia.json @@ -0,0 +1,6 @@ +{ + "http_provider": "http://127.0.0.1:26658", + "ws_provider": "ws://127.0.0.1:26658", + "nid": "Madara", + "auth_token": "" +} diff --git a/examples/da-confs/ethereum.json b/examples/da-confs/ethereum.json new file mode 100644 index 0000000000..e4c309ed43 --- /dev/null +++ b/examples/da-confs/ethereum.json @@ -0,0 +1,6 @@ +{ + "http_provider": "http://127.0.0.1:8545", + "core_contracts": "0x5FbDB2315678afecb367f032d93F642f64180aa3", + "sequencer_key": "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80", + "chain_id": 31337 +} diff --git a/madara-docs b/madara-docs index 9b48c061b0..27654cbfbe 160000 --- a/madara-docs +++ b/madara-docs @@ -1 +1 @@ -Subproject commit 9b48c061b0fdeb710d95ac524e9f4f27c75f2847 +Subproject commit 27654cbfbe4519d101fdfbdb51bb0b6a454234a1 diff --git a/madara-tsukuyomi b/madara-tsukuyomi index bef32061fa..5acb3cafbe 160000 --- a/madara-tsukuyomi +++ b/madara-tsukuyomi @@ -1 +1 @@ -Subproject commit bef32061fa7c6ea60b39b8a9345b0d18ed47ab41 +Subproject commit 5acb3cafbe4f7422b31c506371312419d0e24c09 diff --git a/scripts/da_devnet.sh b/scripts/da_devnet.sh index ca2f833ddc..12424e962b 100755 --- a/scripts/da_devnet.sh +++ b/scripts/da_devnet.sh @@
-12,16 +12,12 @@ if [ ! -f "$MADARA_PATH/da-config.json" ]; then echo "{}" > $MADARA_PATH/da-config.json fi -cargo build --release - if [ "$DA_LAYER" = "ethereum" ]; then echo "Ethereum DA Test:" - # TODO: do we want to add zaun as submodule - git clone --recurse-submodules https://github.com/keep-starknet-strange/zaun.git target/zaun 2> /dev/null - ./target/zaun/scripts/sn-base-dev.sh target target/zaun 2> /dev/null + ./zaun/scripts/sn-base-dev.sh target zaun 2> /dev/null echo -e "\t anvil logs -> target/anvil.log" - echo -e "\t to kill anvil -> ./target/zaun/scripts/sn-base-kill.sh target" + echo -e "\t to kill anvil -> ./zaun/scripts/sn-base-kill.sh target" elif [ "$DA_LAYER" = "celestia" ]; then if ! command -v celestia > /dev/null then @@ -71,6 +67,3 @@ elif [ "$DA_LAYER" = "avail" ]; then sleep 5 fi - -echo "Launching Madara with DA $DA_LAYER" -./target/release/madara --dev --da-layer=$DA_LAYER diff --git a/scripts/stop_da_devnet.sh b/scripts/stop_da_devnet.sh new file mode 100755 index 0000000000..934ac275dc --- /dev/null +++ b/scripts/stop_da_devnet.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# [ethereum, celestia, avail] +DA_LAYER=$1 + +if [ "$DA_LAYER" = "ethereum" ]; then + echo "Killing Anvil:" + ./zaun/scripts/sn-base-kill.sh target +elif [ "$DA_LAYER" = "celestia" ]; then + # TODO: Kill Celestia + echo "Killing Celestia:" +elif [ "$DA_LAYER" = "avail" ]; then + # TODO: Kill Avail + echo "Killing Avail:" +fi diff --git a/zaun b/zaun new file mode 160000 index 0000000000..2b431a94c1 --- /dev/null +++ b/zaun @@ -0,0 +1 @@ +Subproject commit 2b431a94c194ad2efe62051811224c15e9196556