This repository has been archived by the owner on Aug 2, 2024. It is now read-only.

✨ v0.11 calldata parsing (#1287)
EvolveArt authored Dec 26, 2023
1 parent 2079af1 commit e3b1927
Showing 34 changed files with 564 additions and 178 deletions.
53 changes: 53 additions & 0 deletions .github/workflows/da-tests.yml
@@ -0,0 +1,53 @@
---
name: Task - DA Tests

on:
  workflow_dispatch:
  workflow_call:

jobs:
  rpc-tests:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        da_layer:
          - ethereum
          - celestia
          - avail
    env:
      BINARY_PATH: ../target/release/madara
    steps:
      - uses: actions/checkout@v3
      - uses: Swatinem/rust-cache@v2
        with:
          shared-key: "cache"
          save-if: false
      - uses: actions/cache@v3
        with:
          path: target/release/madara
          key:
            ${{ runner.os }}-cargo-${{ hashFiles('Cargo.lock') }}-${{
            github.run_id }}
          fail-on-cache-miss: true
      - name: Setup build deps
        run: |
          sudo apt-get update
          sudo apt-get install -y clang llvm libudev-dev protobuf-compiler
      - name: Setup dev chain
        run: |
          ./target/release/madara setup --chain=dev --from-local=configs
      - name: Run DA Layer
        run: |
          bash ./scripts/da_devnet.sh ${{ matrix.da_layer }}
      - name: Run DA tests
        run: |-
          ./target/release/madara --dev --da-layer ${{ matrix.da_layer }} --da-conf examples/da-confs/${{ matrix.da_layer }}.json &
          MADARA_RUN_PID=$!
          while ! echo exit | nc localhost 9944; do sleep 1; done
          cd da-test
          DA_LAYER=${{ matrix.da_layer }} cargo test
          kill $MADARA_RUN_PID
      - name: Stop DA Layer
        run: |
          bash ./scripts/stop_da_devnet.sh ${{ matrix.da_layer }}
3 changes: 3 additions & 0 deletions .gitmodules
@@ -7,3 +7,6 @@
[submodule "madara-tsukuyomi"]
path = madara-tsukuyomi
url = https://github.com/keep-starknet-strange/madara-tsukuyomi
[submodule "zaun"]
path = zaun
url = https://github.com/keep-starknet-strange/zaun
3 changes: 2 additions & 1 deletion .prettierignore
@@ -1,6 +1,7 @@
target
cairo-contracts/build
madara-app
madara-tsukuyomi
madara-dev-explorer
madara-docs
madara-infra
zaun
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -10,6 +10,8 @@
- fix: Change serialization of bitvec to &[u8] in merkle tree to avoid memory
uninitialized
- chore: change SCARB config version for foundry CI
- feat(da): update da calldata encoding to v0.11.0 spec, da conf examples, da
conf flag, da-tests in CI

## v0.6.0

32 changes: 32 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

7 changes: 6 additions & 1 deletion Cargo.toml
@@ -25,8 +25,9 @@ members = [
"crates/client/storage",
"crates/client/commitment-state-diff",
"starknet-rpc-test",
"da-test",
]
# All previous except for `starknet-rpc-test`
# All previous except for `starknet-rpc-test` and `da-test`
# We don't want `cargo test` to trigger its tests
default-members = [
"crates/node",
@@ -246,6 +247,10 @@ url = "2.4.1"
hashbrown = "0.14.2"
tokio = "1.34.0"
openssl = { version = "0.10", features = ["vendored"] }
ethers = "2.0.7"
subxt = "0.29"
assert_matches = "1.5.0"
async-lock = "3.1.0"

[patch."https://github.com/w3f/ring-vrf"]
bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf?rev=3ddc20", version = "0.0.4", rev = "3ddc20" }
2 changes: 1 addition & 1 deletion crates/client/commitment-state-diff/Cargo.toml
@@ -16,7 +16,7 @@ sp-runtime = { workspace = true, default-features = true }
mp-digest-log = { workspace = true, default-features = true }
mp-hashers = { workspace = true, default-features = true }
mp-storage = { workspace = true, default-features = true }
pallet-starknet = { workspace = true }
pallet-starknet = { workspace = true, default-features = true }
pallet-starknet-runtime-api = { workspace = true, default-features = true }

# Starknet
66 changes: 38 additions & 28 deletions crates/client/commitment-state-diff/src/lib.rs
@@ -3,10 +3,9 @@ use std::pin::Pin;
use std::sync::Arc;
use std::task::Poll;

use blockifier::state::cached_state::CommitmentStateDiff;
use futures::channel::mpsc;
use futures::{Stream, StreamExt};
use indexmap::IndexMap;
use futures::Stream;
use indexmap::{IndexMap, IndexSet};
use mp_hashers::HasherT;
use mp_storage::{SN_COMPILED_CLASS_HASH_PREFIX, SN_CONTRACT_CLASS_HASH_PREFIX, SN_NONCE_PREFIX, SN_STORAGE_PREFIX};
use pallet_starknet_runtime_api::StarknetRuntimeApi;
@@ -18,22 +17,24 @@ use sp_runtime::traits::{Block as BlockT, Header};
use starknet_api::api_core::{ClassHash, CompiledClassHash, ContractAddress, Nonce, PatriciaKey};
use starknet_api::block::BlockHash;
use starknet_api::hash::StarkFelt;
use starknet_api::state::StorageKey as StarknetStorageKey;
use starknet_api::state::{StorageKey as StarknetStorageKey, ThinStateDiff};
use thiserror::Error;

pub struct BlockDAData(pub BlockHash, pub ThinStateDiff, pub usize);

pub struct CommitmentStateDiffWorker<B: BlockT, C, H> {
client: Arc<C>,
storage_event_stream: StorageEventStream<B::Hash>,
tx: mpsc::Sender<(BlockHash, CommitmentStateDiff)>,
msg: Option<(BlockHash, CommitmentStateDiff)>,
tx: mpsc::Sender<BlockDAData>,
msg: Option<BlockDAData>,
phantom: PhantomData<H>,
}

impl<B: BlockT, C, H> CommitmentStateDiffWorker<B, C, H>
where
C: BlockchainEvents<B>,
{
pub fn new(client: Arc<C>, tx: mpsc::Sender<(BlockHash, CommitmentStateDiff)>) -> Self {
pub fn new(client: Arc<C>, tx: mpsc::Sender<BlockDAData>) -> Self {
let storage_event_stream = client
.storage_changes_notification_stream(None, None)
.expect("the node storage changes notification stream should be up and running");
@@ -55,7 +56,6 @@ where
// state 2: waiting for the channel to be ready, `commitment_state_diff` field is `Some`
fn poll_next(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Option<Self::Item>> {
let self_as_mut = self.get_mut();

if self_as_mut.msg.is_none() {
// State 1
match Stream::poll_next(Pin::new(&mut self_as_mut.storage_event_stream), cx) {
@@ -101,10 +101,10 @@ where
// Channel is full, we wait
Poll::Pending => Poll::Pending,

// Channel receiver have been drop, we close.
// Channel receiver has been dropped, we close.
// This should not happen tho
Poll::Ready(Err(e)) => {
log::error!("CommitmentStateDiff channel reciever have been droped: {e}");
log::error!("CommitmentStateDiff channel receiver has been dropped: {e}");
Poll::Ready(None)
}
}
@@ -124,7 +124,7 @@ enum BuildCommitmentStateDiffError {
fn build_commitment_state_diff<B: BlockT, C, H>(
client: Arc<C>,
storage_notification: StorageNotification<B::Hash>,
) -> Result<(BlockHash, CommitmentStateDiff), BuildCommitmentStateDiffError>
) -> Result<BlockDAData, BuildCommitmentStateDiffError>
where
C: ProvideRuntimeApi<B>,
C::Api: StarknetRuntimeApi<B>,
@@ -138,11 +138,14 @@ where
block.header().hash::<H>().into()
};

let mut commitment_state_diff = CommitmentStateDiff {
address_to_class_hash: Default::default(),
address_to_nonce: Default::default(),
storage_updates: Default::default(),
class_hash_to_compiled_class_hash: Default::default(),
let mut accessed_addrs: IndexSet<ContractAddress> = IndexSet::new();
let mut commitment_state_diff = ThinStateDiff {
declared_classes: IndexMap::new(),
storage_diffs: IndexMap::new(),
nonces: IndexMap::new(),
deployed_contracts: IndexMap::new(),
deprecated_declared_classes: Vec::new(),
replaced_classes: IndexMap::new(),
};

for (_prefix, full_storage_key, change) in storage_notification.changes.iter() {
@@ -162,32 +165,45 @@ where
ContractAddress(PatriciaKey(StarkFelt(full_storage_key.0[32..].try_into().unwrap())));
// `change` is safe to unwrap as `Nonces` storage is `ValueQuery`
let nonce = Nonce(StarkFelt(change.unwrap().0.clone().try_into().unwrap()));
commitment_state_diff.address_to_nonce.insert(contract_address, nonce);
commitment_state_diff.nonces.insert(contract_address, nonce);
accessed_addrs.insert(contract_address);
} else if prefix == *SN_STORAGE_PREFIX {
let contract_address =
ContractAddress(PatriciaKey(StarkFelt(full_storage_key.0[32..64].try_into().unwrap())));
let storage_key = StarknetStorageKey(PatriciaKey(StarkFelt(full_storage_key.0[64..].try_into().unwrap())));
// `change` is safe to unwrap as `StorageView` storage is `ValueQuery`
let value = StarkFelt(change.unwrap().0.clone().try_into().unwrap());

match commitment_state_diff.storage_updates.get_mut(&contract_address) {
match commitment_state_diff.storage_diffs.get_mut(&contract_address) {
Some(contract_storage) => {
contract_storage.insert(storage_key, value);
}
None => {
let mut contract_storage: IndexMap<_, _, _> = Default::default();
contract_storage.insert(storage_key, value);

commitment_state_diff.storage_updates.insert(contract_address, contract_storage);
commitment_state_diff.storage_diffs.insert(contract_address, contract_storage);
}
}
accessed_addrs.insert(contract_address);
} else if prefix == *SN_CONTRACT_CLASS_HASH_PREFIX {
let contract_address =
ContractAddress(PatriciaKey(StarkFelt(full_storage_key.0[32..].try_into().unwrap())));
// `change` is safe to unwrap as `ContractClassHashes` storage is `ValueQuery`
let class_hash = ClassHash(StarkFelt(change.unwrap().0.clone().try_into().unwrap()));

commitment_state_diff.address_to_class_hash.insert(contract_address, class_hash);
// check if contract already exists
let runtime_api = client.runtime_api();
let current_block_hash = client.info().best_hash;

let contract_exists = runtime_api.contract_class_by_class_hash(current_block_hash, class_hash).is_ok();

if contract_exists {
commitment_state_diff.replaced_classes.insert(contract_address, class_hash);
} else {
commitment_state_diff.deployed_contracts.insert(contract_address, class_hash);
}
accessed_addrs.insert(contract_address);
} else if prefix == *SN_COMPILED_CLASS_HASH_PREFIX {
let class_hash = ClassHash(StarkFelt(full_storage_key.0[32..].try_into().unwrap()));
// In the current state of starknet protocol, a compiled class hash can not be erased, so we should
@@ -196,15 +212,9 @@ where
let compiled_class_hash =
CompiledClassHash(change.map(|data| StarkFelt(data.0.clone().try_into().unwrap())).unwrap_or_default());

commitment_state_diff.class_hash_to_compiled_class_hash.insert(class_hash, compiled_class_hash);
commitment_state_diff.declared_classes.insert(class_hash, compiled_class_hash);
}
}

Ok((starknet_block_hash, commitment_state_diff))
}

pub async fn log_commitment_state_diff(mut rx: mpsc::Receiver<(BlockHash, CommitmentStateDiff)>) {
while let Some((block_hash, csd)) = rx.next().await {
log::info!("received state diff for block {block_hash}: {csd:?}");
}
Ok(BlockDAData(starknet_block_hash, commitment_state_diff, accessed_addrs.len()))
}
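
For illustration only (not part of this commit): a minimal sketch of how a downstream consumer might drain the new `BlockDAData` channel, mirroring the removed `log_commitment_state_diff` helper. The function name `log_block_da_data` is hypothetical, and the sketch assumes `BlockDAData` and the `log` crate are in scope as in the file above.

use futures::channel::mpsc;
use futures::StreamExt;

// Hypothetical consumer: logs each BlockDAData (block hash, thin state diff,
// number of accessed contract addresses) as it arrives on the channel.
pub async fn log_block_da_data(mut rx: mpsc::Receiver<BlockDAData>) {
    while let Some(BlockDAData(block_hash, state_diff, num_accessed_addrs)) = rx.next().await {
        log::info!("block {block_hash}: {num_accessed_addrs} addresses accessed, state diff: {state_diff:?}");
    }
}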
(The remaining changed files in this commit are not shown.)
