Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

guest side proof fail #1653

Open
wants to merge 15 commits into
base: nightly
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 21 additions & 2 deletions crates/light-client-prover/src/circuit.rs
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,9 @@ pub fn run_circuit<DaV: DaVerifier, G: ZkvmGuest>(
|prev_journal| (prev_journal.state_root, prev_journal.last_l2_height),
);

// index only incremented on processing of a complete or aggregate DA tx
let mut current_proof_index = 0u32;
let mut expected_to_fail_hints = input.expected_to_fail_hint.into_iter().peekable();
// Parse the batch proof da data
for blob in input.da_data {
if blob.sender().as_ref() == batch_prover_da_public_key {
Expand Down Expand Up @@ -130,12 +133,14 @@ pub fn run_circuit<DaV: DaVerifier, G: ZkvmGuest>(
{
(output.initial_state_root, output.final_state_root, 0)
} else {
current_proof_index += 1;
continue; // cannot parse the output, skip
};

// Do not add if last l2 height is smaller or equal to previous output
// This guards against replay attacks: for example, if the script of batch proof 1 somehow appears again, we do not need to process it a second time
if batch_proof_output_last_l2_height <= last_l2_height {
current_proof_index += 1;
continue;
}

Expand All @@ -145,6 +150,7 @@ pub fn run_circuit<DaV: DaVerifier, G: ZkvmGuest>(
batch_proof_method_ids[0].1
} else {
// If not continue to the next blob
current_proof_index += 1;
continue;
}
} else {
Expand All @@ -161,8 +167,19 @@ pub fn run_circuit<DaV: DaVerifier, G: ZkvmGuest>(
batch_proof_method_ids[idx].1
};

if G::verify(&journal, &batch_proof_method_id.into()).is_err() {
// if the batch proof is invalid, continue to the next blob
// default to u32::MAX when the hint iterator is exhausted, meaning all remaining proofs are expected to pass
if current_proof_index
!= expected_to_fail_hints.peek().copied().unwrap_or(u32::MAX)
{
// if index is not in the expected to fail hints, then it should pass
G::verify(&journal, &batch_proof_method_id.into())
.expect("Proof hinted to pass failed");
} else {
// if index is in the expected to fail hints, then it should fail
G::verify_expected_to_fail(&proof, &batch_proof_method_id.into())
.expect_err("Proof hinted to fail passed");
expected_to_fail_hints.next();
current_proof_index += 1;
yaziciahmet marked this conversation as resolved.
Show resolved Hide resolved
continue;
}

Expand All @@ -174,6 +191,8 @@ pub fn run_circuit<DaV: DaVerifier, G: ZkvmGuest>(
batch_proof_output_last_l2_height,
),
);

current_proof_index += 1;
}
DaDataLightClient::Aggregate(_) => todo!(),
DaDataLightClient::Chunk(_) => todo!(),
Expand Down
57 changes: 27 additions & 30 deletions crates/light-client-prover/src/da_block_handler.rs
Original file line number Diff line number Diff line change
@@ -1,17 +1,14 @@
use std::collections::{HashMap, VecDeque};
use std::sync::Arc;

use alloy_primitives::U64;
use anyhow::anyhow;
use borsh::BorshDeserialize;
use citrea_common::cache::L1BlockCache;
use citrea_common::da::get_da_block_at_height;
use citrea_common::LightClientProverConfig;
use citrea_primitives::forks::fork_from_block_number;
use jsonrpsee::http_client::HttpClient;
use sov_db::ledger_db::{LightClientProverLedgerOps, SharedLedgerOps};
use sov_db::schema::types::{SlotNumber, StoredLatestDaState, StoredLightClientProofOutput};
use sov_ledger_rpc::LedgerRpcClient;
use sov_modules_api::{BatchProofCircuitOutput, BlobReaderTrait, DaSpec, Zkvm};
use sov_rollup_interface::da::{BlockHeaderTrait, DaDataLightClient, DaNamespace};
use sov_rollup_interface::services::da::{DaService, SlotData};
Expand Down Expand Up @@ -45,7 +42,6 @@ where
light_client_proof_elfs: HashMap<SpecId, Vec<u8>>,
l1_block_cache: Arc<Mutex<L1BlockCache<Da>>>,
queued_l1_blocks: VecDeque<<Da as DaService>::FilteredBlock>,
sequencer_client: Arc<HttpClient>,
}

impl<Vm, Da, Ps, DB> L1BlockHandler<Vm, Da, Ps, DB>
Expand All @@ -65,7 +61,6 @@ where
batch_proof_code_commitments: HashMap<SpecId, Vm::CodeCommitment>,
light_client_proof_code_commitments: HashMap<SpecId, Vm::CodeCommitment>,
light_client_proof_elfs: HashMap<SpecId, Vec<u8>>,
sequencer_client: Arc<HttpClient>,
) -> Self {
Self {
_prover_config: prover_config,
Expand All @@ -78,7 +73,6 @@ where
light_client_proof_elfs,
l1_block_cache: Arc::new(Mutex::new(L1BlockCache::new())),
queued_l1_blocks: VecDeque::new(),
sequencer_client,
}
}

Expand Down Expand Up @@ -166,8 +160,12 @@ where
);

let mut assumptions = vec![];
// index only incremented for complete and aggregate proofs, in line with the circuit
let mut proof_index = 0u32;
let mut expected_to_fail_hint = vec![];

for batch_proof in batch_proofs {
// TODO: handle aggregates
if let DaDataLightClient::Complete(proof) = batch_proof {
let last_l2_height = match Vm::extract_output::<
BatchProofCircuitOutput<<Da as DaService>::Spec, [u8; 32]>,
Expand All @@ -176,10 +174,17 @@ where
Ok(output) => output.last_l2_height,
Err(e) => {
info!("Failed to extract post fork 1 output from proof: {:?}. Trying to extract pre fork 1 output", e);
Vm::extract_output::<
if Vm::extract_output::<
OldBatchProofCircuitOutput<<Da as DaService>::Spec, [u8; 32]>,
>(&proof)
.map_err(|_| anyhow!("Proof should be deserializable"))?;
.is_err()
{
tracing::info!(
"Failed to extract pre fork1 and fork1 output from proof"
);
proof_index += 1;
continue;
}
// If this is a pre fork 1 proof, then we need to convert it to post fork 1 proof
0
}
Expand All @@ -191,10 +196,14 @@ where
.expect("Batch proof code commitment not found");
if let Err(e) = Vm::verify(proof.as_slice(), batch_proof_method_id) {
tracing::error!("Failed to verify batch proof: {:?}", e);
expected_to_fail_hint.push(proof_index);
proof_index += 1;
continue;
}

assumptions.push(proof);

proof_index += 1;
}
}

Expand All @@ -208,32 +217,19 @@ where
let proof = data.proof;
let output = data.light_client_proof_output;
assumptions.push(proof);
// TODO: instead of serializing the stored output
// we should just store and push the serialized proof as outputted from the circuit
// that way modifications are less error prone
light_client_proof_journal = Some(borsh::to_vec(&output)?);
Some(output.last_l2_height)
}
None => {
let soft_confirmation = self
.sequencer_client
.get_soft_confirmation_by_number(U64::from(1))
.await?
.unwrap();
let initial_l1_height = soft_confirmation.da_slot_height;
// If the prev block is the block before the first processed l1 block
// then we don't have a previous light client proof, so just give an info
if previous_l1_height == initial_l1_height {
tracing::info!(
"No previous light client proof found for L1 block: {}",
previous_l1_height
);
}
// If not then we have a problem
else {
panic!(
"No previous light client proof found for L1 block: {}",
previous_l1_height
);
}
Some(soft_confirmation.l2_height)
// first time proving a light client proof
tracing::warn!(
"Creating initial light client proof on L1 block #{}",
l1_height
);
Some(0)
}
};

Expand All @@ -260,6 +256,7 @@ where
da_block_header: l1_block.header().clone(),
light_client_proof_method_id: light_client_proof_code_commitment.clone().into(),
previous_light_client_proof_journal: light_client_proof_journal,
expected_to_fail_hint,
};

let proof = self
Expand Down
6 changes: 0 additions & 6 deletions crates/light-client-prover/src/runner.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ use std::sync::Arc;

use citrea_common::tasks::manager::TaskManager;
use citrea_common::{LightClientProverConfig, RollupPublicKeys, RpcConfig, RunnerConfig};
use jsonrpsee::http_client::{HttpClient, HttpClientBuilder};
use jsonrpsee::server::{BatchRequestConfig, ServerBuilder};
use jsonrpsee::RpcModule;
use sov_db::ledger_db::{LightClientProverLedgerOps, SharedLedgerOps};
Expand Down Expand Up @@ -32,7 +31,6 @@ where
rpc_config: RpcConfig,
da_service: Arc<Da>,
ledger_db: DB,
sequencer_client: HttpClient,
prover_service: Arc<Ps>,
prover_config: LightClientProverConfig,
task_manager: TaskManager<()>,
Expand Down Expand Up @@ -62,14 +60,12 @@ where
light_client_proof_elfs: HashMap<SpecId, Vec<u8>>,
task_manager: TaskManager<()>,
) -> Result<Self, anyhow::Error> {
let sequencer_client_url = runner_config.sequencer_client_url.clone();
Ok(Self {
_runner_config: runner_config,
public_keys,
rpc_config,
da_service,
ledger_db,
sequencer_client: HttpClientBuilder::default().build(sequencer_client_url)?,
prover_service,
prover_config,
task_manager,
Expand Down Expand Up @@ -159,7 +155,6 @@ where
let batch_proof_commitments_by_spec = self.batch_proof_commitments_by_spec.clone();
let light_client_proof_commitment = self.light_client_proof_commitment.clone();
let light_client_proof_elfs = self.light_client_proof_elfs.clone();
let sequencer_client = self.sequencer_client.clone();

self.task_manager.spawn(|cancellation_token| async move {
let l1_block_handler = L1BlockHandler::<Vm, Da, Ps, DB>::new(
Expand All @@ -171,7 +166,6 @@ where
batch_proof_commitments_by_spec,
light_client_proof_commitment,
light_client_proof_elfs,
Arc::new(sequencer_client),
);
l1_block_handler
.run(last_l1_height_scanned.0, cancellation_token)
Expand Down
Loading
Loading