Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

guest side proof fail #1653

Open
wants to merge 15 commits into
base: nightly
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
37 changes: 26 additions & 11 deletions crates/light-client-prover/src/circuit.rs
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,9 @@ pub fn run_circuit<DaV: DaVerifier, G: ZkvmGuest>(
|prev_journal| (prev_journal.state_root, prev_journal.last_l2_height),
);

// index only incremented on processing of a complete or aggregate DA tx
let mut current_proof_index = 0u32;
let mut expected_to_fail_hints = input.expected_to_fail_hint.into_iter().peekable();
// Parse the batch proof da data
for blob in input.da_data {
if blob.sender().as_ref() == batch_prover_da_public_key {
Expand Down Expand Up @@ -131,6 +134,7 @@ pub fn run_circuit<DaV: DaVerifier, G: ZkvmGuest>(
{
(output.initial_state_root, output.final_state_root, 0)
} else {
current_proof_index += 1;
continue; // cannot parse the output, skip
};

Expand All @@ -139,6 +143,7 @@ pub fn run_circuit<DaV: DaVerifier, G: ZkvmGuest>(
if batch_proof_output_last_l2_height <= last_l2_height
&& last_l2_height != 0
{
current_proof_index += 1;
continue;
}

Expand All @@ -148,6 +153,7 @@ pub fn run_circuit<DaV: DaVerifier, G: ZkvmGuest>(
batch_proof_method_ids[0].1
} else {
// If not continue to the next blob
current_proof_index += 1;
continue;
}
} else {
Expand All @@ -164,19 +170,28 @@ pub fn run_circuit<DaV: DaVerifier, G: ZkvmGuest>(
batch_proof_method_ids[idx].1
};

if G::verify(&journal, &batch_proof_method_id.into()).is_err() {
// if the batch proof is invalid, continue to the next blob
continue;
if expected_to_fail_hints
.next_if(|&x| x == current_proof_index)
.is_some()
{
// if index is in the expected to fail hints, then it should fail
G::verify_expected_to_fail(&proof, &batch_proof_method_id.into())
.expect_err("Proof hinted to fail passed");
} else {
// if index is not in the expected to fail hints, then it should pass
G::verify(&journal, &batch_proof_method_id.into())
.expect("Proof hinted to pass failed");
recursive_match_state_roots(
eyusufatik marked this conversation as resolved.
Show resolved Hide resolved
&mut initial_to_final,
&BatchProofInfo::new(
batch_proof_output_initial_state_root,
batch_proof_output_final_state_root,
batch_proof_output_last_l2_height,
),
);
}

recursive_match_state_roots(
&mut initial_to_final,
&BatchProofInfo::new(
batch_proof_output_initial_state_root,
batch_proof_output_final_state_root,
batch_proof_output_last_l2_height,
),
);
current_proof_index += 1;
}
DaDataLightClient::Aggregate(_) => todo!(),
DaDataLightClient::Chunk(_) => todo!(),
Expand Down
129 changes: 56 additions & 73 deletions crates/light-client-prover/src/da_block_handler.rs
Original file line number Diff line number Diff line change
@@ -1,17 +1,13 @@
use std::collections::{HashMap, VecDeque};
use std::sync::Arc;

use alloy_primitives::U64;
use anyhow::anyhow;
use borsh::BorshDeserialize;
use citrea_common::cache::L1BlockCache;
use citrea_common::da::get_da_block_at_height;
use citrea_common::LightClientProverConfig;
use citrea_primitives::forks::fork_from_block_number;
use jsonrpsee::http_client::HttpClient;
use sov_db::ledger_db::{LightClientProverLedgerOps, SharedLedgerOps};
use sov_db::schema::types::{SlotNumber, StoredLatestDaState, StoredLightClientProofOutput};
use sov_ledger_rpc::LedgerRpcClient;
use sov_db::schema::types::{SlotNumber, StoredLightClientProofOutput};
use sov_modules_api::{BatchProofCircuitOutput, BlobReaderTrait, DaSpec, Zkvm};
use sov_rollup_interface::da::{BlockHeaderTrait, DaDataLightClient, DaNamespace};
use sov_rollup_interface::services::da::{DaService, SlotData};
Expand Down Expand Up @@ -45,7 +41,6 @@ where
light_client_proof_elfs: HashMap<SpecId, Vec<u8>>,
l1_block_cache: Arc<Mutex<L1BlockCache<Da>>>,
queued_l1_blocks: VecDeque<<Da as DaService>::FilteredBlock>,
sequencer_client: Arc<HttpClient>,
}

impl<Vm, Da, Ps, DB> L1BlockHandler<Vm, Da, Ps, DB>
Expand All @@ -65,7 +60,6 @@ where
batch_proof_code_commitments: HashMap<SpecId, Vm::CodeCommitment>,
light_client_proof_code_commitments: HashMap<SpecId, Vm::CodeCommitment>,
light_client_proof_elfs: HashMap<SpecId, Vec<u8>>,
sequencer_client: Arc<HttpClient>,
) -> Self {
Self {
_prover_config: prover_config,
Expand All @@ -78,7 +72,6 @@ where
light_client_proof_elfs,
l1_block_cache: Arc::new(Mutex::new(L1BlockCache::new())),
queued_l1_blocks: VecDeque::new(),
sequencer_client,
}
}

Expand Down Expand Up @@ -158,90 +151,94 @@ where
tx.full_data();
});

let mut assumptions = vec![];

let previous_l1_height = l1_height - 1;
let (light_client_proof_journal, l2_last_height) = match self
.ledger_db
.get_light_client_proof_data_by_l1_height(previous_l1_height)?
{
Some(data) => {
let proof = data.proof;
assumptions.push(proof);

let db_output = data.light_client_proof_output;
let output = LightClientCircuitOutput::from(db_output);
// TODO: instead of serializing the output
// we should just store and push the serialized proof as outputted from the circuit
// that way modifications are less error prone
(Some(borsh::to_vec(&output)?), output.last_l2_height)
}
None => {
// first time proving a light client proof
tracing::warn!(
"Creating initial light client proof on L1 block #{}",
l1_height
);
(None, 0)
}
};

let batch_proofs = self.extract_batch_proofs(&mut da_data, l1_hash).await;
tracing::info!(
"Block {} has {} batch proofs",
l1_height,
batch_proofs.len()
);

let mut assumptions = vec![];
// index only incremented for complete and aggregated proofs, in line with the circuit
let mut proof_index = 0u32;
let mut expected_to_fail_hint = vec![];

for batch_proof in batch_proofs {
// TODO: handle aggregates
if let DaDataLightClient::Complete(proof) = batch_proof {
let last_l2_height = match Vm::extract_output::<
let batch_proof_last_l2_height = match Vm::extract_output::<
BatchProofCircuitOutput<<Da as DaService>::Spec, [u8; 32]>,
>(&proof)
{
Ok(output) => output.last_l2_height,
Err(e) => {
info!("Failed to extract post fork 1 output from proof: {:?}. Trying to extract pre fork 1 output", e);
Vm::extract_output::<
if Vm::extract_output::<
OldBatchProofCircuitOutput<<Da as DaService>::Spec, [u8; 32]>,
>(&proof)
.map_err(|_| anyhow!("Proof should be deserializable"))?;
.is_err()
{
tracing::info!(
"Failed to extract pre fork1 and fork1 output from proof"
);
proof_index += 1;
continue;
}
// If this is a pre-fork-1 proof, we need to treat it as a post-fork-1 proof
0
}
};
let current_spec = fork_from_block_number(last_l2_height).spec_id;

if batch_proof_last_l2_height <= l2_last_height {
proof_index += 1;
continue;
}

let current_spec = fork_from_block_number(batch_proof_last_l2_height).spec_id;
let batch_proof_method_id = self
.batch_proof_code_commitments
.get(&current_spec)
.expect("Batch proof code commitment not found");
if let Err(e) = Vm::verify(proof.as_slice(), batch_proof_method_id) {
tracing::error!("Failed to verify batch proof: {:?}", e);
continue;
expected_to_fail_hint.push(proof_index);
} else {
assumptions.push(proof);
}

assumptions.push(proof);
proof_index += 1;
}
}

let previous_l1_height = l1_height - 1;
let mut light_client_proof_journal = None;
let l2_last_height = match self
.ledger_db
.get_light_client_proof_data_by_l1_height(previous_l1_height)?
{
Some(data) => {
let proof = data.proof;
let output = data.light_client_proof_output;
assumptions.push(proof);
light_client_proof_journal = Some(borsh::to_vec(&output)?);
Some(output.last_l2_height)
}
None => {
let soft_confirmation = self
.sequencer_client
.get_soft_confirmation_by_number(U64::from(1))
.await?
.unwrap();
let initial_l1_height = soft_confirmation.da_slot_height;
// If the previous block is the block before the first processed L1 block,
// then we don't have a previous light client proof, so just log an info message
if previous_l1_height == initial_l1_height {
tracing::info!(
"No previous light client proof found for L1 block: {}",
previous_l1_height
);
}
// If not then we have a problem
else {
panic!(
"No previous light client proof found for L1 block: {}",
previous_l1_height
);
}
Some(soft_confirmation.l2_height)
}
};

tracing::debug!("assumptions len: {:?}", assumptions.len());

let l2_last_height = l2_last_height.ok_or(anyhow!(
"Could not determine the last L2 height for batch proof"
))?;
let current_fork = fork_from_block_number(l2_last_height);
let light_client_proof_code_commitment = self
.light_client_proof_code_commitments
Expand All @@ -260,6 +257,7 @@ where
da_block_header: l1_block.header().clone(),
light_client_proof_method_id: light_client_proof_code_commitment.clone().into(),
previous_light_client_proof_journal: light_client_proof_journal,
expected_to_fail_hint,
};

let proof = self
Expand All @@ -274,22 +272,7 @@ where
circuit_output
);

let latest_da_state = &circuit_output.latest_da_state;
let stored_proof_output = StoredLightClientProofOutput {
state_root: circuit_output.state_root,
light_client_proof_method_id: circuit_output.light_client_proof_method_id,
latest_da_state: StoredLatestDaState {
block_hash: latest_da_state.block_hash,
block_height: latest_da_state.block_height,
total_work: latest_da_state.total_work,
current_target_bits: latest_da_state.current_target_bits,
epoch_start_time: latest_da_state.epoch_start_time,
prev_11_timestamps: latest_da_state.prev_11_timestamps,
},
unchained_batch_proofs_info: circuit_output.unchained_batch_proofs_info,
last_l2_height: circuit_output.last_l2_height,
batch_proof_method_ids: circuit_output.batch_proof_method_ids,
};
let stored_proof_output = StoredLightClientProofOutput::from(circuit_output);

self.ledger_db.insert_light_client_proof_data_by_l1_height(
l1_height,
Expand Down
6 changes: 0 additions & 6 deletions crates/light-client-prover/src/runner.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ use std::sync::Arc;

use citrea_common::tasks::manager::TaskManager;
use citrea_common::{LightClientProverConfig, RollupPublicKeys, RpcConfig, RunnerConfig};
use jsonrpsee::http_client::{HttpClient, HttpClientBuilder};
use jsonrpsee::server::{BatchRequestConfig, ServerBuilder};
use jsonrpsee::RpcModule;
use sov_db::ledger_db::{LightClientProverLedgerOps, SharedLedgerOps};
Expand Down Expand Up @@ -32,7 +31,6 @@ where
rpc_config: RpcConfig,
da_service: Arc<Da>,
ledger_db: DB,
sequencer_client: HttpClient,
prover_service: Arc<Ps>,
prover_config: LightClientProverConfig,
task_manager: TaskManager<()>,
Expand Down Expand Up @@ -62,14 +60,12 @@ where
light_client_proof_elfs: HashMap<SpecId, Vec<u8>>,
task_manager: TaskManager<()>,
) -> Result<Self, anyhow::Error> {
let sequencer_client_url = runner_config.sequencer_client_url.clone();
Ok(Self {
_runner_config: runner_config,
public_keys,
rpc_config,
da_service,
ledger_db,
sequencer_client: HttpClientBuilder::default().build(sequencer_client_url)?,
prover_service,
prover_config,
task_manager,
Expand Down Expand Up @@ -159,7 +155,6 @@ where
let batch_proof_commitments_by_spec = self.batch_proof_commitments_by_spec.clone();
let light_client_proof_commitment = self.light_client_proof_commitment.clone();
let light_client_proof_elfs = self.light_client_proof_elfs.clone();
let sequencer_client = self.sequencer_client.clone();

self.task_manager.spawn(|cancellation_token| async move {
let l1_block_handler = L1BlockHandler::<Vm, Da, Ps, DB>::new(
Expand All @@ -171,7 +166,6 @@ where
batch_proof_commitments_by_spec,
light_client_proof_commitment,
light_client_proof_elfs,
Arc::new(sequencer_client),
);
l1_block_handler
.run(last_l1_height_scanned.0, cancellation_token)
Expand Down
Loading
Loading