From 31147b4a226e24cacd6e15008d9bf5d281ef339d Mon Sep 17 00:00:00 2001 From: Eugene Rabinovich Date: Wed, 6 Nov 2024 18:26:03 -0800 Subject: [PATCH 01/15] perf: remove proving single proofs (#60) * working with the 'pass-through' method * cleanup * making clippy happy --- crates/prover/src/lib.rs | 74 ++++++++++++++++++++++++++++++++-------- 1 file changed, 60 insertions(+), 14 deletions(-) diff --git a/crates/prover/src/lib.rs b/crates/prover/src/lib.rs index 5ffd096e43..bc134b9295 100644 --- a/crates/prover/src/lib.rs +++ b/crates/prover/src/lib.rs @@ -114,6 +114,18 @@ pub type CompressAir = RecursionAir; pub type ShrinkAir = RecursionAir; pub type WrapAir = RecursionAir; +#[allow(clippy::type_complexity)] +enum TracesOrInput { + ProgramRecordTraces( + Box<( + Arc>, + ExecutionRecord, + Vec<(String, RowMajorMatrix)>, + )>, + ), + CircuitWitness(Box), +} + /// A end-to-end prover implementation for the SP1 RISC-V zkVM. pub struct SP1Prover { /// The machine used for proving the core step. @@ -667,7 +679,7 @@ impl SP1Prover { // Spawn a worker that sends the first layer inputs to a bounded channel. let input_sync = Arc::new(TurnBasedSync::new()); - let (input_tx, input_rx) = sync_channel::<(usize, usize, SP1CircuitWitness)>( + let (input_tx, input_rx) = sync_channel::<(usize, usize, SP1CircuitWitness, bool)>( opts.recursion_opts.checkpoints_channel_capacity, ); let input_tx = Arc::new(Mutex::new(input_tx)); @@ -677,7 +689,7 @@ impl SP1Prover { s.spawn(move || { for (index, input) in first_layer_inputs.into_iter().enumerate() { input_sync.wait_for_turn(index); - input_tx.lock().unwrap().send((index, 0, input)).unwrap(); + input_tx.lock().unwrap().send((index, 0, input, false)).unwrap(); input_sync.advance_turn(); } }); @@ -686,13 +698,9 @@ impl SP1Prover { // Spawn workers who generate the records and traces. let record_and_trace_sync = Arc::new(TurnBasedSync::new()); let (record_and_trace_tx, record_and_trace_rx) = - sync_channel::<( - usize, - usize, - Arc>, - ExecutionRecord, - Vec<(String, RowMajorMatrix)>, - )>(opts.recursion_opts.records_and_traces_channel_capacity); + sync_channel::<(usize, usize, TracesOrInput)>( + opts.recursion_opts.records_and_traces_channel_capacity, + ); let record_and_trace_tx = Arc::new(Mutex::new(record_and_trace_tx)); let record_and_trace_rx = Arc::new(Mutex::new(record_and_trace_rx)); let input_rx = Arc::new(Mutex::new(input_rx)); @@ -705,7 +713,7 @@ impl SP1Prover { let _span = span.enter(); loop { let received = { input_rx.lock().unwrap().recv() }; - if let Ok((index, height, input)) = received { + if let Ok((index, height, input, false)) = received { // Get the program and witness stream. let (program, witness_stream) = tracing::debug_span!( "get program and witness stream" @@ -776,7 +784,29 @@ impl SP1Prover { record_and_trace_tx .lock() .unwrap() - .send((index, height, program, record, traces)) + .send(( + index, + height, + TracesOrInput::ProgramRecordTraces(Box::new(( + program, record, traces, + ))), + )) + .unwrap(); + + // Advance the turn. + record_and_trace_sync.advance_turn(); + } else if let Ok((index, height, input, true)) = received { + record_and_trace_sync.wait_for_turn(index); + + // Send the record and traces to the worker. + record_and_trace_tx + .lock() + .unwrap() + .send(( + index, + height, + TracesOrInput::CircuitWitness(Box::new(input)), + )) .unwrap(); // Advance the turn. 
@@ -806,7 +836,8 @@ impl SP1Prover { let _span = span.enter(); loop { let received = { record_and_trace_rx.lock().unwrap().recv() }; - if let Ok((index, height, program, record, traces)) = received { + if let Ok((index, height, TracesOrInput::ProgramRecordTraces(boxed_prt))) = received { + let (program, record, traces) = *boxed_prt; tracing::debug_span!("batch").in_scope(|| { // Get the keys. let (pk, vk) = tracing::debug_span!("Setup compress program") @@ -874,7 +905,22 @@ impl SP1Prover { // Advance the turn. prover_sync.advance_turn(); }); - } else { + } else if let Ok((index, height, TracesOrInput::CircuitWitness(witness_box))) = received { + let witness = *witness_box; + if let SP1CircuitWitness::Compress(inner_witness) = witness { + let SP1CompressWitnessValues { vks_and_proofs, is_complete: _ } = inner_witness; + assert!(vks_and_proofs.len()==1); + let (vk, proof) = vks_and_proofs.last().unwrap(); + // Wait for our turn to update the state. + prover_sync.wait_for_turn(index); + + // Send the proof. + proofs_tx.lock().unwrap().send((index, height, vk.clone(), proof.clone())).unwrap(); + + // Advance the turn. + prover_sync.advance_turn(); + } + } else { break; } } @@ -934,7 +980,7 @@ impl SP1Prover { input_tx .lock() .unwrap() - .send((count, next_input_height, input)) + .send((count, next_input_height, input, is_last)) .unwrap(); input_sync.advance_turn(); count += 1; From c9662c409bd0899f1bf2e82ccaf546866c103e64 Mon Sep 17 00:00:00 2001 From: Tamir Hemo Date: Fri, 8 Nov 2024 11:27:49 -0800 Subject: [PATCH 02/15] perf: drop record in different task (#65) * drop record span * drop in thread --- Cargo.lock | 1 + crates/core/machine/Cargo.toml | 2 ++ crates/core/machine/src/utils/prove.rs | 7 ++++++- 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 2a9ba70be9..06254e4303 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5473,6 +5473,7 @@ dependencies = [ "p3-uni-stark", "p3-util", "rand 0.8.5", + "rayon", "serde", "size", "snowbridge-amcl", diff --git a/crates/core/machine/Cargo.toml b/crates/core/machine/Cargo.toml index 3bbd29d0b9..13dedad378 100644 --- a/crates/core/machine/Cargo.toml +++ b/crates/core/machine/Cargo.toml @@ -27,6 +27,8 @@ p3-util = { workspace = true } sp1-derive = { workspace = true } sp1-primitives = { workspace = true } +rayon = "1.10.0" + amcl = { package = "snowbridge-amcl", version = "1.0.2", default-features = false, features = [ "bls381", ] } diff --git a/crates/core/machine/src/utils/prove.rs b/crates/core/machine/src/utils/prove.rs index 18f857cdf3..49a06e66db 100644 --- a/crates/core/machine/src/utils/prove.rs +++ b/crates/core/machine/src/utils/prove.rs @@ -639,13 +639,18 @@ where #[cfg(debug_assertions)] { - if let Some(shape) = record.shape { + if let Some(shape) = record.shape.as_ref() { assert_eq!( proof.shape(), shape.clone().into_iter().collect(), ); } } + + rayon::spawn(move || { + drop(record); + }); + proof }, ), From 2614e21b31dd39e052b65d39552a2c95113e68ac Mon Sep 17 00:00:00 2001 From: Tamir Hemo Date: Fri, 8 Nov 2024 11:28:52 -0800 Subject: [PATCH 03/15] feat: execute mode on `perf` (#62) * add execute mode * Update crates/perf/src/main.rs Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --------- Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- crates/perf/run_s3.sh | 18 +++++++++++++----- crates/perf/src/main.rs | 13 +++++++++++++ 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/crates/perf/run_s3.sh 
b/crates/perf/run_s3.sh index a9a0ef7a06..901cde6574 100755 --- a/crates/perf/run_s3.sh +++ b/crates/perf/run_s3.sh @@ -1,13 +1,21 @@ #!/bin/bash -# Check if both arguments are provided -if [ $# -ne 2 ]; then - echo "Usage: $0 " +# Set the default value for the stage argument +stage="prove" + +# Check the number of arguments +if [ $# -lt 2 ] || [ $# -gt 3 ]; then + echo "Usage: $0 [execute|prove]" exit 1 fi +# If the third argument is provided, override the default value +if [ $# -eq 3 ]; then + stage="$3" +fi + s3_path=$1 -stage=$2 +kind=$2 # Download files from S3 aws s3 cp s3://sp1-testing-suite/$s3_path/program.bin /tmp/program.bin @@ -20,4 +28,4 @@ export RUST_LOG=debug export SP1_DEBUG=1 # Run moongate-perf -cargo run -p sp1-perf -- --program /tmp/program.bin --stdin /tmp/stdin.bin --mode $stage \ No newline at end of file +cargo run -p sp1-perf -- --program /tmp/program.bin --stdin /tmp/stdin.bin --mode $kind --stage $stage \ No newline at end of file diff --git a/crates/perf/src/main.rs b/crates/perf/src/main.rs index baeffbc49b..493ab5aa0c 100644 --- a/crates/perf/src/main.rs +++ b/crates/perf/src/main.rs @@ -17,6 +17,8 @@ struct PerfArgs { pub stdin: String, #[arg(short, long)] pub mode: ProverMode, + #[arg(short, long, default_value = "prove")] + pub stage: Stage, } #[derive(Default, Debug, Clone)] @@ -41,6 +43,12 @@ enum ProverMode { Network, } +#[derive(Debug, Clone, ValueEnum, PartialEq, Eq)] +enum Stage { + Execute, + Prove, +} + pub fn time_operation T>(operation: F) -> (T, std::time::Duration) { let start = Instant::now(); let result = operation(); @@ -59,6 +67,11 @@ fn main() { let prover = SP1Prover::::new(); let (pk, vk) = prover.setup(&elf); let cycles = sp1_prover::utils::get_cycles(&elf, &stdin); + let stage = args.stage; + if stage == Stage::Execute { + println!("Program executed successfully, number of cycles: {}", cycles); + return; + } let opts = SP1ProverOpts::default(); match args.mode { From adcda87e3d40208470981d13efa81ae102c3fa1b Mon Sep 17 00:00:00 2001 From: Eugene Rabinovich Date: Sun, 10 Nov 2024 22:52:07 -0800 Subject: [PATCH 04/15] perf: two shapes + shape script (#63) * working * wip * wip * wip * semi-working shrink * working shrink * added precompile computaitons * slightly better tuned shapes * clippy * working to do no precompile shapes too * docker * switched order of shapes * comments * cached programs * remove singleton proofs from cache * comment out building the chips * pre-dev-merge * refactored max core plus precompile * near final draft * almost there * ready for review --- .github/workflows/suite.yml | 3 + crates/core/machine/src/riscv/shape.rs | 21 +++ crates/core/machine/src/utils/prove.rs | 4 +- crates/prover/Cargo.toml | 4 + .../find_minimal_large_recursion_shape.rs | 143 ++++++++++++++++ crates/prover/src/lib.rs | 138 +++++++++------ crates/prover/src/shapes.rs | 131 +++++++++++++-- crates/recursion/core/src/machine.rs | 6 +- crates/recursion/core/src/shape.rs | 157 +++++------------- 9 files changed, 421 insertions(+), 186 deletions(-) create mode 100644 crates/prover/scripts/find_minimal_large_recursion_shape.rs diff --git a/.github/workflows/suite.yml b/.github/workflows/suite.yml index ca39cabe1c..6ab1bae2f9 100644 --- a/.github/workflows/suite.yml +++ b/.github/workflows/suite.yml @@ -67,6 +67,7 @@ jobs: args: --release -p sp1-perf -- --program workdir/program.bin --stdin workdir/stdin.bin --mode cpu env: RUST_LOG: info + VERIFY_VK: false RUSTFLAGS: -Copt-level=3 -Ctarget-cpu=native RUST_BACKTRACE: 1 @@ -120,6 +121,7 @@ jobs: 
args: --release -p sp1-perf -- --program workdir/program.bin --stdin workdir/stdin.bin --mode cuda env: RUST_LOG: debug + VERIFY_VK: false RUSTFLAGS: -Copt-level=3 -Ctarget-cpu=native RUST_BACKTRACE: 1 SP1_PROVER: cuda @@ -172,6 +174,7 @@ jobs: args: --release -p sp1-perf --features "native-gnark,network-v2" -- --program workdir/program.bin --stdin workdir/stdin.bin --mode network env: RUST_LOG: info + VERIFY_VK: false RUSTFLAGS: -Copt-level=3 -Ctarget-cpu=native RUST_BACKTRACE: 1 SP1_PROVER: network diff --git a/crates/core/machine/src/riscv/shape.rs b/crates/core/machine/src/riscv/shape.rs index 2eb57338df..ac8f887d85 100644 --- a/crates/core/machine/src/riscv/shape.rs +++ b/crates/core/machine/src/riscv/shape.rs @@ -320,6 +320,27 @@ impl CoreShapeConfig { max_core_shapes.collect() } + pub fn maximal_core_plus_precompile_shapes(&self) -> Vec { + let max_preprocessed = self + .allowed_preprocessed_log_heights + .iter() + .map(|(air, allowed_heights)| (air.name(), allowed_heights.last().unwrap().unwrap())); + + let precompile_only_shapes = self.precompile_allowed_log_heights.iter().flat_map( + move |(air, (mem_events_per_row, allowed_log_heights))| { + self.get_precompile_shapes( + air, + *mem_events_per_row, + *allowed_log_heights.last().unwrap(), + ) + }, + ); + + let precompile_shapes = precompile_only_shapes + .map(|x| max_preprocessed.clone().chain(x).collect::()); + + self.maximal_core_shapes().into_iter().chain(precompile_shapes).collect() + } } impl Default for CoreShapeConfig { diff --git a/crates/core/machine/src/utils/prove.rs b/crates/core/machine/src/utils/prove.rs index 49a06e66db..62b6d0916e 100644 --- a/crates/core/machine/src/utils/prove.rs +++ b/crates/core/machine/src/utils/prove.rs @@ -26,7 +26,6 @@ use p3_matrix::Matrix; use crate::{ io::SP1Stdin, - riscv::cost::CostEstimator, utils::{chunk_vec, concurrency::TurnBasedSync}, }; use sp1_core_executor::{events::sorted_table_lines, ExecutionState}; @@ -693,9 +692,8 @@ where // Print the summary. 
let proving_time = proving_start.elapsed().as_secs_f64(); tracing::info!( - "summary: cycles={}, gas={}, e2e={}s, khz={:.2}, proofSize={}", + "summary: cycles={}, e2e={}s, khz={:.2}, proofSize={}", cycles, - report_aggregate.estimate_gas(), proving_time, (cycles as f64 / (proving_time * 1000.0) as f64), bincode::serialize(&proof).unwrap().len(), diff --git a/crates/prover/Cargo.toml b/crates/prover/Cargo.toml index 2a57c7bfd3..ee27d7df69 100644 --- a/crates/prover/Cargo.toml +++ b/crates/prover/Cargo.toml @@ -60,6 +60,10 @@ path = "scripts/post_trusted_setup.rs" name = "e2e" path = "scripts/e2e.rs" +[[bin]] +name = "find_minimal_large_recursion_shape" +path = "scripts/find_minimal_large_recursion_shape.rs" + [features] native-gnark = ["sp1-recursion-gnark-ffi/native"] export-tests = [] diff --git a/crates/prover/scripts/find_minimal_large_recursion_shape.rs b/crates/prover/scripts/find_minimal_large_recursion_shape.rs new file mode 100644 index 0000000000..7fc8aa0428 --- /dev/null +++ b/crates/prover/scripts/find_minimal_large_recursion_shape.rs @@ -0,0 +1,143 @@ +use std::panic::{catch_unwind, AssertUnwindSafe}; + +use clap::Parser; +use p3_baby_bear::BabyBear; +use sp1_core_machine::utils::setup_logger; +use sp1_prover::{ + components::DefaultProverComponents, + shapes::{check_shapes, SP1ProofShape}, + SP1Prover, ShrinkAir, REDUCE_BATCH_SIZE, +}; +use sp1_recursion_core::shape::RecursionShapeConfig; +use sp1_stark::{MachineProver, ProofShape}; + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct Args { + #[clap(short, long, default_value_t = false)] + dummy: bool, + #[clap(short, long, default_value_t = REDUCE_BATCH_SIZE)] + reduce_batch_size: usize, + #[clap(short, long, default_value_t = 1)] + num_compiler_workers: usize, + #[clap(short, long, default_value_t = 1)] + num_setup_workers: usize, + #[clap(short, long)] + start: Option, + #[clap(short, long)] + end: Option, +} + +fn main() { + setup_logger(); + let args = Args::parse(); + + let reduce_batch_size = args.reduce_batch_size; + let dummy = args.dummy; + let num_compiler_workers = args.num_compiler_workers; + + let mut prover = SP1Prover::::new(); + prover.vk_verification = !dummy; + + let recursion_shape_config = + prover.recursion_shape_config.as_ref().expect("recursion shape config not found"); + + // Create the maximal shape from all of the shapes in recursion_shape_config, then add 2 to + // all the log-heights of that shape. This is the starting candidate for the "minimal large shape". + let candidate = recursion_shape_config.union_config_with_extra_room().first().unwrap().clone(); + + prover.recursion_shape_config = Some(RecursionShapeConfig::from_hash_map(&candidate)); + + // Check that this candidate is big enough for all core shapes, including those with precompiles. + assert!(check_shapes(reduce_batch_size, false, num_compiler_workers, &prover,)); + + let mut answer = candidate.clone(); + + // Chip-by-chip in the candidate, reduce the log-height corresponding to that chip until the + // shape is no longer big enough to support all the core shapes. Then, record the log height for + // that chip into answer. 
+ for (key, value) in candidate.iter() { + if key != "PublicValues" { + let mut done = false; + let mut new_val = *value; + while !done { + new_val -= 1; + answer.insert(key.clone(), new_val); + prover.recursion_shape_config = Some(RecursionShapeConfig::from_hash_map(&answer)); + done = !check_shapes(reduce_batch_size, false, num_compiler_workers, &prover); + } + answer.insert(key.clone(), new_val + 1); + } + } + + let mut no_precompile_answer = answer.clone(); + + // Repeat the process but only for core shapes that don't have a precompile in them. + for (key, value) in answer.iter() { + if key != "PublicValues" { + let mut done = false; + let mut new_val = *value; + while !done { + new_val -= 1; + no_precompile_answer.insert(key.clone(), new_val); + prover.recursion_shape_config = + Some(RecursionShapeConfig::from_hash_map(&no_precompile_answer)); + done = !check_shapes(reduce_batch_size, true, num_compiler_workers, &prover); + } + no_precompile_answer.insert(key.clone(), new_val + 1); + } + } + + // Repeat this process to tune the shrink shape. + let mut shrink_shape = ShrinkAir::::shrink_shape().clone_into_hash_map(); + + // First, check that the current shrink shape is compatible with the compress shape choice arising + // from the tuning process above. + assert!({ + prover.recursion_shape_config = Some(RecursionShapeConfig::from_hash_map(&answer)); + catch_unwind(AssertUnwindSafe(|| { + prover.shrink_prover.setup(&prover.program_from_shape( + true, + sp1_prover::shapes::SP1CompressProgramShape::from_proof_shape( + SP1ProofShape::Shrink(ProofShape { + chip_information: answer.clone().into_iter().collect::>(), + }), + 5, + ), + Some(shrink_shape.clone().into()), + )) + })) + .is_ok() + }); + + // Next, tune the shrink shape in the same manner as for the compress shapes. 
+ for (key, value) in shrink_shape.clone().iter() { + if key != "PublicValues" { + let mut done = false; + let mut new_val = *value + 1; + while !done { + new_val -= 1; + shrink_shape.insert(key.clone(), new_val); + prover.recursion_shape_config = Some(RecursionShapeConfig::from_hash_map(&answer)); + done = catch_unwind(AssertUnwindSafe(|| { + prover.shrink_prover.setup(&prover.program_from_shape( + true, + sp1_prover::shapes::SP1CompressProgramShape::from_proof_shape( + SP1ProofShape::Shrink(ProofShape { + chip_information: answer.clone().into_iter().collect::>(), + }), + 5, + ), + Some(shrink_shape.clone().into()), + )) + })) + .is_err(); + } + shrink_shape.insert(key.clone(), new_val + 1); + } + } + + println!("Final compress shape: {:?}", answer); + println!("Final compress shape with no precompiles: {:?}", no_precompile_answer); + println!("Final shrink shape: {:?}", shrink_shape); +} diff --git a/crates/prover/src/lib.rs b/crates/prover/src/lib.rs index bc134b9295..54ec6bd1f2 100644 --- a/crates/prover/src/lib.rs +++ b/crates/prover/src/lib.rs @@ -37,6 +37,7 @@ use p3_baby_bear::BabyBear; use p3_challenger::CanObserve; use p3_field::{AbstractField, PrimeField, PrimeField32}; use p3_matrix::dense::RowMajorMatrix; +use shapes::SP1ProofShape; use sp1_core_executor::{ExecutionError, ExecutionReport, Executor, Program, SP1Context}; use sp1_core_machine::{ io::SP1Stdin, @@ -64,9 +65,12 @@ use sp1_recursion_compiler::{ ir::{Builder, Witness}, }; use sp1_recursion_core::{ - air::RecursionPublicValues, machine::RecursionAir, runtime::ExecutionRecord, - shape::RecursionShapeConfig, stark::BabyBearPoseidon2Outer, RecursionProgram, - Runtime as RecursionRuntime, + air::RecursionPublicValues, + machine::RecursionAir, + runtime::ExecutionRecord, + shape::{RecursionShape, RecursionShapeConfig}, + stark::BabyBearPoseidon2Outer, + RecursionProgram, Runtime as RecursionRuntime, }; pub use sp1_recursion_gnark_ffi::proof::{Groth16Bn254Proof, PlonkBn254Proof}; use sp1_recursion_gnark_ffi::{groth16_bn254::Groth16Bn254Prover, plonk_bn254::PlonkBn254Prover}; @@ -99,7 +103,6 @@ const SHRINK_DEGREE: usize = 3; const WRAP_DEGREE: usize = 9; const CORE_CACHE_SIZE: usize = 5; -const COMPRESS_CACHE_SIZE: usize = 3; pub const REDUCE_BATCH_SIZE: usize = 2; // TODO: FIX @@ -144,8 +147,7 @@ pub struct SP1Prover { pub recursion_cache_misses: AtomicUsize, - pub compress_programs: - Mutex>>>, + pub compress_programs: BTreeMap>>, pub compress_cache_misses: AtomicUsize, @@ -197,14 +199,6 @@ impl SP1Prover { ) .expect("PROVER_CORE_CACHE_SIZE must be a non-zero usize"); - let compress_cache_size = NonZeroUsize::new( - env::var("PROVER_COMPRESS_CACHE_SIZE") - .unwrap_or_else(|_| CORE_CACHE_SIZE.to_string()) - .parse() - .unwrap_or(COMPRESS_CACHE_SIZE), - ) - .expect("PROVER_COMPRESS_CACHE_SIZE must be a non-zero usize"); - let core_shape_config = env::var("FIX_CORE_SHAPES") .map(|v| v.eq_ignore_ascii_case("true")) .unwrap_or(true) @@ -229,6 +223,28 @@ impl SP1Prover { let (root, merkle_tree) = MerkleTree::commit(allowed_vk_map.keys().copied().collect()); + let mut compress_programs = BTreeMap::new(); + if let Some(config) = &recursion_shape_config { + SP1ProofShape::generate_compress_shapes(config, REDUCE_BATCH_SIZE).for_each(|shape| { + let compress_shape = SP1CompressWithVkeyShape { + compress_shape: shape.into(), + merkle_tree_height: merkle_tree.height, + }; + let input = SP1CompressWithVKeyWitnessValues::dummy( + compress_prover.machine(), + &compress_shape, + ); + let program = compress_program_from_input::( + 
recursion_shape_config.as_ref(), + &compress_prover, + vk_verification, + &input, + ); + let program = Arc::new(program); + compress_programs.insert(compress_shape, program); + }); + } + Self { core_prover, compress_prover, @@ -236,7 +252,7 @@ impl SP1Prover { wrap_prover, recursion_programs: Mutex::new(LruCache::new(core_cache_size)), recursion_cache_misses: AtomicUsize::new(0), - compress_programs: Mutex::new(LruCache::new(compress_cache_size)), + compress_programs, compress_cache_misses: AtomicUsize::new(0), vk_root: root, vk_merkle_tree: merkle_tree, @@ -363,47 +379,25 @@ impl SP1Prover { pub fn compress_program( &self, + shape_tuning: bool, input: &SP1CompressWithVKeyWitnessValues, ) -> Arc> { - let mut cache = self.compress_programs.lock().unwrap_or_else(|e| e.into_inner()); - let shape = input.shape(); - cache - .get_or_insert(shape.clone(), || { - let misses = self.compress_cache_misses.fetch_add(1, Ordering::Relaxed); - tracing::debug!("compress cache miss, misses: {}", misses); - // Get the operations. - let builder_span = tracing::debug_span!("build compress program").entered(); - let mut builder = Builder::::default(); - - // read the input. - let input = input.read(&mut builder); - // Verify the proof. - SP1CompressWithVKeyVerifier::verify( - &mut builder, - self.compress_prover.machine(), - input, - self.vk_verification, - PublicValuesOutputDigest::Reduce, - ); - let operations = builder.into_operations(); - builder_span.exit(); - - // Compile the program. - let compiler_span = tracing::debug_span!("compile compress program").entered(); - let mut compiler = AsmCompiler::::default(); - let mut program = compiler.compile(operations); - if let Some(recursion_shape_config) = &self.recursion_shape_config { - recursion_shape_config.fix_shape(&mut program); - } - let program = Arc::new(program); - compiler_span.exit(); - program - }) - .clone() + if self.recursion_shape_config.is_some() && !shape_tuning { + self.compress_programs.get(&input.shape()).map(Clone::clone).unwrap() + } else { + // Get the operations. + Arc::new(compress_program_from_input::( + self.recursion_shape_config.as_ref(), + &self.compress_prover, + self.vk_verification, + input, + )) + } } pub fn shrink_program( &self, + shrink_shape: RecursionShape, input: &SP1CompressWithVKeyWitnessValues, ) -> Arc> { // Get the operations. @@ -425,7 +419,8 @@ impl SP1Prover { let compiler_span = tracing::debug_span!("compile shrink program").entered(); let mut compiler = AsmCompiler::::default(); let mut program = compiler.compile(operations); - program.shape = Some(ShrinkAir::::shrink_shape()); + + program.shape = Some(shrink_shape); let program = Arc::new(program); compiler_span.exit(); program @@ -739,7 +734,10 @@ impl SP1Prover { &mut witness_stream, ); - (self.compress_program(&input_with_merkle), witness_stream) + ( + self.compress_program(false, &input_with_merkle), + witness_stream, + ) } }); @@ -1036,7 +1034,8 @@ impl SP1Prover { let input_with_merkle = self.make_merkle_proofs(input); - let program = self.shrink_program(&input_with_merkle); + let program = + self.shrink_program(ShrinkAir::::shrink_shape(), &input_with_merkle); // Run the compress program. 
let mut runtime = RecursionRuntime::, Challenge, _>::new( @@ -1261,6 +1260,39 @@ impl SP1Prover { } } +pub fn compress_program_from_input( + config: Option<&RecursionShapeConfig>>, + compress_prover: &C::CompressProver, + vk_verification: bool, + input: &SP1CompressWithVKeyWitnessValues, +) -> RecursionProgram { + let builder_span = tracing::debug_span!("build compress program").entered(); + let mut builder = Builder::::default(); + // read the input. + let input = input.read(&mut builder); + // Verify the proof. + SP1CompressWithVKeyVerifier::verify( + &mut builder, + compress_prover.machine(), + input, + vk_verification, + PublicValuesOutputDigest::Reduce, + ); + let operations = builder.into_operations(); + builder_span.exit(); + + // Compile the program. + let compiler_span = tracing::debug_span!("compile compress program").entered(); + let mut compiler = AsmCompiler::::default(); + let mut program = compiler.compile(operations); + if let Some(config) = config { + config.fix_shape(&mut program); + } + compiler_span.exit(); + + program +} + #[cfg(any(test, feature = "export-tests"))] pub mod tests { diff --git a/crates/prover/src/shapes.rs b/crates/prover/src/shapes.rs index 74f7ba177e..95f5b2874d 100644 --- a/crates/prover/src/shapes.rs +++ b/crates/prover/src/shapes.rs @@ -8,20 +8,23 @@ use std::{ }; use eyre::Result; +use serde::{Deserialize, Serialize}; use thiserror::Error; use p3_baby_bear::BabyBear; use p3_field::AbstractField; -use serde::{Deserialize, Serialize}; use sp1_core_machine::riscv::CoreShapeConfig; use sp1_recursion_circuit::machine::{ SP1CompressWithVKeyWitnessValues, SP1CompressWithVkeyShape, SP1DeferredShape, SP1DeferredWitnessValues, SP1RecursionShape, SP1RecursionWitnessValues, }; -use sp1_recursion_core::{shape::RecursionShapeConfig, RecursionProgram}; +use sp1_recursion_core::{ + shape::{RecursionShape, RecursionShapeConfig}, + RecursionProgram, +}; use sp1_stark::{MachineProver, ProofShape, DIGEST_SIZE}; -use crate::{components::SP1ProverComponents, CompressAir, HashableKey, SP1Prover}; +use crate::{components::SP1ProverComponents, CompressAir, HashableKey, SP1Prover, ShrinkAir}; #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub enum SP1ProofShape { @@ -55,6 +58,79 @@ pub enum VkBuildError { Bincode(#[from] bincode::Error), } +pub fn check_shapes( + reduce_batch_size: usize, + no_precompiles: bool, + num_compiler_workers: usize, + prover: &SP1Prover, +) -> bool { + let (shape_tx, shape_rx) = + std::sync::mpsc::sync_channel::(num_compiler_workers); + let (panic_tx, panic_rx) = std::sync::mpsc::channel(); + let core_shape_config = prover.core_shape_config.as_ref().expect("core shape config not found"); + let recursion_shape_config = + prover.recursion_shape_config.as_ref().expect("recursion shape config not found"); + + let shape_rx = Mutex::new(shape_rx); + + let all_maximal_shapes = SP1ProofShape::generate_maximal_shapes( + core_shape_config, + recursion_shape_config, + reduce_batch_size, + no_precompiles, + ) + .collect::>(); + let num_shapes = all_maximal_shapes.len(); + tracing::info!("number of shapes: {}", num_shapes); + + // The Merkle tree height. + let height = num_shapes.next_power_of_two().ilog2() as usize; + + let compress_ok = std::thread::scope(|s| { + // Initialize compiler workers. 
+ for _ in 0..num_compiler_workers { + let shape_rx = &shape_rx; + let prover = &prover; + let panic_tx = panic_tx.clone(); + s.spawn(move || { + while let Ok(shape) = shape_rx.lock().unwrap().recv() { + tracing::info!("shape is {:?}", shape); + let program = catch_unwind(AssertUnwindSafe(|| { + // Try to build the recursion program from the given shape. + prover.program_from_shape(true, shape.clone(), None) + })); + match program { + Ok(_) => {} + Err(e) => { + tracing::warn!( + "Program generation failed for shape {:?}, with error: {:?}", + shape, + e + ); + panic_tx.send(true).unwrap(); + } + } + } + }); + } + + // Generate shapes and send them to the compiler workers. + all_maximal_shapes.into_iter().for_each(|program_shape| { + shape_tx + .send(SP1CompressProgramShape::from_proof_shape(program_shape, height)) + .unwrap(); + }); + + drop(shape_tx); + drop(panic_tx); + + // If the panic receiver has no panics, then the shape is correct. + panic_rx.iter().next().is_none() + }); + + compress_ok +} + pub fn build_vk_map( reduce_batch_size: usize, dummy: bool, @@ -111,7 +187,7 @@ pub fn build_vk_map( while let Ok((i, shape)) = shape_rx.lock().unwrap().recv() { println!("shape {} is {:?}", i, shape); let program = catch_unwind(AssertUnwindSafe(|| { - prover.program_from_shape(shape.clone()) + prover.program_from_shape(false, shape.clone(), None) })); let is_shrink = matches!(shape, SP1CompressProgramShape::Shrink(_)); match program { @@ -244,10 +320,40 @@ impl SP1ProofShape { pub fn generate_compress_shapes( recursion_shape_config: &'_ RecursionShapeConfig>, reduce_batch_size: usize, - ) -> impl Iterator + '_ { - (1..=reduce_batch_size).flat_map(|batch_size| { - recursion_shape_config.get_all_shape_combinations(batch_size).map(Self::Compress) - }) + ) -> impl Iterator> + '_ { + recursion_shape_config.get_all_shape_combinations(reduce_batch_size) + } + + pub fn generate_maximal_shapes<'a>( + core_shape_config: &'a CoreShapeConfig, + recursion_shape_config: &'a RecursionShapeConfig>, + reduce_batch_size: usize, + no_precompiles: bool, + ) -> impl Iterator + 'a { + let core_shape_iter = if no_precompiles { + core_shape_config.maximal_core_shapes().into_iter() + } else { + core_shape_config.maximal_core_plus_precompile_shapes().into_iter() + }; + core_shape_iter + .map(|core_shape| { + Self::Recursion(ProofShape { + chip_information: core_shape.inner.into_iter().collect(), + }) + }) + .chain((1..=reduce_batch_size).flat_map(|batch_size| { + recursion_shape_config.get_all_shape_combinations(batch_size).map(Self::Compress) + })) + .chain( + recursion_shape_config + .get_all_shape_combinations(1) + .map(|mut x| Self::Deferred(x.pop().unwrap())), + ) + .chain( + recursion_shape_config + .get_all_shape_combinations(1) + .map(|mut x| Self::Shrink(x.pop().unwrap())), + ) } pub fn dummy_vk_map<'a>( @@ -284,7 +390,9 @@ impl SP1CompressProgramShape { impl SP1Prover { pub fn program_from_shape( &self, + shape_tuning: bool, shape: SP1CompressProgramShape, + shrink_shape: Option, ) -> Arc> { match shape { SP1CompressProgramShape::Recursion(shape) => { @@ -298,12 +406,15 @@ impl SP1Prover { SP1CompressProgramShape::Compress(shape) => { let input = SP1CompressWithVKeyWitnessValues::dummy(self.compress_prover.machine(), &shape); - self.compress_program(&input) + self.compress_program(shape_tuning, &input) } SP1CompressProgramShape::Shrink(shape) => { let input = SP1CompressWithVKeyWitnessValues::dummy(self.compress_prover.machine(), &shape); - self.shrink_program(&input) + self.shrink_program( + 
shrink_shape.unwrap_or_else(ShrinkAir::::shrink_shape), + &input, + ) } } } diff --git a/crates/recursion/core/src/machine.rs b/crates/recursion/core/src/machine.rs index b143487d1d..7c32cc34d8 100644 --- a/crates/recursion/core/src/machine.rs +++ b/crates/recursion/core/src/machine.rs @@ -156,10 +156,10 @@ impl, const DEGREE: usize> RecursionAi [ (Self::MemoryConst(MemoryConstChip::default()), 17), (Self::MemoryVar(MemoryVarChip::default()), 18), - (Self::BaseAlu(BaseAluChip), 20), - (Self::ExtAlu(ExtAluChip), 18), + (Self::BaseAlu(BaseAluChip), 15), + (Self::ExtAlu(ExtAluChip), 15), (Self::Poseidon2Wide(Poseidon2WideChip::), 16), - (Self::BatchFRI(BatchFRIChip::), 18), + (Self::BatchFRI(BatchFRIChip::), 17), (Self::Select(SelectChip), 18), (Self::ExpReverseBitsLen(ExpReverseBitsLenChip::), 17), (Self::PublicValues(PublicValuesChip), PUB_VALUES_LOG_HEIGHT), diff --git a/crates/recursion/core/src/shape.rs b/crates/recursion/core/src/shape.rs index 39f47cf72c..32d034b9ee 100644 --- a/crates/recursion/core/src/shape.rs +++ b/crates/recursion/core/src/shape.rs @@ -27,6 +27,18 @@ pub struct RecursionShape { pub(crate) inner: HashMap, } +impl RecursionShape { + pub fn clone_into_hash_map(&self) -> HashMap { + self.inner.clone() + } +} + +impl From> for RecursionShape { + fn from(value: HashMap) -> Self { + Self { inner: value } + } +} + pub struct RecursionShapeConfig { allowed_shapes: Vec>, _marker: PhantomData<(F, A)>, @@ -81,6 +93,27 @@ impl, const DEGREE: usize> }) .multi_cartesian_product() } + + pub fn union_config_with_extra_room(&self) -> Self { + let mut map = HashMap::new(); + for shape in self.allowed_shapes.clone() { + for key in shape.keys() { + let current = map.get(key).unwrap_or(&0); + map.insert(key.clone(), *current.max(shape.get(key).unwrap())); + } + } + map.values_mut().for_each(|x| *x += 2); + map.insert("PublicValues".to_string(), 4); + Self { allowed_shapes: vec![map], _marker: PhantomData } + } + + pub fn from_hash_map(hash_map: &HashMap) -> Self { + Self { allowed_shapes: vec![hash_map.clone()], _marker: PhantomData } + } + + pub fn first(&self) -> Option<&HashMap> { + self.allowed_shapes.first() + } } impl, const DEGREE: usize> Default @@ -103,135 +136,25 @@ impl, const DEGREE: usize> Default // Specify allowed shapes. 
let allowed_shapes = [ [ - (base_alu.clone(), 20), - (mem_var.clone(), 18), - (ext_alu.clone(), 18), - (exp_reverse_bits_len.clone(), 17), - (mem_const.clone(), 17), - (poseidon2_wide.clone(), 16), - (batch_fri.clone(), 18), - (select.clone(), 18), - (public_values.clone(), PUB_VALUES_LOG_HEIGHT), - ], - [ - (base_alu.clone(), 20), - (mem_var.clone(), 18), - (ext_alu.clone(), 18), - (exp_reverse_bits_len.clone(), 17), - (mem_const.clone(), 16), - (poseidon2_wide.clone(), 16), - (batch_fri.clone(), 18), - (select.clone(), 18), - (public_values.clone(), PUB_VALUES_LOG_HEIGHT), - ], - [ - (ext_alu.clone(), 20), - (base_alu.clone(), 19), + (ext_alu.clone(), 16), + (base_alu.clone(), 16), (mem_var.clone(), 19), (poseidon2_wide.clone(), 17), - (mem_const.clone(), 16), - (exp_reverse_bits_len.clone(), 16), - (batch_fri.clone(), 20), - (select.clone(), 18), - (public_values.clone(), PUB_VALUES_LOG_HEIGHT), - ], - [ - (base_alu.clone(), 19), - (mem_var.clone(), 18), - (ext_alu.clone(), 18), - (exp_reverse_bits_len.clone(), 17), - (mem_const.clone(), 16), - (poseidon2_wide.clone(), 16), - (batch_fri.clone(), 18), - (select.clone(), 18), - (public_values.clone(), PUB_VALUES_LOG_HEIGHT), - ], - [ - (base_alu.clone(), 19), - (mem_var.clone(), 18), - (ext_alu.clone(), 18), - (exp_reverse_bits_len.clone(), 16), - (mem_const.clone(), 16), - (poseidon2_wide.clone(), 16), + (mem_const.clone(), 18), (batch_fri.clone(), 18), - (select.clone(), 18), - (public_values.clone(), PUB_VALUES_LOG_HEIGHT), - ], - [ - (base_alu.clone(), 20), - (mem_var.clone(), 19), - (ext_alu.clone(), 19), - (exp_reverse_bits_len.clone(), 17), - (mem_const.clone(), 17), - (poseidon2_wide.clone(), 17), - (batch_fri.clone(), 19), - (select.clone(), 19), - (public_values.clone(), PUB_VALUES_LOG_HEIGHT), - ], - [ - (base_alu.clone(), 21), - (mem_var.clone(), 19), - (ext_alu.clone(), 19), (exp_reverse_bits_len.clone(), 18), - (mem_const.clone(), 18), - (poseidon2_wide.clone(), 17), - (batch_fri.clone(), 19), (select.clone(), 19), (public_values.clone(), PUB_VALUES_LOG_HEIGHT), ], [ - (base_alu.clone(), 21), + (ext_alu.clone(), 17), + (base_alu.clone(), 16), (mem_var.clone(), 19), - (ext_alu.clone(), 19), - (exp_reverse_bits_len.clone(), 18), - (mem_const.clone(), 17), (poseidon2_wide.clone(), 17), - (batch_fri.clone(), 19), - (select.clone(), 19), - (public_values.clone(), PUB_VALUES_LOG_HEIGHT), - ], - [ - (ext_alu.clone(), 21), - (base_alu.clone(), 20), - (mem_var.clone(), 20), - (poseidon2_wide.clone(), 18), - (mem_const.clone(), 17), - (exp_reverse_bits_len.clone(), 17), + (mem_const.clone(), 18), (batch_fri.clone(), 21), - (select.clone(), 19), - (public_values.clone(), PUB_VALUES_LOG_HEIGHT), - ], - [ - (base_alu.clone(), 20), - (mem_var.clone(), 19), - (ext_alu.clone(), 19), - (exp_reverse_bits_len.clone(), 18), - (mem_const.clone(), 17), - (poseidon2_wide.clone(), 17), - (batch_fri.clone(), 19), - (select.clone(), 19), - (public_values.clone(), PUB_VALUES_LOG_HEIGHT), - ], - [ - (base_alu.clone(), 20), - (mem_var.clone(), 19), - (ext_alu.clone(), 19), - (exp_reverse_bits_len.clone(), 17), - (mem_const.clone(), 17), - (poseidon2_wide.clone(), 17), - (batch_fri.clone(), 19), - (select.clone(), 19), - (public_values.clone(), PUB_VALUES_LOG_HEIGHT), - ], - [ - (base_alu.clone(), 21), - (mem_var.clone(), 20), - (ext_alu.clone(), 20), (exp_reverse_bits_len.clone(), 18), - (mem_const.clone(), 18), - (poseidon2_wide.clone(), 18), - (batch_fri.clone(), 20), - (select.clone(), 19), + (select.clone(), 20), (public_values.clone(), 
PUB_VALUES_LOG_HEIGHT), ], ] From 61a2c7bf78b6e037eac7cf5afb4ca1922ab723d5 Mon Sep 17 00:00:00 2001 From: John Guibas Date: Tue, 26 Nov 2024 20:29:50 -0800 Subject: [PATCH 05/15] feat: v4.0.0-rc.1 --- .clang-format | 83 ++ .github/actions/setup/action.yml | 14 +- .github/workflows/main.yml | 4 +- .github/workflows/pr.yml | 160 ++-- .github/workflows/release.yml | 84 +- .github/workflows/suite.yml | 2 +- .github/workflows/toolchain-ec2.yml | 19 +- .vscode/settings.json | 18 + Cargo.lock | 577 ++++++++---- Cargo.toml | 52 +- DEVELOPMENT.md | 2 +- audits/rkm0959.md | 2 +- book/SUMMARY.md | 2 +- book/developers/rv32im-deviations.md | 27 + book/developers/rv32im-specification.md | 8 - book/generating-proofs/basics.md | 3 +- .../prover-network/versions.md | 3 +- book/getting-started/hardware-requirements.md | 4 +- book/getting-started/quickstart.md | 4 +- book/verification/off-chain-verification.md | 41 +- book/verification/onchain/getting-started.md | 3 +- book/verification/supported-versions.md | 1 + book/writing-programs/compiling.md | 2 +- crates/build/src/build.rs | 57 +- crates/build/src/command/utils.rs | 9 +- crates/build/src/lib.rs | 33 +- crates/build/src/utils.rs | 36 +- crates/cli/Cargo.toml | 3 +- crates/cli/src/bin/cargo-prove.rs | 14 +- crates/cli/src/commands/build_toolchain.rs | 3 +- crates/cli/src/commands/mod.rs | 1 - crates/cli/src/commands/prove.rs | 133 --- crates/cli/src/commands/vkey.rs | 60 +- crates/cli/src/lib.rs | 10 +- crates/cli/src/util.rs | 15 - crates/core/executor/Cargo.toml | 13 +- crates/core/executor/src/context.rs | 20 +- crates/core/executor/src/events/alu.rs | 1 + crates/core/executor/src/events/memory.rs | 9 +- .../executor/src/events/precompiles/mod.rs | 12 +- .../src/events/precompiles/u256x2048_mul.rs | 49 + crates/core/executor/src/events/syscall.rs | 5 +- crates/core/executor/src/events/utils.rs | 78 +- crates/core/executor/src/executor.rs | 42 +- crates/core/executor/src/hook.rs | 6 +- crates/core/executor/src/instruction.rs | 1 + crates/core/executor/src/memory.rs | 3 +- crates/core/executor/src/opcode.rs | 1 + crates/core/executor/src/program.rs | 47 +- crates/core/executor/src/programs.rs | 134 +-- crates/core/executor/src/record.rs | 46 +- crates/core/executor/src/report.rs | 6 +- crates/core/executor/src/state.rs | 3 +- crates/core/executor/src/syscalls/code.rs | 5 + crates/core/executor/src/syscalls/context.rs | 6 +- crates/core/executor/src/syscalls/mod.rs | 3 + .../precompiles/fptower/fp2_addsub.rs | 4 +- .../executor/src/syscalls/precompiles/mod.rs | 1 + .../src/syscalls/precompiles/u256x2048_mul.rs | 91 ++ crates/core/executor/src/syscalls/verify.rs | 28 +- crates/core/machine/Cargo.toml | 24 +- crates/core/machine/build.rs | 169 ++++ crates/core/machine/cpp/extern.cpp | 28 + crates/core/machine/include/add_sub.hpp | 38 + .../include/bb31_septic_extension_t.hpp | 511 ++++++++++ crates/core/machine/include/bb31_t.hpp | 640 +++++++++++++ crates/core/machine/include/bitwise.hpp | 19 + crates/core/machine/include/cpu.hpp | 555 +++++++++++ crates/core/machine/include/lt.hpp | 100 ++ crates/core/machine/include/memory.hpp | 116 +++ crates/core/machine/include/memory_global.hpp | 37 + crates/core/machine/include/memory_local.hpp | 82 ++ crates/core/machine/include/mul.hpp | 111 +++ crates/core/machine/include/prelude.hpp | 23 + crates/core/machine/include/sll.hpp | 66 ++ crates/core/machine/include/sr.hpp | 106 +++ crates/core/machine/include/sys.hpp | 14 + crates/core/machine/include/syscall.hpp | 77 ++ crates/core/machine/include/utils.hpp | 
134 +++ crates/core/machine/src/alu/add_sub/mod.rs | 105 ++- crates/core/machine/src/alu/bitwise/mod.rs | 29 +- crates/core/machine/src/alu/divrem/mod.rs | 95 +- crates/core/machine/src/alu/lt/mod.rs | 15 +- crates/core/machine/src/alu/mul/mod.rs | 68 +- crates/core/machine/src/alu/sll/mod.rs | 20 +- crates/core/machine/src/alu/sr/mod.rs | 15 +- crates/core/machine/src/bytes/trace.rs | 4 +- crates/core/machine/src/cpu/air/branch.rs | 3 - crates/core/machine/src/cpu/air/ecall.rs | 1 - crates/core/machine/src/cpu/air/memory.rs | 2 - crates/core/machine/src/cpu/air/mod.rs | 4 - crates/core/machine/src/cpu/columns/auipc.rs | 1 - crates/core/machine/src/cpu/columns/branch.rs | 9 - crates/core/machine/src/cpu/columns/ecall.rs | 3 - crates/core/machine/src/cpu/columns/jump.rs | 3 - crates/core/machine/src/cpu/columns/memory.rs | 3 - crates/core/machine/src/cpu/columns/mod.rs | 4 - crates/core/machine/src/cpu/trace.rs | 87 +- crates/core/machine/src/lib.rs | 4 +- crates/core/machine/src/memory/global.rs | 226 +++-- crates/core/machine/src/memory/local.rs | 451 ++++++++- crates/core/machine/src/memory/program.rs | 106 ++- .../machine/src/operations/field/field_den.rs | 7 +- .../operations/field/field_inner_product.rs | 9 +- .../machine/src/operations/field/field_op.rs | 89 +- .../src/operations/field/field_sqrt.rs | 9 +- .../src/operations/global_accumulation.rs | 222 +++++ .../src/operations/global_interaction.rs | 337 +++++++ crates/core/machine/src/operations/mod.rs | 4 + crates/core/machine/src/riscv/cost.rs | 6 +- crates/core/machine/src/riscv/mod.rs | 92 +- crates/core/machine/src/riscv/shape.rs | 218 ++++- crates/core/machine/src/runtime/syscall.rs | 9 + crates/core/machine/src/sys.rs | 163 ++++ crates/core/machine/src/syscall/chip.rs | 208 +++-- .../machine/src/syscall/precompiles/README.md | 20 +- .../src/syscall/precompiles/edwards/ed_add.rs | 38 +- .../precompiles/edwards/ed_decompress.rs | 36 +- .../src/syscall/precompiles/fptower/fp.rs | 30 +- .../syscall/precompiles/fptower/fp2_addsub.rs | 34 +- .../syscall/precompiles/fptower/fp2_mul.rs | 27 +- .../src/syscall/precompiles/fptower/mod.rs | 30 +- .../src/syscall/precompiles/keccak256/air.rs | 31 +- .../syscall/precompiles/keccak256/columns.rs | 1 - .../src/syscall/precompiles/keccak256/mod.rs | 14 +- .../syscall/precompiles/keccak256/trace.rs | 11 +- .../machine/src/syscall/precompiles/mod.rs | 1 + .../precompiles/sha256/compress/air.rs | 5 - .../precompiles/sha256/compress/columns.rs | 1 - .../precompiles/sha256/compress/mod.rs | 12 +- .../precompiles/sha256/compress/trace.rs | 17 +- .../syscall/precompiles/sha256/extend/air.rs | 5 - .../precompiles/sha256/extend/columns.rs | 1 - .../syscall/precompiles/sha256/extend/mod.rs | 16 +- .../precompiles/sha256/extend/trace.rs | 16 +- .../syscall/precompiles/u256x2048_mul/air.rs | 415 ++++++++ .../syscall/precompiles/u256x2048_mul/mod.rs | 212 +++++ .../src/syscall/precompiles/uint256/air.rs | 28 +- .../src/syscall/precompiles/uint256/mod.rs | 5 +- .../weierstrass/weierstrass_add.rs | 62 +- .../weierstrass/weierstrass_decompress.rs | 43 +- .../weierstrass/weierstrass_double.rs | 48 +- crates/core/machine/src/utils/logger.rs | 6 +- crates/core/machine/src/utils/mod.rs | 12 +- crates/core/machine/src/utils/programs.rs | 112 --- crates/core/machine/src/utils/prove.rs | 781 ++++------------ crates/core/machine/src/utils/span.rs | 6 +- crates/core/machine/src/utils/test.rs | 138 +++ crates/core/machine/src/utils/tracer.rs | 23 - crates/core/machine/src/utils/uni_stark.rs | 68 ++ 
crates/cuda/Cargo.toml | 9 +- crates/cuda/proto/api.proto | 9 + crates/cuda/src/lib.rs | 206 ++-- crates/cuda/src/proto/api.rs | 40 + crates/curves/Cargo.toml | 4 +- crates/eval/Cargo.toml | 4 +- crates/eval/src/lib.rs | 13 +- crates/perf/Cargo.toml | 1 + crates/perf/src/main.rs | 56 +- crates/primitives/Cargo.toml | 2 +- crates/primitives/src/consts.rs | 6 + crates/prover/Cargo.toml | 17 +- crates/prover/scripts/e2e.rs | 6 +- crates/prover/scripts/fibonacci_groth16.rs | 77 -- crates/prover/scripts/fibonacci_sweep.rs | 90 -- .../find_minimal_large_recursion_shape.rs | 10 +- crates/prover/scripts/tendermint_sweep.rs | 90 -- crates/prover/src/build.rs | 4 +- crates/prover/src/lib.rs | 770 +++++++-------- crates/prover/src/types.rs | 11 + crates/prover/src/verify.rs | 6 +- crates/recursion/circuit/Cargo.toml | 12 +- crates/recursion/circuit/src/constraints.rs | 20 +- crates/recursion/circuit/src/fri.rs | 4 +- crates/recursion/circuit/src/hash.rs | 16 +- .../recursion/circuit/src/machine/complete.rs | 21 +- .../recursion/circuit/src/machine/compress.rs | 88 +- crates/recursion/circuit/src/machine/core.rs | 122 +-- .../recursion/circuit/src/machine/deferred.rs | 47 +- .../circuit/src/machine/vkey_proof.rs | 8 +- .../recursion/circuit/src/machine/witness.rs | 22 +- crates/recursion/circuit/src/machine/wrap.rs | 19 +- crates/recursion/circuit/src/merkle_tree.rs | 4 +- crates/recursion/circuit/src/stark.rs | 174 ++-- crates/recursion/circuit/src/types.rs | 9 +- crates/recursion/circuit/src/utils.rs | 15 +- crates/recursion/circuit/src/witness/mod.rs | 39 +- crates/recursion/compiler/Cargo.toml | 10 +- .../recursion/compiler/src/circuit/builder.rs | 119 ++- .../compiler/src/circuit/compiler.rs | 50 +- crates/recursion/compiler/src/ir/builder.rs | 30 +- .../recursion/compiler/src/ir/instructions.rs | 14 +- crates/recursion/compiler/src/ir/symbolic.rs | 2 +- crates/recursion/core/Cargo.toml | 22 +- crates/recursion/core/build.rs | 199 ++++ crates/recursion/core/cpp/extern.cpp | 141 +++ crates/recursion/core/include/alu_base.hpp | 37 + crates/recursion/core/include/alu_ext.hpp | 37 + crates/recursion/core/include/batch_fri.hpp | 27 + .../core/include/exp_reverse_bits.hpp | 32 + crates/recursion/core/include/fri_fold.hpp | 56 ++ crates/recursion/core/include/poseidon2.hpp | 611 ++++++++++++ .../core/include/poseidon2_skinny.hpp | 115 +++ .../recursion/core/include/poseidon2_wide.hpp | 149 +++ crates/recursion/core/include/prelude.hpp | 23 + .../recursion/core/include/public_values.hpp | 21 + crates/recursion/core/include/select.hpp | 20 + crates/recursion/core/include/sys.hpp | 12 + .../recursion/core/src/air/public_values.rs | 17 +- crates/recursion/core/src/chips/alu_base.rs | 142 +++ crates/recursion/core/src/chips/alu_ext.rs | 145 +++ crates/recursion/core/src/chips/batch_fri.rs | 176 +++- .../core/src/chips/exp_reverse_bits.rs | 194 +++- crates/recursion/core/src/chips/fri_fold.rs | 212 ++++- crates/recursion/core/src/chips/mem/mod.rs | 5 +- .../recursion/core/src/chips/mem/variable.rs | 9 +- .../src/chips/poseidon2_skinny/columns/mod.rs | 4 +- .../poseidon2_skinny/columns/preprocessed.rs | 8 +- .../core/src/chips/poseidon2_skinny/trace.rs | 146 ++- .../poseidon2_wide/columns/preprocessed.rs | 8 +- .../core/src/chips/poseidon2_wide/trace.rs | 211 ++++- .../recursion/core/src/chips/public_values.rs | 150 ++- crates/recursion/core/src/chips/select.rs | 167 +++- crates/recursion/core/src/lib.rs | 162 ++++ crates/recursion/core/src/machine.rs | 4 + .../recursion/core/src/runtime/instruction.rs | 10 + 
crates/recursion/core/src/runtime/mod.rs | 59 +- crates/recursion/core/src/runtime/opcode.rs | 2 + crates/recursion/core/src/runtime/program.rs | 8 +- crates/recursion/core/src/sys.rs | 117 +++ crates/recursion/gnark-ffi/Cargo.toml | 4 +- crates/recursion/gnark-ffi/src/ffi/docker.rs | 7 +- crates/sdk/Cargo.toml | 12 +- crates/sdk/src/action.rs | 12 + crates/sdk/src/lib.rs | 244 +++-- crates/sdk/src/network-v2/client.rs | 53 +- crates/sdk/src/network-v2/prover.rs | 39 +- crates/sdk/src/network/client.rs | 19 +- crates/sdk/src/network/prover.rs | 46 +- crates/sdk/src/proof.rs | 89 ++ crates/sdk/src/provers/cpu.rs | 14 +- crates/sdk/src/provers/cuda.rs | 11 +- crates/sdk/src/provers/mock.rs | 11 +- crates/sdk/src/provers/mod.rs | 8 +- crates/stark/Cargo.toml | 8 +- crates/stark/src/air/builder.rs | 43 +- crates/stark/src/air/machine.rs | 4 +- crates/stark/src/chip.rs | 41 +- crates/stark/src/debug.rs | 28 +- crates/stark/src/folder.rs | 39 +- crates/stark/src/lib.rs | 3 + crates/stark/src/machine.rs | 103 +- crates/stark/src/opts.rs | 2 +- crates/stark/src/permutation.rs | 313 +++---- crates/stark/src/prover.rs | 498 +++------- crates/stark/src/quotient.rs | 13 +- crates/stark/src/septic_curve.rs | 346 +++++++ crates/stark/src/septic_digest.rs | 98 ++ crates/stark/src/septic_extension.rs | 883 ++++++++++++++++++ crates/stark/src/types.rs | 66 +- crates/stark/src/verifier.rs | 86 +- crates/test-artifacts/Cargo.toml | 14 + crates/test-artifacts/Makefile | 8 + crates/test-artifacts/build.rs | 20 + .../test-artifacts/programs}/Cargo.lock | 303 +++--- .../test-artifacts/programs}/Cargo.toml | 6 + .../test-artifacts/programs}/Makefile | 0 .../programs/bls12381-add/Cargo.toml | 11 + .../programs}/bls12381-add/src/main.rs | 0 .../programs}/bls12381-decompress/Cargo.toml | 2 +- .../programs}/bls12381-decompress/src/main.rs | 0 .../programs}/bls12381-double/Cargo.toml | 2 +- .../programs}/bls12381-double/src/main.rs | 0 .../programs}/bls12381-fp/Cargo.toml | 2 +- .../programs}/bls12381-fp/src/main.rs | 0 .../programs}/bls12381-fp2-addsub/Cargo.toml | 2 +- .../programs}/bls12381-fp2-addsub/src/main.rs | 0 .../programs}/bls12381-fp2-mul/Cargo.toml | 2 +- .../programs}/bls12381-fp2-mul/src/main.rs | 0 .../programs/bls12381-mul/Cargo.toml | 10 + .../programs}/bls12381-mul/src/main.rs | 0 .../programs/bn254-add/Cargo.toml | 11 + .../programs}/bn254-add/src/main.rs | 0 .../programs}/bn254-double/Cargo.toml | 2 +- .../programs}/bn254-double/src/main.rs | 0 .../programs}/bn254-fp/Cargo.toml | 2 +- .../programs}/bn254-fp/src/main.rs | 0 .../programs}/bn254-fp2-addsub/Cargo.toml | 2 +- .../programs}/bn254-fp2-addsub/src/main.rs | 0 .../programs}/bn254-fp2-mul/Cargo.toml | 2 +- .../programs}/bn254-fp2-mul/src/main.rs | 0 .../programs/bn254-mul/Cargo.toml | 10 + .../programs}/bn254-mul/src/main.rs | 0 .../programs}/common/Cargo.toml | 2 +- .../programs}/common/src/lib.rs | 0 .../programs}/common/src/weierstrass_add.rs | 0 .../programs/cycle-tracker/Cargo.toml | 9 + .../programs}/cycle-tracker/src/main.rs | 0 .../programs}/ed-add/Cargo.toml | 2 +- .../programs}/ed-add/src/main.rs | 0 .../programs}/ed-decompress/Cargo.toml | 2 +- .../programs}/ed-decompress/src/main.rs | 0 .../programs}/ed25519/Cargo.toml | 2 +- .../programs}/ed25519/src/main.rs | 0 .../programs}/fibonacci/Cargo.toml | 2 +- .../programs}/fibonacci/src/main.rs | 0 .../programs}/hint-io/Cargo.toml | 2 +- .../programs}/hint-io/src/main.rs | 0 .../programs}/keccak-permute/Cargo.toml | 2 +- .../programs}/keccak-permute/src/main.rs | 0 
.../programs}/keccak256/Cargo.toml | 2 +- .../programs}/keccak256/src/main.rs | 0 .../test-artifacts/programs}/panic/Cargo.toml | 2 +- .../programs}/panic/src/main.rs | 0 .../test-artifacts/programs}/rand/Cargo.toml | 2 +- .../test-artifacts/programs}/rand/src/main.rs | 0 .../programs/secp256k1-add/Cargo.toml | 11 + .../programs}/secp256k1-add/src/main.rs | 0 .../programs}/secp256k1-decompress/Cargo.toml | 2 +- .../secp256k1-decompress/src/main.rs | 0 .../programs}/secp256k1-double/Cargo.toml | 2 +- .../programs}/secp256k1-double/src/main.rs | 0 .../programs/secp256k1-mul/Cargo.toml | 9 + .../programs}/secp256k1-mul/src/main.rs | 0 .../programs}/secp256r1-add/Cargo.toml | 6 +- .../programs}/secp256r1-add/src/main.rs | 0 .../programs}/secp256r1-decompress/Cargo.toml | 2 +- .../secp256r1-decompress/src/main.rs | 0 .../programs}/secp256r1-double/Cargo.toml | 6 +- .../programs}/secp256r1-double/src/main.rs | 0 .../programs}/sha-compress/Cargo.toml | 2 +- .../programs}/sha-compress/src/main.rs | 0 .../programs}/sha-extend/Cargo.toml | 2 +- .../programs}/sha-extend/src/main.rs | 0 .../test-artifacts/programs}/sha2/Cargo.toml | 2 +- .../test-artifacts/programs}/sha2/src/main.rs | 0 .../programs}/tendermint-benchmark/Cargo.toml | 6 +- .../src/fixtures/1/next_validators.json | 0 .../src/fixtures/1/signed_header.json | 0 .../src/fixtures/1/validators.json | 0 .../src/fixtures/2/next_validators.json | 0 .../src/fixtures/2/signed_header.json | 0 .../src/fixtures/2/validators.json | 0 .../src/fixtures/small-1/next_validators.json | 0 .../src/fixtures/small-1/signed_header.json | 0 .../src/fixtures/small-1/validators.json | 0 .../src/fixtures/small-2/next_validators.json | 0 .../src/fixtures/small-2/signed_header.json | 0 .../src/fixtures/small-2/validators.json | 0 .../tendermint-benchmark/src/main.rs | 0 .../programs/u256x2048-mul/Cargo.toml | 13 + .../programs/u256x2048-mul/src/main.rs | 84 ++ .../programs}/uint256-arith/Cargo.toml | 4 +- .../programs}/uint256-arith/src/main.rs | 0 .../programs}/uint256-mul/Cargo.toml | 4 +- .../programs}/uint256-mul/src/main.rs | 0 .../programs}/verify-proof/Cargo.toml | 2 +- .../programs}/verify-proof/src/main.rs | 0 crates/test-artifacts/src/lib.rs | 79 ++ crates/verifier/Cargo.toml | 12 +- crates/verifier/README.md | 2 +- crates/verifier/src/error.rs | 2 +- crates/verifier/src/groth16/ark_converter.rs | 196 ++++ crates/verifier/src/groth16/converter.rs | 6 +- crates/verifier/src/groth16/error.rs | 2 +- crates/verifier/src/groth16/mod.rs | 67 +- crates/verifier/src/groth16/verify.rs | 4 +- crates/verifier/src/lib.rs | 10 +- crates/verifier/src/plonk/error.rs | 2 +- crates/verifier/src/plonk/hash_to_field.rs | 3 +- crates/verifier/src/plonk/mod.rs | 56 +- crates/verifier/src/plonk/verify.rs | 11 +- crates/verifier/src/tests.rs | 34 +- crates/zkvm/entrypoint/src/syscalls/mod.rs | 5 + crates/zkvm/entrypoint/src/syscalls/sys.rs | 2 +- .../entrypoint/src/syscalls/u256x2048_mul.rs | 29 + crates/zkvm/lib/Cargo.toml | 2 +- crates/zkvm/lib/src/lib.rs | 7 + crates/zkvm/lib/src/utils.rs | 6 +- examples/Cargo.lock | 229 +++-- examples/Cargo.toml | 5 + .../program/elf/riscv32im-succinct-zkvm-elf | Bin 141956 -> 0 bytes examples/aggregation/script/Cargo.toml | 2 +- .../program/elf/riscv32im-succinct-zkvm-elf | Bin 304916 -> 0 bytes .../program/elf/riscv32im-succinct-zkvm-elf | Bin 274368 -> 0 bytes .../program/elf/riscv32im-succinct-zkvm-elf | Bin 1979628 -> 0 bytes examples/cycle-tracking/program/elf/normal | Bin 103224 -> 0 bytes examples/cycle-tracking/program/elf/report | 
Bin 101708 -> 0 bytes .../program/elf/riscv32im-succinct-zkvm-elf | Bin 105052 -> 0 bytes examples/fibonacci/script/bin/compressed.rs | 4 +- examples/fibonacci/script/bin/execute.rs | 4 +- .../fibonacci/script/bin/groth16_bn254.rs | 4 +- examples/fibonacci/script/bin/plonk_bn254.rs | 4 +- examples/fibonacci/script/src/main.rs | 2 +- examples/io/program/Cargo.toml | 2 +- .../program/elf/riscv32im-succinct-zkvm-elf | Bin 182212 -> 0 bytes examples/io/script/Cargo.toml | 2 +- .../program/elf/riscv32im-succinct-zkvm-elf | Bin 107096 -> 0 bytes examples/json/lib/Cargo.toml | 2 +- examples/json/program/Cargo.toml | 2 +- .../json/program/elf/riscv32im-curta-zkvm-elf | Bin 163988 -> 0 bytes .../program/elf/riscv32im-succinct-zkvm-elf | Bin 188472 -> 0 bytes examples/json/script/Cargo.toml | 2 +- .../program/elf/riscv32im-succinct-zkvm-elf | Bin 627592 -> 0 bytes .../program/elf/riscv32im-succinct-zkvm-elf | Bin 2153596 -> 0 bytes examples/riscv32im-succinct-zkvm-elf | Bin 105028 -> 0 bytes .../program/elf/riscv32im-succinct-zkvm-elf | Bin 280396 -> 0 bytes .../program/elf/riscv32im-succinct-zkvm-elf | Bin 5109616 -> 0 bytes examples/ssz-withdrawals/program/Cargo.toml | 2 +- .../program/elf/riscv32im-succinct-zkvm-elf | Bin 193040 -> 0 bytes .../program/elf/riscv32im-succinct-zkvm-elf | Bin 1152936 -> 0 bytes examples/tendermint/script/Cargo.toml | 2 +- rustfmt.toml | 16 +- tests/bls12381-add/Cargo.toml | 11 - .../elf/riscv32im-succinct-zkvm-elf | Bin 126316 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 114192 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 106964 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 240076 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 248220 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 235964 -> 0 bytes tests/bls12381-mul/Cargo.toml | 10 - .../elf/riscv32im-succinct-zkvm-elf | Bin 119056 -> 0 bytes tests/bn254-add/Cargo.toml | 11 - .../bn254-add/elf/riscv32im-succinct-zkvm-elf | Bin 125892 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 106108 -> 0 bytes .../bn254-fp/elf/riscv32im-succinct-zkvm-elf | Bin 238060 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 248064 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 235964 -> 0 bytes tests/bn254-mul/Cargo.toml | 10 - .../bn254-mul/elf/riscv32im-succinct-zkvm-elf | Bin 121104 -> 0 bytes tests/cycle-tracker/Cargo.toml | 9 - .../elf/riscv32im-succinct-zkvm-elf | Bin 100280 -> 0 bytes tests/ed-add/elf/riscv32im-succinct-zkvm-elf | Bin 106904 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 106968 -> 0 bytes tests/ed25519/elf/riscv32im-succinct-zkvm-elf | Bin 224400 -> 0 bytes .../fibonacci/elf/riscv32im-succinct-zkvm-elf | Bin 119496 -> 0 bytes tests/hint-io/elf/riscv32im-succinct-zkvm-elf | Bin 152036 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 108076 -> 0 bytes .../keccak256/elf/riscv32im-succinct-zkvm-elf | Bin 176588 -> 0 bytes tests/panic/elf/riscv32im-succinct-zkvm-elf | Bin 83960 -> 0 bytes tests/rand/elf/riscv32im-succinct-zkvm-elf | Bin 148672 -> 0 bytes tests/secp256k1-add/Cargo.toml | 11 - .../elf/riscv32im-succinct-zkvm-elf | Bin 125884 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 91420 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 106528 -> 0 bytes tests/secp256k1-mul/Cargo.toml | 9 - .../elf/riscv32im-succinct-zkvm-elf | Bin 121536 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 125900 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 101172 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 97516 -> 0 bytes 
.../elf/riscv32im-succinct-zkvm-elf | Bin 105772 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 105544 -> 0 bytes tests/sha2/elf/riscv32im-succinct-zkvm-elf | Bin 109560 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 965240 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 107736 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 241464 -> 0 bytes .../elf/riscv32im-succinct-zkvm-elf | Bin 178040 -> 0 bytes 457 files changed, 15342 insertions(+), 4913 deletions(-) create mode 100644 .clang-format create mode 100644 book/developers/rv32im-deviations.md delete mode 100644 book/developers/rv32im-specification.md create mode 100644 book/verification/supported-versions.md delete mode 100644 crates/cli/src/commands/prove.rs delete mode 100644 crates/cli/src/util.rs create mode 100644 crates/core/executor/src/events/precompiles/u256x2048_mul.rs create mode 100644 crates/core/executor/src/syscalls/precompiles/u256x2048_mul.rs create mode 100644 crates/core/machine/build.rs create mode 100644 crates/core/machine/cpp/extern.cpp create mode 100644 crates/core/machine/include/add_sub.hpp create mode 100644 crates/core/machine/include/bb31_septic_extension_t.hpp create mode 100644 crates/core/machine/include/bb31_t.hpp create mode 100644 crates/core/machine/include/bitwise.hpp create mode 100644 crates/core/machine/include/cpu.hpp create mode 100644 crates/core/machine/include/lt.hpp create mode 100644 crates/core/machine/include/memory.hpp create mode 100644 crates/core/machine/include/memory_global.hpp create mode 100644 crates/core/machine/include/memory_local.hpp create mode 100644 crates/core/machine/include/mul.hpp create mode 100644 crates/core/machine/include/prelude.hpp create mode 100644 crates/core/machine/include/sll.hpp create mode 100644 crates/core/machine/include/sr.hpp create mode 100644 crates/core/machine/include/sys.hpp create mode 100644 crates/core/machine/include/syscall.hpp create mode 100644 crates/core/machine/include/utils.hpp create mode 100644 crates/core/machine/src/operations/global_accumulation.rs create mode 100644 crates/core/machine/src/operations/global_interaction.rs create mode 100644 crates/core/machine/src/sys.rs create mode 100644 crates/core/machine/src/syscall/precompiles/u256x2048_mul/air.rs create mode 100644 crates/core/machine/src/syscall/precompiles/u256x2048_mul/mod.rs delete mode 100644 crates/core/machine/src/utils/programs.rs create mode 100644 crates/core/machine/src/utils/test.rs delete mode 100644 crates/core/machine/src/utils/tracer.rs create mode 100644 crates/core/machine/src/utils/uni_stark.rs delete mode 100644 crates/prover/scripts/fibonacci_groth16.rs delete mode 100644 crates/prover/scripts/fibonacci_sweep.rs delete mode 100644 crates/prover/scripts/tendermint_sweep.rs create mode 100644 crates/recursion/core/build.rs create mode 100644 crates/recursion/core/cpp/extern.cpp create mode 100644 crates/recursion/core/include/alu_base.hpp create mode 100644 crates/recursion/core/include/alu_ext.hpp create mode 100644 crates/recursion/core/include/batch_fri.hpp create mode 100644 crates/recursion/core/include/exp_reverse_bits.hpp create mode 100644 crates/recursion/core/include/fri_fold.hpp create mode 100644 crates/recursion/core/include/poseidon2.hpp create mode 100644 crates/recursion/core/include/poseidon2_skinny.hpp create mode 100644 crates/recursion/core/include/poseidon2_wide.hpp create mode 100644 crates/recursion/core/include/prelude.hpp create mode 100644 crates/recursion/core/include/public_values.hpp create mode 
100644 crates/recursion/core/include/select.hpp create mode 100644 crates/recursion/core/include/sys.hpp create mode 100644 crates/recursion/core/src/sys.rs create mode 100644 crates/stark/src/septic_curve.rs create mode 100644 crates/stark/src/septic_digest.rs create mode 100644 crates/stark/src/septic_extension.rs create mode 100644 crates/test-artifacts/Cargo.toml create mode 100644 crates/test-artifacts/Makefile create mode 100644 crates/test-artifacts/build.rs rename {tests => crates/test-artifacts/programs}/Cargo.lock (91%) rename {tests => crates/test-artifacts/programs}/Cargo.toml (90%) rename {tests => crates/test-artifacts/programs}/Makefile (100%) create mode 100644 crates/test-artifacts/programs/bls12381-add/Cargo.toml rename {tests => crates/test-artifacts/programs}/bls12381-add/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/bls12381-decompress/Cargo.toml (64%) rename {tests => crates/test-artifacts/programs}/bls12381-decompress/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/bls12381-double/Cargo.toml (64%) rename {tests => crates/test-artifacts/programs}/bls12381-double/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/bls12381-fp/Cargo.toml (70%) rename {tests => crates/test-artifacts/programs}/bls12381-fp/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/bls12381-fp2-addsub/Cargo.toml (71%) rename {tests => crates/test-artifacts/programs}/bls12381-fp2-addsub/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/bls12381-fp2-mul/Cargo.toml (71%) rename {tests => crates/test-artifacts/programs}/bls12381-fp2-mul/src/main.rs (100%) create mode 100644 crates/test-artifacts/programs/bls12381-mul/Cargo.toml rename {tests => crates/test-artifacts/programs}/bls12381-mul/src/main.rs (100%) create mode 100644 crates/test-artifacts/programs/bn254-add/Cargo.toml rename {tests => crates/test-artifacts/programs}/bn254-add/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/bn254-double/Cargo.toml (63%) rename {tests => crates/test-artifacts/programs}/bn254-double/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/bn254-fp/Cargo.toml (69%) rename {tests => crates/test-artifacts/programs}/bn254-fp/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/bn254-fp2-addsub/Cargo.toml (71%) rename {tests => crates/test-artifacts/programs}/bn254-fp2-addsub/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/bn254-fp2-mul/Cargo.toml (70%) rename {tests => crates/test-artifacts/programs}/bn254-fp2-mul/src/main.rs (100%) create mode 100644 crates/test-artifacts/programs/bn254-mul/Cargo.toml rename {tests => crates/test-artifacts/programs}/bn254-mul/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/common/Cargo.toml (70%) rename {tests => crates/test-artifacts/programs}/common/src/lib.rs (100%) rename {tests => crates/test-artifacts/programs}/common/src/weierstrass_add.rs (100%) create mode 100644 crates/test-artifacts/programs/cycle-tracker/Cargo.toml rename {tests => crates/test-artifacts/programs}/cycle-tracker/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/ed-add/Cargo.toml (62%) rename {tests => crates/test-artifacts/programs}/ed-add/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/ed-decompress/Cargo.toml (68%) rename {tests => crates/test-artifacts/programs}/ed-decompress/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/ed25519/Cargo.toml (80%) rename {tests => 
crates/test-artifacts/programs}/ed25519/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/fibonacci/Cargo.toml (65%) rename {tests => crates/test-artifacts/programs}/fibonacci/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/hint-io/Cargo.toml (62%) rename {tests => crates/test-artifacts/programs}/hint-io/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/keccak-permute/Cargo.toml (64%) rename {tests => crates/test-artifacts/programs}/keccak-permute/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/keccak256/Cargo.toml (79%) rename {tests => crates/test-artifacts/programs}/keccak256/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/panic/Cargo.toml (62%) rename {tests => crates/test-artifacts/programs}/panic/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/rand/Cargo.toml (65%) rename {tests => crates/test-artifacts/programs}/rand/src/main.rs (100%) create mode 100644 crates/test-artifacts/programs/secp256k1-add/Cargo.toml rename {tests => crates/test-artifacts/programs}/secp256k1-add/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/secp256k1-decompress/Cargo.toml (65%) rename {tests => crates/test-artifacts/programs}/secp256k1-decompress/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/secp256k1-double/Cargo.toml (64%) rename {tests => crates/test-artifacts/programs}/secp256k1-double/src/main.rs (100%) create mode 100644 crates/test-artifacts/programs/secp256k1-mul/Cargo.toml rename {tests => crates/test-artifacts/programs}/secp256k1-mul/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/secp256r1-add/Cargo.toml (71%) rename {tests => crates/test-artifacts/programs}/secp256r1-add/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/secp256r1-decompress/Cargo.toml (65%) rename {tests => crates/test-artifacts/programs}/secp256r1-decompress/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/secp256r1-double/Cargo.toml (71%) rename {tests => crates/test-artifacts/programs}/secp256r1-double/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/sha-compress/Cargo.toml (63%) rename {tests => crates/test-artifacts/programs}/sha-compress/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/sha-extend/Cargo.toml (63%) rename {tests => crates/test-artifacts/programs}/sha-extend/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/sha2/Cargo.toml (77%) rename {tests => crates/test-artifacts/programs}/sha2/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/tendermint-benchmark/Cargo.toml (57%) rename {tests => crates/test-artifacts/programs}/tendermint-benchmark/src/fixtures/1/next_validators.json (100%) rename {tests => crates/test-artifacts/programs}/tendermint-benchmark/src/fixtures/1/signed_header.json (100%) rename {tests => crates/test-artifacts/programs}/tendermint-benchmark/src/fixtures/1/validators.json (100%) rename {tests => crates/test-artifacts/programs}/tendermint-benchmark/src/fixtures/2/next_validators.json (100%) rename {tests => crates/test-artifacts/programs}/tendermint-benchmark/src/fixtures/2/signed_header.json (100%) rename {tests => crates/test-artifacts/programs}/tendermint-benchmark/src/fixtures/2/validators.json (100%) rename {tests => crates/test-artifacts/programs}/tendermint-benchmark/src/fixtures/small-1/next_validators.json (100%) rename {tests => 
crates/test-artifacts/programs}/tendermint-benchmark/src/fixtures/small-1/signed_header.json (100%) rename {tests => crates/test-artifacts/programs}/tendermint-benchmark/src/fixtures/small-1/validators.json (100%) rename {tests => crates/test-artifacts/programs}/tendermint-benchmark/src/fixtures/small-2/next_validators.json (100%) rename {tests => crates/test-artifacts/programs}/tendermint-benchmark/src/fixtures/small-2/signed_header.json (100%) rename {tests => crates/test-artifacts/programs}/tendermint-benchmark/src/fixtures/small-2/validators.json (100%) rename {tests => crates/test-artifacts/programs}/tendermint-benchmark/src/main.rs (100%) create mode 100644 crates/test-artifacts/programs/u256x2048-mul/Cargo.toml create mode 100644 crates/test-artifacts/programs/u256x2048-mul/src/main.rs rename {tests => crates/test-artifacts/programs}/uint256-arith/Cargo.toml (55%) rename {tests => crates/test-artifacts/programs}/uint256-arith/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/uint256-mul/Cargo.toml (59%) rename {tests => crates/test-artifacts/programs}/uint256-mul/src/main.rs (100%) rename {tests => crates/test-artifacts/programs}/verify-proof/Cargo.toml (61%) rename {tests => crates/test-artifacts/programs}/verify-proof/src/main.rs (100%) create mode 100644 crates/test-artifacts/src/lib.rs create mode 100644 crates/verifier/src/groth16/ark_converter.rs create mode 100644 crates/zkvm/entrypoint/src/syscalls/u256x2048_mul.rs delete mode 100755 examples/aggregation/program/elf/riscv32im-succinct-zkvm-elf delete mode 100755 examples/bls12381/program/elf/riscv32im-succinct-zkvm-elf delete mode 100755 examples/bn254/program/elf/riscv32im-succinct-zkvm-elf delete mode 100755 examples/chess/program/elf/riscv32im-succinct-zkvm-elf delete mode 100755 examples/cycle-tracking/program/elf/normal delete mode 100755 examples/cycle-tracking/program/elf/report delete mode 100755 examples/fibonacci/program/elf/riscv32im-succinct-zkvm-elf delete mode 100755 examples/io/program/elf/riscv32im-succinct-zkvm-elf delete mode 100755 examples/is-prime/program/elf/riscv32im-succinct-zkvm-elf delete mode 100755 examples/json/program/elf/riscv32im-curta-zkvm-elf delete mode 100755 examples/json/program/elf/riscv32im-succinct-zkvm-elf delete mode 100755 examples/patch-testing/program/elf/riscv32im-succinct-zkvm-elf delete mode 100755 examples/regex/program/elf/riscv32im-succinct-zkvm-elf delete mode 100755 examples/riscv32im-succinct-zkvm-elf delete mode 100755 examples/rsa/program/elf/riscv32im-succinct-zkvm-elf delete mode 100755 examples/rsp/program/elf/riscv32im-succinct-zkvm-elf delete mode 100755 examples/ssz-withdrawals/program/elf/riscv32im-succinct-zkvm-elf delete mode 100755 examples/tendermint/program/elf/riscv32im-succinct-zkvm-elf delete mode 100644 tests/bls12381-add/Cargo.toml delete mode 100755 tests/bls12381-add/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/bls12381-decompress/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/bls12381-double/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/bls12381-fp/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/bls12381-fp2-addsub/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/bls12381-fp2-mul/elf/riscv32im-succinct-zkvm-elf delete mode 100644 tests/bls12381-mul/Cargo.toml delete mode 100755 tests/bls12381-mul/elf/riscv32im-succinct-zkvm-elf delete mode 100644 tests/bn254-add/Cargo.toml delete mode 100755 tests/bn254-add/elf/riscv32im-succinct-zkvm-elf delete mode 100755 
tests/bn254-double/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/bn254-fp/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/bn254-fp2-addsub/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/bn254-fp2-mul/elf/riscv32im-succinct-zkvm-elf delete mode 100644 tests/bn254-mul/Cargo.toml delete mode 100755 tests/bn254-mul/elf/riscv32im-succinct-zkvm-elf delete mode 100644 tests/cycle-tracker/Cargo.toml delete mode 100755 tests/cycle-tracker/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/ed-add/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/ed-decompress/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/ed25519/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/fibonacci/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/hint-io/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/keccak-permute/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/keccak256/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/panic/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/rand/elf/riscv32im-succinct-zkvm-elf delete mode 100644 tests/secp256k1-add/Cargo.toml delete mode 100755 tests/secp256k1-add/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/secp256k1-decompress/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/secp256k1-double/elf/riscv32im-succinct-zkvm-elf delete mode 100644 tests/secp256k1-mul/Cargo.toml delete mode 100755 tests/secp256k1-mul/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/secp256r1-add/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/secp256r1-decompress/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/secp256r1-double/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/sha-compress/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/sha-extend/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/sha2/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/tendermint-benchmark/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/uint256-arith/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/uint256-mul/elf/riscv32im-succinct-zkvm-elf delete mode 100755 tests/verify-proof/elf/riscv32im-succinct-zkvm-elf diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000000..dc69733e1b --- /dev/null +++ b/.clang-format @@ -0,0 +1,83 @@ +# Google C/C++ Code Style settings +# https://clang.llvm.org/docs/ClangFormatStyleOptions.html +# Author: Kehan Xue, kehan.xue (at) gmail.com + +Language: Cpp +BasedOnStyle: Google +AccessModifierOffset: -1 +AlignAfterOpenBracket: Align +AlignConsecutiveAssignments: None +AlignOperands: Align +AllowAllArgumentsOnNextLine: true +AllowAllConstructorInitializersOnNextLine: true +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortBlocksOnASingleLine: Empty +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: Inline +AllowShortIfStatementsOnASingleLine: Never # To avoid conflict, set this "Never" and each "if statement" should include brace when coding +AllowShortLambdasOnASingleLine: Inline +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterReturnType: None +AlwaysBreakTemplateDeclarations: Yes +BinPackArguments: true +BreakBeforeBraces: Custom +BraceWrapping: + AfterCaseLabel: false + AfterClass: false + AfterStruct: false + AfterControlStatement: Never + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterUnion: false + AfterExternBlock: false + BeforeCatch: false + BeforeElse: false + BeforeLambdaBody: false + IndentBraces: false + 
SplitEmptyFunction: false + SplitEmptyRecord: false + SplitEmptyNamespace: false +BreakBeforeBinaryOperators: None +BreakBeforeTernaryOperators: true +BreakConstructorInitializers: BeforeColon +BreakInheritanceList: BeforeColon +ColumnLimit: 80 +CompactNamespaces: false +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: false # Make sure the * or & align on the left +EmptyLineBeforeAccessModifier: LogicalBlock +FixNamespaceComments: true +IncludeBlocks: Preserve +IndentCaseLabels: true +IndentPPDirectives: None +IndentWidth: 2 +KeepEmptyLinesAtTheStartOfBlocks: true +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: true +PointerAlignment: Left +ReflowComments: false +# SeparateDefinitionBlocks: Always # Only support since clang-format 14 +SpaceAfterCStyleCast: false +SpaceAfterLogicalNot: false +SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +SpaceBeforeCpp11BracedList: false +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true +SpaceBeforeParens: ControlStatements +SpaceBeforeRangeBasedForLoopColon: true +SpaceBeforeSquareBrackets: false +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 +SpacesInAngles: false +SpacesInCStyleCastParentheses: false +SpacesInContainerLiterals: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: c++11 +TabWidth: 4 +UseTab: Never \ No newline at end of file diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index d12a1d16aa..a6fbda6190 100644 --- a/.github/actions/setup/action.yml +++ b/.github/actions/setup/action.yml @@ -1,3 +1,5 @@ +# Note: this is only compatible with Linux runners. + name: Test setup inputs: pull_token: @@ -99,6 +101,16 @@ runs: else echo "pkg-config and libssl-dev are already installed." fi - + + - name: Echo docker buildx version + shell: bash + run: docker buildx version + - name: Set up Docker + uses: crazy-max/ghaction-setup-docker@v3 + + - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 + with: + driver-opts: | + image=public.ecr.aws/vend/moby/buildkit:buildx-stable-1 \ No newline at end of file diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 251c44d529..a96b132e2a 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -22,7 +22,7 @@ jobs: cpu=64, ram=256, family=m7i+m7a, - hdd=80, + disk=large, image=ubuntu22-full-x64, spot=false, "run-id=${{ github.run_id }}", @@ -54,7 +54,7 @@ jobs: cpu=64, ram=256, family=m7i+m7a, - hdd=80, + disk=large, image=ubuntu22-full-x64, spot=false, "run-id=${{ github.run_id }}", diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 81958dba2c..cd4b03bc2e 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -25,6 +25,7 @@ jobs: runs-on, runner=64cpu-linux-x64, spot=false, + disk=large, "run-id=${{ github.run_id }}", ] env: @@ -36,6 +37,13 @@ jobs: - name: Setup CI uses: ./.github/actions/setup + - name: Install SP1 toolchain from repo + run: | + cargo run -p sp1-cli -- prove install-toolchain + cd crates/cli + cargo install --locked --path . 
+ cargo clean + - name: Run cargo check uses: actions-rs/cargo@v1 with: @@ -61,6 +69,7 @@ jobs: runs-on, runner=64cpu-linux-arm64, spot=false, + disk=large, "run-id=${{ github.run_id }}", ] env: @@ -72,6 +81,13 @@ jobs: - name: Setup CI uses: ./.github/actions/setup + - name: Install SP1 toolchain from repo + run: | + cargo run -p sp1-cli -- prove install-toolchain + cd crates/cli + cargo install --locked --path . + cargo clean + - name: Run cargo check uses: actions-rs/cargo@v1 with: @@ -92,7 +108,7 @@ jobs: lint: name: Formatting & Clippy - runs-on: [runs-on, runner=8cpu-linux-x64, "run-id=${{ github.run_id }}"] + runs-on: [runs-on, runner=16cpu-linux-x64, "run-id=${{ github.run_id }}"] env: CARGO_NET_GIT_FETCH_WITH_CLI: "true" steps: @@ -102,6 +118,13 @@ jobs: - name: Setup CI uses: ./.github/actions/setup + - name: Install SP1 toolchain from repo + run: | + cargo run -p sp1-cli -- prove install-toolchain + cd crates/cli + cargo install --locked --path . + cargo clean + - name: Run cargo fmt uses: actions-rs/cargo@v1 with: @@ -110,6 +133,14 @@ jobs: env: CARGO_INCREMENTAL: 1 + - name: Check test-artifacts + uses: actions-rs/cargo@v1 + with: + command: check + args: -p test-artifacts + env: + CARGO_INCREMENTAL: 1 + - name: Run cargo clippy uses: actions-rs/cargo@v1 with: @@ -118,12 +149,50 @@ jobs: env: CARGO_INCREMENTAL: 1 + check: + name: Cargo Check + runs-on: [runs-on, runner=16cpu-linux-x64, disk=medium, "run-id=${{ github.run_id }}"] + env: + CARGO_NET_GIT_FETCH_WITH_CLI: "true" + steps: + - name: Checkout sources + uses: actions/checkout@v4 + + - name: Setup CI + uses: ./.github/actions/setup + + - name: Install SP1 toolchain from repo + run: | + cargo run -p sp1-cli -- prove install-toolchain + cd crates/cli + cargo install --locked --path . + cargo clean + + - name: Check workspace no features + uses: actions-rs/cargo@v1 + with: + command: check + args: --workspace --all-targets --no-default-features + + - name: Check workspace with default features + uses: actions-rs/cargo@v1 + with: + command: check + args: --workspace --all-targets + + - name: Check workspace with all features + uses: actions-rs/cargo@v1 + with: + command: check + args: --workspace --all-targets --all-features + examples: name: Examples runs-on: [ runs-on, runner=64cpu-linux-x64, + disk=large, spot=false, "run-id=${{ github.run_id }}", ] @@ -136,9 +205,12 @@ jobs: - name: Setup CI uses: ./.github/actions/setup - - name: Install SP1 toolchain + - name: Install SP1 toolchain from repo run: | cargo run -p sp1-cli -- prove install-toolchain + cd crates/cli + cargo install --locked --path . 
+ cargo clean - name: Run cargo fmt run: | @@ -155,39 +227,9 @@ jobs: cd ./examples/ cargo clippy --all-targets --all-features -- -D warnings -A incomplete-features - tests: - name: Tests - runs-on: [runs-on, runner=64cpu-linux-x64, "run-id=${{ github.run_id }}"] - steps: - - name: Checkout sources - uses: actions/checkout@v4 - - - name: Setup CI - uses: ./.github/actions/setup - - - name: Install SP1 toolchain - run: | - cargo install --locked --force --path crates/cli - cargo prove install-toolchain - - - name: Run cargo fmt - run: | - cd ./tests/ - cargo fmt --all -- --check - - - name: Run cargo clippy - run: | - cd ./tests/ - cargo clippy --all-targets --all-features -- -D warnings -A incomplete-features - - - name: Build test ELF files - run: | - cd ./tests/ - make - cli: name: CLI - runs-on: [runs-on, runner=8cpu-linux-x64, "run-id=${{ github.run_id }}"] + runs-on: [runs-on, runner=8cpu-linux-x64, disk=large, "run-id=${{ github.run_id }}"] env: CARGO_NET_GIT_FETCH_WITH_CLI: "true" steps: @@ -197,17 +239,12 @@ jobs: - name: Setup CI uses: ./.github/actions/setup - - name: Install SP1 toolchain - run: | - curl -L https://sp1.succinct.xyz | bash - ~/.sp1/bin/sp1up - ~/.sp1/bin/cargo-prove prove --version - - - name: Install SP1 CLI + - name: Install SP1 toolchain from repo run: | + cargo run -p sp1-cli -- prove install-toolchain cd crates/cli - cargo install --force --locked --path . - cd ~ + cargo install --locked --path . + cargo clean - name: Run cargo prove new run: | @@ -236,11 +273,12 @@ jobs: - name: Setup CI uses: ./.github/actions/setup - - name: Install SP1 toolchain + - name: Install SP1 toolchain from repo run: | - curl -L https://sp1.succinct.xyz | bash - ~/.sp1/bin/sp1up - ~/.sp1/bin/cargo-prove prove --version + cargo run -p sp1-cli -- prove install-toolchain + cd crates/cli + cargo install --locked --path . + cargo clean - name: Run Evaluation run: | @@ -268,6 +306,7 @@ jobs: [ runs-on, "ram=${{ matrix.mem_limit}}", + disk=large, family=c7a, image=ubuntu22-full-x64, "run-id=${{ github.run_id }}", @@ -281,17 +320,12 @@ jobs: - name: Setup CI uses: ./.github/actions/setup - - name: Install SP1 toolchain - run: | - curl -L https://sp1.succinct.xyz | bash - ~/.sp1/bin/sp1up - ~/.sp1/bin/cargo-prove prove --version - - - name: Install SP1 CLI + - name: Install SP1 toolchain from repo run: | + cargo run -p sp1-cli -- prove install-toolchain cd crates/cli - cargo install --force --locked --path . - cd ~ + cargo install --locked --path . 
+ cargo clean - name: Run tendermint script run: | @@ -420,9 +454,12 @@ jobs: runs-on: [ runs-on, - runner=64cpu-linux-x64, + cpu=64, + ram=256, + family=m7i+m7a, + disk=large, + image=ubuntu22-full-x64, spot=false, - hdd=150, "run-id=${{ github.run_id }}", ] env: @@ -438,13 +475,14 @@ jobs: run: | find -name Cargo.lock -type f -exec rm {} \; - - name: "Build SP1 without lock files" - run: | - cargo build --all --all-targets - + # We need the SP1 toolchain to be installed in order to build test-artifacts - name: Install SP1 toolchain run: | cargo run -p sp1-cli -- prove install-toolchain + + - name: "Build SP1 without lock files" + run: | + cargo build --all --all-targets - name: "Build examples without lock files" run: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e752edd077..65ef34ab73 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -86,12 +86,12 @@ jobs: # `platform` and `arch`: Used in tarball names # `svm`: target platform to use for the Solc binary: https://github.com/roynalnaruto/svm-rs/blob/84cbe0ac705becabdc13168bae28a45ad2299749/svm-builds/build.rs#L4-L24 - runner: ubuntu-latest - target: x86_64-unknown-linux-gnu + target: x86_64-unknown-linux-musl svm_target_platform: linux-amd64 platform: linux arch: amd64 - runner: warp-ubuntu-latest-arm64-4x - target: aarch64-unknown-linux-gnu + target: aarch64-unknown-linux-musl svm_target_platform: linux-aarch64 platform: linux arch: arm64 @@ -148,12 +148,19 @@ jobs: echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-platform-version)" >> $GITHUB_ENV - name: Linux ARM setup - if: matrix.target == 'aarch64-unknown-linux-gnu' + if: matrix.target == 'aarch64-unknown-linux-musl' run: | sudo apt-get update -y sudo apt-get install -y gcc-aarch64-linux-gnu echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc" >> $GITHUB_ENV + - name: Musl setup + if: contains(matrix.target, 'musl') + run: | + sudo apt-get update -y + sudo apt-get install -y musl-tools musl-dev + rustup target add ${{ matrix.target }} + - name: Build binaries env: SVM_TARGET_PLATFORM: ${{ matrix.svm_target_platform }} @@ -225,6 +232,77 @@ jobs: ${{ steps.artifacts.outputs.file_name }} ${{ steps.man.outputs.cargo_prove_man }} + toolchain-test: + name: "Test toolchain installation (${{ matrix.name }})" + needs: release + strategy: + fail-fast: false + matrix: + include: + - name: "Ubuntu 24.04 (x86_64)" + runner: "ubuntu-24.04" + - name: "Ubuntu 22.04 (x86_64)" + runner: "ubuntu-22.04" + - name: "Ubuntu 20.04 (x86_64)" + runner: "ubuntu-20.04" + - name: "macOS Monterey (x86_64)" + runner: "macos-12" + - name: "macOS Ventura (x86_64)" + runner: "macos-13" + - name: "macOS Sonoma (ARM64)" + runner: "macos-14" + + runs-on: "${{ matrix.runner }}" + steps: + - name: "Checkout source code" + uses: "actions/checkout@v4" + + - name: "Install SP1" + env: + SP1UP_VERSION: ${{ github.ref_name }} + run: | + cd sp1up + chmod +x sp1up + ./sp1up --token ${{ secrets.GITHUB_TOKEN }} + + - name: "Create SP1 project from template" + run: | + $HOME/.sp1/bin/cargo-prove prove new --bare hello + + - name: "Build SP1 project" + run: | + cd ./hello/program + $HOME/.sp1/bin/cargo-prove prove build + + toolchain-test-ec2: + name: "Test toolchain installation (${{ matrix.name }})" + needs: release + strategy: + fail-fast: false + matrix: + include: + # AMI from `us-east-1` + - name: "Debian 12 (x86_64)" + ec2-instance: "c5.2xlarge" + ami: "ami-064519b8c76274859" + volume: "/dev/xvda" + - name: "Debian 12 
(ARM64)" + ec2-instance: "c6g.2xlarge" + ami: "ami-0789039e34e739d67" + volume: "/dev/xvda" + uses: "./.github/workflows/toolchain-ec2.yml" + with: + image-id: "${{ matrix.ami }}" + instance-type: "${{ matrix.ec2-instance }}" + root-volume: "${{ matrix.volume }}" + secrets: + AWS_REGION: "${{ secrets.AWS_REGION }}" + AWS_ACCESS_KEY_ID: "${{ secrets.AWS_ACCESS_KEY_ID }}" + AWS_SECRET_ACCESS_KEY: "${{ secrets.AWS_SECRET_ACCESS_KEY }}" + AWS_SUBNET_ID: "${{ secrets.AWS_SUBNET_ID }}" + AWS_SG_ID: "${{ secrets.AWS_SG_ID }}" + GH_PAT: ${{ secrets.GH_PAT }} + cleanup: name: Release cleanup runs-on: ubuntu-latest diff --git a/.github/workflows/suite.yml b/.github/workflows/suite.yml index 6ab1bae2f9..5888cd2f28 100644 --- a/.github/workflows/suite.yml +++ b/.github/workflows/suite.yml @@ -82,7 +82,7 @@ jobs: [ "runs-on", "family=g6.4xlarge", - "hdd=200", + "disk=large", "ami=ami-0a63dc9cb9e934ba3", "spot=false", "run-id=${{ github.run_id }}", diff --git a/.github/workflows/toolchain-ec2.yml b/.github/workflows/toolchain-ec2.yml index dd3b2457f4..bbc4f6d117 100644 --- a/.github/workflows/toolchain-ec2.yml +++ b/.github/workflows/toolchain-ec2.yml @@ -92,15 +92,24 @@ jobs: - name: "Install build dependencies" run: | sudo apt-get update - sudo apt-get install -y build-essential pkg-config libssl-dev + sudo apt-get install -y build-essential pkg-config libssl-dev git - - name: "Install cargo-prove" + - name: "Install SP1" + env: + SP1UP_VERSION: ${{ github.ref_name }} run: | - cargo install --locked --path ./crates/cli + cd sp1up + chmod +x sp1up + ./sp1up --token ${{ secrets.GH_PAT }} - - name: "Install SP1 toolchain" + - name: "Create SP1 project from template" run: | - cargo prove install-toolchain --token ${{ secrets.GH_PAT }} + $HOME/.sp1/bin/cargo-prove prove new --bare hello + + - name: "Build SP1 project" + run: | + cd ./hello/program + $HOME/.sp1/bin/cargo-prove prove build stop-runner: name: "Stop self-hosted EC2 runner" diff --git a/.vscode/settings.json b/.vscode/settings.json index d1500a8b1a..f3522916a0 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -66,4 +66,22 @@ // "rust-analyzer.check.workspace": false, // "rust-analyzer.check.invocationStrategy": "once", // "rust-analyzer.cargo.buildScripts.invocationStrategy": "once", + "C_Cpp.default.includePath": [ + "${workspaceFolder}/crates/**/include", + "${workspaceFolder}/target/include", + ], + "C_Cpp.intelliSenseEngine": "Tag Parser", + "files.associations": { + "random": "cpp", + "chrono": "cpp", + "cstdint": "cpp", + "ratio": "cpp", + "system_error": "cpp", + "array": "cpp", + "functional": "cpp", + "tuple": "cpp", + "type_traits": "cpp", + "utility": "cpp", + "cmath": "cpp" + }, } \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 06254e4303..6488bc9e54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -105,9 +105,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.18" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" [[package]] name = "alloy-consensus" @@ -168,9 +168,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded610181f3dad5810f6ff12d1a99994cf9b42d2fcb7709029352398a5da5ae6" +checksum = "b84c506bf264110fa7e90d9924f742f40ef53c6572ea56a0b0bd714a567ed389" 
dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -188,7 +188,7 @@ dependencies = [ "alloy-sol-types", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tracing", ] @@ -210,7 +210,7 @@ dependencies = [ "async-trait", "auto_impl", "futures-utils-wasm", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -228,9 +228,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd58d377699e6cfeab52c4a9d28bdc4ef37e2bd235ff2db525071fe37a2e9af5" +checksum = "9fce5dbd6a4f118eecc4719eaa9c7ffc31c315e6c5ccde3642db927802312425" dependencies = [ "alloy-rlp", "bytes 1.8.0", @@ -317,7 +317,7 @@ dependencies = [ "auto_impl", "elliptic-curve", "k256", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -333,14 +333,14 @@ dependencies = [ "async-trait", "k256", "rand 0.8.5", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "alloy-sol-macro" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a1b42ac8f45e2f49f4bcdd72cbfde0bb148f5481d403774ffa546e48b83efc1" +checksum = "9343289b4a7461ed8bab8618504c995c049c082b70c7332efd7b32125633dc05" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -352,13 +352,13 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06318f1778e57f36333e850aa71bd1bb5e560c10279e236622faae0470c50412" +checksum = "4222d70bec485ceccc5d8fd4f2909edd65b5d5e43d4aca0b5dcee65d519ae98f" dependencies = [ "alloy-sol-macro-input", "const-hex", - "heck", + "heck 0.5.0", "indexmap 2.6.0", "proc-macro-error2", "proc-macro2", @@ -370,13 +370,13 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaebb9b0ad61a41345a22c9279975c0cdd231b97947b10d7aad1cf0a7181e4a5" +checksum = "2e17f2677369571b976e51ea1430eb41c3690d344fef567b840bfc0b01b6f83a" dependencies = [ "const-hex", "dunce", - "heck", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.87", @@ -385,9 +385,9 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12c71028bfbfec210e24106a542aad3def7caf1a70e2c05710e92a98481980d3" +checksum = "aa64d80ae58ffaafdff9d5d84f58d03775f66c84433916dc9a64ed16af5755da" dependencies = [ "serde", "winnow 0.6.20", @@ -395,9 +395,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374d7fb042d68ddfe79ccb23359de3007f6d4d53c13f703b64fb0db422132111" +checksum = "6520d427d4a8eb7aa803d852d7a52ceb0c519e784c292f64bb339e636918cf27" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -487,13 +487,61 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.92" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f37166d7d48a0284b99dd824694c26119c700b53bf0d1540cdb147dbdaaf13" +checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" dependencies = [ "backtrace", ] +[[package]] +name = "ark-bn254" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a22f4561524cd949590d78d7d4c5df8f592430d221f7f3c9497bbafd8972120f" +dependencies = [ + "ark-ec", + "ark-ff 0.4.2", + "ark-std 0.4.0", +] + +[[package]] +name = "ark-crypto-primitives" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3a13b34da09176a8baba701233fdffbaa7c1b1192ce031a3da4e55ce1f1a56" +dependencies = [ + "ark-ec", + "ark-ff 0.4.2", + "ark-relations", + "ark-serialize 0.4.2", + "ark-snark", + "ark-std 0.4.0", + "blake2", + "derivative", + "digest 0.10.7", + "rayon", + "sha2 0.10.8", +] + +[[package]] +name = "ark-ec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" +dependencies = [ + "ark-ff 0.4.2", + "ark-poly", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "hashbrown 0.13.2", + "itertools 0.10.5", + "num-traits", + "rayon", + "zeroize", +] + [[package]] name = "ark-ff" version = "0.3.0" @@ -528,6 +576,7 @@ dependencies = [ "num-bigint 0.4.6", "num-traits", "paste", + "rayon", "rustc_version 0.4.1", "zeroize", ] @@ -577,6 +626,48 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-groth16" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20ceafa83848c3e390f1cbf124bc3193b3e639b3f02009e0e290809a501b95fc" +dependencies = [ + "ark-crypto-primitives", + "ark-ec", + "ark-ff 0.4.2", + "ark-poly", + "ark-relations", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "rayon", +] + +[[package]] +name = "ark-poly" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" +dependencies = [ + "ark-ff 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "hashbrown 0.13.2", + "rayon", +] + +[[package]] +name = "ark-relations" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00796b6efc05a3f48225e59cb6a2cda78881e7c390872d5786aaf112f31fb4f0" +dependencies = [ + "ark-ff 0.4.2", + "ark-std 0.4.0", + "tracing", + "tracing-subscriber 0.2.25", +] + [[package]] name = "ark-serialize" version = "0.3.0" @@ -593,11 +684,35 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" dependencies = [ + "ark-serialize-derive", "ark-std 0.4.0", "digest 0.10.7", "num-bigint 0.4.6", ] +[[package]] +name = "ark-serialize-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-snark" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84d3cc6833a335bb8a600241889ead68ee89a3cf8448081fb7694c0fe503da63" +dependencies = [ + "ark-ff 0.4.2", + "ark-relations", + "ark-serialize 0.4.2", + "ark-std 0.4.0", +] + [[package]] name = "ark-std" version = "0.3.0" @@ -616,6 +731,7 @@ checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", "rand 0.8.5", + "rayon", ] [[package]] @@ -671,8 +787,8 @@ checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ "async-task", "concurrent-queue", - "fastrand 2.1.1", - "futures-lite 2.4.0", + "fastrand 2.2.0", + "futures-lite 2.5.0", "slab", ] @@ 
-687,21 +803,21 @@ dependencies = [ "async-io", "async-lock", "blocking", - "futures-lite 2.4.0", + "futures-lite 2.5.0", "once_cell", ] [[package]] name = "async-io" -version = "2.3.4" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" +checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" dependencies = [ "async-lock", "cfg-if", "concurrent-queue", "futures-io", - "futures-lite 2.4.0", + "futures-lite 2.5.0", "parking", "polling", "rustix", @@ -736,7 +852,7 @@ dependencies = [ "futures-channel", "futures-core", "futures-io", - "futures-lite 2.4.0", + "futures-lite 2.5.0", "gloo-timers", "kv-log-macro", "log", @@ -1066,7 +1182,7 @@ dependencies = [ "async-channel 2.3.1", "async-task", "futures-io", - "futures-lite 2.4.0", + "futures-lite 2.5.0", "piper", ] @@ -1192,7 +1308,7 @@ dependencies = [ "semver 1.0.23", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -1201,11 +1317,30 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "cbindgen" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fce8dd7fcfcbf3a0a87d8f515194b49d6135acab73e18bd380d1d93bb1a15eb" +dependencies = [ + "clap", + "heck 0.4.1", + "indexmap 2.6.0", + "log", + "proc-macro2", + "quote", + "serde", + "serde_json", + "syn 2.0.87", + "tempfile", + "toml", +] + [[package]] name = "cc" -version = "1.1.36" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baee610e9452a8f6f0a1b6194ec09ff9e2d85dea54432acdae41aa0761c95d70" +checksum = "1aeb932158bd710538c73702db6945cb68a8fb08c519e6e12706b94263b36db8" dependencies = [ "jobserver", "libc", @@ -1320,7 +1455,7 @@ version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.87", @@ -1356,7 +1491,7 @@ dependencies = [ "encode_unicode 0.3.6", "lazy_static", "libc", - "unicode-width", + "unicode-width 0.1.14", "windows-sys 0.52.0", ] @@ -1426,9 +1561,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +checksum = "0ca741a962e1b0bff6d724a1a0958b686406e853bb14061f218562e1896f95e6" dependencies = [ "libc", ] @@ -1475,6 +1610,15 @@ dependencies = [ "itertools 0.10.5", ] +[[package]] +name = "crossbeam-channel" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-deque" version = "0.8.5" @@ -1540,9 +1684,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" +checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf" dependencies = [ "csv-core", "itoa", @@ -1595,9 +1739,9 @@ dependencies = [ [[package]] name = "curl-sys" 
-version = "0.4.77+curl-8.10.1" +version = "0.4.78+curl-8.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f469e8a5991f277a208224f6c7ad72ecb5f986e36d09ae1f2c1bb9259478a480" +checksum = "8eec768341c5c7789611ae51cf6c459099f22e64a5d5d0ce4892434e33821eaf" dependencies = [ "cc", "libc", @@ -2044,9 +2188,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" [[package]] name = "fastrlp" @@ -2241,11 +2385,11 @@ dependencies = [ [[package]] name = "futures-lite" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f1fa2f9765705486b33fd2acf1577f8ec449c2ba1f318ae5447697b7c08d210" +checksum = "cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" dependencies = [ - "fastrand 2.1.1", + "fastrand 2.2.0", "futures-core", "futures-io", "parking", @@ -2497,6 +2641,15 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash", +] + [[package]] name = "hashbrown" version = "0.14.5" @@ -2520,6 +2673,12 @@ dependencies = [ "serde", ] +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + [[package]] name = "heck" version = "0.5.0" @@ -2979,15 +3138,15 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.8" +version = "0.17.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "763a5a8f45087d6bcea4222e7b72c291a054edf80e4ef6efd2a4979878c7bea3" +checksum = "cbf675b85ed934d3c67b5c5469701eec7db22689d0a2139d856e0925fa28b281" dependencies = [ "console", - "instant", "number_prefix", "portable-atomic", - "unicode-width", + "unicode-width 0.2.0", + "web-time", ] [[package]] @@ -3169,9 +3328,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.161" +version = "0.2.162" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" +checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" [[package]] name = "libgit2-sys" @@ -3665,7 +3824,7 @@ dependencies = [ [[package]] name = "p3-air" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "p3-field", "p3-matrix", @@ -3674,7 +3833,7 @@ dependencies = [ [[package]] name = "p3-baby-bear" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "num-bigint 0.4.6", "p3-field", @@ -3688,7 +3847,7 @@ dependencies = [ [[package]] name = "p3-bn254-fr" version = "0.1.0" -source = 
"git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "ff 0.13.0", "num-bigint 0.4.6", @@ -3702,7 +3861,7 @@ dependencies = [ [[package]] name = "p3-challenger" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "p3-field", "p3-maybe-rayon", @@ -3715,7 +3874,7 @@ dependencies = [ [[package]] name = "p3-commit" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "itertools 0.12.1", "p3-challenger", @@ -3728,7 +3887,7 @@ dependencies = [ [[package]] name = "p3-dft" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "p3-field", "p3-matrix", @@ -3740,7 +3899,7 @@ dependencies = [ [[package]] name = "p3-field" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "itertools 0.12.1", "num-bigint 0.4.6", @@ -3753,7 +3912,7 @@ dependencies = [ [[package]] name = "p3-fri" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "itertools 0.12.1", "p3-challenger", @@ -3771,7 +3930,7 @@ dependencies = [ [[package]] name = "p3-interpolation" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "p3-field", "p3-matrix", @@ -3781,7 +3940,7 @@ dependencies = [ [[package]] name = "p3-keccak-air" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "p3-air", "p3-field", @@ -3794,7 +3953,7 @@ dependencies = [ [[package]] name = "p3-matrix" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "itertools 0.12.1", "p3-field", @@ -3808,7 +3967,7 @@ dependencies = [ [[package]] name = "p3-maybe-rayon" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "rayon", ] @@ -3816,7 +3975,7 @@ dependencies = [ [[package]] name = "p3-mds" version = "0.1.0" -source = 
"git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "itertools 0.12.1", "p3-dft", @@ -3830,7 +3989,7 @@ dependencies = [ [[package]] name = "p3-merkle-tree" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "itertools 0.12.1", "p3-commit", @@ -3846,7 +4005,7 @@ dependencies = [ [[package]] name = "p3-poseidon2" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "gcd", "p3-field", @@ -3859,7 +4018,7 @@ dependencies = [ [[package]] name = "p3-symmetric" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "itertools 0.12.1", "p3-field", @@ -3869,7 +4028,7 @@ dependencies = [ [[package]] name = "p3-uni-stark" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "itertools 0.12.1", "p3-air", @@ -3887,7 +4046,7 @@ dependencies = [ [[package]] name = "p3-util" version = "0.1.0" -source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#d33eaa69b1ef96ad678ebd96ae8e75aef3508b2a" dependencies = [ "serde", ] @@ -3992,6 +4151,12 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +[[package]] +name = "pathdiff" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d61c5ce1153ab5b689d0c074c4e7fc613e942dfb7dd9eea5ab202d2ad91fe361" + [[package]] name = "pem-rfc7468" version = "0.7.0" @@ -4014,7 +4179,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", - "thiserror", + "thiserror 1.0.69", "ucd-trie", ] @@ -4067,7 +4232,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand 2.1.1", + "fastrand 2.2.0", "futures-io", ] @@ -4123,9 +4288,9 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.3" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" +checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" dependencies = [ "cfg-if", "concurrent-queue", @@ -4189,7 +4354,7 @@ dependencies = [ "is-terminal", "lazy_static", "term", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -4305,7 +4470,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" dependencies = [ "bytes 1.8.0", - "heck", + "heck 0.5.0", "itertools 0.13.0", "log", "multimap", @@ -4360,7 +4525,7 @@ dependencies = [ "rustc-hash 2.0.0", "rustls 0.23.16", "socket2", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -4377,7 +4542,7 @@ dependencies = [ "rustc-hash 2.0.0", "rustls 0.23.16", "slab", - "thiserror", + "thiserror 1.0.69", "tinyvec", "tracing", ] @@ -4538,7 +4703,7 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom 0.2.15", "libredox", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -4549,7 +4714,7 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.8", + "regex-automata 0.4.9", "regex-syntax 0.8.5", ] @@ -4564,9 +4729,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", @@ -4646,7 +4811,7 @@ dependencies = [ "http 1.1.0", "reqwest", "serde", - "thiserror", + "thiserror 1.0.69", "tower-service", ] @@ -4806,9 +4971,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.39" +version = "0.38.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" +checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" dependencies = [ "bitflags", "errno", @@ -5030,9 +5195,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" +checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" dependencies = [ "core-foundation-sys", "libc", @@ -5082,18 +5247,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.214" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.214" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", @@ -5130,7 +5295,16 @@ checksum = "c7715380eec75f029a4ef7de39a9200e0a63823176b759d055b613f5a87df6a6" dependencies = [ "percent-encoding", "serde", - "thiserror", + "thiserror 1.0.69", +] + +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", ] [[package]] @@ -5169,9 +5343,9 @@ dependencies = [ [[package]] name = "serial_test" -version = "3.1.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b4b487fe2acf240a021cf57c6b2b4903b1e78ca0ecd862a71b71d2a51fed77d" +checksum = 
"1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" dependencies = [ "futures", "log", @@ -5183,9 +5357,9 @@ dependencies = [ [[package]] name = "serial_test_derive" -version = "3.1.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" +checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", @@ -5376,7 +5550,7 @@ dependencies = [ [[package]] name = "sp1-build" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "anyhow", "cargo_metadata", @@ -5387,10 +5561,11 @@ dependencies = [ [[package]] name = "sp1-cli" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "anstyle", "anyhow", + "cargo_metadata", "clap", "ctrlc", "dirs", @@ -5415,7 +5590,7 @@ dependencies = [ [[package]] name = "sp1-core-executor" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "bincode", "bytemuck", @@ -5439,7 +5614,8 @@ dependencies = [ "sp1-zkvm", "strum", "strum_macros", - "thiserror", + "test-artifacts", + "thiserror 1.0.69", "tiny-keccak", "tracing", "typenum", @@ -5448,13 +5624,16 @@ dependencies = [ [[package]] name = "sp1-core-machine" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "bincode", + "cbindgen", + "cc", "cfg-if", "criterion", "elliptic-curve", "generic-array 1.1.0", + "glob", "hashbrown 0.14.5", "hex", "itertools 0.13.0", @@ -5472,8 +5651,10 @@ dependencies = [ "p3-maybe-rayon", "p3-uni-stark", "p3-util", + "pathdiff", "rand 0.8.5", "rayon", + "rayon-scan", "serde", "size", "snowbridge-amcl", @@ -5487,18 +5668,19 @@ dependencies = [ "strum", "strum_macros", "tempfile", - "thiserror", + "test-artifacts", + "thiserror 1.0.69", "tiny-keccak", "tracing", "tracing-forest", - "tracing-subscriber", + "tracing-subscriber 0.3.18", "typenum", "web-time", ] [[package]] name = "sp1-cuda" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "bincode", "ctrlc", @@ -5507,6 +5689,7 @@ dependencies = [ "serde", "sp1-core-machine", "sp1-prover", + "test-artifacts", "tokio", "tracing", "twirp-build-rs", @@ -5515,7 +5698,7 @@ dependencies = [ [[package]] name = "sp1-curves" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "cfg-if", "curve25519-dalek", @@ -5538,7 +5721,7 @@ dependencies = [ [[package]] name = "sp1-derive" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "quote", "syn 1.0.109", @@ -5546,7 +5729,7 @@ dependencies = [ [[package]] name = "sp1-eval" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "anyhow", "bincode", @@ -5564,14 +5747,16 @@ dependencies = [ [[package]] name = "sp1-helper" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "sp1-build", ] [[package]] name = "sp1-lib" -version = "3.0.0" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c8744af050832df5ca44fcd63979a83b93ca3010b2d5a5ce2a2b91f7438065c" dependencies = [ "bincode", "serde", @@ -5579,9 +5764,7 @@ dependencies = [ [[package]] name = "sp1-lib" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14deb700469a37ec075bcf88dac3815b026dd9c4b9cb175980826f1fbb2e4e80" +version = "4.0.0-rc.1" dependencies = [ "bincode", "serde", @@ -5589,7 +5772,7 @@ dependencies = [ [[package]] name = "sp1-perf" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "bincode", "clap", @@ -5599,12 +5782,13 @@ dependencies = [ "sp1-prover", "sp1-sdk", "sp1-stark", + "test-artifacts", "time 
0.3.36", ] [[package]] name = "sp1-primitives" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "bincode", "hex", @@ -5620,7 +5804,7 @@ dependencies = [ [[package]] name = "sp1-prover" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "anyhow", "bincode", @@ -5648,14 +5832,16 @@ dependencies = [ "sp1-recursion-core", "sp1-recursion-gnark-ffi", "sp1-stark", - "thiserror", + "test-artifacts", + "thiserror 1.0.69", "tracing", - "tracing-subscriber", + "tracing-appender", + "tracing-subscriber 0.3.18", ] [[package]] name = "sp1-recursion-circuit" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "ff 0.13.0", "hashbrown 0.14.5", @@ -5685,13 +5871,14 @@ dependencies = [ "sp1-recursion-core", "sp1-recursion-gnark-ffi", "sp1-stark", + "test-artifacts", "tracing", "zkhash", ] [[package]] name = "sp1-recursion-compiler" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "backtrace", "criterion", @@ -5716,12 +5903,16 @@ dependencies = [ [[package]] name = "sp1-recursion-core" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "backtrace", + "cbindgen", + "cc", "ff 0.13.0", + "glob", "hashbrown 0.14.5", "itertools 0.13.0", + "num_cpus", "p3-air", "p3-baby-bear", "p3-bn254-fr", @@ -5736,6 +5927,7 @@ dependencies = [ "p3-poseidon2", "p3-symmetric", "p3-util", + "pathdiff", "rand 0.8.5", "serde", "sp1-core-machine", @@ -5743,7 +5935,7 @@ dependencies = [ "sp1-primitives", "sp1-stark", "static_assertions", - "thiserror", + "thiserror 1.0.69", "tracing", "vec_map", "zkhash", @@ -5751,7 +5943,7 @@ dependencies = [ [[package]] name = "sp1-recursion-derive" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "quote", "syn 1.0.109", @@ -5759,7 +5951,7 @@ dependencies = [ [[package]] name = "sp1-recursion-gnark-cli" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "bincode", "clap", @@ -5768,7 +5960,7 @@ dependencies = [ [[package]] name = "sp1-recursion-gnark-ffi" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "anyhow", "bincode", @@ -5792,7 +5984,7 @@ dependencies = [ [[package]] name = "sp1-sdk" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "alloy-primitives", "alloy-signer", @@ -5817,6 +6009,7 @@ dependencies = [ "reqwest", "reqwest-middleware", "serde", + "sp1-build", "sp1-core-executor", "sp1-core-machine", "sp1-cuda", @@ -5826,7 +6019,8 @@ dependencies = [ "strum", "strum_macros", "tempfile", - "thiserror", + "test-artifacts", + "thiserror 1.0.69", "tokio", "tonic", "tracing", @@ -5836,11 +6030,12 @@ dependencies = [ [[package]] name = "sp1-stark" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "arrayref", "hashbrown 0.14.5", "itertools 0.13.0", + "num-bigint 0.4.6", "num-traits", "p3-air", "p3-baby-bear", @@ -5869,8 +6064,13 @@ dependencies = [ [[package]] name = "sp1-verifier" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ + "ark-bn254", + "ark-ec", + "ark-ff 0.4.2", + "ark-groth16", + "ark-serialize 0.4.2", "hex", "lazy_static", "num-bigint 0.4.6", @@ -5878,7 +6078,7 @@ dependencies = [ "sha2 0.10.8", "sp1-sdk", "substrate-bn-succinct", - "thiserror-no-std", + "thiserror 2.0.3", ] [[package]] @@ -5893,7 +6093,7 @@ dependencies = [ "p3-field", "rand 0.8.5", "sha2 0.10.8", - "sp1-lib 3.0.0", + "sp1-lib 4.0.0-rc.1", "sp1-primitives", ] @@ -6025,7 +6225,7 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", 
"rustversion", @@ -6046,7 +6246,7 @@ dependencies = [ "num-bigint 0.4.6", "rand 0.8.5", "rustc-hex", - "sp1-lib 3.1.0", + "sp1-lib 3.2.1", ] [[package]] @@ -6102,9 +6302,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edf42e81491fb8871b74df3d222c64ae8cbc1269ea509fa768a3ed3e1b0ac8cb" +checksum = "f76fe0a3e1476bdaa0775b9aec5b869ed9520c2b2fedfe9c6df3618f8ea6290b" dependencies = [ "paste", "proc-macro2", @@ -6188,12 +6388,12 @@ checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" [[package]] name = "tempfile" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", - "fastrand 2.1.1", + "fastrand 2.2.0", "once_cell", "rustix", "windows-sys 0.59.0", @@ -6210,6 +6410,13 @@ dependencies = [ "winapi", ] +[[package]] +name = "test-artifacts" +version = "4.0.0-rc.1" +dependencies = [ + "sp1-build", +] + [[package]] name = "textwrap" version = "0.16.1" @@ -6218,47 +6425,47 @@ checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" dependencies = [ "smawk", "unicode-linebreak", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] name = "thiserror" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02dd99dc800bbb97186339685293e1cc5d9df1f8fae2d0aecd9ff1c77efea892" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.69", ] [[package]] -name = "thiserror-impl" -version = "1.0.68" +name = "thiserror" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7c61ec9a6f64d2793d8a45faba21efbe3ced62a886d44c36a009b2b519b4c7e" +checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.87", + "thiserror-impl 2.0.3", ] [[package]] -name = "thiserror-impl-no-std" -version = "2.0.2" +name = "thiserror-impl" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e6318948b519ba6dc2b442a6d0b904ebfb8d411a3ad3e07843615a72249758" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.87", ] [[package]] -name = "thiserror-no-std" -version = "2.0.2" +name = "thiserror-impl" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3ad459d94dd517257cc96add8a43190ee620011bb6e6cdc82dafd97dfafafea" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ - "thiserror-impl-no-std", + "proc-macro2", + "quote", + "syn 2.0.87", ] [[package]] @@ -6397,9 +6604,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.0" +version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" +checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" dependencies = [ "backtrace", "bytes 1.8.0", @@ -6469,11 +6676,26 @@ dependencies = [ "tokio", ] +[[package]] +name = "toml" 
+version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.22.22", +] + [[package]] name = "toml_datetime" version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +dependencies = [ + "serde", +] [[package]] name = "toml_edit" @@ -6493,6 +6715,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap 2.6.0", + "serde", + "serde_spanned", "toml_datetime", "winnow 0.6.20", ] @@ -6590,6 +6814,18 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-appender" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" +dependencies = [ + "crossbeam-channel", + "thiserror 1.0.69", + "time 0.3.36", + "tracing-subscriber 0.3.18", +] + [[package]] name = "tracing-attributes" version = "0.1.27" @@ -6619,9 +6855,9 @@ checksum = "ee40835db14ddd1e3ba414292272eddde9dad04d3d4b65509656414d1c42592f" dependencies = [ "ansi_term", "smallvec", - "thiserror", + "thiserror 1.0.69", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.3.18", ] [[package]] @@ -6645,6 +6881,15 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-subscriber" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" +dependencies = [ + "tracing-core", +] + [[package]] name = "tracing-subscriber" version = "0.3.18" @@ -6683,7 +6928,7 @@ dependencies = [ "log", "rand 0.8.5", "sha1 0.10.6", - "thiserror", + "thiserror 1.0.69", "url", "utf-8", ] @@ -6713,7 +6958,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tower 0.5.1", "url", @@ -6773,6 +7018,12 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + [[package]] name = "unicode-xid" version = "0.2.6" diff --git a/Cargo.toml b/Cargo.toml index 7f40570ec4..5a83c7e1d1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "3.0.0" +version = "4.0.0-rc.1" edition = "2021" license = "MIT OR Apache-2.0" rust-version = "1.79" @@ -31,6 +31,7 @@ members = [ "crates/verifier", "crates/stark", "crates/zkvm/*", + "crates/test-artifacts", ] exclude = ["examples/target"] resolver = "2" @@ -48,30 +49,33 @@ debug-assertions = true [workspace.dependencies] # sp1 -sp1-build = { path = "crates/build", version = "3.0.0" } -sp1-cli = { path = "crates/cli", version = "3.0.0", default-features = false } -sp1-core-machine = { path = "crates/core/machine", version = "3.0.0" } -sp1-core-executor = { path = "crates/core/executor", version = "3.0.0" } -sp1-curves = { path = "crates/curves", version = "3.0.0" } -sp1-derive = { path = "crates/derive", version = "3.0.0" } -sp1-eval = { path = "crates/eval", version = "3.0.0" } -sp1-helper = { path = "crates/helper", version = 
"3.0.0", default-features = false } -sp1-primitives = { path = "crates/primitives", version = "3.0.0" } -sp1-prover = { path = "crates/prover", version = "3.0.0" } -sp1-recursion-compiler = { path = "crates/recursion/compiler", version = "3.0.0" } -sp1-recursion-core = { path = "crates/recursion/core", version = "3.0.0", default-features = false } -sp1-recursion-derive = { path = "crates/recursion/derive", version = "3.0.0", default-features = false } -sp1-recursion-gnark-ffi = { path = "crates/recursion/gnark-ffi", version = "3.0.0", default-features = false } -sp1-recursion-circuit = { path = "crates/recursion/circuit", version = "3.0.0", default-features = false } -sp1-sdk = { path = "crates/sdk", version = "3.0.0" } -sp1-cuda = { path = "crates/cuda", version = "3.0.0" } -sp1-stark = { path = "crates/stark", version = "3.0.0" } -sp1-lib = { path = "crates/zkvm/lib", version = "3.0.0", default-features = false } +sp1-build = { path = "crates/build", version = "4.0.0-rc.1" } +sp1-cli = { path = "crates/cli", version = "4.0.0-rc.1", default-features = false } +sp1-core-machine = { path = "crates/core/machine", version = "4.0.0-rc.1" } +sp1-core-executor = { path = "crates/core/executor", version = "4.0.0-rc.1" } +sp1-curves = { path = "crates/curves", version = "4.0.0-rc.1" } +sp1-derive = { path = "crates/derive", version = "4.0.0-rc.1" } +sp1-eval = { path = "crates/eval", version = "4.0.0-rc.1" } +sp1-helper = { path = "crates/helper", version = "4.0.0-rc.1", default-features = false } +sp1-primitives = { path = "crates/primitives", version = "4.0.0-rc.1" } +sp1-prover = { path = "crates/prover", version = "4.0.0-rc.1" } +sp1-recursion-compiler = { path = "crates/recursion/compiler", version = "4.0.0-rc.1" } +sp1-recursion-core = { path = "crates/recursion/core", version = "4.0.0-rc.1", default-features = false } +sp1-recursion-derive = { path = "crates/recursion/derive", version = "4.0.0-rc.1", default-features = false } +sp1-recursion-gnark-ffi = { path = "crates/recursion/gnark-ffi", version = "4.0.0-rc.1", default-features = false } +sp1-recursion-circuit = { path = "crates/recursion/circuit", version = "4.0.0-rc.1", default-features = false } +sp1-sdk = { path = "crates/sdk", version = "4.0.0-rc.1" } +sp1-cuda = { path = "crates/cuda", version = "4.0.0-rc.1" } +sp1-stark = { path = "crates/stark", version = "4.0.0-rc.1" } +sp1-lib = { path = "crates/zkvm/lib", version = "4.0.0-rc.1", default-features = false } # NOTE: The version in this crate is manually set to 3.0.1 right now. When upgrading SP1 versions, # make sure to update this crate. sp1-zkvm = { path = "crates/zkvm/entrypoint", version = "3.0.1", default-features = false } +# For testing. 
+test-artifacts = { path = "crates/test-artifacts", version = "4.0.0-rc.1" } + # p3 # p3-air = "0.1.4-succinct" # p3-field = "0.1.4-succinct" @@ -138,6 +142,14 @@ p3-bn254-fr = { git = "https://github.com/Plonky3/Plonky3", branch = "sp1-v4" } # p3-maybe-rayon = { path = "../Plonky3/maybe-rayon" } # p3-bn254-fr = { path = "../Plonky3/bn254-fr" } +# misc +hashbrown = "0.14.5" +itertools = "0.13.0" +serde = "1.0.204" +serde_json = "1.0.132" +tracing = "0.1.40" +tracing-subscriber = "0.3.18" + [workspace.metadata.typos] # TODO: Fix in next version since CommitCommitedValuesDigest is retained since it's present in constraints.json default.extend-ignore-re = [ diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 77014462c3..390ecb2573 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -4,7 +4,7 @@ This is a guide with helpful information for developers who want to contribute t ## Getting started -You can run the test suite in SP1 core by running the following command: +To build SP1, you must install [Go](https://go.dev/doc/install). You can run the test suite in SP1 core by running the following command: ```bash cd core diff --git a/audits/rkm0959.md b/audits/rkm0959.md index 3fb90318af..2c09f508d5 100644 --- a/audits/rkm0959.md +++ b/audits/rkm0959.md @@ -393,7 +393,7 @@ this passes each core verification, and since the RecursionPublicValue of proof - shard 1, 2's committed_value_digest = `0` - shard 3, 4's committed_value_digest = `x` -this passes each core verification, as proof #2 thinks shard 3 is its "first" shard - so it actually thinks that the `committed_value_digest` didn't change. This means that the whole "no cpu chip means `commited_value_digest` equal" thing actually just passes. Then, in the compress verification, we'll just see the committed_value_digest go from `0` to `x`, which is also completely fine. However, the committed_value_digest will go `0, 0, x, x`, where the change occurs on a shard without cpu chip - which isn't supposed to happen. +this passes each core verification, as proof #2 thinks shard 3 is its "first" shard - so it actually thinks that the `committed_value_digest` didn't change. This means that the whole "no cpu chip means `committed_value_digest` equal" thing actually just passes. Then, in the compress verification, we'll just see the committed_value_digest go from `0` to `x`, which is also completely fine. However, the committed_value_digest will go `0, 0, x, x`, where the change occurs on a shard without cpu chip - which isn't supposed to happen. While this is a slight incompatibility, the main invariant (if nonzero, public digest can only be one non-zero value) is preserved. Therefore, we did not fix this observation. diff --git a/book/SUMMARY.md b/book/SUMMARY.md index 55ec23f628..4bde9eef98 100644 --- a/book/SUMMARY.md +++ b/book/SUMMARY.md @@ -72,6 +72,6 @@ - [Usage in CI](./developers/usage-in-ci.md) -- [RV32IM Specification](./developers/rv32im-specification.md) +- [RV32IM Deviations](./developers/rv32im-deviations.md) - [Building Circuit Artifacts](./developers/building-circuit-artifacts.md) diff --git a/book/developers/rv32im-deviations.md b/book/developers/rv32im-deviations.md new file mode 100644 index 0000000000..e28f294450 --- /dev/null +++ b/book/developers/rv32im-deviations.md @@ -0,0 +1,27 @@ +# RV32IM Deviations + +**SP1 does not conform exactly to the official RISC-V RV32IM specification.** Instead, it includes +several minor modifications tailored to make it more suitable for use in proving systems. 
These
+deviations are outlined below:
+
+- Addresses `0x0` to `0x20` are reserved for registers. Writing to these addresses will modify
+  register state and cause divergent behavior from the RISC-V specification.
+- Memory access is only valid for addresses `0x20, 0x78000000`. Writing to any other addresses
+  will result in undefined behavior. The heap allocator is also constrained to these addresses.
+- Memory access must be "aligned". The alignment is automatically enforced by all programs compiled
+  through the official SP1 RISC-V toolchain.
+  - LW/SW memory access must be word aligned.
+  - LH/LHU/SH memory access must be half-word aligned.
+- The ECALL instruction is used for system calls and precompiles. Only valid syscall IDs should be called, and only using the specific convention of loading the ID into register T0 and arguments into registers A0 and A1. If the arguments are addresses, they must be word-aligned. Failure to follow this convention can result in undefined behavior. Correct usages can be found in the `sp1_zkvm` and `sp1_lib` crates.
+
+## Security Considerations
+
+While the deviations from the RISC-V specification could theoretically be exploited to cause
+divergent execution, such scenarios require a deliberately malicious program. The SP1 security
+model assumes that programs are honestly compiled, as malicious bytecode could otherwise exploit
+program execution and I/O.
+
+These security concerns regarding divergent execution have been reviewed and discussed with external
+security researchers, including rkm0959, Zellic, samczsun, and others.
\ No newline at end of file
diff --git a/book/developers/rv32im-specification.md b/book/developers/rv32im-specification.md
deleted file mode 100644
index 84e8aa7584..0000000000
--- a/book/developers/rv32im-specification.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# RV32IM Specification
-
-SP1 implements the RISC-V RV32IM instruction set with some implementation details that make it more suitable for proving.
-
-- LW/SW memory access must be word aligned.
-- LH/LHU/SH memory access must be half-word aligned.
-- Memory access is only valid for addresses `0x20, 0x78000000`. Accessing addresses outside of this range will result in undefined behavior. The global heap allocator in `sp1_zkvm` will panic if memory exceeds this range.
-- The ECALL instruction is used for system calls and precompiles. Only valid syscall IDs should be called, and only using the specific convention of loading the ID into register T0 and arguments into registers A0 and A1. If the arguments are addresses, they must be word-aligned. Failure to follow this convention can result in undefined behavior. Correct usages can be found in the `sp1_zkvm` and `sp1_lib` crates.
diff --git a/book/generating-proofs/basics.md b/book/generating-proofs/basics.md
index 85273d96ba..7bde99bbc1 100644
--- a/book/generating-proofs/basics.md
+++ b/book/generating-proofs/basics.md
@@ -13,7 +13,8 @@ To make this more concrete, let's walk through a simple example of generating a
 You can run the above script in the `script` directory with `RUST_LOG=info cargo run --release`. Note that running the above script will generate a proof locally.
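As an aside on the ECALL convention called out in the RV32IM deviations document above: a syscall wrapper that follows that convention might look roughly like the sketch below. This is a hypothetical illustration only, not the actual `sp1_zkvm` implementation; the only details taken from the text above are the register assignments (syscall ID in `t0`, word-aligned pointer arguments in `a0`/`a1`).

```rust
/// Hypothetical sketch of the ECALL convention described above (not the real
/// `sp1_zkvm` code): the syscall ID is placed in t0 and the two word-aligned
/// pointer arguments in a0 and a1 before executing `ecall`.
#[cfg(target_os = "zkvm")]
unsafe fn syscall_two_args(syscall_id: u32, arg1: *mut u32, arg2: *mut u32) {
    core::arch::asm!(
        "ecall",
        in("t0") syscall_id,
        in("a0") arg1,
        in("a1") arg2,
    );
}
```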
-WARNING: Local proving often is much slower than the prover network and for certain proof types (e.g. Groth16, PLONK) require a significant amount of RAM and will likely not work on a laptop.
+WARNING: Local proving is often much slower than the prover network, and certain proof types (e.g. Groth16, PLONK) require a
+significant amount of RAM. You might only be able to generate proofs for small inputs locally.
We recommend using the [prover network](./prover-network.md) to generate proofs. Read more about the [recommended workflow](./recommended-workflow.md) for developing with SP1. diff --git a/book/generating-proofs/prover-network/versions.md b/book/generating-proofs/prover-network/versions.md index 4190b713fe..3060a610ad 100644 --- a/book/generating-proofs/prover-network/versions.md +++ b/book/generating-proofs/prover-network/versions.md @@ -4,8 +4,7 @@ The prover network currently only supports specific versions of SP1: | Version | Description | | ------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -| v2.X.X | Audited, production ready version. | -| v3.X.X | Pre-release version with enhanced performance, currently being audited. **Recommended for benchmarking or testing, not recommended for production use.** | +| v3.X.X | V3 Release. Latest performant & production ready version. | `X` denotes that any minor and patch version is supported (e.g. `v2.1.0`, `v2.1.1`). diff --git a/book/getting-started/hardware-requirements.md b/book/getting-started/hardware-requirements.md index 828ea96080..3877d22e84 100644 --- a/book/getting-started/hardware-requirements.md +++ b/book/getting-started/hardware-requirements.md @@ -31,7 +31,9 @@ which can be parallelized with multiple cores. Our prover requires keeping large matrices (i.e., traces) in memory to generate the proofs. Certain steps of the prover have a minimum memory requirement, meaning that if you have less than this amount of memory, the process will OOM. -This effect is most noticeable when using the Groth16 or PLONK provers. +This effect is most noticeable when using the Groth16 or PLONK provers. If you're running the Groth16 or Plonk provers locally +on Mac or Windows using docker, you might need to increase the memory limit for +[docker desktop](https://docs.docker.com/desktop/settings-and-maintenance/settings/#resources). ### Disk diff --git a/book/getting-started/quickstart.md b/book/getting-started/quickstart.md index 824ddbf857..76f1356b3d 100644 --- a/book/getting-started/quickstart.md +++ b/book/getting-started/quickstart.md @@ -34,8 +34,6 @@ Your new project will have the following structure (ignoring the `contracts` fol ├── program │   ├── Cargo.lock │   ├── Cargo.toml -│   ├── elf -│   │   └── riscv32im-succinct-zkvm-elf │   └── src │   └── main.rs ├── rust-toolchain @@ -66,7 +64,7 @@ Before we can run the program inside the zkVM, it must be compiled to a RISC-V e cd program && cargo prove build ``` -which will output the compiled ELF to the file `program/elf/riscv32im-succinct-zkvm-elf`. +which will generate an ELF file under `target/elf-compilation`. Note: the `build.rs` file in the `script` directory will use run the above command automatically to build the ELF, meaning you don't have to manually run `cargo prove build` every time you make a change to the program! diff --git a/book/verification/off-chain-verification.md b/book/verification/off-chain-verification.md index 3dd4bc601f..100e554eb4 100644 --- a/book/verification/off-chain-verification.md +++ b/book/verification/off-chain-verification.md @@ -4,7 +4,7 @@ You can verify SP1 Groth16 and Plonk proofs in `no_std` environments with [`sp1-verifier`](https://docs.rs/sp1-verifier/latest/sp1_verifier/). 
-`sp1-verifier` is also patched to verify Groth16 and Plonk proofs within the SP1 ZKVM, using
+`sp1-verifier` is also patched to verify Groth16 and Plonk proofs within the SP1 zkVM, using
 [bn254](https://blog.succinct.xyz/succinctshipsprecompiles/) precompiles. For an example of this, see the [Groth16 Example](https://github.com/succinctlabs/sp1/tree/main/examples/groth16/).
@@ -43,6 +43,45 @@ Here, the proof, public inputs, and vkey hash are read from stdin. See the follo
 > Note that the SP1 SDK itself is *not* `no_std` compatible.
 
+### Advanced: `verify_gnark_proof`
+
+`sp1-verifier` also exposes [`Groth16Verifier::verify_gnark_proof`](https://docs.rs/sp1-verifier/latest/sp1_verifier/struct.Groth16Verifier.html#method.verify_gnark_proof) and [`PlonkVerifier::verify_gnark_proof`](https://docs.rs/sp1-verifier/latest/sp1_verifier/struct.PlonkVerifier.html#method.verify_gnark_proof),
+which verify any Groth16 or Plonk proof from Gnark. This is especially useful for verifying custom Groth16 and Plonk proofs
+efficiently in the SP1 zkVM.
+
+The following snippet demonstrates how you might serialize a Gnark proof in a way that `sp1-verifier` can use.
+
+```go
+// Write the verifier key.
+vkFile, err := os.Create("vk.bin")
+if err != nil {
+    panic(err)
+}
+defer vkFile.Close()
+
+// Here, `vk` is a `groth16_bn254.VerifyingKey` or `plonk_bn254.VerifyingKey`.
+_, err = vk.WriteTo(vkFile)
+if err != nil {
+    panic(err)
+}
+
+// Write the proof.
+proofFile, err := os.Create("proof.bin")
+if err != nil {
+    panic(err)
+}
+defer proofFile.Close()
+
+// Here, `proof` is a `groth16_bn254.Proof` or `plonk_bn254.Proof`.
+_, err = proof.WriteTo(proofFile)
+if err != nil {
+    panic(err)
+}
+```
+
+Public values are serialized as big-endian `Fr` values. The default Gnark serialization will work
+out of the box.
+
 ## Wasm Verification
 
 The [`example-sp1-wasm-verifier`](https://github.com/succinctlabs/example-sp1-wasm-verifier) demonstrates how to
diff --git a/book/verification/onchain/getting-started.md b/book/verification/onchain/getting-started.md
index 8519d620a8..715b100d58 100644
--- a/book/verification/onchain/getting-started.md
+++ b/book/verification/onchain/getting-started.md
@@ -14,7 +14,8 @@ By default, the proofs generated by SP1 are not verifiable onchain, as they are
 > WARNING: The Groth16 and PLONK provers are only guaranteed to work on official releases of SP1. To
 > use Groth16 or PLONK proving & verification locally, ensure that you have Docker installed and have
-> at least 128GB of RAM.
+> at least 32GB of RAM. Note that you might need to increase the memory limit for
+> [docker desktop](https://docs.docker.com/desktop/settings-and-maintenance/settings/#resources) if you're running on Mac.
 
 ### Example
 
diff --git a/book/verification/supported-versions.md b/book/verification/supported-versions.md
new file mode 100644
index 0000000000..b79555c32f
--- /dev/null
+++ b/book/verification/supported-versions.md
@@ -0,0 +1 @@
+# Supported Versions
diff --git a/book/writing-programs/compiling.md b/book/writing-programs/compiling.md
index 6a1fdc56b4..297facb547 100644
--- a/book/writing-programs/compiling.md
+++ b/book/writing-programs/compiling.md
@@ -14,7 +14,7 @@ To build a program while developing, simply run the following command in the cra
 cargo prove build
 ```
 
-This will compile the ELF that can be executed in the zkVM and put it in the file `elf/riscv32im-succinct-zkvm-elf`. The output from the command will look something like this:
+This will compile the ELF that can be executed in the zkVM.
The output from the command will look something like this: ```bash [sp1] Compiling version_check v0.9.4 diff --git a/crates/build/src/build.rs b/crates/build/src/build.rs index b19207d406..37ee7392ac 100644 --- a/crates/build/src/build.rs +++ b/crates/build/src/build.rs @@ -5,7 +5,7 @@ use cargo_metadata::camino::Utf8PathBuf; use crate::{ command::{docker::create_docker_command, local::create_local_command, utils::execute_command}, - utils::{cargo_rerun_if_changed, copy_elf_to_output_dir, current_datetime}, + utils::{cargo_rerun_if_changed, current_datetime}, BuildArgs, BUILD_TARGET, HELPER_TARGET_SUBDIR, }; @@ -48,11 +48,7 @@ pub fn execute_build_program( let target_elf_paths = generate_elf_paths(&program_metadata, Some(args))?; - // Temporary backward compatibility with the deprecated behavior of copying the ELF file. - // TODO: add option to turn off this behavior - if target_elf_paths.len() == 1 { - copy_elf_to_output_dir(args, &program_metadata, &target_elf_paths[0].1)?; - } + print_elf_paths_cargo_directives(&target_elf_paths); Ok(target_elf_paths) } @@ -73,7 +69,10 @@ pub(crate) fn build_program_internal(path: &str, args: Option) { .unwrap_or(false); if skip_program_build { // Still need to set ELF env vars even if build is skipped. - generate_elf_paths(&metadata, args.as_ref()).expect("failed to collect target ELF paths"); + let target_elf_paths = generate_elf_paths(&metadata, args.as_ref()) + .expect("failed to collect target ELF paths"); + + print_elf_paths_cargo_directives(&target_elf_paths); println!( "cargo:warning=Build skipped for {} at {} due to SP1_SKIP_PROGRAM_BUILD flag", @@ -94,7 +93,10 @@ pub(crate) fn build_program_internal(path: &str, args: Option) { .unwrap_or(false); if is_clippy_driver { // Still need to set ELF env vars even if build is skipped. - generate_elf_paths(&metadata, args.as_ref()).expect("failed to collect target ELF paths"); + let target_elf_paths = generate_elf_paths(&metadata, args.as_ref()) + .expect("failed to collect target ELF paths"); + + print_elf_paths_cargo_directives(&target_elf_paths); println!("cargo:warning=Skipping build due to clippy invocation."); return; @@ -113,19 +115,39 @@ pub(crate) fn build_program_internal(path: &str, args: Option) { println!("cargo:warning={} built at {}", root_package_name, current_datetime()); } -/// Collects the list of targets that would be built and their output ELF file paths. Also prints -/// cargo directives setting relevant `SP1_ELF_` environment variables. -fn generate_elf_paths( +/// Collects the list of targets that would be built and their output ELF file paths. +pub fn generate_elf_paths( metadata: &cargo_metadata::Metadata, args: Option<&BuildArgs>, ) -> Result> { let mut target_elf_paths = vec![]; + let packages_to_iterate = if let Some(args) = args { + if !args.packages.is_empty() { + args.packages + .iter() + .map(|wanted_package| { + metadata + .packages + .iter() + .find(|p| p.name == *wanted_package) + .ok_or_else(|| { + anyhow::anyhow!("cannot find package named {}", wanted_package) + }) + .map(|p| p.id.clone()) + }) + .collect::>>()? 
+ } else { + metadata.workspace_default_members.to_vec() + } + } else { + metadata.workspace_default_members.to_vec() + }; - for program_crate in metadata.workspace_default_members.iter() { + for program_crate in packages_to_iterate { let program = metadata .packages .iter() - .find(|p| &p.id == program_crate) + .find(|p| p.id == program_crate) .ok_or_else(|| anyhow::anyhow!("cannot find package for {}", program_crate))?; for bin_target in program.targets.iter().filter(|t| { @@ -133,7 +155,7 @@ fn generate_elf_paths( }) { // Filter out irrelevant targets if `--bin` is used. if let Some(args) = args { - if !args.binary.is_empty() && bin_target.name != args.binary { + if !args.binaries.is_empty() && !args.binaries.contains(&bin_target.name) { continue; } } @@ -149,9 +171,12 @@ fn generate_elf_paths( } } + Ok(target_elf_paths) +} + +/// Prints cargo directives setting relevant `SP1_ELF_` environment variables. +fn print_elf_paths_cargo_directives(target_elf_paths: &[(String, Utf8PathBuf)]) { for (target_name, elf_path) in target_elf_paths.iter() { println!("cargo:rustc-env=SP1_ELF_{}={}", target_name, elf_path); } - - Ok(target_elf_paths) } diff --git a/crates/build/src/command/utils.rs b/crates/build/src/command/utils.rs index 7eef9dc9a4..ba5ec28532 100644 --- a/crates/build/src/command/utils.rs +++ b/crates/build/src/command/utils.rs @@ -22,9 +22,14 @@ pub(crate) fn get_program_build_args(args: &BuildArgs) -> Vec { build_args.push("-Ztrim-paths".to_string()); - if !args.binary.is_empty() { + for p in &args.packages { + build_args.push("-p".to_string()); + build_args.push(p.to_string()); + } + + for b in &args.binaries { build_args.push("--bin".to_string()); - build_args.push(args.binary.clone()); + build_args.push(b.to_string()); } if !args.features.is_empty() { diff --git a/crates/build/src/lib.rs b/crates/build/src/lib.rs index 17b18602fc..f47cf9e1f1 100644 --- a/crates/build/src/lib.rs +++ b/crates/build/src/lib.rs @@ -2,7 +2,7 @@ mod build; mod command; mod utils; use build::build_program_internal; -pub use build::execute_build_program; +pub use build::{execute_build_program, generate_elf_paths}; use clap::Parser; @@ -50,14 +50,22 @@ pub struct BuildArgs { pub ignore_rust_version: bool, #[clap(long, action, help = "Assert that `Cargo.lock` will remain unchanged")] pub locked: bool, + #[clap( + short, + long, + action, + help = "Build only the specified packages", + num_args = 1.. + )] + pub packages: Vec, #[clap( alias = "bin", long, action, - help = "Build only the specified binary", - default_value = "" + help = "Build only the specified binaries", + num_args = 1.. )] - pub binary: String, + pub binaries: Vec, #[clap(long, action, help = "ELF binary name", default_value = "")] pub elf_name: String, #[clap( @@ -79,7 +87,8 @@ impl Default for BuildArgs { features: vec![], rustflags: vec![], ignore_rust_version: false, - binary: "".to_string(), + packages: vec![], + binaries: vec![], elf_name: "".to_string(), output_directory: DEFAULT_OUTPUT_DIR.to_string(), locked: false, @@ -117,3 +126,17 @@ pub fn build_program(path: &str) { pub fn build_program_with_args(path: &str, args: BuildArgs) { build_program_internal(path, Some(args)) } + +/// Returns the raw ELF bytes by the zkVM program target name. +/// +/// Note that this only works when using `sp1_build::build_program` or +/// `sp1_build::build_program_with_args` in a build script. +/// +/// By default, the program target name is the same as the program crate name. 
However, this might +/// not be the case for non-standard project structures. For example, placing the entrypoint source +/// file at `src/bin/my_entry.rs` would result in the program target being named `my_entry`, in +/// which case the invocation should be `include_elf!("my_entry")` instead. +#[macro_export] +macro_rules! include_elf { + ($arg:tt) => {{ include_bytes!(env!(concat!("SP1_ELF_", $arg))) }}; +} diff --git a/crates/build/src/utils.rs b/crates/build/src/utils.rs index ac5dd03c19..032692808b 100644 --- a/crates/build/src/utils.rs +++ b/crates/build/src/utils.rs @@ -1,40 +1,8 @@ -use std::{fs, path::Path}; +use std::path::Path; -use anyhow::Result; -use cargo_metadata::{camino::Utf8PathBuf, Metadata}; +use cargo_metadata::Metadata; use chrono::Local; -use crate::{BuildArgs, BUILD_TARGET}; - -/// Copy the ELF to the specified output directory. -pub(crate) fn copy_elf_to_output_dir( - args: &BuildArgs, - program_metadata: &cargo_metadata::Metadata, - elf_path: &Utf8PathBuf, -) -> Result { - // The order of precedence for the ELF name is: - // 1. --elf_name flag - // 2. --binary flag + -elf suffix (defaults to riscv32im-succinct-zkvm-elf) - let elf_name = if !args.elf_name.is_empty() { - args.elf_name.clone() - } else if !args.binary.is_empty() { - // TODO: In the future, change this to default to the package name. Will require updating - // docs and examples. - args.binary.clone() - } else { - BUILD_TARGET.to_string() - }; - - let elf_dir = program_metadata.target_directory.parent().unwrap().join(&args.output_directory); - fs::create_dir_all(&elf_dir)?; - let result_elf_path = elf_dir.join(elf_name); - - // Copy the ELF to the specified output directory. - fs::copy(elf_path, &result_elf_path)?; - - Ok(result_elf_path) -} - pub(crate) fn current_datetime() -> String { let now = Local::now(); now.format("%Y-%m-%d %H:%M:%S").to_string() diff --git a/crates/cli/Cargo.toml b/crates/cli/Cargo.toml index 1e6bd0e088..cf2e8cb4b2 100644 --- a/crates/cli/Cargo.toml +++ b/crates/cli/Cargo.toml @@ -32,7 +32,7 @@ indicatif = "0.17.8" tokio = { version = "1", features = ["full"] } dirs = "5.0" rand = "0.8" -serde_json = "1.0.121" +serde_json = { workspace = true } yansi = "1.0.1" hex = "0.4.3" anstyle = "1.0.8" @@ -43,3 +43,4 @@ regex = "1.5.4" prettytable-rs = "0.10" textwrap = "0.16.0" ctrlc = "3.4.2" +cargo_metadata = "0.18.1" \ No newline at end of file diff --git a/crates/cli/src/bin/cargo-prove.rs b/crates/cli/src/bin/cargo-prove.rs index 56f452e708..e2b87d44bd 100644 --- a/crates/cli/src/bin/cargo-prove.rs +++ b/crates/cli/src/bin/cargo-prove.rs @@ -3,8 +3,7 @@ use clap::{Parser, Subcommand}; use sp1_cli::{ commands::{ build::BuildCmd, build_toolchain::BuildToolchainCmd, - install_toolchain::InstallToolchainCmd, new::NewCmd, prove::ProveCmd, trace::TraceCmd, - vkey::VkeyCmd, + install_toolchain::InstallToolchainCmd, new::NewCmd, trace::TraceCmd, vkey::VkeyCmd, }, SP1_VERSION_MESSAGE, }; @@ -19,17 +18,13 @@ pub enum Cargo { #[command(author, about, long_about = None, args_conflicts_with_subcommands = true, version = SP1_VERSION_MESSAGE)] pub struct ProveCli { #[clap(subcommand)] - pub command: Option, - - #[clap(flatten)] - pub prove: ProveCmd, + pub command: ProveCliCommands, } #[derive(Subcommand)] pub enum ProveCliCommands { New(NewCmd), Build(BuildCmd), - Prove(ProveCmd), BuildToolchain(BuildToolchainCmd), InstallToolchain(InstallToolchainCmd), Trace(TraceCmd), @@ -38,11 +33,10 @@ pub enum ProveCliCommands { fn main() -> Result<()> { let Cargo::Prove(args) = Cargo::parse(); - let 
command = args.command.unwrap_or(ProveCliCommands::Prove(args.prove)); - match command { + + match args.command { ProveCliCommands::New(cmd) => cmd.run(), ProveCliCommands::Build(cmd) => cmd.run(), - ProveCliCommands::Prove(cmd) => cmd.run(), ProveCliCommands::BuildToolchain(cmd) => cmd.run(), ProveCliCommands::InstallToolchain(cmd) => cmd.run(), ProveCliCommands::Trace(cmd) => cmd.run(), diff --git a/crates/cli/src/commands/build_toolchain.rs b/crates/cli/src/commands/build_toolchain.rs index 97f1aae009..0e13d56cb5 100644 --- a/crates/cli/src/commands/build_toolchain.rs +++ b/crates/cli/src/commands/build_toolchain.rs @@ -64,7 +64,8 @@ impl BuildToolchainCmd { std::fs::write(&config_file, config_toml) .with_context(|| format!("while writing configuration to {:?}", config_file))?; - // Work around target sanity check added in rust-lang/rust@09c076810cb7649e5817f316215010d49e78e8d7. + // Work around target sanity check added in + // rust-lang/rust@09c076810cb7649e5817f316215010d49e78e8d7. let temp_dir = std::env::temp_dir().join("rustc-targets"); if !temp_dir.exists() { std::fs::create_dir_all(&temp_dir)?; diff --git a/crates/cli/src/commands/mod.rs b/crates/cli/src/commands/mod.rs index af62dcf36e..fc6eb6a5ac 100644 --- a/crates/cli/src/commands/mod.rs +++ b/crates/cli/src/commands/mod.rs @@ -2,6 +2,5 @@ pub mod build; pub mod build_toolchain; pub mod install_toolchain; pub mod new; -pub mod prove; pub mod trace; pub mod vkey; diff --git a/crates/cli/src/commands/prove.rs b/crates/cli/src/commands/prove.rs deleted file mode 100644 index 747752dad4..0000000000 --- a/crates/cli/src/commands/prove.rs +++ /dev/null @@ -1,133 +0,0 @@ -use anstyle::*; -use anyhow::Result; -use clap::Parser; -use sp1_build::{execute_build_program, BuildArgs}; -use sp1_core_machine::{ - io::SP1Stdin, - utils::{setup_logger, setup_tracer}, -}; -use sp1_sdk::ProverClient; -use std::{env, fs::File, io::Read, path::PathBuf, str::FromStr, time::Instant}; - -use crate::util::{elapsed, write_status}; - -#[derive(Debug, Clone)] -enum Input { - FilePath(PathBuf), - HexBytes(Vec), -} - -fn is_valid_hex_string(s: &str) -> bool { - if s.len() % 2 != 0 { - return false; - } - // All hex digits with optional 0x prefix - s.starts_with("0x") && s[2..].chars().all(|c| c.is_ascii_hexdigit()) - || s.chars().all(|c| c.is_ascii_hexdigit()) -} - -impl FromStr for Input { - type Err = String; - - fn from_str(s: &str) -> Result { - if is_valid_hex_string(s) { - // Remove 0x prefix if present - let s = if s.starts_with("0x") { s.strip_prefix("0x").unwrap() } else { s }; - if s.is_empty() { - return Ok(Input::HexBytes(Vec::new())); - } - if !s.chars().all(|c| c.is_ascii_hexdigit()) { - return Err("Invalid hex string.".to_string()); - } - let bytes = hex::decode(s).map_err(|e| e.to_string())?; - Ok(Input::HexBytes(bytes)) - } else if PathBuf::from(s).exists() { - Ok(Input::FilePath(PathBuf::from(s))) - } else { - Err("Input must be a valid file path or hex string.".to_string()) - } - } -} - -#[derive(Parser)] -#[command(name = "prove", about = "(default) Build and prove a program")] -pub struct ProveCmd { - #[clap(long, value_parser)] - input: Option, - - #[clap(long, action)] - output: Option, - - #[clap(long, action)] - profile: bool, - - #[clap(long, action)] - verbose: bool, - - #[clap(flatten)] - build_args: BuildArgs, -} - -impl ProveCmd { - pub fn run(&self) -> Result<()> { - let elf_paths = execute_build_program(&self.build_args, None)?; - - if !self.profile { - match env::var("RUST_LOG") { - Ok(_) => {} - Err(_) => 
env::set_var("RUST_LOG", "info"), - } - setup_logger(); - } else { - match env::var("RUST_TRACER") { - Ok(_) => {} - Err(_) => env::set_var("RUST_TRACER", "info"), - } - setup_tracer(); - } - - // The command predates multi-target build support. This allows the command to continue to - // work when only one package is built, preserving backward compatibility. - let elf_path = if elf_paths.len() == 1 { - elf_paths[0].1.to_owned() - } else { - anyhow::bail!("the prove command does not work with multi-target builds"); - }; - - let mut elf = Vec::new(); - File::open(elf_path.as_path().as_str()) - .expect("failed to open input file") - .read_to_end(&mut elf) - .expect("failed to read from input file"); - - let mut stdin = SP1Stdin::new(); - if let Some(ref input) = self.input { - match input { - Input::FilePath(ref path) => { - let mut file = File::open(path).expect("failed to open input file"); - let mut bytes = Vec::new(); - file.read_to_end(&mut bytes)?; - stdin.write_slice(&bytes); - } - Input::HexBytes(ref bytes) => { - stdin.write_slice(bytes); - } - } - } - - let start_time = Instant::now(); - let client = ProverClient::new(); - let (pk, _) = client.setup(&elf); - let proof = client.prove(&pk, stdin).run().unwrap(); - - if let Some(ref path) = self.output { - proof.save(path.to_str().unwrap()).expect("failed to save proof"); - } - - let elapsed = elapsed(start_time.elapsed()); - let green = AnsiColor::Green.on_default().effects(Effects::BOLD); - write_status(&green, "Finished", format!("proving in {}", elapsed).as_str()); - - Ok(()) - } -} diff --git a/crates/cli/src/commands/vkey.rs b/crates/cli/src/commands/vkey.rs index a8aeb1c183..734b470970 100644 --- a/crates/cli/src/commands/vkey.rs +++ b/crates/cli/src/commands/vkey.rs @@ -1,31 +1,63 @@ -use std::fs::File; +use std::{fs::File, io::Read}; use anyhow::Result; -use clap::Parser; +use clap::{Args, Parser}; +use sp1_build::{generate_elf_paths, BuildArgs}; use sp1_sdk::{HashableKey, ProverClient}; -use std::io::Read; #[derive(Parser)] #[command(name = "vkey", about = "View the verification key hash for a program.")] pub struct VkeyCmd { /// Path to the ELF. - #[arg(long, required = true)] - elf: String, + #[clap(flatten)] + elf: Elf, +} + +#[derive(Debug, Clone, Args)] +#[group(required = true, multiple = false)] +pub struct Elf { + /// The path to the ELF file + #[arg(long = "elf")] + path: Option, + /// The crate used to generate the ELF file + #[arg(long)] + program: Option, } impl VkeyCmd { pub fn run(&self) -> Result<()> { - // Read the elf file contents - let mut file = File::open(self.elf.clone()).unwrap(); - let mut elf = Vec::new(); - file.read_to_end(&mut elf).unwrap(); + let elf_paths = if let Some(path) = &self.elf.path { + vec![(None, path.clone())] + } else if let Some(program) = &self.elf.program { + let metadata_cmd = cargo_metadata::MetadataCommand::new(); + let metadata = metadata_cmd.exec()?; + let build_args = BuildArgs { packages: vec![program.clone()], ..Default::default() }; + + generate_elf_paths(&metadata, Some(&build_args))? 
+ .into_iter() + .map(|(target, path)| (Some(target), path.to_string())) + .collect() + } else { + unreachable!() + }; + + for (target, elf_path) in elf_paths { + // Read the elf file contents + let mut file = File::open(elf_path)?; + let mut elf = Vec::new(); + file.read_to_end(&mut elf)?; - // Get the verification key - let prover = ProverClient::new(); - let (_, vk) = prover.setup(&elf); + // Get the verification key + let prover = ProverClient::new(); + let (_, vk) = prover.setup(&elf); - // Print the verification key hash - println!("Verification Key Hash:\n{}", vk.vk.bytes32()); + // Print the verification key hash + if let Some(target) = target { + println!("Verification Key Hash for '{target}':\n{}", vk.vk.bytes32()); + } else { + println!("Verification Key Hash:\n{}", vk.vk.bytes32()); + } + } Ok(()) } diff --git a/crates/cli/src/lib.rs b/crates/cli/src/lib.rs index c9762921be..9e717aadae 100644 --- a/crates/cli/src/lib.rs +++ b/crates/cli/src/lib.rs @@ -1,5 +1,4 @@ pub mod commands; -mod util; use anyhow::{Context, Result}; use reqwest::Client; @@ -48,7 +47,14 @@ pub fn is_supported_target() -> bool { } pub fn get_target() -> String { - target_lexicon::HOST.to_string() + let mut target: target_lexicon::Triple = target_lexicon::HOST; + + // We don't want to operate on the musl toolchain, even if the CLI was compiled with musl + if target.environment == target_lexicon::Environment::Musl { + target.environment = target_lexicon::Environment::Gnu; + } + + target.to_string() } pub async fn get_toolchain_download_url(client: &Client, target: String) -> String { diff --git a/crates/cli/src/util.rs b/crates/cli/src/util.rs deleted file mode 100644 index 6993d74403..0000000000 --- a/crates/cli/src/util.rs +++ /dev/null @@ -1,15 +0,0 @@ -use std::{fmt::Display, time::Duration}; - -pub(crate) fn write_status(style: &dyn Display, status: &str, msg: &str) { - println!("{style}{status:>12}{style:#} {msg}"); -} - -pub(crate) fn elapsed(duration: Duration) -> String { - let secs = duration.as_secs(); - - if secs >= 60 { - format!("{}m {:02}s", secs / 60, secs % 60) - } else { - format!("{}.{:02}s", secs, duration.subsec_nanos() / 10_000_000) - } -} diff --git a/crates/core/executor/Cargo.toml b/crates/core/executor/Cargo.toml index 273b12aa4c..2cbf03a117 100644 --- a/crates/core/executor/Cargo.toml +++ b/crates/core/executor/Cargo.toml @@ -20,19 +20,19 @@ p3-field = { workspace = true } p3-maybe-rayon = { workspace = true, features = ["parallel"] } # misc -serde = { version = "1.0.205", features = ["derive", "rc"] } +serde = { workspace = true, features = ["derive", "rc"] } elf = "0.7.4" rrs_lib = { package = "rrs-succinct", version = "0.1.0" } eyre = "0.6.12" bincode = "1.3.3" -hashbrown = { version = "0.14.5", features = ["serde", "inline-more"] } -itertools = "0.13.0" +hashbrown = { workspace = true, features = ["serde", "inline-more"] } +itertools = { workspace = true } rand = "0.8.5" num = { version = "0.4.3" } typenum = "1.17.0" nohash-hasher = "0.2.0" thiserror = "1.0.63" -tracing = "0.1.40" +tracing = { workspace = true } strum_macros = "0.26.4" strum = { version = "0.26.3", features = ["derive"] } log = "0.4.22" @@ -41,10 +41,11 @@ bytemuck = "1.16.3" tiny-keccak = { version = "2.0.2", features = ["keccak"] } vec_map = { version = "0.8.2", features = ["serde"] } enum-map = { version = "2.7.3", features = ["serde"] } +test-artifacts = { workspace = true, optional = true } [dev-dependencies] -sp1-zkvm = { workspace = true } +sp1-zkvm = { workspace = true, features = ["lib"] } [features] 
-programs = [] +programs = ["dep:test-artifacts"] bigint-rug = ["sp1-curves/bigint-rug"] diff --git a/crates/core/executor/src/context.rs b/crates/core/executor/src/context.rs index 9585c6b851..0d6c05b170 100644 --- a/crates/core/executor/src/context.rs +++ b/crates/core/executor/src/context.rs @@ -21,6 +21,9 @@ pub struct SP1Context<'a> { /// The maximum number of cpu cycles to use for execution. pub max_cycles: Option, + + /// Skip deferred proof verification. + pub skip_deferred_proof_verification: bool, } /// A builder for [`SP1Context`]. @@ -30,6 +33,7 @@ pub struct SP1ContextBuilder<'a> { hook_registry_entries: Vec<(u32, BoxedHook<'a>)>, subproof_verifier: Option>, max_cycles: Option, + skip_deferred_proof_verification: bool, } impl<'a> SP1Context<'a> { @@ -68,7 +72,13 @@ impl<'a> SP1ContextBuilder<'a> { }); let subproof_verifier = take(&mut self.subproof_verifier); let cycle_limit = take(&mut self.max_cycles); - SP1Context { hook_registry, subproof_verifier, max_cycles: cycle_limit } + let skip_deferred_proof_verification = take(&mut self.skip_deferred_proof_verification); + SP1Context { + hook_registry, + subproof_verifier, + max_cycles: cycle_limit, + skip_deferred_proof_verification, + } } /// Add a runtime [Hook](super::Hook) into the context. @@ -110,6 +120,12 @@ impl<'a> SP1ContextBuilder<'a> { self.max_cycles = Some(max_cycles); self } + + /// Set the skip deferred proof verification flag. + pub fn set_skip_deferred_proof_verification(&mut self, skip: bool) -> &mut Self { + self.skip_deferred_proof_verification = skip; + self + } } #[cfg(test)] @@ -120,7 +136,7 @@ mod tests { #[test] fn defaults() { - let SP1Context { hook_registry, subproof_verifier, max_cycles: cycle_limit } = + let SP1Context { hook_registry, subproof_verifier, max_cycles: cycle_limit, .. } = SP1Context::builder().build(); assert!(hook_registry.is_none()); assert!(subproof_verifier.is_none()); diff --git a/crates/core/executor/src/events/alu.rs b/crates/core/executor/src/events/alu.rs index 2d2b14fe03..a42ce9ee82 100644 --- a/crates/core/executor/src/events/alu.rs +++ b/crates/core/executor/src/events/alu.rs @@ -9,6 +9,7 @@ use super::{create_random_lookup_ids, LookupId}; /// This object encapsulated the information needed to prove an ALU operation. This includes its /// shard, opcode, operands, and other relevant information. #[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[repr(C)] pub struct AluEvent { /// The lookup identifier. pub lookup_id: LookupId, diff --git a/crates/core/executor/src/events/memory.rs b/crates/core/executor/src/events/memory.rs index 655e0fc21d..d0e07109fb 100644 --- a/crates/core/executor/src/events/memory.rs +++ b/crates/core/executor/src/events/memory.rs @@ -5,6 +5,7 @@ use serde::{Deserialize, Serialize}; /// This object encapsulates the information needed to prove a memory access operation. This /// includes the shard, timestamp, and value of the memory address. #[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)] +#[repr(C)] pub struct MemoryRecord { /// The shard number. pub shard: u32, @@ -39,6 +40,7 @@ pub enum MemoryAccessPosition { /// includes the value, shard, timestamp, and previous shard and timestamp. #[allow(clippy::manual_non_exhaustive)] #[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)] +#[repr(C)] pub struct MemoryReadRecord { /// The value. pub value: u32, @@ -58,6 +60,7 @@ pub struct MemoryReadRecord { /// includes the value, shard, timestamp, previous value, previous shard, and previous timestamp. 
#[allow(clippy::manual_non_exhaustive)] #[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)] +#[repr(C)] pub struct MemoryWriteRecord { /// The value. pub value: u32, @@ -126,7 +129,8 @@ impl MemoryRecordEnum { /// This object encapsulates the information needed to prove a memory initialize or finalize /// operation. This includes the address, value, shard, timestamp, and whether the memory is /// initialized or finalized. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[repr(C)] pub struct MemoryInitializeFinalizeEvent { /// The address. pub addr: u32, @@ -223,7 +227,8 @@ impl From for MemoryRecordEnum { /// This object encapsulates the information needed to prove a memory access operation within a /// shard. This includes the address, initial memory access, and final memory access within a /// shard. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[repr(C)] pub struct MemoryLocalEvent { /// The address. pub addr: u32, diff --git a/crates/core/executor/src/events/precompiles/mod.rs b/crates/core/executor/src/events/precompiles/mod.rs index d55a2093b2..29e62fd17c 100644 --- a/crates/core/executor/src/events/precompiles/mod.rs +++ b/crates/core/executor/src/events/precompiles/mod.rs @@ -4,8 +4,10 @@ mod fptower; mod keccak256_permute; mod sha256_compress; mod sha256_extend; +mod u256x2048_mul; mod uint256; +use crate::syscalls::SyscallCode; pub use ec::*; pub use edwards::*; pub use fptower::*; @@ -15,10 +17,9 @@ use serde::{Deserialize, Serialize}; pub use sha256_compress::*; pub use sha256_extend::*; use strum::{EnumIter, IntoEnumIterator}; +pub use u256x2048_mul::*; pub use uint256::*; -use crate::syscalls::SyscallCode; - use super::{MemoryLocalEvent, SyscallEvent}; #[derive(Clone, Debug, Serialize, Deserialize, EnumIter)] @@ -72,6 +73,8 @@ pub enum PrecompileEvent { Bls12381Fp2Mul(Fp2MulEvent), /// Uint256 mul precompile event. Uint256Mul(Uint256MulEvent), + /// U256XU2048 mul precompile event. + U256xU2048Mul(U256xU2048MulEvent), } /// Trait to retrieve all the local memory events from a vec of precompile events. @@ -120,6 +123,9 @@ impl PrecompileLocalMemory for Vec<(SyscallEvent, PrecompileEvent)> { PrecompileEvent::Uint256Mul(e) => { iterators.push(e.local_mem_access.iter()); } + PrecompileEvent::U256xU2048Mul(e) => { + iterators.push(e.local_mem_access.iter()); + } PrecompileEvent::Bls12381Fp(e) | PrecompileEvent::Bn254Fp(e) => { iterators.push(e.local_mem_access.iter()); } @@ -166,7 +172,7 @@ impl PrecompileEvents { #[inline] /// Add a precompile event for a given syscall code. - pub(crate) fn add_event( + pub fn add_event( &mut self, syscall_code: SyscallCode, syscall_event: SyscallEvent, diff --git a/crates/core/executor/src/events/precompiles/u256x2048_mul.rs b/crates/core/executor/src/events/precompiles/u256x2048_mul.rs new file mode 100644 index 0000000000..e44a45ce2f --- /dev/null +++ b/crates/core/executor/src/events/precompiles/u256x2048_mul.rs @@ -0,0 +1,49 @@ +use serde::{Deserialize, Serialize}; + +use crate::events::{ + memory::{MemoryLocalEvent, MemoryReadRecord, MemoryWriteRecord}, + LookupId, +}; + +/// `U256xU2048` Mul Event. +/// +/// This event is emitted when a `U256xU2048` mul operation is performed. +#[derive(Default, Debug, Clone, Serialize, Deserialize)] +pub struct U256xU2048MulEvent { + /// The lookup identifier. + pub lookup_id: LookupId, + /// The shard number. + pub shard: u32, + /// The channel number. 
+ pub clk: u32, + /// The pointer to the a value. + pub a_ptr: u32, + /// The a value as a list of words. + pub a: Vec, + /// The pointer to the b value. + pub b_ptr: u32, + /// The b value as a list of words. + pub b: Vec, + /// The pointer to the lo value. + pub lo_ptr: u32, + /// The memory record for the pointer to the lo value. + pub lo_ptr_memory: MemoryReadRecord, + /// The lo value as a list of words. + pub lo: Vec, + /// The pointer to the hi value. + pub hi_ptr: u32, + /// The memory record for the pointer to the hi value. + pub hi_ptr_memory: MemoryReadRecord, + /// The hi value as a list of words. + pub hi: Vec, + /// The memory records for the a value. + pub a_memory_records: Vec, + /// The memory records for the b value. + pub b_memory_records: Vec, + /// The memory records for lo. + pub lo_memory_records: Vec, + /// The memory records for hi. + pub hi_memory_records: Vec, + /// The local memory access events. + pub local_mem_access: Vec, +} diff --git a/crates/core/executor/src/events/syscall.rs b/crates/core/executor/src/events/syscall.rs index 23f9263ba8..09227c5a50 100644 --- a/crates/core/executor/src/events/syscall.rs +++ b/crates/core/executor/src/events/syscall.rs @@ -4,9 +4,10 @@ use super::LookupId; /// Syscall Event. /// -/// This object encapsulated the information needed to prove a syscall invocation from the CPU table. -/// This includes its shard, clk, syscall id, arguments, other relevant information. +/// This object encapsulated the information needed to prove a syscall invocation from the CPU +/// table. This includes its shard, clk, syscall id, arguments, other relevant information. #[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[repr(C)] pub struct SyscallEvent { /// The shard number. pub shard: u32, diff --git a/crates/core/executor/src/events/utils.rs b/crates/core/executor/src/events/utils.rs index d4b38df745..2c70f67c8d 100644 --- a/crates/core/executor/src/events/utils.rs +++ b/crates/core/executor/src/events/utils.rs @@ -1,13 +1,9 @@ -use serde::Deserialize; -use serde::Serialize; -use std::{ - fmt::Display, - iter::{Map, Peekable}, -}; +use serde::{Deserialize, Serialize}; +use std::fmt::Display; /// A unique identifier for lookups. #[derive(Deserialize, Serialize, Debug, Clone, Copy, Default, Eq, Hash, PartialEq)] - +#[repr(C)] pub struct LookupId(pub u64); /// Create a random lookup id. This is slower than `record.create_lookup_id()` but is useful for @@ -17,32 +13,64 @@ pub(crate) fn create_random_lookup_ids() -> [LookupId; 6] { std::array::from_fn(|_| LookupId(rand::random())) } -/// Returns sorted and formatted rows of a table of counts (e.g. `opcode_counts`). +/// Returns a tuple containing everything needed to to correctly display a table of counts +/// (e.g. `opcode_counts`): /// -/// The table is sorted first by count (descending) and then by label (ascending). -/// The first column consists of the counts, is right-justified, and is padded precisely -/// enough to fit all the numbers. The second column consists of the labels (e.g. `OpCode`s). -/// The columns are separated by a single space character. -#[allow(clippy::type_complexity)] +/// 1. The number of characters of the highest count, that can be used to right-justify the count +/// column. +/// +/// 2. The table sorted first by count (descending) and then by label (ascending). The table +/// itself is an iterator of a tuple (label, count). 
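///
/// A minimal usage sketch (illustrative; the `counts` map and its contents are assumptions,
/// only `sorted_table_lines` and `format_table_line` come from this module):
///
/// ```ignore
/// use hashbrown::HashMap;
///
/// let mut counts: HashMap<&str, u64> = HashMap::new();
/// counts.insert("add", 42);
/// counts.insert("mul", 7);
///
/// let (width, lines) = sorted_table_lines(&counts);
/// for (label, count) in lines {
///     // Each count is right-justified to `width`, followed by its lowercased label.
///     println!("{}", format_table_line(&width, &label, count));
/// }
/// ```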
pub fn sorted_table_lines<'a, K, V>( - table: impl IntoIterator + 'a, -) -> Map< - Peekable, impl FnMut((K, V)) -> (String, String)>>, - impl FnMut((String, String)) -> String, -> + table: impl IntoIterator + 'a, +) -> (usize, impl Iterator) where K: Ord + Display + 'a, V: Ord + Display + 'a, { let mut entries = table.into_iter().collect::>(); // Sort table by count (descending), then the name order (ascending). - entries.sort_unstable_by(|a, b| a.1.cmp(&b.1).reverse().then_with(|| a.0.cmp(&b.0))); + entries.sort_unstable_by(|a, b| a.1.cmp(b.1).reverse().then_with(|| a.0.cmp(&b.0))); // Convert counts to `String`s to prepare them for printing and to measure their width. - let mut table_with_string_counts = entries - .into_iter() - .map(|(label, ct)| (label.to_string().to_lowercase(), ct.to_string())) - .peekable(); + let mut entries = + entries.into_iter().map(|(label, ct)| (label.to_string().to_lowercase(), ct)).peekable(); // Calculate width for padding the counts. - let width = table_with_string_counts.peek().map(|(_, b)| b.len()).unwrap_or_default(); - table_with_string_counts.map(move |(label, count)| format!("{count:>width$} {label}")) + let width = entries.peek().map(|(_, b)| b.to_string().len()).unwrap_or_default(); + + (width, entries) +} + +/// Returns a formatted row of a table of counts (e.g. `opcode_counts`). +/// +/// The first column consists of the counts, is right-justified, and is padded precisely +/// enough to fit all the numbers, using the provided `width`. The second column consists of +/// the labels (e.g. `OpCode`s). The columns are separated by a single space character. +#[must_use] +pub fn format_table_line(width: &usize, label: &str, count: &V) -> String +where + V: Display, +{ + format!("{count:>width$} {label}") +} + +/// Returns sorted and formatted rows of a table of counts (e.g. `opcode_counts`). +/// +/// The table is sorted first by count (descending) and then by label (ascending). +/// The first column consists of the counts, is right-justified, and is padded precisely +/// enough to fit all the numbers. The second column consists of the labels (e.g. `OpCode`s). +/// The columns are separated by a single space character. +/// +/// It's possible to hide rows with 0 count by setting `hide_zeros` to true. +pub fn generate_execution_report<'a, K, V>( + table: impl IntoIterator + 'a, +) -> impl Iterator + 'a +where + K: Ord + Display + 'a, + V: Ord + PartialEq + Display + 'a, +{ + let (width, lines) = sorted_table_lines(table); + + lines + .filter(move |(_, count)| **count != 0) + .map(move |(label, count)| format!(" {}", format_table_line(&width, &label, count))) } diff --git a/crates/core/executor/src/executor.rs b/crates/core/executor/src/executor.rs index e80d894bbe..c2cc1915ab 100644 --- a/crates/core/executor/src/executor.rs +++ b/crates/core/executor/src/executor.rs @@ -6,7 +6,7 @@ use std::{ use hashbrown::HashMap; use serde::{Deserialize, Serialize}; -use sp1_stark::SP1CoreOpts; +use sp1_stark::{air::PublicValues, SP1CoreOpts}; use thiserror::Error; use crate::{ @@ -26,6 +26,15 @@ use crate::{ Instruction, Opcode, Program, Register, }; +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +/// Whether to verify deferred proofs during execution. +pub enum DeferredProofVerification { + /// Verify deferred proofs during execution. + Enabled, + /// Skip verification of deferred proofs + Disabled, +} + /// An executor for the SP1 RISC-V zkVM. 
/// /// The exeuctor is responsible for executing a user program and tracing important events which @@ -70,8 +79,9 @@ pub struct Executor<'a> { /// checkpoints. pub memory_checkpoint: PagedMemory>, - /// Memory addresses that were initialized in this batch of shards. Used to minimize the size of - /// checkpoints. The value stored is whether or not it had a value at the beginning of the batch. + /// Memory addresses that were initialized in this batch of shards. Used to minimize the size + /// of checkpoints. The value stored is whether or not it had a value at the beginning of + /// the batch. pub uninitialized_memory_checkpoint: PagedMemory, /// The memory accesses for the current cycle. @@ -80,6 +90,9 @@ pub struct Executor<'a> { /// The maximum number of cpu cycles to use for execution. pub max_cycles: Option, + /// Skip deferred proof verification. + pub deferred_proof_verification: DeferredProofVerification, + /// The state of the execution. pub state: ExecutionState, @@ -231,6 +244,11 @@ impl<'a> Executor<'a> { hook_registry, opts, max_cycles: context.max_cycles, + deferred_proof_verification: if context.skip_deferred_proof_verification { + DeferredProofVerification::Disabled + } else { + DeferredProofVerification::Enabled + }, memory_checkpoint: PagedMemory::new_preallocated(), uninitialized_memory_checkpoint: PagedMemory::new_preallocated(), local_memory_access: HashMap::new(), @@ -1373,7 +1391,7 @@ impl<'a> Executor<'a> { pub fn execute_state( &mut self, emit_global_memory_events: bool, - ) -> Result<(ExecutionState, bool), ExecutionError> { + ) -> Result<(ExecutionState, PublicValues, bool), ExecutionError> { self.memory_checkpoint.clear(); self.executor_mode = ExecutorMode::Checkpoint; self.emit_global_memory_events = emit_global_memory_events; @@ -1388,6 +1406,7 @@ impl<'a> Executor<'a> { let done = tracing::debug_span!("execute").in_scope(|| self.execute())?; // Create a checkpoint using `memory_checkpoint`. Just include all memory if `done` since we // need it all for MemoryFinalize. 
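        // The pc where this batch of execution stopped is captured below as `next_pc`; further
        // down it overwrites `start_pc`/`next_pc` in the returned public values, so both fields
        // record the pc at which the executor paused.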
+ let next_pc = self.state.pc; tracing::debug_span!("create memory checkpoint").in_scope(|| { let memory_checkpoint = std::mem::take(&mut self.memory_checkpoint); let uninitialized_memory_checkpoint = @@ -1423,10 +1442,14 @@ impl<'a> Executor<'a> { .collect(); } }); + let mut public_values = self.records.last().as_ref().unwrap().public_values; + public_values.start_pc = next_pc; + public_values.next_pc = next_pc; + println!("public values: {public_values:?}"); if !done { self.records.clear(); } - Ok((checkpoint, done)) + Ok((checkpoint, public_values, done)) } fn initialize(&mut self) { @@ -1670,7 +1693,7 @@ mod tests { use crate::programs::tests::{ fibonacci_program, panic_program, secp256r1_add_program, secp256r1_double_program, - simple_memory_program, simple_program, ssz_withdrawals_program, + simple_memory_program, simple_program, ssz_withdrawals_program, u256xu2048_mul_program, }; use crate::Register; @@ -1713,6 +1736,13 @@ mod tests { runtime.run().unwrap(); } + #[test] + fn test_u256xu2048_mul() { + let program = u256xu2048_mul_program(); + let mut runtime = Executor::new(program, SP1CoreOpts::default()); + runtime.run().unwrap(); + } + #[test] fn test_ssz_withdrawals_program_run() { let program = ssz_withdrawals_program(); diff --git a/crates/core/executor/src/hook.rs b/crates/core/executor/src/hook.rs index e5479f623f..c15e803e45 100644 --- a/crates/core/executor/src/hook.rs +++ b/crates/core/executor/src/hook.rs @@ -3,8 +3,10 @@ use core::fmt::Debug; use std::sync::{Arc, RwLock, RwLockWriteGuard}; use hashbrown::HashMap; -use sp1_curves::k256::{Invert, RecoveryId, Signature, VerifyingKey}; -use sp1_curves::p256::Signature as p256Signature; +use sp1_curves::{ + k256::{Invert, RecoveryId, Signature, VerifyingKey}, + p256::Signature as p256Signature, +}; use crate::Executor; diff --git a/crates/core/executor/src/instruction.rs b/crates/core/executor/src/instruction.rs index 10dfa5476d..cbea85daaf 100644 --- a/crates/core/executor/src/instruction.rs +++ b/crates/core/executor/src/instruction.rs @@ -11,6 +11,7 @@ use crate::opcode::Opcode; /// as 32-bit words, but instead use a custom encoding that is more friendly to decode in the /// SP1 zkVM. #[derive(Clone, Copy, Serialize, Deserialize)] +#[repr(C)] pub struct Instruction { /// The operation to execute. pub opcode: Opcode, diff --git a/crates/core/executor/src/memory.rs b/crates/core/executor/src/memory.rs index a036bbf5ca..bdaf468a6f 100644 --- a/crates/core/executor/src/memory.rs +++ b/crates/core/executor/src/memory.rs @@ -198,7 +198,8 @@ impl<'a, V: Copy> Entry<'a, V> { } } - /// Provides in-place mutable access to an occupied entry before any potential inserts into the map. + /// Provides in-place mutable access to an occupied entry before any potential inserts into the + /// map. 
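    ///
    /// A minimal sketch of the intended usage (illustrative; it assumes `PagedMemory::entry`
    /// hands out this `Entry` type and that the closure receives `&mut V`, mirroring the
    /// standard library's `HashMap` entry API):
    ///
    /// ```ignore
    /// match memory.entry(addr).and_modify(|record| record.timestamp = clk) {
    ///     Entry::Occupied(_) => { /* the existing record was updated in place */ }
    ///     Entry::Vacant(_) => { /* no record at `addr`; nothing was modified */ }
    /// }
    /// ```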
pub fn and_modify(mut self, f: F) -> Self { match &mut self { Entry::Vacant(_) => {} diff --git a/crates/core/executor/src/opcode.rs b/crates/core/executor/src/opcode.rs index 818b5b1f2b..b8dd250e95 100644 --- a/crates/core/executor/src/opcode.rs +++ b/crates/core/executor/src/opcode.rs @@ -24,6 +24,7 @@ use serde::{Deserialize, Serialize}; #[derive( Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, PartialOrd, Ord, Enum, )] +#[repr(u8)] pub enum Opcode { /// rd ← rs1 + rs2, pc ← pc + 4 ADD = 0, diff --git a/crates/core/executor/src/program.rs b/crates/core/executor/src/program.rs index 09bb70cac4..8dcf4ef715 100644 --- a/crates/core/executor/src/program.rs +++ b/crates/core/executor/src/program.rs @@ -2,17 +2,23 @@ use std::{fs::File, io::Read}; -use hashbrown::HashMap; -use p3_field::Field; -use serde::{Deserialize, Serialize}; -use sp1_stark::air::{MachineAir, MachineProgram}; - use crate::{ disassembler::{transpile, Elf}, instruction::Instruction, CoreShape, }; - +use hashbrown::HashMap; +use p3_field::AbstractExtensionField; +use p3_field::Field; +use p3_field::PrimeField; +use p3_maybe_rayon::prelude::IntoParallelIterator; +use p3_maybe_rayon::prelude::{ParallelBridge, ParallelIterator}; +use serde::{Deserialize, Serialize}; +use sp1_stark::air::{MachineAir, MachineProgram}; +use sp1_stark::septic_curve::{SepticCurve, SepticCurveComplete}; +use sp1_stark::septic_digest::SepticDigest; +use sp1_stark::septic_extension::SepticExtension; +use sp1_stark::InteractionKind; /// A program that can be executed by the SP1 zkVM. /// /// Contains a series of instructions along with the initial memory image. It also contains the @@ -98,8 +104,35 @@ impl Program { } } -impl MachineProgram for Program { +impl MachineProgram for Program { fn pc_start(&self) -> F { F::from_canonical_u32(self.pc_start) } + + fn initial_global_cumulative_sum(&self) -> SepticDigest { + let mut digests: Vec> = self + .memory_image + .iter() + .par_bridge() + .map(|(&addr, &word)| { + let values = [ + (InteractionKind::Memory as u32) << 24, + 0, + addr, + word & 255, + (word >> 8) & 255, + (word >> 16) & 255, + (word >> 24) & 255, + ]; + let x_start = + SepticExtension::::from_base_fn(|i| F::from_canonical_u32(values[i])); + let (point, _) = SepticCurve::::lift_x(x_start); + SepticCurveComplete::Affine(point.neg()) + }) + .collect(); + digests.push(SepticCurveComplete::Affine(SepticDigest::::zero().0)); + SepticDigest( + digests.into_par_iter().reduce(|| SepticCurveComplete::Infinity, |a, b| a + b).point(), + ) + } } diff --git a/crates/core/executor/src/programs.rs b/crates/core/executor/src/programs.rs index 3212e32d4e..d281c27875 100644 --- a/crates/core/executor/src/programs.rs +++ b/crates/core/executor/src/programs.rs @@ -5,126 +5,10 @@ pub mod tests { use crate::{Instruction, Opcode, Program}; - pub const CHESS_ELF: &[u8] = - include_bytes!("../../../../examples/chess/program/elf/riscv32im-succinct-zkvm-elf"); - - pub const FIBONACCI_IO_ELF: &[u8] = - include_bytes!("../../../../examples/fibonacci/program/elf/riscv32im-succinct-zkvm-elf"); - - pub const IO_ELF: &[u8] = - include_bytes!("../../../../examples/io/program/elf/riscv32im-succinct-zkvm-elf"); - - pub const JSON_ELF: &[u8] = - include_bytes!("../../../../examples/json/program/elf/riscv32im-succinct-zkvm-elf"); - - pub const REGEX_ELF: &[u8] = - include_bytes!("../../../../examples/regex/program/elf/riscv32im-succinct-zkvm-elf"); - - pub const RSA_ELF: &[u8] = - 
include_bytes!("../../../../examples/rsa/program/elf/riscv32im-succinct-zkvm-elf"); - - pub const SSZ_WITHDRAWALS_ELF: &[u8] = include_bytes!( - "../../../../examples/ssz-withdrawals/program/elf/riscv32im-succinct-zkvm-elf" - ); - - pub const TENDERMINT_ELF: &[u8] = - include_bytes!("../../../../examples/tendermint/program/elf/riscv32im-succinct-zkvm-elf"); - - pub const FIBONACCI_ELF: &[u8] = - include_bytes!("../../../../tests/fibonacci/elf/riscv32im-succinct-zkvm-elf"); - - pub const ED25519_ELF: &[u8] = - include_bytes!("../../../../tests/ed25519/elf/riscv32im-succinct-zkvm-elf"); - - pub const CYCLE_TRACKER_ELF: &[u8] = - include_bytes!("../../../../tests/cycle-tracker/elf/riscv32im-succinct-zkvm-elf"); - - pub const ED_ADD_ELF: &[u8] = - include_bytes!("../../../../tests/ed-add/elf/riscv32im-succinct-zkvm-elf"); - - pub const ED_DECOMPRESS_ELF: &[u8] = - include_bytes!("../../../../tests/ed-decompress/elf/riscv32im-succinct-zkvm-elf"); - - pub const KECCAK_PERMUTE_ELF: &[u8] = - include_bytes!("../../../../tests/keccak-permute/elf/riscv32im-succinct-zkvm-elf"); - - pub const KECCAK256_ELF: &[u8] = - include_bytes!("../../../../tests/keccak256/elf/riscv32im-succinct-zkvm-elf"); - - pub const SECP256K1_ADD_ELF: &[u8] = - include_bytes!("../../../../tests/secp256k1-add/elf/riscv32im-succinct-zkvm-elf"); - - pub const SECP256K1_DECOMPRESS_ELF: &[u8] = - include_bytes!("../../../../tests/secp256k1-decompress/elf/riscv32im-succinct-zkvm-elf"); - - pub const SECP256K1_DOUBLE_ELF: &[u8] = - include_bytes!("../../../../tests/secp256k1-double/elf/riscv32im-succinct-zkvm-elf"); - - pub const SECP256R1_ADD_ELF: &[u8] = - include_bytes!("../../../../tests/secp256r1-add/elf/riscv32im-succinct-zkvm-elf"); - - pub const SECP256R1_DOUBLE_ELF: &[u8] = - include_bytes!("../../../../tests/secp256r1-double/elf/riscv32im-succinct-zkvm-elf"); - - pub const SHA_COMPRESS_ELF: &[u8] = - include_bytes!("../../../../tests/sha-compress/elf/riscv32im-succinct-zkvm-elf"); - - pub const SHA_EXTEND_ELF: &[u8] = - include_bytes!("../../../../tests/sha-extend/elf/riscv32im-succinct-zkvm-elf"); - - pub const SHA2_ELF: &[u8] = - include_bytes!("../../../../tests/sha2/elf/riscv32im-succinct-zkvm-elf"); - - pub const BN254_ADD_ELF: &[u8] = - include_bytes!("../../../../tests/bn254-add/elf/riscv32im-succinct-zkvm-elf"); - - pub const BN254_DOUBLE_ELF: &[u8] = - include_bytes!("../../../../tests/bn254-double/elf/riscv32im-succinct-zkvm-elf"); - - pub const BN254_MUL_ELF: &[u8] = - include_bytes!("../../../../tests/bn254-mul/elf/riscv32im-succinct-zkvm-elf"); - - pub const SECP256K1_MUL_ELF: &[u8] = - include_bytes!("../../../../tests/secp256k1-mul/elf/riscv32im-succinct-zkvm-elf"); - - pub const BLS12381_ADD_ELF: &[u8] = - include_bytes!("../../../../tests/bls12381-add/elf/riscv32im-succinct-zkvm-elf"); - - pub const BLS12381_DOUBLE_ELF: &[u8] = - include_bytes!("../../../../tests/bls12381-double/elf/riscv32im-succinct-zkvm-elf"); - - pub const BLS12381_MUL_ELF: &[u8] = - include_bytes!("../../../../tests/bls12381-mul/elf/riscv32im-succinct-zkvm-elf"); - - pub const UINT256_MUL_ELF: &[u8] = - include_bytes!("../../../../tests/uint256-mul/elf/riscv32im-succinct-zkvm-elf"); - - pub const BLS12381_DECOMPRESS_ELF: &[u8] = - include_bytes!("../../../../tests/bls12381-decompress/elf/riscv32im-succinct-zkvm-elf"); - - pub const VERIFY_PROOF_ELF: &[u8] = - include_bytes!("../../../../tests/verify-proof/elf/riscv32im-succinct-zkvm-elf"); - - pub const PANIC_ELF: &[u8] = - 
include_bytes!("../../../../tests/panic/elf/riscv32im-succinct-zkvm-elf"); - - pub const BLS12381_FP_ELF: &[u8] = - include_bytes!("../../../../tests/bls12381-fp/elf/riscv32im-succinct-zkvm-elf"); - - pub const BLS12381_FP2_MUL_ELF: &[u8] = - include_bytes!("../../../../tests/bls12381-fp2-mul/elf/riscv32im-succinct-zkvm-elf"); - - pub const BLS12381_FP2_ADDSUB_ELF: &[u8] = - include_bytes!("../../../../tests/bls12381-fp2-addsub/elf/riscv32im-succinct-zkvm-elf"); - - pub const BN254_FP_ELF: &[u8] = - include_bytes!("../../../../tests/bn254-fp/elf/riscv32im-succinct-zkvm-elf"); - - pub const BN254_FP2_ADDSUB_ELF: &[u8] = - include_bytes!("../../../../tests/bn254-fp2-addsub/elf/riscv32im-succinct-zkvm-elf"); - - pub const BN254_FP2_MUL_ELF: &[u8] = - include_bytes!("../../../../tests/bn254-fp2-mul/elf/riscv32im-succinct-zkvm-elf"); + use test_artifacts::{ + FIBONACCI_ELF, KECCAK_PERMUTE_ELF, PANIC_ELF, SECP256R1_ADD_ELF, SECP256R1_DOUBLE_ELF, + U256XU2048_MUL_ELF, + }; #[must_use] pub fn simple_program() -> Program { @@ -166,6 +50,16 @@ pub mod tests { Program::from(SECP256R1_DOUBLE_ELF).unwrap() } + /// Get the u256x2048 mul program. + /// + /// # Panics + /// + /// This function will panic if the program fails to load. + #[must_use] + pub fn u256xu2048_mul_program() -> Program { + Program::from(U256XU2048_MUL_ELF).unwrap() + } + /// Get the SSZ withdrawals program. /// /// # Panics diff --git a/crates/core/executor/src/record.rs b/crates/core/executor/src/record.rs index f9e89acb4c..fbf79cf01a 100644 --- a/crates/core/executor/src/record.rs +++ b/crates/core/executor/src/record.rs @@ -178,7 +178,15 @@ impl ExecutionRecord { /// Splits the deferred [`ExecutionRecord`] into multiple [`ExecutionRecord`]s, each which /// contain a "reasonable" number of deferred events. - pub fn split(&mut self, last: bool, opts: SplitOpts) -> Vec { + /// + /// The optional `last_record` will be provided if there are few enough deferred events that + /// they can all be packed into the already existing last record. + pub fn split( + &mut self, + last: bool, + last_record: Option<&mut ExecutionRecord>, + opts: SplitOpts, + ) -> Vec { let mut shards = Vec::new(); let precompile_events = take(&mut self.precompile_events); @@ -216,6 +224,18 @@ impl ExecutionRecord { self.global_memory_initialize_events.sort_by_key(|event| event.addr); self.global_memory_finalize_events.sort_by_key(|event| event.addr); + // If there are no precompile shards, and `last_record` is Some, pack the memory events + // into the last record. + let pack_memory_events_into_last_record = last_record.is_some() && shards.is_empty(); + let mut blank_record = ExecutionRecord::new(self.program.clone()); + + // If `last_record` is None, use a blank record to store the memory events. 
+ let last_record_ref = if pack_memory_events_into_last_record { + last_record.unwrap() + } else { + &mut blank_record + }; + let mut init_addr_bits = [0; 32]; let mut finalize_addr_bits = [0; 32]; for mem_chunks in self @@ -230,28 +250,34 @@ impl ExecutionRecord { EitherOrBoth::Left(mem_init_chunk) => (mem_init_chunk, [].as_slice()), EitherOrBoth::Right(mem_finalize_chunk) => ([].as_slice(), mem_finalize_chunk), }; - let mut shard = ExecutionRecord::new(self.program.clone()); - shard.global_memory_initialize_events.extend_from_slice(mem_init_chunk); - shard.public_values.previous_init_addr_bits = init_addr_bits; + last_record_ref.global_memory_initialize_events.extend_from_slice(mem_init_chunk); + last_record_ref.public_values.previous_init_addr_bits = init_addr_bits; if let Some(last_event) = mem_init_chunk.last() { let last_init_addr_bits = core::array::from_fn(|i| (last_event.addr >> i) & 1); init_addr_bits = last_init_addr_bits; } - shard.public_values.last_init_addr_bits = init_addr_bits; + last_record_ref.public_values.last_init_addr_bits = init_addr_bits; - shard.global_memory_finalize_events.extend_from_slice(mem_finalize_chunk); - shard.public_values.previous_finalize_addr_bits = finalize_addr_bits; + last_record_ref.global_memory_finalize_events.extend_from_slice(mem_finalize_chunk); + last_record_ref.public_values.previous_finalize_addr_bits = finalize_addr_bits; if let Some(last_event) = mem_finalize_chunk.last() { let last_finalize_addr_bits = core::array::from_fn(|i| (last_event.addr >> i) & 1); finalize_addr_bits = last_finalize_addr_bits; } - shard.public_values.last_finalize_addr_bits = finalize_addr_bits; + last_record_ref.public_values.last_finalize_addr_bits = finalize_addr_bits; + + if !pack_memory_events_into_last_record { + // If not packing memory events into the last record, add 'last_record_ref' + // to the returned records. `take` replaces `blank_program` with the default. + shards.push(take(last_record_ref)); - shards.push(shard); + // Reset the last record so its program is the correct one. (The default program + // provided by `take` contains no instructions.) + last_record_ref.program = self.program.clone(); + } } } - shards } diff --git a/crates/core/executor/src/report.rs b/crates/core/executor/src/report.rs index 6f15579216..00fe56ab23 100644 --- a/crates/core/executor/src/report.rs +++ b/crates/core/executor/src/report.rs @@ -6,7 +6,7 @@ use std::{ use enum_map::{EnumArray, EnumMap}; use hashbrown::HashMap; -use crate::{events::sorted_table_lines, syscalls::SyscallCode, Opcode}; +use crate::{events::generate_execution_report, syscalls::SyscallCode, Opcode}; /// An execution report. 
#[derive(Default, Debug, Clone, PartialEq, Eq)] @@ -68,12 +68,12 @@ impl Add for ExecutionReport { impl Display for ExecutionReport { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { writeln!(f, "opcode counts ({} total instructions):", self.total_instruction_count())?; - for line in sorted_table_lines(self.opcode_counts.as_ref()) { + for line in generate_execution_report(self.opcode_counts.as_ref()) { writeln!(f, " {line}")?; } writeln!(f, "syscall counts ({} total syscall instructions):", self.total_syscall_count())?; - for line in sorted_table_lines(self.syscall_counts.as_ref()) { + for line in generate_execution_report(self.syscall_counts.as_ref()) { writeln!(f, " {line}")?; } diff --git a/crates/core/executor/src/state.rs b/crates/core/executor/src/state.rs index 4c669188df..55ba22e321 100644 --- a/crates/core/executor/src/state.rs +++ b/crates/core/executor/src/state.rs @@ -29,7 +29,8 @@ pub struct ExecutionState { /// + timestamp that each memory address was accessed. pub memory: PagedMemory, - /// The global clock keeps track of how many instructions have been executed through all shards. + /// The global clock keeps track of how many instructions have been executed through all + /// shards. pub global_clk: u64, /// The clock increments by 4 (possibly more in syscalls) for each instruction that has been diff --git a/crates/core/executor/src/syscalls/code.rs b/crates/core/executor/src/syscalls/code.rs index 1891a3f742..59fe5c7de8 100644 --- a/crates/core/executor/src/syscalls/code.rs +++ b/crates/core/executor/src/syscalls/code.rs @@ -23,6 +23,7 @@ use strum_macros::EnumIter; )] #[allow(non_camel_case_types)] #[allow(clippy::upper_case_acronyms)] +#[repr(u32)] pub enum SyscallCode { /// Halts the program. HALT = 0x00_00_00_00, @@ -87,6 +88,9 @@ pub enum SyscallCode { /// Executes the `UINT256_MUL` precompile. UINT256_MUL = 0x00_01_01_1D, + /// Executes the `U256XU2048_MUL` precompile. + U256XU2048_MUL = 0x00_01_01_2F, + /// Executes the `BLS12381_ADD` precompile. BLS12381_ADD = 0x00_01_01_1E, @@ -166,6 +170,7 @@ impl SyscallCode { 0x00_00_00_F0 => SyscallCode::HINT_LEN, 0x00_00_00_F1 => SyscallCode::HINT_READ, 0x00_01_01_1D => SyscallCode::UINT256_MUL, + 0x00_01_01_2F => SyscallCode::U256XU2048_MUL, 0x00_01_01_20 => SyscallCode::BLS12381_FP_ADD, 0x00_01_01_21 => SyscallCode::BLS12381_FP_SUB, 0x00_01_01_22 => SyscallCode::BLS12381_FP_MUL, diff --git a/crates/core/executor/src/syscalls/context.rs b/crates/core/executor/src/syscalls/context.rs index 74dfafb279..64f41dc7ab 100644 --- a/crates/core/executor/src/syscalls/context.rs +++ b/crates/core/executor/src/syscalls/context.rs @@ -110,9 +110,9 @@ impl<'a, 'b> SyscallContext<'a, 'b> { let mut syscall_local_mem_events = Vec::new(); if !self.rt.unconstrained && self.rt.executor_mode == ExecutorMode::Trace { - // Will need to transfer the existing memory local events in the executor to it's record, - // and return all the syscall memory local events. This is similar to what - // `bump_record` does. + // Will need to transfer the existing memory local events in the executor to it's + // record, and return all the syscall memory local events. This is similar + // to what `bump_record` does. 
for (addr, event) in self.local_memory_access.drain() { let local_mem_access = self.rt.local_memory_access.remove(&addr); diff --git a/crates/core/executor/src/syscalls/mod.rs b/crates/core/executor/src/syscalls/mod.rs index 43811050b3..8abb75bc2e 100644 --- a/crates/core/executor/src/syscalls/mod.rs +++ b/crates/core/executor/src/syscalls/mod.rs @@ -26,6 +26,7 @@ use precompiles::{ fptower::{Fp2AddSubSyscall, Fp2MulSyscall, FpOpSyscall}, keccak256::permute::Keccak256PermuteSyscall, sha256::{compress::Sha256CompressSyscall, extend::Sha256ExtendSyscall}, + u256x2048_mul::U256xU2048MulSyscall, uint256::Uint256MulSyscall, weierstrass::{ add::WeierstrassAddAssignSyscall, decompress::WeierstrassDecompressSyscall, @@ -145,6 +146,8 @@ pub fn default_syscall_map() -> HashMap> { syscall_map.insert(SyscallCode::UINT256_MUL, Arc::new(Uint256MulSyscall)); + syscall_map.insert(SyscallCode::U256XU2048_MUL, Arc::new(U256xU2048MulSyscall)); + syscall_map.insert( SyscallCode::BLS12381_FP_ADD, Arc::new(FpOpSyscall::::new(FieldOperation::Add)), diff --git a/crates/core/executor/src/syscalls/precompiles/fptower/fp2_addsub.rs b/crates/core/executor/src/syscalls/precompiles/fptower/fp2_addsub.rs index f583432310..b433f6384a 100644 --- a/crates/core/executor/src/syscalls/precompiles/fptower/fp2_addsub.rs +++ b/crates/core/executor/src/syscalls/precompiles/fptower/fp2_addsub.rs @@ -86,8 +86,8 @@ impl Syscall for Fp2AddSubSyscall

{ local_mem_access: rt.postprocess(), }; match P::FIELD_TYPE { - // All the fp2 add and sub events for a given curve are coalesced to the curve's fp2 add operation. Only check for - // that operation. + // All the fp2 add and sub events for a given curve are coalesced to the curve's fp2 add + // operation. Only check for that operation. // TODO: Fix this. FieldType::Bn254 => { let syscall_code_key = match syscall_code { diff --git a/crates/core/executor/src/syscalls/precompiles/mod.rs b/crates/core/executor/src/syscalls/precompiles/mod.rs index f07da94609..4b06dd3c12 100644 --- a/crates/core/executor/src/syscalls/precompiles/mod.rs +++ b/crates/core/executor/src/syscalls/precompiles/mod.rs @@ -2,5 +2,6 @@ pub mod edwards; pub mod fptower; pub mod keccak256; pub mod sha256; +pub mod u256x2048_mul; pub mod uint256; pub mod weierstrass; diff --git a/crates/core/executor/src/syscalls/precompiles/u256x2048_mul.rs b/crates/core/executor/src/syscalls/precompiles/u256x2048_mul.rs new file mode 100644 index 0000000000..3795055608 --- /dev/null +++ b/crates/core/executor/src/syscalls/precompiles/u256x2048_mul.rs @@ -0,0 +1,91 @@ +use num::{BigUint, Integer, One}; + +use sp1_primitives::consts::{bytes_to_words_le, words_to_bytes_le_vec}; + +use crate::{ + events::{PrecompileEvent, U256xU2048MulEvent}, + syscalls::{Syscall, SyscallCode, SyscallContext}, + Register::{X12, X13}, +}; + +const U256_NUM_WORDS: usize = 8; +const U2048_NUM_WORDS: usize = 64; +const U256_NUM_BYTES: usize = U256_NUM_WORDS * 4; +const U2048_NUM_BYTES: usize = U2048_NUM_WORDS * 4; + +pub(crate) struct U256xU2048MulSyscall; + +impl Syscall for U256xU2048MulSyscall { + fn execute( + &self, + rt: &mut SyscallContext, + syscall_code: SyscallCode, + arg1: u32, + arg2: u32, + ) -> Option { + let clk = rt.clk; + + let a_ptr = arg1; + let b_ptr = arg2; + + let (lo_ptr_memory, lo_ptr) = rt.mr(X12 as u32); + let (hi_ptr_memory, hi_ptr) = rt.mr(X13 as u32); + + let (a_memory_records, a) = rt.mr_slice(a_ptr, U256_NUM_WORDS); + let (b_memory_records, b) = rt.mr_slice(b_ptr, U2048_NUM_WORDS); + let uint256_a = BigUint::from_bytes_le(&words_to_bytes_le_vec(&a)); + let uint2048_b = BigUint::from_bytes_le(&words_to_bytes_le_vec(&b)); + + let result = uint256_a * uint2048_b; + + let two_to_2048 = BigUint::one() << 2048; + + let (hi, lo) = result.div_rem(&two_to_2048); + + let mut lo_bytes = lo.to_bytes_le(); + lo_bytes.resize(U2048_NUM_BYTES, 0u8); + let lo_words = bytes_to_words_le::(&lo_bytes); + + let mut hi_bytes = hi.to_bytes_le(); + hi_bytes.resize(U256_NUM_BYTES, 0u8); + let hi_words = bytes_to_words_le::(&hi_bytes); + + // Increment clk so that the write is not at the same cycle as the read. 
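        // At this point `lo` holds the low 2048 bits and `hi` the high 256 bits of `a * b`, and
        // `lo_ptr`/`hi_ptr` (read from registers X12/X13 above) say where to write them back.
        // The extra cycle added here is what `num_extra_cycles` below reports to the executor.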
+ rt.clk += 1; + + let lo_memory_records = rt.mw_slice(lo_ptr, &lo_words); + let hi_memory_records = rt.mw_slice(hi_ptr, &hi_words); + let lookup_id = rt.syscall_lookup_id; + let shard = rt.current_shard(); + let event = PrecompileEvent::U256xU2048Mul(U256xU2048MulEvent { + lookup_id, + shard, + clk, + a_ptr, + a, + b_ptr, + b, + lo_ptr, + lo: lo_words.to_vec(), + hi_ptr, + hi: hi_words.to_vec(), + lo_ptr_memory, + hi_ptr_memory, + a_memory_records, + b_memory_records, + lo_memory_records, + hi_memory_records, + local_mem_access: rt.postprocess(), + }); + + let sycall_event = + rt.rt.syscall_event(clk, syscall_code.syscall_id(), arg1, arg2, lookup_id); + rt.add_precompile_event(syscall_code, sycall_event, event); + + None + } + + fn num_extra_cycles(&self) -> u32 { + 1 + } +} diff --git a/crates/core/executor/src/syscalls/verify.rs b/crates/core/executor/src/syscalls/verify.rs index 0197199e51..b17d795d3d 100644 --- a/crates/core/executor/src/syscalls/verify.rs +++ b/crates/core/executor/src/syscalls/verify.rs @@ -1,3 +1,5 @@ +use crate::DeferredProofVerification; + use super::{Syscall, SyscallCode, SyscallContext}; pub(crate) struct VerifySyscall; @@ -32,16 +34,22 @@ impl Syscall for VerifySyscall { let vkey_bytes: [u32; 8] = vkey.try_into().unwrap(); let pv_digest_bytes: [u32; 8] = pv_digest.try_into().unwrap(); - ctx.rt - .subproof_verifier - .verify_deferred_proof(proof, proof_vk, vkey_bytes, pv_digest_bytes) - .unwrap_or_else(|e| { - panic!( - "Failed to verify proof {proof_index} with digest {}: {}", - hex::encode(bytemuck::cast_slice(&pv_digest_bytes)), - e - ) - }); + // Skip deferred proof verification if the corresponding runtime flag is set. + match ctx.rt.deferred_proof_verification { + DeferredProofVerification::Enabled => { + ctx.rt + .subproof_verifier + .verify_deferred_proof(proof, proof_vk, vkey_bytes, pv_digest_bytes) + .unwrap_or_else(|e| { + panic!( + "Failed to verify proof {proof_index} with digest {}: {}", + hex::encode(bytemuck::cast_slice(&pv_digest_bytes)), + e + ) + }); + } + DeferredProofVerification::Disabled => {} + } None } diff --git a/crates/core/machine/Cargo.toml b/crates/core/machine/Cargo.toml index 13dedad378..ff60ab4838 100644 --- a/crates/core/machine/Cargo.toml +++ b/crates/core/machine/Cargo.toml @@ -8,11 +8,12 @@ license = { workspace = true } repository = { workspace = true } keywords = { workspace = true } categories = { workspace = true } +links = "sp1-core-machine-sys" [dependencies] bincode = "1.3.3" -serde = { version = "1.0", features = ["derive", "rc"] } -itertools = "0.13.0" +serde = { workspace = true, features = ["derive", "rc"] } +itertools = { workspace = true } log = "0.4.22" num = { version = "0.4.3" } p3-air = { workspace = true } @@ -28,6 +29,7 @@ sp1-derive = { workspace = true } sp1-primitives = { workspace = true } rayon = "1.10.0" +rayon-scan = "0.1.1" amcl = { package = "snowbridge-amcl", version = "1.0.2", default-features = false, features = [ "bls381", @@ -43,15 +45,15 @@ p256 = { version = "0.13.2", features = ["expose-field"] } num_cpus = "1.16.0" size = "0.4.1" tempfile = "3.10.1" -tracing = "0.1.40" +tracing = { workspace = true } tracing-forest = { version = "0.1.6", features = ["ansi", "smallvec"] } -tracing-subscriber = { version = "0.3.18", features = ["std", "env-filter"] } +tracing-subscriber = { workspace = true, features = ["std", "env-filter"] } strum_macros = "0.26" strum = "0.26" web-time = "1.1.0" thiserror = "1.0.63" rand = "0.8.5" -hashbrown = { version = "0.14.5", features = ["serde", "inline-more"] } 
+hashbrown = { workspace = true, features = ["serde", "inline-more"] } static_assertions = "1.1.0" sp1-stark = { workspace = true } @@ -65,11 +67,23 @@ num = { version = "0.4.3", features = ["rand"] } rand = "0.8.5" sp1-zkvm = { workspace = true } sp1-core-executor = { workspace = true, features = ["programs"] } +test-artifacts = { workspace = true } + +[build-dependencies] +sp1-stark = { workspace = true } +sp1-primitives = { workspace = true } +p3-baby-bear = { workspace = true } +cbindgen = "0.27.0" +cc = "1.1" +pathdiff = "0.2.1" +glob = "0.3.1" [features] +default = [] programs = [] debug = [] bigint-rug = ["sp1-curves/bigint-rug"] +sys = [] [lib] bench = false diff --git a/crates/core/machine/build.rs b/crates/core/machine/build.rs new file mode 100644 index 0000000000..f2088bd6ac --- /dev/null +++ b/crates/core/machine/build.rs @@ -0,0 +1,169 @@ +fn main() { + #[cfg(feature = "sys")] + sys::build_ffi(); +} + +#[cfg(feature = "sys")] +mod sys { + use std::{ + env, fs, os, + path::{Path, PathBuf}, + }; + + use pathdiff::diff_paths; + + /// The library name, used for the static library archive and the headers. + /// Should be chosen as to not conflict with other library/header names. + const LIB_NAME: &str = "sp1-core-machine-sys"; + + /// The name of all include directories involved, used to find and output header files. + const INCLUDE_DIRNAME: &str = "include"; + + /// The name of the directory to recursively search for source files in. + const SOURCE_DIRNAME: &str = "cpp"; + + /// The warning placed in the cbindgen header. + const AUTOGEN_WARNING: &str = + "/* Automatically generated by `cbindgen`. Not intended for manual editing. */"; + + pub fn build_ffi() { + // The name of the header generated by `cbindgen`. + let cbindgen_hpp = &format!("{LIB_NAME}-cbindgen.hpp"); + + // The crate directory. + let crate_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap()); + + // The output directory, where built artifacts should be placed. + let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); + + // The target directory that the cargo invocation is using. + // Headers are symlinked into `target/include` purely for IDE purposes. + let target_dir = { + let mut dir = out_dir.clone(); + loop { + if dir.ends_with("target") { + break dir; + } + if !dir.pop() { + panic!("OUT_DIR does not have parent called \"target\": {:?}", out_dir); + } + } + }; + + // The directory to read headers from. + let source_include_dir = crate_dir.join(INCLUDE_DIRNAME); + + // The directory to place headers into. + let target_include_dir = out_dir.join(INCLUDE_DIRNAME); + + // The directory to place symlinks to headers into. Has the fixed path "target/include". + let target_include_dir_fixed = target_dir.join(INCLUDE_DIRNAME); + + // The directory to read source files from. + let source_dir = crate_dir.join(SOURCE_DIRNAME); + + let headers = glob::glob(source_include_dir.join("**/*.hpp").to_str().unwrap()) + .unwrap() + .collect::, _>>() + .unwrap(); + + let compilation_units = glob::glob(source_dir.join("**/*.cpp").to_str().unwrap()) + .unwrap() + .collect::, _>>() + .unwrap(); + + // Tell Cargo that if the given file changes, to rerun this build script. + println!("cargo::rerun-if-changed={INCLUDE_DIRNAME}"); + println!("cargo::rerun-if-changed={SOURCE_DIRNAME}"); + println!("cargo::rerun-if-changed=src"); + println!("cargo::rerun-if-changed=Cargo.toml"); + + // Cargo build script metadata, used by dependents' build scripts. + // The root directory containing the library archive. 
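        // Because `Cargo.toml` declares `links = "sp1-core-machine-sys"`, Cargo forwards each
        // `cargo::metadata=KEY=VALUE` line below to dependents' build scripts as environment
        // variables following its `DEP_<links>_<KEY>` convention. A consuming build script
        // might look roughly like this (illustrative sketch, not code from this repository):
        //
        //     let include_dir = std::env::var("DEP_SP1_CORE_MACHINE_SYS_INCLUDE").unwrap();
        //     cc::Build::new().include(include_dir).file("bridge.cpp").compile("bridge");
        //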
+ println!("cargo::metadata=root={}", out_dir.to_str().unwrap()); + + // The include path defining the library's API. + println!("cargo::metadata=include={}", target_include_dir.to_str().unwrap()); + + // Generate a header containing bindings to the crate. + match cbindgen::Builder::new() + .with_pragma_once(true) + .with_autogen_warning(AUTOGEN_WARNING) + .with_no_includes() + .with_sys_include("cstdint") + .with_parse_deps(true) + .with_parse_include(&[ + "sp1-stark", + "sp1-primitives", + "sp1-core-machine", + "p3-baby-bear", + "sp1-core-executor", + ]) + .with_parse_extra_bindings(&["sp1-stark", "sp1-primitives", "p3-baby-bear"]) + .rename_item("BabyBear", "BabyBearP3") + .include_item("MemoryRecord") // Just for convenience. Not exposed, so we need to manually do this. + .include_item("SyscallCode") // Required for populating the CPU columns for ECALL. + .include_item("SepticExtension") + .include_item("SepticCurve") + .include_item("MemoryLocalCols") + .include_item("MEMORY_LOCAL_INITIAL_DIGEST_POS") + .include_item("Ghost") + .include_item("MemoryInitCols") + .include_item("MemoryInitializeFinalizeEvent") + .with_namespace("sp1_core_machine_sys") + .with_crate(crate_dir) + .generate() + { + Ok(bindings) => { + // Write the bindings to the target include directory. + let header_path = target_include_dir.join(cbindgen_hpp); + if bindings.write_to_file(&header_path) { + // Symlink the header to the fixed include directory. + rel_symlink_file(header_path, target_include_dir_fixed.join(cbindgen_hpp)); + } + } + Err(cbindgen::Error::ParseSyntaxError { .. }) => {} // Ignore parse errors so rust-analyzer can run. + Err(e) => panic!("{:?}", e), + } + + // Copy the headers to the include directory and symlink them to the fixed include directory. + for header in &headers { + // Get the path of the header relative to the source include directory. + let relpath = diff_paths(header, &source_include_dir).unwrap(); + + // Let the destination path be the same place relative to the target include directory. + let dst = target_include_dir.join(&relpath); + + // Create the parent directory if it does not exist. + if let Some(parent) = dst.parent() { + fs::create_dir_all(parent).unwrap(); + } + fs::copy(header, &dst).unwrap(); + rel_symlink_file(dst, target_include_dir_fixed.join(relpath)); + } + + // Use the `cc` crate to build the library and statically link it to the crate. + let mut cc_builder = cc::Build::new(); + cc_builder.files(&compilation_units).include(target_include_dir); + cc_builder.cpp(true).std("c++20"); + cc_builder.compile(LIB_NAME) + } + + /// Place a relative symlink pointing to `original` at `link`. 
+ fn rel_symlink_file(original: P, link: Q) + where + P: AsRef, + Q: AsRef, + { + #[cfg(unix)] + use os::unix::fs::symlink; + #[cfg(windows)] + use os::windows::fs::symlink_file as symlink; + + let target_dir = link.as_ref().parent().unwrap(); + fs::create_dir_all(target_dir).unwrap(); + let _ = fs::remove_file(&link); + let relpath = diff_paths(original, target_dir).unwrap(); + symlink(relpath, link).unwrap(); + } +} diff --git a/crates/core/machine/cpp/extern.cpp b/crates/core/machine/cpp/extern.cpp new file mode 100644 index 0000000000..509f5998a7 --- /dev/null +++ b/crates/core/machine/cpp/extern.cpp @@ -0,0 +1,28 @@ +#include "bb31_t.hpp" +#include "bb31_septic_extension_t.hpp" +#include "sys.hpp" + +namespace sp1_core_machine_sys { +extern void add_sub_event_to_row_babybear( + const AluEvent* event, + AddSubCols* cols +) { + AddSubCols* cols_bb31 = reinterpret_cast*>(cols); + add_sub::event_to_row(*event, *cols_bb31); +} + +extern void memory_local_event_to_row_babybear(const MemoryLocalEvent* event, SingleMemoryLocal* cols) { + SingleMemoryLocal* cols_bb31 = reinterpret_cast*>(cols); + memory_local::event_to_row(event, cols_bb31); +} + +extern void memory_global_event_to_row_babybear(const MemoryInitializeFinalizeEvent* event, const bool is_receive, MemoryInitCols* cols) { + MemoryInitCols* cols_bb31 = reinterpret_cast*>(cols); + memory_global::event_to_row(event, is_receive, cols_bb31); +} + +extern void syscall_event_to_row_babybear(const SyscallEvent* event, const bool is_receive, SyscallCols* cols) { + SyscallCols* cols_bb31 = reinterpret_cast*>(cols); + syscall::event_to_row(event, is_receive, cols_bb31); +} +} // namespace sp1_core_machine_sys diff --git a/crates/core/machine/include/add_sub.hpp b/crates/core/machine/include/add_sub.hpp new file mode 100644 index 0000000000..ee98c21b4f --- /dev/null +++ b/crates/core/machine/include/add_sub.hpp @@ -0,0 +1,38 @@ +#pragma once + +#include "prelude.hpp" +#include "utils.hpp" + +namespace sp1_core_machine_sys::add_sub { +template +__SP1_HOSTDEV__ __SP1_INLINE__ uint32_t +populate(AddOperation& op, const uint32_t a_u32, const uint32_t b_u32) { + array_t a = u32_to_le_bytes(a_u32); + array_t b = u32_to_le_bytes(b_u32); + bool carry = a[0] + b[0] > 0xFF; + op.carry[0] = F::from_bool(carry).val; + carry = a[1] + b[1] + carry > 0xFF; + op.carry[1] = F::from_bool(carry).val; + carry = a[2] + b[2] + carry > 0xFF; + op.carry[2] = F::from_bool(carry).val; + + uint32_t expected = a_u32 + b_u32; + write_word_from_u32_v2(op.value, expected); + return expected; +} + +template +__SP1_HOSTDEV__ void event_to_row(const AluEvent& event, AddSubCols& cols) { + bool is_add = event.opcode == Opcode::ADD; + cols.shard = F::from_canonical_u32(event.shard); + cols.is_add = F::from_bool(is_add); + cols.is_sub = F::from_bool(!is_add); + + auto operand_1 = is_add ? 
event.b : event.a; + auto operand_2 = event.c; + + populate(cols.add_operation, operand_1, operand_2); + write_word_from_u32_v2(cols.operand_1, operand_1); + write_word_from_u32_v2(cols.operand_2, operand_2); +} +} // namespace sp1::add_sub \ No newline at end of file diff --git a/crates/core/machine/include/bb31_septic_extension_t.hpp b/crates/core/machine/include/bb31_septic_extension_t.hpp new file mode 100644 index 0000000000..9737d8bb12 --- /dev/null +++ b/crates/core/machine/include/bb31_septic_extension_t.hpp @@ -0,0 +1,511 @@ +#pragma once + +#include "prelude.hpp" +#include "bb31_t.hpp" +#include + +#ifdef __CUDA_ARCH__ +#define FUN __host__ __device__ +#endif +#ifndef __CUDA_ARCH__ +#define FUN inline +#endif + +class bb31_cipolla_t { + public: + bb31_t real; + bb31_t imag; + + FUN bb31_cipolla_t(bb31_t real, bb31_t imag) { + this->real = bb31_t(real); + this->imag = bb31_t(imag); + } + + FUN static bb31_cipolla_t one() { + return bb31_cipolla_t(bb31_t::one(), bb31_t::zero()); + } + + FUN bb31_cipolla_t mul_ext(bb31_cipolla_t other, bb31_t nonresidue) { + bb31_t new_real = real * other.real + nonresidue * imag * other.imag; + bb31_t new_imag = real * other.imag + imag * other.real; + return bb31_cipolla_t(new_real, new_imag); + } + + FUN bb31_cipolla_t pow(uint32_t exponent, bb31_t nonresidue) { + bb31_cipolla_t result = bb31_cipolla_t::one(); + bb31_cipolla_t base = *this; + + while(exponent) { + if(exponent & 1) { + result = result.mul_ext(base, nonresidue); + } + exponent >>= 1; + base = base.mul_ext(base, nonresidue); + } + + return result; + } +}; + +namespace constants { + #ifdef __CUDA_ARCH__ + __constant__ constexpr const bb31_t frobenius_const[49] = { + bb31_t(int(1)), bb31_t(int(0)), bb31_t(int(0)), bb31_t(int(0)), bb31_t(int(0)), bb31_t(int(0)), bb31_t(int(0)), + bb31_t(int(954599710)), bb31_t(int(1359279693)), bb31_t(int(566669999)), bb31_t(int(1982781815)), bb31_t(int(1735718361)), bb31_t(int(1174868538)), bb31_t(int(1120871770)), + bb31_t(int(862825265)), bb31_t(int(597046311)), bb31_t(int(978840770)), bb31_t(int(1790138282)), bb31_t(int(1044777201)), bb31_t(int(835869808)), bb31_t(int(1342179023)), + bb31_t(int(596273169)), bb31_t(int(658837454)), bb31_t(int(1515468261)), bb31_t(int(367059247)), bb31_t(int(781278880)), bb31_t(int(1544222616)), bb31_t(int(155490465)), + bb31_t(int(557608863)), bb31_t(int(1173670028)), bb31_t(int(1749546888)), bb31_t(int(1086464137)), bb31_t(int(803900099)), bb31_t(int(1288818584)), bb31_t(int(1184677604)), + bb31_t(int(763416381)), bb31_t(int(1252567168)), bb31_t(int(628856225)), bb31_t(int(1771903394)), bb31_t(int(650712211)), bb31_t(int(19417363)), bb31_t(int(57990258)), + bb31_t(int(1734711039)), bb31_t(int(1749813853)), bb31_t(int(1227235221)), bb31_t(int(1707730636)), bb31_t(int(424560395)), bb31_t(int(1007029514)), bb31_t(int(498034669)), + }; + + __constant__ constexpr const bb31_t double_frobenius_const[49] = { + bb31_t(int(1)), bb31_t(int(0)), bb31_t(int(0)), bb31_t(int(0)), bb31_t(int(0)), bb31_t(int(0)), bb31_t(int(0)), + bb31_t(int(1013489358)), bb31_t(int(1619071628)), bb31_t(int(304593143)), bb31_t(int(1949397349)), bb31_t(int(1564307636)), bb31_t(int(327761151)), bb31_t(int(415430835)), + bb31_t(int(209824426)), bb31_t(int(1313900768)), bb31_t(int(38410482)), bb31_t(int(256593180)), bb31_t(int(1708830551)), bb31_t(int(1244995038)), bb31_t(int(1555324019)), + bb31_t(int(1475628651)), bb31_t(int(777565847)), bb31_t(int(704492386)), bb31_t(int(1218528120)), bb31_t(int(1245363405)), bb31_t(int(475884575)), 
bb31_t(int(649166061)), + bb31_t(int(550038364)), bb31_t(int(948935655)), bb31_t(int(68722023)), bb31_t(int(1251345762)), bb31_t(int(1692456177)), bb31_t(int(1177958698)), bb31_t(int(350232928)), + bb31_t(int(882720258)), bb31_t(int(821925756)), bb31_t(int(199955840)), bb31_t(int(812002876)), bb31_t(int(1484951277)), bb31_t(int(1063138035)), bb31_t(int(491712810)), + bb31_t(int(738287111)), bb31_t(int(1955364991)), bb31_t(int(552724293)), bb31_t(int(1175775744)), bb31_t(int(341623997)), bb31_t(int(1454022463)), bb31_t(int(408193320)) + }; + + __constant__ constexpr const bb31_t A_EC_LOGUP[7] = {bb31_t(int(0x31415926)), bb31_t(int(0x53589793)), bb31_t(int(0x23846264)), bb31_t(int(0x33832795)), bb31_t(int(0x02884197)), bb31_t(int(0x16939937)), bb31_t(int(0x51058209))}; + + __constant__ constexpr const bb31_t B_EC_LOGUP[7] = {bb31_t(int(0x74944592)), bb31_t(int(0x30781640)), bb31_t(int(0x62862089)), bb31_t(int(0x9862803)), bb31_t(int(0x48253421)), bb31_t(int(0x17067982)), bb31_t(int(0x14808651))}; + + __constant__ constexpr const bb31_t dummy_x[7] = {bb31_t(int(0x2738281)), bb31_t(int(0x8284590)), bb31_t(int(0x4523536)), bb31_t(int(0x0287471)), bb31_t(int(0x3526624)), bb31_t(int(0x9775724)), bb31_t(int(0x7093699))}; + __constant__ constexpr const bb31_t dummy_y[7] = {bb31_t(int(48041908)), bb31_t(int(550064556)), bb31_t(int(415267377)), bb31_t(int(1726976249)), bb31_t(int(1253299140)), bb31_t(int(209439863)), bb31_t(int(1302309485))}; + + __constant__ constexpr bb31_t start_x[7] = {bb31_t(int(0x1434213)), bb31_t(int(0x5623730)), bb31_t(int(0x9504880)), bb31_t(int(0x1688724)), bb31_t(int(0x2096980)), bb31_t(int(0x7856967)), bb31_t(int(0x1875376))}; + __constant__ constexpr bb31_t start_y[7] = {bb31_t(int(885797405)), bb31_t(int(1130275556)), bb31_t(int(567836311)), bb31_t(int(52700240)), bb31_t(int(239639200)), bb31_t(int(442612155)), bb31_t(int(1839439733))}; + + #endif + + #ifndef __CUDA_ARCH__ + static constexpr const bb31_t frobenius_const[49] = { + bb31_t(int(1)), bb31_t(int(0)), bb31_t(int(0)), bb31_t(int(0)), bb31_t(int(0)), bb31_t(int(0)), bb31_t(int(0)), + bb31_t(int(954599710)), bb31_t(int(1359279693)), bb31_t(int(566669999)), bb31_t(int(1982781815)), bb31_t(int(1735718361)), bb31_t(int(1174868538)), bb31_t(int(1120871770)), + bb31_t(int(862825265)), bb31_t(int(597046311)), bb31_t(int(978840770)), bb31_t(int(1790138282)), bb31_t(int(1044777201)), bb31_t(int(835869808)), bb31_t(int(1342179023)), + bb31_t(int(596273169)), bb31_t(int(658837454)), bb31_t(int(1515468261)), bb31_t(int(367059247)), bb31_t(int(781278880)), bb31_t(int(1544222616)), bb31_t(int(155490465)), + bb31_t(int(557608863)), bb31_t(int(1173670028)), bb31_t(int(1749546888)), bb31_t(int(1086464137)), bb31_t(int(803900099)), bb31_t(int(1288818584)), bb31_t(int(1184677604)), + bb31_t(int(763416381)), bb31_t(int(1252567168)), bb31_t(int(628856225)), bb31_t(int(1771903394)), bb31_t(int(650712211)), bb31_t(int(19417363)), bb31_t(int(57990258)), + bb31_t(int(1734711039)), bb31_t(int(1749813853)), bb31_t(int(1227235221)), bb31_t(int(1707730636)), bb31_t(int(424560395)), bb31_t(int(1007029514)), bb31_t(int(498034669)) + }; + + static constexpr const bb31_t double_frobenius_const[49] = { + bb31_t(int(1)), bb31_t(int(0)), bb31_t(int(0)), bb31_t(int(0)), bb31_t(int(0)), bb31_t(int(0)), bb31_t(int(0)), + bb31_t(int(1013489358)), bb31_t(int(1619071628)), bb31_t(int(304593143)), bb31_t(int(1949397349)), bb31_t(int(1564307636)), bb31_t(int(327761151)), bb31_t(int(415430835)), + bb31_t(int(209824426)), bb31_t(int(1313900768)), 
bb31_t(int(38410482)), bb31_t(int(256593180)), bb31_t(int(1708830551)), bb31_t(int(1244995038)), bb31_t(int(1555324019)), + bb31_t(int(1475628651)), bb31_t(int(777565847)), bb31_t(int(704492386)), bb31_t(int(1218528120)), bb31_t(int(1245363405)), bb31_t(int(475884575)), bb31_t(int(649166061)), + bb31_t(int(550038364)), bb31_t(int(948935655)), bb31_t(int(68722023)), bb31_t(int(1251345762)), bb31_t(int(1692456177)), bb31_t(int(1177958698)), bb31_t(int(350232928)), + bb31_t(int(882720258)), bb31_t(int(821925756)), bb31_t(int(199955840)), bb31_t(int(812002876)), bb31_t(int(1484951277)), bb31_t(int(1063138035)), bb31_t(int(491712810)), + bb31_t(int(738287111)), bb31_t(int(1955364991)), bb31_t(int(552724293)), bb31_t(int(1175775744)), bb31_t(int(341623997)), bb31_t(int(1454022463)), bb31_t(int(408193320)) + }; + + static constexpr const bb31_t A_EC_LOGUP[7] = {bb31_t(int(0x31415926)), bb31_t(int(0x53589793)), bb31_t(int(0x23846264)), bb31_t(int(0x33832795)), bb31_t(int(0x02884197)), bb31_t(int(0x16939937)), bb31_t(int(0x51058209))}; + static constexpr const bb31_t B_EC_LOGUP[7] = {bb31_t(int(0x74944592)), bb31_t(int(0x30781640)), bb31_t(int(0x62862089)), bb31_t(int(0x9862803)), bb31_t(int(0x48253421)), bb31_t(int(0x17067982)), bb31_t(int(0x14808651))}; + + static constexpr bb31_t dummy_x[7] = {bb31_t(int(0x2738281)), bb31_t(int(0x8284590)), bb31_t(int(0x4523536)), bb31_t(int(0x0287471)), bb31_t(int(0x3526624)), bb31_t(int(0x9775724)), bb31_t(int(0x7093699))}; + static constexpr bb31_t dummy_y[7] = {bb31_t(int(48041908)), bb31_t(int(550064556)), bb31_t(int(415267377)), bb31_t(int(1726976249)), bb31_t(int(1253299140)), bb31_t(int(209439863)), bb31_t(int(1302309485))}; + + static constexpr bb31_t start_x[7] = {bb31_t(int(0x1434213)), bb31_t(int(0x5623730)), bb31_t(int(0x9504880)), bb31_t(int(0x1688724)), bb31_t(int(0x2096980)), bb31_t(int(0x7856967)), bb31_t(int(0x1875376))}; + static constexpr bb31_t start_y[7] = {bb31_t(int(885797405)), bb31_t(int(1130275556)), bb31_t(int(567836311)), bb31_t(int(52700240)), bb31_t(int(239639200)), bb31_t(int(442612155)), bb31_t(int(1839439733))}; + + #endif +} + +class bb31_septic_extension_t { + // The value of BabyBear septic extension element. 
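    // This models a degree-7 extension of the BabyBear field: `value[i]` is the coefficient of
    // x^i, and `operator*=` below folds powers x^7..x^12 back down using x^7 = 2*x + 5 (the
    // `res[i - 7] += res[i] * 5` / `res[i - 6] += res[i] * 2` steps), i.e. arithmetic is done
    // modulo the polynomial x^7 - 2x - 5. The Frobenius maps reuse the precomputed
    // `frobenius_const` / `double_frobenius_const` tables defined above.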
+ public: + bb31_t value[7]; + static constexpr const bb31_t* frobenius_const = constants::frobenius_const; + static constexpr const bb31_t* double_frobenius_const = constants::double_frobenius_const; + static constexpr const bb31_t* A_EC_LOGUP = constants::A_EC_LOGUP; + static constexpr const bb31_t* B_EC_LOGUP = constants::B_EC_LOGUP; + + FUN bb31_septic_extension_t() { + for (uintptr_t i = 0 ; i < 7 ; i++) { + this->value[i] = bb31_t(0); + } + } + + FUN bb31_septic_extension_t(bb31_t value) { + this->value[0] = value; + for (uintptr_t i = 1 ; i < 7 ; i++) { + this->value[i] = bb31_t(0); + } + } + + FUN bb31_septic_extension_t(bb31_t value[7]) { + for (uintptr_t i = 0 ; i < 7 ; i++) { + this->value[i] = value[i]; + } + } + + FUN bb31_septic_extension_t(const bb31_t value[7]) { + for (uintptr_t i = 0 ; i < 7 ; i++) { + this->value[i] = value[i]; + } + } + + static FUN bb31_septic_extension_t zero() { + return bb31_septic_extension_t(); + } + + static FUN bb31_septic_extension_t one() { + return bb31_septic_extension_t(bb31_t::one()); + } + + static FUN bb31_septic_extension_t two() { + return bb31_septic_extension_t(bb31_t::two()); + } + + static FUN bb31_septic_extension_t from_canonical_u32(uint32_t n) { + return bb31_septic_extension_t(bb31_t::from_canonical_u32(n)); + } + + FUN bb31_septic_extension_t& operator+=(const bb31_t b) { + value[0] += b; + return *this; + } + + friend FUN bb31_septic_extension_t operator+(bb31_septic_extension_t a, const bb31_t b) { + return a += b; + } + + FUN bb31_septic_extension_t& operator+=(const bb31_septic_extension_t b) { + for (uintptr_t i = 0 ; i < 7 ; i++) { + value[i] += b.value[i]; + } + return *this; + } + + friend FUN bb31_septic_extension_t operator+(bb31_septic_extension_t a, const bb31_septic_extension_t b) { + return a += b; + } + + FUN bb31_septic_extension_t& operator-=(const bb31_t b) { + value[0] -= b; + return *this; + } + + friend FUN bb31_septic_extension_t operator-(bb31_septic_extension_t a, const bb31_t b) { + return a -= b; + } + + FUN bb31_septic_extension_t& operator-=(const bb31_septic_extension_t b) { + for (uintptr_t i = 0 ; i < 7 ; i++) { + value[i] -= b.value[i]; + } + return *this; + } + + friend FUN bb31_septic_extension_t operator-(bb31_septic_extension_t a, const bb31_septic_extension_t b) { + return a -= b; + } + + FUN bb31_septic_extension_t& operator*=(const bb31_t b) { + for (uintptr_t i = 0 ; i < 7 ; i++) { + value[i] *= b; + } + return *this; + } + + friend FUN bb31_septic_extension_t operator*(bb31_septic_extension_t a, const bb31_t b) { + return a *= b; + } + + FUN bb31_septic_extension_t& operator*=(const bb31_septic_extension_t b) { + { + bb31_t res[13] = {}; + for(uintptr_t i = 0 ; i < 13 ; i++) { + res[i] = bb31_t::zero(); + } + for(uintptr_t i = 0 ; i < 7 ; i++) { + for(uintptr_t j = 0 ; j < 7 ; j++) { + res[i + j] += value[i] * b.value[j]; + } + } + for(uintptr_t i = 7 ; i < 13 ; i++) { + res[i - 7] += res[i] * bb31_t::from_canonical_u32(5); + res[i - 6] += res[i] * bb31_t::from_canonical_u32(2); + } + for(uintptr_t i = 0 ; i < 7 ; i++) { + value[i] = res[i]; + } + } + return *this; + } + + friend FUN bb31_septic_extension_t operator*(bb31_septic_extension_t a, const bb31_septic_extension_t b) { + return a *= b; + } + + FUN bool operator==(const bb31_septic_extension_t rhs) const { + for(uintptr_t i = 0 ; i < 7 ; i++) { + if(value[i] != rhs.value[i]) { + return false; + } + } + return true; + } + + FUN bb31_septic_extension_t frobenius() const { + bb31_t res[7] = {}; + res[0] = value[0]; + for(uintptr_t 
i = 1 ; i < 7 ; i++) { + res[i] = bb31_t::zero(); + } + for(uintptr_t i = 1 ; i < 7 ; i++) { + for(uintptr_t j = 0 ; j < 7 ; j++) { + res[j] += value[i] * frobenius_const[7 * i + j]; + } + } + return bb31_septic_extension_t(res); + + } + + FUN bb31_septic_extension_t double_frobenius() const { + bb31_t res[7] = {}; + res[0] = value[0]; + for(uintptr_t i = 1 ; i < 7 ; i++) { + res[i] = bb31_t::zero(); + } + for(uintptr_t i = 1 ; i < 7 ; i++) { + for(uintptr_t j = 0 ; j < 7 ; j++) { + res[j] += value[i] * double_frobenius_const[7 * i + j]; + } + } + return bb31_septic_extension_t(res); + + } + + FUN bb31_septic_extension_t pow_r_1() const { + bb31_septic_extension_t base = frobenius(); + base *= double_frobenius(); + bb31_septic_extension_t base_p2 = base.double_frobenius(); + bb31_septic_extension_t base_p4 = base_p2.double_frobenius(); + return base * base_p2 * base_p4; + } + + FUN bb31_t pow_r() const { + bb31_septic_extension_t pow_r1 = pow_r_1(); + bb31_septic_extension_t pow_r = pow_r1 * *this; + return pow_r.value[0]; + } + + FUN bb31_septic_extension_t reciprocal() const { + bb31_septic_extension_t pow_r1 = pow_r_1(); + bb31_septic_extension_t pow_r = pow_r1 * *this; + return pow_r1 * pow_r.value[0].reciprocal(); + } + + friend FUN bb31_septic_extension_t operator/(bb31_septic_extension_t a, bb31_septic_extension_t b) { + return a * b.reciprocal(); + } + + FUN bb31_septic_extension_t& operator/=(const bb31_septic_extension_t a) { + return *this *= a.reciprocal(); + } + + FUN bb31_septic_extension_t sqrt(bb31_t pow_r) const { + if (*this == bb31_septic_extension_t::zero()) { + return *this; + } + + bb31_septic_extension_t n_iter = *this; + bb31_septic_extension_t n_power = *this; + for(uintptr_t i = 1 ; i < 30 ; i++) { + n_iter *= n_iter; + if(i >= 26) { + n_power *= n_iter; + } + } + + bb31_septic_extension_t n_frobenius = n_power.frobenius(); + bb31_septic_extension_t denominator = n_frobenius; + + n_frobenius = n_frobenius.double_frobenius(); + denominator *= n_frobenius; + n_frobenius = n_frobenius.double_frobenius(); + denominator *= n_frobenius; + denominator *= *this; + + bb31_t base = pow_r.reciprocal(); + bb31_t g = bb31_t::from_canonical_u32(31); + bb31_t a = bb31_t::one(); + bb31_t nonresidue = bb31_t::one() - base; + + while (true) { + bb31_t is_square = nonresidue ^ 1006632960; + if (is_square != bb31_t::one()) { + break; + } + a *= g; + nonresidue = a.square() - base; + } + + bb31_cipolla_t x = bb31_cipolla_t(a, bb31_t::one()); + x = x.pow(1006632961, nonresidue); + + return denominator * x.real; + } + + FUN bb31_septic_extension_t universal_hash() const { + return *this * bb31_septic_extension_t(A_EC_LOGUP) + bb31_septic_extension_t(B_EC_LOGUP); + } + + FUN bb31_septic_extension_t curve_formula() const { + bb31_septic_extension_t result = *this * *this * *this; + result += *this; + result += *this; + result.value[5] += bb31_t::from_canonical_u32(26); + return result; + } + + FUN bool is_receive() const { + uint32_t limb = value[6].as_canonical_u32(); + return 1 <= limb && limb <= (bb31_t::MOD - 1) / 2; + } + + FUN bool is_send() const { + uint32_t limb = value[6].as_canonical_u32(); + return (bb31_t::MOD + 1) / 2 <= limb && limb <= (bb31_t::MOD - 1); + } + + FUN bool is_exception() const { + return value[6] == bb31_t::zero(); + } +}; + + +class bb31_septic_curve_t { + public: + bb31_septic_extension_t x; + bb31_septic_extension_t y; + + static constexpr const bb31_t* dummy_x = constants::dummy_x; + static constexpr const bb31_t* dummy_y = constants::dummy_y; + static 
constexpr const bb31_t* start_x = constants::start_x; + static constexpr const bb31_t* start_y = constants::start_y; + + FUN bb31_septic_curve_t() { + this->x = bb31_septic_extension_t::zero(); + this->y = bb31_septic_extension_t::zero(); + } + + FUN bb31_septic_curve_t(bb31_septic_extension_t x, bb31_septic_extension_t y) { + this->x = x; + this->y = y; + } + + FUN bb31_septic_curve_t(bb31_t value[14]) { + for (uintptr_t i = 0 ; i < 7 ; i++) { + this->x.value[i] = value[i]; + } + for (uintptr_t i = 0 ; i < 7 ; i++) { + this->y.value[i] = value[i + 7]; + } + } + + FUN bb31_septic_curve_t(bb31_t value_x[7], bb31_t value_y[7]) { + for (uintptr_t i = 0 ; i < 7 ; i++) { + this->x.value[i] = value_x[i]; + this->y.value[i] = value_y[i]; + } + } + + static FUN bb31_septic_curve_t dummy_point() { + bb31_septic_extension_t x; + bb31_septic_extension_t y; + for (uintptr_t i = 0 ; i < 7 ; i++) { + x.value[i] = dummy_x[i]; + y.value[i] = dummy_y[i]; + } + return bb31_septic_curve_t(x, y); + } + + static FUN bb31_septic_curve_t start_point() { + bb31_septic_extension_t x; + bb31_septic_extension_t y; + for (uintptr_t i = 0 ; i < 7 ; i++) { + x.value[i] = start_x[i]; + y.value[i] = start_y[i]; + } + return bb31_septic_curve_t(x, y); + } + + FUN bool is_infinity() const { + return x == bb31_septic_extension_t::zero() && y == bb31_septic_extension_t::zero(); + } + + FUN bb31_septic_curve_t& operator+=(const bb31_septic_curve_t b) { + if (b.is_infinity()) { + return *this; + } + if (is_infinity()) { + x = b.x; + y = b.y; + return *this; + } + if (x == b.x) { + if (y == b.y) { + bb31_septic_extension_t slope = (x * x * bb31_t::from_canonical_u8(3) + bb31_t::two()) / (y * bb31_t::two()); + bb31_septic_extension_t result_x = slope * slope - x - b.x; + bb31_septic_extension_t result_y = slope * (x - result_x) - y; + x = result_x; + y = result_y; + return *this; + } + else { + x = bb31_septic_extension_t::zero(); + y = bb31_septic_extension_t::zero(); + return *this; + } + } + else { + bb31_septic_extension_t slope = (b.y - y) / (b.x - x); + bb31_septic_extension_t result_x = slope * slope - x - b.x; + bb31_septic_extension_t result_y = slope * (x - result_x) - y; + x = result_x; + y = result_y; + return *this; + } + } + + friend FUN bb31_septic_curve_t operator+(bb31_septic_curve_t a, const bb31_septic_curve_t b) { + return a += b; + } + + static FUN bb31_septic_extension_t sum_checker_x( + const bb31_septic_curve_t& p1, + const bb31_septic_curve_t& p2, + const bb31_septic_curve_t& p3 + ) { + bb31_septic_extension_t x_diff = p2.x - p1.x; + bb31_septic_extension_t y_diff = p2.y - p1.y; + return (p1.x + p2.x + p3.x) * x_diff * x_diff - y_diff * y_diff; + } +}; + +class bb31_septic_digest_t { + public: + bb31_septic_curve_t point; + + FUN bb31_septic_digest_t() { + this->point = bb31_septic_curve_t(); + } + + FUN bb31_septic_digest_t(bb31_t value[14]) { + this->point = bb31_septic_curve_t(value); + } + + FUN bb31_septic_digest_t(bb31_septic_extension_t x, bb31_septic_extension_t y) { + this->point = bb31_septic_curve_t(x, y); + } + + FUN bb31_septic_digest_t(bb31_septic_curve_t point) { + this->point = point; + } +}; + diff --git a/crates/core/machine/include/bb31_t.hpp b/crates/core/machine/include/bb31_t.hpp new file mode 100644 index 0000000000..387456d422 --- /dev/null +++ b/crates/core/machine/include/bb31_t.hpp @@ -0,0 +1,640 @@ +// Modified by Succinct Labs +// Copyright Supranational LLC +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include + +#ifdef __CUDA_ARCH__ + +#define inline __device__ __forceinline__ +#ifdef __GNUC__ +#define asm __asm__ __volatile__ +#else +#define asm asm volatile +#endif + +class bb31_t { + public: + using mem_t = bb31_t; + uint32_t val; + static const uint32_t DEGREE = 1; + static const uint32_t NBITS = 31; + static const uint32_t MOD = 0x78000001u; + static const uint32_t M = 0x77ffffffu; + static const uint32_t RR = 0x45dddde3u; + static const uint32_t ONE = 0x0ffffffeu; + static const uint32_t MONTY_BITS = 32; + static const uint32_t MONTY_MU = 0x88000001; + static const uint32_t MONTY_MASK = ((1ULL << MONTY_BITS) - 1); + + static constexpr size_t __device__ bit_length() { return 31; } + + inline uint32_t& operator[](size_t i) { return val; } + + inline uint32_t& operator*() { return val; } + + inline const uint32_t& operator[](size_t i) const { return val; } + + inline uint32_t operator*() const { return val; } + + inline size_t len() const { return 1; } + + inline bb31_t() {} + + inline bb31_t(const uint32_t a) { val = a; } + + inline bb31_t(const uint32_t* p) { val = *p; } + + inline constexpr bb31_t(int a) : val(((uint64_t)a << 32) % MOD) {} + + static inline const bb31_t zero() { return bb31_t(0); } + + static inline const bb31_t one() { return bb31_t(ONE); } + + static inline const bb31_t two() { return from_canonical_u32(2); } + + static inline uint32_t to_monty(uint32_t x) { + return (((uint64_t)x << MONTY_BITS) % MOD); + } + + static inline uint32_t monty_reduce(uint64_t x) { + uint64_t t = (x * (uint64_t)MONTY_MU) & (uint64_t)MONTY_MASK; + uint64_t u = t * (uint64_t)MOD; + uint64_t x_sub_u = x - u; + bool over = x < u; + uint32_t x_sub_u_hi = (uint32_t)(x_sub_u >> MONTY_BITS); + uint32_t corr = over ? MOD : 0; + return x_sub_u_hi + corr; + } + + static inline bb31_t from_canonical_u32(uint32_t x) { + return bb31_t(to_monty(x)); + } + + static inline bb31_t from_canonical_u16(uint16_t x) { + return from_canonical_u32((uint32_t)x); + } + + static inline bb31_t from_canonical_u8(uint8_t x) { + return from_canonical_u32((uint32_t)x); + } + + static inline bb31_t from_bool(bool x) { return bb31_t(x * one().val); } + + inline uint32_t as_canonical_u32() const { + return monty_reduce((uint64_t)val); + } + + inline bb31_t exp_power_of_two(size_t log_power) { + bb31_t ret = *this; + for (size_t i = 0; i < log_power; i++) { + ret *= ret; + } + return ret; + } + + inline bb31_t& operator+=(const bb31_t b) { + val += b.val; + final_sub(val); + + return *this; + } + + friend inline bb31_t operator+(bb31_t a, const bb31_t b) { return a += b; } + + inline bb31_t& operator<<=(uint32_t l) { + while (l--) { + val <<= 1; + final_sub(val); + } + + return *this; + } + + friend inline bb31_t operator<<(bb31_t a, uint32_t l) { return a <<= l; } + + inline bb31_t& operator>>=(uint32_t r) { + while (r--) { + val += val & 1 ? 
MOD : 0; + val >>= 1; + } + + return *this; + } + + friend inline bb31_t operator>>(bb31_t a, uint32_t r) { return a >>= r; } + + inline bb31_t& operator-=(const bb31_t b) { + asm("{"); + asm(".reg.pred %brw;"); + asm("setp.lt.u32 %brw, %0, %1;" ::"r"(val), "r"(b.val)); + asm("sub.u32 %0, %0, %1;" : "+r"(val) : "r"(b.val)); + asm("@%brw add.u32 %0, %0, %1;" : "+r"(val) : "r"(MOD)); + asm("}"); + + return *this; + } + + friend inline bb31_t operator-(bb31_t a, const bb31_t b) { return a -= b; } + + inline bb31_t cneg(bool flag) { + asm("{"); + asm(".reg.pred %flag;"); + asm("setp.ne.u32 %flag, %0, 0;" ::"r"(val)); + asm("@%flag setp.ne.u32 %flag, %0, 0;" ::"r"((int)flag)); + asm("@%flag sub.u32 %0, %1, %0;" : "+r"(val) : "r"(MOD)); + asm("}"); + + return *this; + } + + static inline bb31_t cneg(bb31_t a, bool flag) { return a.cneg(flag); } + + inline bb31_t operator-() const { return cneg(*this, true); } + + inline bool operator==(const bb31_t rhs) const { return val == rhs.val; } + + inline bool is_one() const { return val == ONE; } + + inline bool is_zero() const { return val == 0; } + + inline void set_to_zero() { val = 0; } + + friend inline bb31_t czero(const bb31_t a, int set_z) { + bb31_t ret; + + asm("{"); + asm(".reg.pred %set_z;"); + asm("setp.ne.s32 %set_z, %0, 0;" : : "r"(set_z)); + asm("selp.u32 %0, 0, %1, %set_z;" : "=r"(ret.val) : "r"(a.val)); + asm("}"); + + return ret; + } + + static inline bb31_t csel(const bb31_t a, const bb31_t b, int sel_a) { + bb31_t ret; + + asm("{"); + asm(".reg.pred %sel_a;"); + asm("setp.ne.s32 %sel_a, %0, 0;" ::"r"(sel_a)); + asm("selp.u32 %0, %1, %2, %sel_a;" + : "=r"(ret.val) + : "r"(a.val), "r"(b.val)); + asm("}"); + + return ret; + } + + private: + static inline void final_sub(uint32_t& val) { + asm("{"); + asm(".reg.pred %p;"); + asm("setp.ge.u32 %p, %0, %1;" ::"r"(val), "r"(MOD)); + asm("@%p sub.u32 %0, %0, %1;" : "+r"(val) : "r"(MOD)); + asm("}"); + } + + inline bb31_t& mul(const bb31_t b) { + uint32_t tmp[2], red; + + asm("mul.lo.u32 %0, %2, %3; mul.hi.u32 %1, %2, %3;" + : "=r"(tmp[0]), "=r"(tmp[1]) + : "r"(val), "r"(b.val)); + asm("mul.lo.u32 %0, %1, %2;" : "=r"(red) : "r"(tmp[0]), "r"(M)); + asm("mad.lo.cc.u32 %0, %2, %3, %0; madc.hi.u32 %1, %2, %3, %4;" + : "+r"(tmp[0]), "=r"(val) + : "r"(red), "r"(MOD), "r"(tmp[1])); + + final_sub(val); + + return *this; + } + + inline uint32_t mul_by_1() const { + uint32_t tmp[2], red; + + asm("mul.lo.u32 %0, %1, %2;" : "=r"(red) : "r"(val), "r"(M)); + asm("mad.lo.cc.u32 %0, %2, %3, %4; madc.hi.u32 %1, %2, %3, 0;" + : "=r"(tmp[0]), "=r"(tmp[1]) + : "r"(red), "r"(MOD), "r"(val)); + return tmp[1]; + } + + public: + friend inline bb31_t operator*(bb31_t a, const bb31_t b) { return a.mul(b); } + + inline bb31_t& operator*=(const bb31_t a) { return mul(a); } + + // raise to a variable power, variable in respect to threadIdx, + // but mind the ^ operator's precedence! + inline bb31_t& operator^=(uint32_t p) { + bb31_t sqr = *this; + *this = csel(val, ONE, p & 1); + +#pragma unroll 1 + while (p >>= 1) { + sqr.mul(sqr); + if (p & 1) + mul(sqr); + } + + return *this; + } + + friend inline bb31_t operator^(bb31_t a, uint32_t p) { + return a ^= p; + } + + inline bb31_t operator()(uint32_t p) { + return *this ^ p; + } + + // raise to a constant power, e.g. 
x^7, to be unrolled at compile time + inline bb31_t& operator^=(int p) { + if (p < 2) + asm("trap;"); + + bb31_t sqr = *this; + if ((p & 1) == 0) { + do { + sqr.mul(sqr); + p >>= 1; + } while ((p & 1) == 0); + *this = sqr; + } + for (p >>= 1; p; p >>= 1) { + sqr.mul(sqr); + if (p & 1) + mul(sqr); + } + + return *this; + } + + friend inline bb31_t operator^(bb31_t a, int p) { + return a ^= p; + } + + inline bb31_t operator()(int p) { + return *this ^ p; + } + + inline bb31_t square() { return *this * *this; } + + friend inline bb31_t sqr(bb31_t a) { + return a.sqr(); + } + + inline bb31_t& sqr() { + return mul(*this); + } + + inline void to() { + mul(RR); + } + + inline void from() { + val = mul_by_1(); + } + + template + static inline bb31_t dot_product(const bb31_t a[T], const bb31_t b[T]) { + uint32_t acc[2]; + size_t i = 1; + + asm("mul.lo.u32 %0, %2, %3; mul.hi.u32 %1, %2, %3;" + : "=r"(acc[0]), "=r"(acc[1]) + : "r"(*a[0]), "r"(*b[0])); + if ((T & 1) == 0) { + asm("mad.lo.cc.u32 %0, %2, %3, %0; madc.hi.u32 %1, %2, %3, %1;" + : "+r"(acc[0]), "+r"(acc[1]) + : "r"(*a[i]), "r"(*b[i])); + i++; + } + for (; i < T; i += 2) { + asm("mad.lo.cc.u32 %0, %2, %3, %0; madc.hi.u32 %1, %2, %3, %1;" + : "+r"(acc[0]), "+r"(acc[1]) + : "r"(*a[i]), "r"(*b[i])); + asm("mad.lo.cc.u32 %0, %2, %3, %0; madc.hi.u32 %1, %2, %3, %1;" + : "+r"(acc[0]), "+r"(acc[1]) + : "r"(*a[i + 1]), "r"(*b[i + 1])); + final_sub(acc[1]); + } + + uint32_t red; + asm("mul.lo.u32 %0, %1, %2;" : "=r"(red) : "r"(acc[0]), "r"(M)); + asm("mad.lo.cc.u32 %0, %2, %3, %0; madc.hi.u32 %1, %2, %3, %1;" + : "+r"(acc[0]), "+r"(acc[1]) + : "r"(red), "r"(MOD)); + final_sub(acc[1]); + + return acc[1]; + } + + template + static inline bb31_t dot_product(bb31_t a0, bb31_t b0, const bb31_t a[T - 1], + const bb31_t* b, size_t stride_b = 1) { + uint32_t acc[2]; + size_t i = 0; + + asm("mul.lo.u32 %0, %2, %3; mul.hi.u32 %1, %2, %3;" + : "=r"(acc[0]), "=r"(acc[1]) + : "r"(*a0), "r"(*b0)); + if ((T & 1) == 0) { + asm("mad.lo.cc.u32 %0, %2, %3, %0; madc.hi.u32 %1, %2, %3, %1;" + : "+r"(acc[0]), "+r"(acc[1]) + : "r"(*a[i]), "r"(*b[0])); + i++, b += stride_b; + } + for (; i < T - 1; i += 2) { + asm("mad.lo.cc.u32 %0, %2, %3, %0; madc.hi.u32 %1, %2, %3, %1;" + : "+r"(acc[0]), "+r"(acc[1]) + : "r"(*a[i]), "r"(*b[0])); + b += stride_b; + asm("mad.lo.cc.u32 %0, %2, %3, %0; madc.hi.u32 %1, %2, %3, %1;" + : "+r"(acc[0]), "+r"(acc[1]) + : "r"(*a[i + 1]), "r"(*b[0])); + b += stride_b; + final_sub(acc[1]); + } + + uint32_t red; + asm("mul.lo.u32 %0, %1, %2;" : "=r"(red) : "r"(acc[0]), "r"(M)); + asm("mad.lo.cc.u32 %0, %2, %3, %0; madc.hi.u32 %1, %2, %3, %1;" + : "+r"(acc[0]), "+r"(acc[1]) + : "r"(red), "r"(MOD)); + final_sub(acc[1]); + + return acc[1]; + } + + private: + static inline bb31_t sqr_n(bb31_t s, uint32_t n) { +#if 0 +#pragma unroll 2 + while (n--) + s.sqr(); +#else // +20% [for reciprocal()] +#pragma unroll 2 + while (n--) { + uint32_t tmp[2], red; + + asm("mul.lo.u32 %0, %2, %2; mul.hi.u32 %1, %2, %2;" + : "=r"(tmp[0]), "=r"(tmp[1]) + : "r"(s.val)); + asm("mul.lo.u32 %0, %1, %2;" : "=r"(red) : "r"(tmp[0]), "r"(M)); + asm("mad.lo.cc.u32 %0, %2, %3, %0; madc.hi.u32 %1, %2, %3, %4;" + : "+r"(tmp[0]), "=r"(s.val) + : "r"(red), "r"(MOD), "r"(tmp[1])); + + if (n & 1) + final_sub(s.val); + } +#endif + return s; + } + + static inline bb31_t sqr_n_mul(bb31_t s, uint32_t n, bb31_t m) { + s = sqr_n(s, n); + s.mul(m); + + return s; + } + + public: + inline bb31_t reciprocal() const { + bb31_t x11, xff, ret = *this; + + x11 = sqr_n_mul(ret, 4, ret); // 0b10001 + ret 
= sqr_n_mul(x11, 1, x11); // 0b110011 + ret = sqr_n_mul(ret, 1, x11); // 0b1110111 + xff = sqr_n_mul(ret, 1, x11); // 0b11111111 + ret = sqr_n_mul(ret, 8, xff); // 0b111011111111111 + ret = sqr_n_mul(ret, 8, xff); // 0b11101111111111111111111 + ret = sqr_n_mul(ret, 8, xff); // 0b1110111111111111111111111111111 + + return ret; + } + + friend inline bb31_t operator/(int one, bb31_t a) { + if (one != 1) + asm("trap;"); + return a.reciprocal(); + } + + friend inline bb31_t operator/(bb31_t a, bb31_t b) { + return a * b.reciprocal(); + } + + inline bb31_t& operator/=(const bb31_t a) { + return *this *= a.reciprocal(); + } + + inline bb31_t heptaroot() const { + bb31_t x03, x18, x1b, ret = *this; + + x03 = sqr_n_mul(ret, 1, ret); // 0b11 + x18 = sqr_n(x03, 3); // 0b11000 + x1b = x18 * x03; // 0b11011 + ret = x18 * x1b; // 0b110011 + ret = sqr_n_mul(ret, 6, x1b); // 0b110011011011 + ret = sqr_n_mul(ret, 6, x1b); // 0b110011011011011011 + ret = sqr_n_mul(ret, 6, x1b); // 0b110011011011011011011011 + ret = sqr_n_mul(ret, 6, x1b); // 0b110011011011011011011011011011 + ret = sqr_n_mul(ret, 1, *this); // 0b1100110110110110110110110110111 + + return ret; + } + + inline void shfl_bfly(uint32_t laneMask) { + val = __shfl_xor_sync(0xFFFFFFFF, val, laneMask); + } +}; + +#undef inline +#undef asm +// # endif // __CUDA__ARCH__ + +#else + +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" +#endif + +class bb31_t { + private: + static const uint32_t M = 0x77ffffffu; + static const uint32_t RR = 0x45dddde3u; + static const uint32_t ONE = 0x0ffffffeu; + static const uint32_t MONTY_BITS = 32; + static const uint32_t MONTY_MU = 0x88000001; + static const uint32_t MONTY_MASK = ((1ULL << MONTY_BITS) - 1); + + public: + using mem_t = bb31_t; + uint32_t val; + static const uint32_t DEGREE = 1; + static const uint32_t NBITS = 31; + static const uint32_t MOD = 0x78000001; + + inline bb31_t() {} + + inline bb31_t(uint32_t a) : val(a) {} + + inline constexpr bb31_t(int a) : val(((uint64_t)a << 32) % MOD) {} + + static inline const bb31_t zero() { return bb31_t(0); } + + static inline const bb31_t one() { return bb31_t(ONE); } + + static inline const bb31_t two() { return bb31_t(to_monty(2)); } + + static inline uint32_t to_monty(uint32_t x) { + return (((uint64_t)x << MONTY_BITS) % MOD); + } + + static inline uint32_t from_monty(uint32_t x) { + return monty_reduce((uint64_t)x); + } + + static inline uint32_t monty_reduce(uint64_t x) { + uint64_t t = (x * (uint64_t)MONTY_MU) & (uint64_t)MONTY_MASK; + uint64_t u = t * (uint64_t)MOD; + uint64_t x_sub_u = x - u; + bool over = x < u; + uint32_t x_sub_u_hi = (uint32_t)(x_sub_u >> MONTY_BITS); + uint32_t corr = over ? 
MOD : 0; + return x_sub_u_hi + corr; + } + + static inline bb31_t from_canonical_u32(uint32_t x) { + assert(x < MOD); + return bb31_t(to_monty(x)); + } + + static inline bb31_t from_canonical_u16(uint16_t x) { + return from_canonical_u32((uint32_t)x); + } + + static inline bb31_t from_canonical_u8(uint8_t x) { + return from_canonical_u32((uint32_t)x); + } + + static inline bb31_t from_bool(bool x) { return bb31_t(x * one().val); } + + inline uint32_t as_canonical_u32() const { return from_monty(val); } + + inline bb31_t& operator+=(bb31_t b) { + val += b.val; + if (val >= MOD) + val -= MOD; + return *this; + } + + inline bb31_t& operator-=(bb31_t b) { + if (val < b.val) + val += MOD; + val -= b.val; + return *this; + } + + inline bb31_t& operator*=(bb31_t b) { + uint64_t long_prod = (uint64_t)val * (uint64_t)b.val; + val = monty_reduce(long_prod); + return *this; + } + + inline bb31_t square() { return *this * *this; } + + friend bb31_t operator+(bb31_t a, bb31_t b) { return a += b; } + + friend bb31_t operator-(bb31_t a, bb31_t b) { return a -= b; } + + friend bb31_t operator*(bb31_t a, bb31_t b) { return a *= b; } + + inline bb31_t& operator<<=(uint32_t l) { + while (l--) { + val <<= 1; + if (val >= MOD) + val -= MOD; + } + + return *this; + } + + friend inline bb31_t operator<<(bb31_t a, uint32_t l) { return a <<= l; } + + inline bb31_t& operator>>=(uint32_t r) { + while (r--) { + val += val & 1 ? MOD : 0; + val >>= 1; + } + + return *this; + } + + inline bb31_t exp_power_of_2(uint32_t power_log) const { + bb31_t result = *this; + for (uint32_t i = 0; i < power_log; ++i) { + result = result.square(); + } + return result; + } + + inline bb31_t reciprocal() const { + assert(*this != zero()); + + bb31_t p1 = *this; + bb31_t p100000000 = p1.exp_power_of_2(8); + bb31_t p100000001 = p100000000 * p1; + bb31_t p10000000000000000 = p100000000.exp_power_of_2(8); + bb31_t p10000000100000001 = p10000000000000000 * p100000001; + bb31_t p10000000100000001000 = p10000000100000001.exp_power_of_2(3); + bb31_t p1000000010000000100000000 = p10000000100000001000.exp_power_of_2(5); + bb31_t p1000000010000000100000001 = p1000000010000000100000000 * p1; + bb31_t p1000010010000100100001001 = + p1000000010000000100000001 * p10000000100000001000; + bb31_t p10000000100000001000000010 = p1000000010000000100000001.square(); + bb31_t p11000010110000101100001011 = + p10000000100000001000000010 * p1000010010000100100001001; + bb31_t p100000001000000010000000100 = p10000000100000001000000010.square(); + bb31_t p111000011110000111100001111 = + p100000001000000010000000100 * p11000010110000101100001011; + bb31_t p1110000111100001111000011110000 = + p111000011110000111100001111.exp_power_of_2(4); + bb31_t p1110111111111111111111111111111 = + p1110000111100001111000011110000 * p111000011110000111100001111; + + return p1110111111111111111111111111111; + } + + inline bool operator==(const bb31_t rhs) const { return val == rhs.val; } + + inline bb31_t &operator^=(int b) { + bb31_t sqr = *this; + if ((b & 1) == 0) + *this = one(); + while (b >>= 1) { + sqr = sqr.square(); + if (b & 1) + *this *= sqr; + } + return *this; + } + + friend bb31_t operator^(bb31_t a, uint32_t b) { return a ^= b; } + + inline bb31_t& sqr() { return *this; } + + inline void set_to_zero() { val = 0; } + + inline bool is_zero() const { return val == 0; } +}; + +#endif // __CUDA__ARCH__ \ No newline at end of file diff --git a/crates/core/machine/include/bitwise.hpp b/crates/core/machine/include/bitwise.hpp new file mode 100644 index 
0000000000..190e5f287c --- /dev/null +++ b/crates/core/machine/include/bitwise.hpp @@ -0,0 +1,19 @@ +#pragma once + +#include "prelude.hpp" +#include "utils.hpp" + +namespace sp1_core_machine_sys::bitwise { +template +__SP1_HOSTDEV__ void event_to_row(const AluEvent& event, BitwiseCols& cols) { + cols.shard = F::from_canonical_u32(event.shard); + write_word_from_u32(cols.a, event.a); + write_word_from_u32(cols.b, event.b); + write_word_from_u32(cols.c, event.c); + cols.is_xor = F::from_bool(event.opcode == Opcode::XOR); + cols.is_or = F::from_bool(event.opcode == Opcode::OR); + cols.is_and = F::from_bool(event.opcode == Opcode::AND); + + // No byte lookup yet. +} +} // namespace sp1::bitwise diff --git a/crates/core/machine/include/cpu.hpp b/crates/core/machine/include/cpu.hpp new file mode 100644 index 0000000000..41f78f9ef6 --- /dev/null +++ b/crates/core/machine/include/cpu.hpp @@ -0,0 +1,555 @@ +#pragma once + +#include +#include + +#include "memory.hpp" +#include "prelude.hpp" +#include "utils.hpp" + +// namespace sp1_core_machine_sys::cpu { + +// template +// __SP1_HOSTDEV__ void populate_shard_clk(const CpuEventFfi& event, CpuCols& cols) { +// // cols.shard = F::from_canonical_u32(event.shard).val; +// // cols.clk = F::from_canonical_u32(event.clk).val; + +// // const uint16_t clk_16bit_limb = (uint16_t)event.clk; +// // const uint8_t clk_8bit_limb = (uint8_t)(event.clk >> 16); +// // cols.clk_16bit_limb = F::from_canonical_u16(clk_16bit_limb).val; +// // cols.clk_8bit_limb = F::from_canonical_u8(clk_8bit_limb).val; + +// // blu_events.add_byte_lookup_event(ByteLookupEvent::new( +// // event.shard, +// // U16Range, +// // event.shard as u16, +// // 0, +// // 0, +// // 0, +// // )); +// // blu_events.add_byte_lookup_event(ByteLookupEvent::new( +// // event.shard, +// // U16Range, +// // clk_16bit_limb, +// // 0, +// // 0, +// // 0, +// // )); +// // blu_events.add_byte_lookup_event(ByteLookupEvent::new( +// // event.shard, +// // ByteOpcode::U8Range, +// // 0, +// // 0, +// // 0, +// // clk_8bit_limb as u8, +// // )); +// } + +// // template +// // __SP1_HOSTDEV__ void +// // instruction_populate(InstructionCols& self, const Instruction& instruction) { +// // self.opcode = F::from_canonical_u32((uint32_t)instruction.opcode).val; +// // write_word_from_u32(self.op_a, instruction.op_a); +// // write_word_from_u32(self.op_b, instruction.op_b); +// // write_word_from_u32(self.op_c, instruction.op_c); + +// // self.op_a_0 = F::from_bool(instruction.op_a == 0).val; // 0 = Register::X0 +// // } + +// // template +// // __SP1_HOSTDEV__ void +// // selectors_populate(OpcodeSelectorCols& self, const Instruction& instruction) { +// // self.imm_b = F::from_bool(instruction.imm_b).val; +// // self.imm_c = F::from_bool(instruction.imm_c).val; + +// // switch (instruction.opcode) { +// // // Corresponds to `instruction.is_alu_instruction()` in Rust. +// // case Opcode::ADD: +// // case Opcode::SUB: +// // case Opcode::XOR: +// // case Opcode::OR: +// // case Opcode::AND: +// // case Opcode::SLL: +// // case Opcode::SRL: +// // case Opcode::SRA: +// // case Opcode::SLT: +// // case Opcode::SLTU: +// // case Opcode::MUL: +// // case Opcode::MULH: +// // case Opcode::MULHU: +// // case Opcode::MULHSU: +// // case Opcode::DIV: +// // case Opcode::DIVU: +// // case Opcode::REM: +// // case Opcode::REMU: +// // self.is_alu = F::one().val; +// // break; +// // // Corresponds to `instruction.is_ecall_instruction()` in Rust. 
+// // case Opcode::ECALL: +// // self.is_ecall = F::one().val; +// // break; +// // // Cleaner version of the `instruction.is_memory_instruction()` branch from Rust. +// // case Opcode::LB: +// // self.is_lb = F::one().val; +// // break; +// // case Opcode::LBU: +// // self.is_lbu = F::one().val; +// // break; +// // case Opcode::LHU: +// // self.is_lhu = F::one().val; +// // break; +// // case Opcode::LH: +// // self.is_lh = F::one().val; +// // break; +// // case Opcode::LW: +// // self.is_lw = F::one().val; +// // break; +// // case Opcode::SB: +// // self.is_sb = F::one().val; +// // break; +// // case Opcode::SH: +// // self.is_sh = F::one().val; +// // break; +// // case Opcode::SW: +// // self.is_sw = F::one().val; +// // break; +// // // Cleaner version of the `instruction.is_branch_instruction()` branch from Rust. +// // case Opcode::BEQ: +// // self.is_beq = F::one().val; +// // break; +// // case Opcode::BNE: +// // self.is_bne = F::one().val; +// // break; +// // case Opcode::BLT: +// // self.is_blt = F::one().val; +// // break; +// // case Opcode::BGE: +// // self.is_bge = F::one().val; +// // break; +// // case Opcode::BLTU: +// // self.is_bltu = F::one().val; +// // break; +// // case Opcode::BGEU: +// // self.is_bgeu = F::one().val; +// // break; +// // // Opcodes which each have their own branch in the original Rust function. +// // case Opcode::JAL: +// // self.is_jal = F::one().val; +// // break; +// // case Opcode::JALR: +// // self.is_jalr = F::one().val; +// // break; +// // case Opcode::AUIPC: +// // self.is_auipc = F::one().val; +// // break; +// // case Opcode::UNIMP: +// // self.is_unimpl = F::one().val; +// // break; +// // default: +// // break; +// // } +// // } + +// // template +// // __SP1_HOSTDEV__ void +// // babybear_word_populate(BabyBearWordRangeChecker& self, uint32_t value) { +// // for (uintptr_t i = 0; i < BYTE_SIZE; ++i) { +// // self.most_sig_byte_decomp[i] = F::from_bool((value & (1 << (i + 24))) != 0).val; +// // } +// // self.and_most_sig_byte_decomp_3_to_5 = +// // F::from_bool(self.most_sig_byte_decomp[3] != 0 && self.most_sig_byte_decomp[4] != 0).val; +// // self.and_most_sig_byte_decomp_3_to_6 = +// // F::from_bool(self.and_most_sig_byte_decomp_3_to_5 != 0 && self.most_sig_byte_decomp[5] != 0) +// // .val; +// // self.and_most_sig_byte_decomp_3_to_7 = +// // F::from_bool(self.and_most_sig_byte_decomp_3_to_6 != 0 && self.most_sig_byte_decomp[6] != 0) +// // .val; +// // } + +// // template +// // __SP1_HOSTDEV__ void populate_memory(CpuCols& cols, const CpuEventFfi& event) { +// // // Populate addr_word and addr_aligned columns. +// // MemoryColumns& memory_columns = cols.opcode_specific_columns.memory; +// // // Wraps because the types involved are unsigned integers. +// // const uint32_t memory_addr = event.b + event.c; +// // const uint32_t aligned_addr = memory_addr - (memory_addr % (uint32_t)WORD_SIZE); +// // write_word_from_u32(memory_columns.addr_word, memory_addr); +// // babybear_word_populate(memory_columns.addr_word_range_checker, memory_addr); +// // memory_columns.addr_aligned = F::from_canonical_u32(aligned_addr).val; + +// // // Populate the aa_least_sig_byte_decomp columns. 
+// // // assert(aligned_addr % 4 == 0); +// // const uint8_t aligned_addr_ls_byte = (uint8_t)aligned_addr; +// // for (uintptr_t i = 0; i < 6; ++i) { +// // memory_columns.aa_least_sig_byte_decomp[i] = +// // F::from_bool((aligned_addr_ls_byte & (1 << (i + 2))) != 0).val; +// // } +// // memory_columns.addr_word_nonce = F::from_canonical_u32(event.memory_add_nonce).val; + +// // // // Populate memory offsets. +// // const uint8_t addr_offset = (uint8_t)(memory_addr % (uint32_t)WORD_SIZE); +// // memory_columns.addr_offset = F::from_canonical_u8(addr_offset).val; +// // memory_columns.offset_is_one = F::from_bool(addr_offset == 1).val; +// // memory_columns.offset_is_two = F::from_bool(addr_offset == 2).val; +// // memory_columns.offset_is_three = F::from_bool(addr_offset == 3).val; + +// // // If it is a load instruction, set the unsigned_mem_val column. +// // const uint32_t mem_value = memory::unwrap_value(event.memory_record); + +// // // // Add event to byte lookup for byte range checking each byte in the memory addr +// // // let addr_bytes = memory_addr.to_le_bytes(); +// // // for byte_pair in addr_bytes.chunks_exact(2) { +// // // blu_events.add_byte_lookup_event(ByteLookupEvent { +// // // shard: event.shard, +// // // opcode: ByteOpcode::U8Range, +// // // a1: 0, +// // // a2: 0, +// // // b: byte_pair[0], +// // // c: byte_pair[1], +// // // }); +// // // } + +// // uint32_t unsigned_mem_val = mem_value; +// // switch (event.instruction.opcode) { +// // case Opcode::LB: +// // case Opcode::LBU: +// // unsigned_mem_val = (uint32_t)(uint8_t)(mem_value >> 8 * addr_offset); +// // break; +// // case Opcode::LH: +// // case Opcode::LHU: +// // unsigned_mem_val = ((addr_offset >> 1) & 0x1) == 0 ? (mem_value & 0x0000FFFF) +// // : (mem_value & 0xFFFF0000) >> 16; +// // break; +// // case Opcode::LW: +// // // The value assigned at declaration is correct. +// // break; +// // default: +// // return; +// // } +// // // Guard above ensures instruction is a load. +// // write_word_from_u32(cols.unsigned_mem_val, unsigned_mem_val); + +// // uint8_t most_sig_mem_value_byte; +// // switch (event.instruction.opcode) { +// // case Opcode::LB: + +// // most_sig_mem_value_byte = (uint8_t)unsigned_mem_val; +// // break; +// // case Opcode::LH: +// // most_sig_mem_value_byte = (uint8_t)(unsigned_mem_val >> 8); +// // break; +// // default: +// // // The load instruction is unsigned. +// // // Set the `mem_value_is_pos_not_x0` composite flag. +// // cols.mem_value_is_pos_not_x0 = +// // F::from_bool(event.instruction.op_a != 0).val; // 0 = Register::X0 +// // return; +// // } +// // // Guard above ensures the load instruction is signed. +// // for (intptr_t i = BYTE_SIZE - 1; i >= 0; --i) { +// // memory_columns.most_sig_byte_decomp[i] = +// // F::from_canonical_u32(most_sig_mem_value_byte >> i & 0x1).val; +// // } +// // bool mem_value_is_pos_not_x0 = memory_columns.most_sig_byte_decomp[7] == F::zero().val; +// // if (!mem_value_is_pos_not_x0) { +// // cols.mem_value_is_neg_not_x0 = +// // F::from_bool(event.instruction.op_a != 0).val; // 0 = Register::X0 +// // cols.unsigned_mem_val_nonce = F::from_canonical_u32(event.memory_sub_nonce).val; +// // } +// // // Set the `mem_value_is_pos_not_x0` composite flag. 
+// // cols.mem_value_is_pos_not_x0 = F::from_bool(mem_value_is_pos_not_x0).val; +// // } + +// // template +// // __SP1_HOSTDEV__ void populate_branch(CpuCols& cols, const CpuEventFfi& event) { +// // // let branch_columns = cols.opcode_specific_columns.branch_mut(); +// // BranchCols& branch_columns = cols.opcode_specific_columns.branch; + +// // Opcode opcode = event.instruction.opcode; +// // const bool use_signed_comparison = opcode == Opcode::BLT || opcode == Opcode::BGE; + +// // const bool a_eq_b = event.a == event.b; +// // const bool a_lt_b = +// // use_signed_comparison ? ((int32_t)event.a < (int32_t)event.b) : (event.a < event.b); +// // const bool a_gt_b = +// // use_signed_comparison ? ((int32_t)event.a > (int32_t)event.b) : (event.a > event.b); + +// // branch_columns.a_lt_b_nonce = F::from_canonical_u32(event.branch_lt_nonce).val; +// // branch_columns.a_gt_b_nonce = F::from_canonical_u32(event.branch_gt_nonce).val; + +// // branch_columns.a_eq_b = F::from_bool(a_eq_b).val; +// // branch_columns.a_lt_b = F::from_bool(a_lt_b).val; +// // branch_columns.a_gt_b = F::from_bool(a_gt_b).val; + +// // bool branching; +// // switch (opcode) { +// // case Opcode::BEQ: +// // branching = a_eq_b; +// // break; +// // case Opcode::BNE: +// // branching = !a_eq_b; +// // break; +// // case Opcode::BLT: +// // case Opcode::BLTU: +// // branching = a_lt_b; +// // break; +// // case Opcode::BGE: +// // case Opcode::BGEU: +// // branching = a_eq_b || a_gt_b; +// // break; +// // default: +// // // Precondition violated. +// // assert(false); +// // break; +// // } + +// // // Unsigned arithmetic wraps. +// // const uint32_t next_pc = event.pc + event.c; +// // write_word_from_u32(branch_columns.pc, event.pc); +// // write_word_from_u32(branch_columns.next_pc, next_pc); +// // babybear_word_populate(branch_columns.pc_range_checker, event.pc); +// // babybear_word_populate(branch_columns.next_pc_range_checker, next_pc); + +// // if (branching) { +// // cols.branching = F::one().val; +// // branch_columns.next_pc_nonce = F::from_canonical_u32(event.branch_add_nonce).val; +// // } else { +// // cols.not_branching = F::one().val; +// // } +// // } + +// // template +// // __SP1_HOSTDEV__ void populate_jump(CpuCols& cols, const CpuEventFfi& event) { +// // // let jump_columns = cols.opcode_specific_columns.jump_mut(); +// // JumpCols& jump_columns = cols.opcode_specific_columns.jump; + +// // switch (event.instruction.opcode) { +// // case Opcode::JAL: { +// // // Unsigned arithmetic wraps. +// // uint32_t next_pc = event.pc + event.b; +// // babybear_word_populate(jump_columns.op_a_range_checker, event.a); +// // write_word_from_u32(jump_columns.pc, event.pc); +// // babybear_word_populate(jump_columns.pc_range_checker, event.pc); +// // write_word_from_u32(jump_columns.next_pc, next_pc); +// // babybear_word_populate(jump_columns.next_pc_range_checker, next_pc); +// // jump_columns.jal_nonce = F::from_canonical_u32(event.jump_jal_nonce).val; +// // break; +// // } +// // case Opcode::JALR: { +// // // Unsigned arithmetic wraps. +// // uint32_t next_pc = event.b + event.c; +// // babybear_word_populate(jump_columns.op_a_range_checker, event.a); +// // write_word_from_u32(jump_columns.next_pc, next_pc); +// // babybear_word_populate(jump_columns.next_pc_range_checker, next_pc); +// // jump_columns.jalr_nonce = F::from_canonical_u32(event.jump_jalr_nonce).val; +// // break; +// // } +// // default: +// // // Precondition violated. 
+// // assert(false); +// // break; +// // } +// // } + +// // template +// // __SP1_HOSTDEV__ void populate_auipc(CpuCols& cols, const CpuEventFfi& event) { +// // AuipcCols& auipc_columns = cols.opcode_specific_columns.auipc; + +// // write_word_from_u32(auipc_columns.pc, event.pc); +// // babybear_word_populate(auipc_columns.pc_range_checker, event.pc); +// // auipc_columns.auipc_nonce = F::from_canonical_u32(event.auipc_nonce).val; +// // } + +// // template +// // __SP1_HOSTDEV__ void +// // is_zero_operation_populate_from_field_element(IsZeroOperation& self, F a) { +// // if (a == F::zero()) { +// // self.inverse = F::zero().val; +// // self.result = F::one().val; +// // } else { +// // self.inverse = a.reciprocal().val; +// // self.result = F::zero().val; +// // } +// // // F is_zero = F::one() - F(self.inverse) * a; +// // // assert(is_zero == F(self.result)); +// // // let is_zero = one.clone() - cols.inverse * a.clone(); +// // // builder.when(is_real.clone()).assert_eq(is_zero, cols.result); + +// // // let prod = self.inverse * a; +// // // debug_assert!(prod == F::one() || prod == F::zero()); +// // // (a == F::zero()) as u32 +// // } + +// // template +// // __SP1_HOSTDEV__ bool populate_ecall(CpuCols& cols, const CpuEventFfi& event) { +// // bool is_halt = false; + +// // // The send_to_table column is the 1st entry of the op_a_access column prev_value field. +// // // Look at `ecall_eval` in cpu/air/mod.rs for the corresponding constraint and +// // // explanation. +// // EcallCols& ecall_cols = cols.opcode_specific_columns.ecall; + +// // cols.ecall_mul_send_to_table = cols.op_a_access.prev_value._0[1]; + +// // F syscall_id = F(cols.op_a_access.prev_value._0[0]); + +// // // In the following statements, truncating to `uint8_t` is the equivalent of the +// // // `SyscallCode::get_syscall_id` calls from the Rust code. + +// // // Populate `is_enter_unconstrained`. +// // is_zero_operation_populate_from_field_element( +// // ecall_cols.is_enter_unconstrained, +// // syscall_id - F::from_canonical_u8((uint8_t)SyscallCode::ENTER_UNCONSTRAINED) +// // ); + +// // // Populate `is_hint_len`. +// // is_zero_operation_populate_from_field_element( +// // ecall_cols.is_hint_len, +// // syscall_id - F::from_canonical_u8((uint8_t)SyscallCode::HINT_LEN) +// // ); + +// // // Populate `is_halt`. +// // is_zero_operation_populate_from_field_element( +// // ecall_cols.is_halt, +// // syscall_id - F::from_canonical_u8((uint8_t)SyscallCode::HALT) +// // ); + +// // // Populate `is_commit`. +// // is_zero_operation_populate_from_field_element( +// // ecall_cols.is_commit, +// // syscall_id - F::from_canonical_u8((uint8_t)SyscallCode::COMMIT) +// // ); + +// // // Populate `is_commit_deferred_proofs`. +// // is_zero_operation_populate_from_field_element( +// // ecall_cols.is_commit_deferred_proofs, +// // syscall_id - F::from_canonical_u8((uint8_t)SyscallCode::COMMIT_DEFERRED_PROOFS) +// // ); + +// // // If the syscall is `COMMIT` or `COMMIT_DEFERRED_PROOFS`, set the index bitmap and +// // // digest word. +// // if (syscall_id +// // == F::from_canonical_u8((uint8_t)SyscallCode::COMMIT +// // ) // Comment to make my editor format nicely... +// // || syscall_id == F::from_canonical_u8((uint8_t)SyscallCode::COMMIT_DEFERRED_PROOFS)) { +// // uint32_t digest_idx = word_to_u32(cols.op_b_access.access.value); +// // ecall_cols.index_bitmap[digest_idx] = F::one().val; +// // } + +// // // Write the syscall nonce. 
+// // ecall_cols.syscall_nonce = F::from_canonical_u32(event.syscall_nonce).val; + +// // is_halt = syscall_id == F::from_canonical_u32((uint8_t)SyscallCode::HALT); + +// // // For halt and commit deferred proofs syscalls, we need to baby bear range check one of +// // // it's operands. +// // if (is_halt) { +// // write_word_from_u32(ecall_cols.operand_to_check, event.b); +// // babybear_word_populate(ecall_cols.operand_range_check_cols, event.b); +// // cols.ecall_range_check_operand = F::one().val; +// // } + +// // if (syscall_id == F::from_canonical_u32((uint8_t)SyscallCode::COMMIT_DEFERRED_PROOFS)) { +// // write_word_from_u32(ecall_cols.operand_to_check, event.c); +// // babybear_word_populate(ecall_cols.operand_range_check_cols, event.c); +// // cols.ecall_range_check_operand = F::one().val; +// // } + +// // return is_halt; +// // } + +// template +// __SP1_HOSTDEV__ void event_to_row(const CpuEventFfi& event, CpuCols& cols) { +// // // Populate shard and clk columns. +// // populate_shard_clk(event, cols); + +// // // Populate the nonce. +// // cols.nonce = F::from_canonical_u32(event.alu_nonce).val; + +// // // Populate basic fields. +// // cols.pc = F::from_canonical_u32(event.pc).val; +// // cols.next_pc = F::from_canonical_u32(event.next_pc).val; +// // instruction_populate(cols.instruction, event.instruction); +// // // cols.instruction.populate(event.instruction); +// // selectors_populate(cols.selectors, event.instruction); +// // // cols.selectors.populate(event.instruction); +// // write_word_from_u32(cols.op_a_access.access.value, event.a); +// // write_word_from_u32(cols.op_b_access.access.value, event.b); +// // write_word_from_u32(cols.op_c_access.access.value, event.c); + +// // // // Populate memory accesses for a, b, and c. +// // // The function guards against the record being `None`. +// // memory::populate_read_write(cols.op_a_access, event.a_record); +// // if (event.b_record.tag == OptionMemoryRecordEnum::Tag::Read) { +// // memory::populate_read(cols.op_b_access, event.b_record.read._0); +// // } +// // if (event.c_record.tag == OptionMemoryRecordEnum::Tag::Read) { +// // memory::populate_read(cols.op_c_access, event.c_record.read._0); +// // } + +// // // // Populate range checks for a. +// // // let a_bytes = cols +// // // .op_a_access +// // // .access +// // // .val +// // // .0 +// // // .iter() +// // // .map(|x| x.as_canonical_u32()) +// // // .collect::>(); +// // // blu_events.add_byte_lookup_event(ByteLookupEvent { +// // // shard: event.shard, +// // // opcode: ByteOpcode::U8Range, +// // // a1: 0, +// // // a2: 0, +// // // b: a_bytes[0] as u8, +// // // c: a_bytes[1] as u8, +// // // }); +// // // blu_events.add_byte_lookup_event(ByteLookupEvent { +// // // shard: event.shard, +// // // opcode: ByteOpcode::U8Range, +// // // a1: 0, +// // // a2: 0, +// // // b: a_bytes[2] as u8, +// // // c: a_bytes[3] as u8, +// // // }); + +// // // Populate memory accesses for reading from memory. +// // // `event.memory` appears to be vestigial. +// // // assert_eq!(event.memory_record.is_some(), event.memory.is_some()); +// // // The function guards against the record being `None`. +// // memory::populate_read_write( +// // cols.opcode_specific_columns.memory.memory_access, +// // event.memory_record +// // ); + +// // // Populate memory, branch, jump, and auipc specific fields. 
+// // const bool is_memory = opcode_utils::is_memory(event.instruction.opcode); +// // const bool is_branch = opcode_utils::is_branch(event.instruction.opcode); +// // const bool is_jump = opcode_utils::is_jump(event.instruction.opcode); +// // const bool is_auipc = event.instruction.opcode == Opcode::AUIPC; +// // const bool is_ecall = event.instruction.opcode == Opcode::ECALL; +// // // Calculated by `populate_ecall`, if called. +// // bool is_halt = false; +// // // Unlike the Rust code, we guard outside the function bodies so we can reuse the booleans. +// // if (is_memory) { +// // populate_memory(cols, event); +// // } +// // if (is_branch) { +// // populate_branch(cols, event); +// // } +// // if (is_jump) { +// // populate_jump(cols, event); +// // } +// // if (is_auipc) { +// // populate_auipc(cols, event); +// // } +// // if (is_ecall) { +// // is_halt = populate_ecall(cols, event); +// // } + +// // cols.is_sequential_instr = F::from_bool(!(is_branch || is_jump || is_halt)).val; + +// // // Assert that the instruction is not a no-op. +// // cols.is_real = F::one().val; +// } +// } // namespace sp1::cpu \ No newline at end of file diff --git a/crates/core/machine/include/lt.hpp b/crates/core/machine/include/lt.hpp new file mode 100644 index 0000000000..3c83c144f4 --- /dev/null +++ b/crates/core/machine/include/lt.hpp @@ -0,0 +1,100 @@ +#pragma once + +#include + +#include "prelude.hpp" +#include "utils.hpp" + +namespace sp1_core_machine_sys::lt { +template +__SP1_HOSTDEV__ void event_to_row(const AluEvent& event, LtCols& cols) { + array_t a = u32_to_le_bytes(event.a); + array_t b = u32_to_le_bytes(event.b); + array_t c = u32_to_le_bytes(event.c); + cols.shard = F::from_canonical_u32(event.shard).val; + word_from_le_bytes(cols.a, a); + word_from_le_bytes(cols.b, b); + word_from_le_bytes(cols.c, c); + + // If this is SLT, mask the MSB of b & c before computing cols.bits. + uint8_t masked_b = b[3] & 0x7f; + uint8_t masked_c = c[3] & 0x7f; + cols.b_masked = F::from_canonical_u8(masked_b); + cols.c_masked = F::from_canonical_u8(masked_c); + + // // Send the masked interaction. + // blu.add_byte_lookup_event(ByteLookupEvent { + // shard: event.shard, + // channel: event.channel, + // opcode: ByteOpcode::AND, + // a1: masked_b as u16, + // a2: 0, + // b: b[3], + // c: 0x7f, + // }); + // blu.add_byte_lookup_event(ByteLookupEvent { + // shard: event.shard, + // channel: event.channel, + // opcode: ByteOpcode::AND, + // a1: masked_c as u16, + // a2: 0, + // b: c[3], + // c: 0x7f, + // }); + + array_t b_comp = b; + array_t c_comp = c; + if (event.opcode == Opcode::SLT) { + b_comp[3] = masked_b; + c_comp[3] = masked_c; + } + + // Set the byte equality flags. + intptr_t i = 3; + while (true) { + uint8_t b_byte = b_comp[i]; + uint8_t c_byte = c_comp[i]; + if (b_byte != c_byte) { + cols.byte_flags[i] = F::one(); + cols.sltu = F::from_bool(b_byte < c_byte); + F b_byte_f = F::from_canonical_u8(b_byte); + F c_byte_f = F::from_canonical_u8(c_byte); + cols.not_eq_inv = (b_byte_f - c_byte_f).reciprocal(); + cols.comparison_bytes[0] = b_byte_f; + cols.comparison_bytes[1] = c_byte_f; + break; + } + if (i == 0) { + // The equality `b_comp == c_comp` holds. 
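+            // Every byte pair matched while scanning from the most significant byte down,
+            // so no differing pair or inverse witness is recorded; only the equality flag is set.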
+ cols.is_comp_eq = F::one(); + break; + } + --i; + } + + cols.msb_b = F::from_bool((b[3] >> 7) & 1); + cols.msb_c = F::from_bool((c[3] >> 7) & 1); + cols.is_sign_eq = F::from_bool(event.opcode != Opcode::SLT || cols.msb_b == cols.msb_c); + + cols.is_slt = F::from_bool(event.opcode == Opcode::SLT); + cols.is_sltu = F::from_bool(event.opcode == Opcode::SLTU); + + cols.bit_b = (F(cols.msb_b) * F(cols.is_slt)); + cols.bit_c = (F(cols.msb_c) * F(cols.is_slt)); + + // if (F(cols.a._0[0]) != F(cols.bit_b) * (F::one() - F(cols.bit_c)) + F(cols.is_sign_eq) * F(cols.sltu)) + // { + // std::exit(1); + // } + + // blu.add_byte_lookup_event(ByteLookupEvent { + // shard: event.shard, + // channel: event.channel, + // opcode: ByteOpcode::LTU, + // a1: cols.sltu.as_canonical_u32() as u16, + // a2: 0, + // b: cols.comparison_bytes[0].as_canonical_u32() as u8, + // c: cols.comparison_bytes[1].as_canonical_u32() as u8, + // }); +} +} // namespace sp1::lt \ No newline at end of file diff --git a/crates/core/machine/include/memory.hpp b/crates/core/machine/include/memory.hpp new file mode 100644 index 0000000000..216a3a449d --- /dev/null +++ b/crates/core/machine/include/memory.hpp @@ -0,0 +1,116 @@ +#pragma once + +#include + +#include "prelude.hpp" +#include "utils.hpp" + +// namespace sp1_core_machine_sys::memory { +// __SP1_HOSTDEV__ __SP1_INLINE__ uint32_t unwrap_value(const OptionMemoryRecordEnum& record) { +// switch (record.tag) { +// case OptionMemoryRecordEnum::Tag::Read: +// return record.read._0.value; +// case OptionMemoryRecordEnum::Tag::Write: +// return record.write._0.value; +// default: +// // Either the tag is `None` or it is an invalid value. +// assert(false); +// } +// // Unreachable. +// return 0; +// } + +// template +// __SP1_HOSTDEV__ void populate_access( +// MemoryAccessCols& self, +// const MemoryRecord& current_record, +// const MemoryRecord& prev_record +// ) { +// write_word_from_u32(self.value, current_record.value); + +// self.prev_shard = F::from_canonical_u32(prev_record.shard).val; +// self.prev_clk = F::from_canonical_u32(prev_record.timestamp).val; + +// // Fill columns used for verifying current memory access time value is greater than +// // previous's. +// const bool use_clk_comparison = prev_record.shard == current_record.shard; +// self.compare_clk = F::from_bool(use_clk_comparison).val; +// const uint32_t prev_time_value = use_clk_comparison ? prev_record.timestamp : prev_record.shard; +// const uint32_t current_time_value = +// use_clk_comparison ? current_record.timestamp : current_record.shard; + +// const uint32_t diff_minus_one = current_time_value - prev_time_value - 1; +// const uint16_t diff_16bit_limb = (uint16_t)(diff_minus_one); +// self.diff_16bit_limb = F::from_canonical_u16(diff_16bit_limb).val; +// const uint8_t diff_8bit_limb = (uint8_t)(diff_minus_one >> 16); +// self.diff_8bit_limb = F::from_canonical_u8(diff_8bit_limb).val; + +// // let shard = current_record.shard; + +// // // Add a byte table lookup with the 16Range op. +// // output.add_u16_range_check(shard, diff_16bit_limb); + +// // // Add a byte table lookup with the U8Range op. 
+// // output.add_u8_range_check(shard, 0, diff_8bit_limb as u8); +// } + +// template +// __SP1_HOSTDEV__ void +// populate_read(MemoryReadCols& self, const MemoryReadRecord& record) { +// const MemoryRecord current_record = { +// .shard = record.shard, +// .timestamp = record.timestamp, +// .value = record.value, +// }; +// const MemoryRecord prev_record = { +// .shard = record.prev_shard, +// .timestamp = record.prev_timestamp, +// .value = record.value, +// }; +// populate_access(self.access, current_record, prev_record); +// } + +// template +// __SP1_HOSTDEV__ void populate_read_write( +// MemoryReadWriteCols& self, +// const OptionMemoryRecordEnum& record +// ) { +// if (record.tag == OptionMemoryRecordEnum::Tag::None) { +// return; +// } +// MemoryRecord current_record; +// MemoryRecord prev_record; +// switch (record.tag) { +// case OptionMemoryRecordEnum::Tag::Read: +// current_record = { +// .shard = record.read._0.shard, +// .timestamp = record.read._0.timestamp, +// .value = record.read._0.value, +// }; +// prev_record = { +// .shard = record.read._0.prev_shard, +// .timestamp = record.read._0.prev_timestamp, +// .value = record.read._0.value, +// }; +// break; +// case OptionMemoryRecordEnum::Tag::Write: +// current_record = { +// .shard = record.write._0.shard, +// .timestamp = record.write._0.timestamp, +// .value = record.write._0.value, +// }; +// prev_record = { +// .shard = record.write._0.prev_shard, +// .timestamp = record.write._0.prev_timestamp, +// .value = record.write._0.prev_value, +// }; +// break; +// default: +// // Unreachable. `None` case guarded above. +// assert(false); +// break; +// } +// write_word_from_u32(self.prev_value, prev_record.value); +// populate_access(self.access, current_record, prev_record); +// } +// } // namespace sp1::memory \ No newline at end of file diff --git a/crates/core/machine/include/memory_global.hpp b/crates/core/machine/include/memory_global.hpp new file mode 100644 index 0000000000..6859686a56 --- /dev/null +++ b/crates/core/machine/include/memory_global.hpp @@ -0,0 +1,37 @@ +#pragma once + +#include "prelude.hpp" +#include "utils.hpp" +#include "bb31_septic_extension_t.hpp" +#include "memory_local.hpp" + +namespace sp1_core_machine_sys::memory_global { + template + __SP1_HOSTDEV__ void event_to_row(const MemoryInitializeFinalizeEvent* event, const bool is_receive, MemoryInitCols* cols) { + MemoryRecord record; + if (is_receive) { + record.shard = event->shard; + record.timestamp = event->timestamp; + record.value = event->value; + } else { + record.shard = 0; + record.timestamp = 0; + record.value = event->value; + } + // We populate only the things in the first loop of generate_trace here. The second loop is handled in the kernel directly. 
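+        // populate_memory (memory_local.hpp) encodes the record as a septic-extension element,
+        // applies the universal hash, and bumps the first limb by 2^16 until the curve formula
+        // evaluates to a square, then fills in the offset bits, the (x, y) point, and the
+        // range-check decomposition of y[6].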
+ sp1_core_machine_sys::memory_local::populate_memory(&cols->global_interaction_cols, &record, event->addr, is_receive); + cols->addr = F::from_canonical_u32(event->addr); + for(uintptr_t i = 0 ; i < 32 ; i++) { + cols->addr_bits.bits[i] = F::from_canonical_u32(((event->addr) >> i) & 1); + } + cols->addr_bits.and_most_sig_byte_decomp_3_to_5 = cols->addr_bits.bits[27] * cols->addr_bits.bits[28]; + cols->addr_bits.and_most_sig_byte_decomp_3_to_6 = cols->addr_bits.and_most_sig_byte_decomp_3_to_5 * cols->addr_bits.bits[29]; + cols->addr_bits.and_most_sig_byte_decomp_3_to_7 = cols->addr_bits.and_most_sig_byte_decomp_3_to_6 * cols->addr_bits.bits[30]; + cols->shard = F::from_canonical_u32(event->shard); + cols->timestamp = F::from_canonical_u32(event->timestamp); + for(uintptr_t i = 0 ; i < 32 ; i++) { + cols->value[i] = F::from_canonical_u32(((event->value) >> i) & 1); + } + cols->is_real = F::from_canonical_u32(event->used); + } +} // namespace sp1::memory_local \ No newline at end of file diff --git a/crates/core/machine/include/memory_local.hpp b/crates/core/machine/include/memory_local.hpp new file mode 100644 index 0000000000..1d799208b2 --- /dev/null +++ b/crates/core/machine/include/memory_local.hpp @@ -0,0 +1,82 @@ +#pragma once + +#include "prelude.hpp" +#include "utils.hpp" +#include "bb31_septic_extension_t.hpp" + +namespace sp1_core_machine_sys::memory_local { + template __SP1_HOSTDEV__ void populate_memory(GlobalInteractionOperation* cols, const MemoryRecord* record, const uint32_t& addr, bool is_receive) { + EF7 x_start; + + { + x_start.value[0] = F::from_canonical_u32(record->shard + (1 << 24)); + x_start.value[1] = F::from_canonical_u32(record->timestamp); + x_start.value[2] = F::from_canonical_u32(addr); + x_start.value[3] = F::from_canonical_u32(record->value & 255); + x_start.value[4] = F::from_canonical_u32((record->value >> 8) & 255); + x_start.value[5] = F::from_canonical_u32((record->value >> 16) & 255); + x_start.value[6] = F::from_canonical_u32((record->value >> 24) & 255); + } + + #pragma unroll(1) + for(uint32_t offset = 0 ; offset < 256 ; offset++) { + EF7 x_trial = x_start.universal_hash(); + EF7 y_sq = x_trial.curve_formula(); + F y_sq_pow_r = y_sq.pow_r(); + F is_square = y_sq_pow_r ^ 1006632960; + if(is_square == F::one()) { + EF7 y = y_sq.sqrt(y_sq_pow_r); + if (y.is_exception()) { + x_start += F::from_canonical_u32(1 << 16); + continue; + } + if (y.is_receive() != is_receive) { + y = EF7::zero() - y; + } + // x_trial, y + for(uint32_t idx = 0 ; idx < 8 ; idx++ ) { + cols->offset_bits[idx] = F::from_canonical_u32((offset >> idx) & 1); + } + for(uintptr_t i = 0 ; i < 7 ; i++) { + cols->x_coordinate._0[i] = x_trial.value[i]; + cols->y_coordinate._0[i] = y.value[i]; + } + uint32_t range_check_value; + if (is_receive) { + range_check_value = y.value[6].as_canonical_u32() - 1; + } else { + range_check_value = y.value[6].as_canonical_u32() - (F::MOD + 1) / 2; + } + F top_4_bits = F::zero(); + for(uint32_t idx = 0 ; idx < 30 ; idx++) { + cols->y6_bit_decomp[idx] = F::from_canonical_u32((range_check_value >> idx) & 1); + if (idx >= 26) { + top_4_bits += cols->y6_bit_decomp[idx]; + } + } + top_4_bits -= F::from_canonical_u32(4); + cols->range_check_witness = top_4_bits.reciprocal(); + return; + } + x_start += F::from_canonical_u32(1 << 16); + } + assert(false); + } + + template + __SP1_HOSTDEV__ void event_to_row(const MemoryLocalEvent* event, SingleMemoryLocal* cols) { + populate_memory(&cols->initial_global_interaction_cols, &event->initial_mem_access, event->addr, 
true); + populate_memory(&cols->final_global_interaction_cols, &event->final_mem_access, event->addr, false); + cols->addr = F::from_canonical_u32(event->addr); + + cols->initial_shard = F::from_canonical_u32(event->initial_mem_access.shard); + cols->initial_clk = F::from_canonical_u32(event->initial_mem_access.timestamp); + write_word_from_u32_v2(cols->initial_value, event->initial_mem_access.value); + + cols->final_shard = F::from_canonical_u32(event->final_mem_access.shard); + cols->final_clk = F::from_canonical_u32(event->final_mem_access.timestamp); + write_word_from_u32_v2(cols->final_value, event->final_mem_access.value); + + cols->is_real = F::one(); + } +} // namespace sp1::memory_local diff --git a/crates/core/machine/include/mul.hpp b/crates/core/machine/include/mul.hpp new file mode 100644 index 0000000000..efa564275f --- /dev/null +++ b/crates/core/machine/include/mul.hpp @@ -0,0 +1,111 @@ +#pragma once + +#include "prelude.hpp" +#include "utils.hpp" + +namespace sp1_core_machine_sys::mul { +template +__SP1_HOSTDEV__ void event_to_row(const AluEvent& event, MulCols& cols) { + // // Ensure that the opcode is MUL, MULHU, MULH, or MULHSU. + // assert!( + // event.opcode == Opcode::MUL + // || event.opcode == Opcode::MULHU + // || event.opcode == Opcode::MULH + // || event.opcode == Opcode::MULHSU + // ); + + const array_t a = u32_to_le_bytes(event.a); + const array_t b = u32_to_le_bytes(event.b); + const array_t c = u32_to_le_bytes(event.c); + + // Handle b and c's signs. + { + uint8_t b_msb = get_msb(b); + cols.b_msb = F::from_canonical_u8(b_msb).val; + uint8_t c_msb = get_msb(c); + cols.c_msb = F::from_canonical_u8(c_msb).val; + + // If b is signed and it is negative, sign extend b. + if ((event.opcode == Opcode::MULH || event.opcode == Opcode::MULHSU) && b_msb == 1) { + cols.b_sign_extend = F::one().val; + } + + // If c is signed and it is negative, sign extend c. + if (event.opcode == Opcode::MULH && c_msb == 1) { + cols.c_sign_extend = F::one().val; + } + + // // Insert the MSB lookup events. + // { + // let words = [b_word, c_word]; + // let mut blu_events: Vec = vec![]; + // for word in words.iter() { + // let most_significant_byte = word[WORD_SIZE - 1]; + // blu_events.push(ByteLookupEvent { + // shard: event.shard, + // opcode: ByteOpcode::MSB, + // a1: get_msb(*word) as u16, + // a2: 0, + // b: most_significant_byte, + // c: 0, + // }); + // } + // record.add_byte_lookup_events(blu_events); + // } + } + + // Required for the following logic to correctly multiply. + static_assert(2 * WORD_SIZE == LONG_WORD_SIZE); + + array_t product {}; + for (uintptr_t i = 0; i < WORD_SIZE; ++i) { + for (uintptr_t j = 0; j < WORD_SIZE; ++j) { + product[i + j] += (uint32_t)b[i] * (uint32_t)c[j]; + } + if (cols.c_sign_extend != F::zero().val) { + for (uintptr_t j = WORD_SIZE; j < LONG_WORD_SIZE - i; ++j) { + product[i + j] += (uint32_t)b[i] * (uint32_t)0xFF; + } + } + } + if (cols.b_sign_extend != F::zero().val) { + for (uintptr_t i = WORD_SIZE; i < LONG_WORD_SIZE; ++i) { + for (uintptr_t j = 0; j < LONG_WORD_SIZE - i; ++j) { + product[i + j] += (uint32_t)0xFF * (uint32_t)c[j]; + } + } + } + + // Calculate the correct product using the `product` array. We store the + // correct carry value for verification. 
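+    // Each limb of `product` is reduced modulo 2^8; the quotient becomes the carry folded into
+    // the next limb. Both the reduced bytes and the carries are committed so the constraints can
+    // re-check the schoolbook multiplication limb by limb.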
+ const uint32_t base = 1 << BYTE_SIZE; + array_t carry {}; + for (uintptr_t i = 0; i < LONG_WORD_SIZE; ++i) { + carry[i] = product[i] / base; + product[i] %= base; + if (i + 1 < LONG_WORD_SIZE) { + product[i + 1] += carry[i]; + } + cols.carry[i] = F::from_canonical_u32(carry[i]).val; + } + + for (uintptr_t i = 0; i < LONG_WORD_SIZE; ++i) { + cols.product[i] = F::from_canonical_u32(product[i]).val; + } + word_from_le_bytes(cols.a, a); + word_from_le_bytes(cols.b, b); + word_from_le_bytes(cols.c, c); + cols.is_real = F::one().val; + cols.is_mul = F::from_bool(event.opcode == Opcode::MUL).val; + cols.is_mulh = F::from_bool(event.opcode == Opcode::MULH).val; + cols.is_mulhu = F::from_bool(event.opcode == Opcode::MULHU).val; + cols.is_mulhsu = F::from_bool(event.opcode == Opcode::MULHSU).val; + cols.shard = F::from_canonical_u32(event.shard).val; + + // // Range check. + // { + // record.add_u16_range_checks(event.shard, &carry.map(|x| x as u16)); + // record.add_u8_range_checks(event.shard, &product.map(|x| x as u8)); + // } +} +} // namespace sp1::mul diff --git a/crates/core/machine/include/prelude.hpp b/crates/core/machine/include/prelude.hpp new file mode 100644 index 0000000000..1f2b0db1e1 --- /dev/null +++ b/crates/core/machine/include/prelude.hpp @@ -0,0 +1,23 @@ +#pragma once + +#include "sp1-core-machine-sys-cbindgen.hpp" + +#ifndef __CUDACC__ + #define __SP1_HOSTDEV__ + #define __SP1_INLINE__ inline + #include + +namespace sp1_core_machine_sys { +template +using array_t = std::array; +} // namespace sp1 +#else + #define __SP1_HOSTDEV__ __host__ __device__ + #define __SP1_INLINE__ + #include + +namespace sp1_core_machine_sys { +template +using array_t = cuda::std::array; +} // namespace sp1 +#endif diff --git a/crates/core/machine/include/sll.hpp b/crates/core/machine/include/sll.hpp new file mode 100644 index 0000000000..5f24a575a0 --- /dev/null +++ b/crates/core/machine/include/sll.hpp @@ -0,0 +1,66 @@ +#pragma once + +#include + +#include "prelude.hpp" +#include "utils.hpp" + +namespace sp1_core_machine_sys::sll { +template +__SP1_HOSTDEV__ void event_to_row(const AluEvent& event, ShiftLeftCols& cols) { + array_t a = u32_to_le_bytes(event.a); + array_t b = u32_to_le_bytes(event.b); + array_t c = u32_to_le_bytes(event.c); + cols.shard = F::from_canonical_u32(event.shard).val; + word_from_le_bytes(cols.a, a); + word_from_le_bytes(cols.b, b); + word_from_le_bytes(cols.c, c); + cols.is_real = F::one().val; + for (uintptr_t i = 0; i < BYTE_SIZE; ++i) { + cols.c_least_sig_byte[i] = F::from_canonical_u32((event.c >> i) & 1).val; + } + + // Variables for bit shifting. + uintptr_t num_bits_to_shift = event.c % BYTE_SIZE; + for (uintptr_t i = 0; i < BYTE_SIZE; ++i) { + cols.shift_by_n_bits[i] = F::from_bool(num_bits_to_shift == i).val; + } + + uint32_t bit_shift_multiplier = 1 << num_bits_to_shift; + cols.bit_shift_multiplier = F::from_canonical_u32(bit_shift_multiplier).val; + + uint32_t carry = 0; + uint32_t base = 1 << BYTE_SIZE; + + array_t bit_shift_result; + array_t bit_shift_result_carry; + for (uintptr_t i = 0; i < WORD_SIZE; ++i) { + uint32_t v = b[i] * bit_shift_multiplier + carry; + carry = v / base; + bit_shift_result[i] = (uint8_t)(v % base); + cols.bit_shift_result[i] = F::from_canonical_u8(bit_shift_result[i]).val; + bit_shift_result_carry[i] = (uint8_t)carry; + cols.bit_shift_result_carry[i] = F::from_canonical_u8(bit_shift_result_carry[i]).val; + } + + // // Variables for byte shifting. 
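+    // Only the low five bits of c matter for a 32-bit shift: bits 0-2 select the bit shift
+    // applied above, and bits 3-4 select how many whole bytes the result is shifted by.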
+ uintptr_t num_bytes_to_shift = (uintptr_t)(event.c & 0b11111) / BYTE_SIZE; + for (uintptr_t i = 0; i < WORD_SIZE; ++i) { + cols.shift_by_n_bytes[i] = F::from_bool(num_bytes_to_shift == i).val; + } + + // // Range checks. + // { + // blu.add_u8_range_checks(event.shard, event.channel, &bit_shift_result); + // blu.add_u8_range_checks(event.shard, event.channel, &bit_shift_result_carry); + // } + + // // Sanity check. + // for i in num_bytes_to_shift..WORD_SIZE { + // debug_assert_eq!( + // cols.bit_shift_result[i - num_bytes_to_shift], + // F::from_canonical_u8(a[i]) + // ); + // } +} +} // namespace sp1::sll \ No newline at end of file diff --git a/crates/core/machine/include/sr.hpp b/crates/core/machine/include/sr.hpp new file mode 100644 index 0000000000..a5806fadb1 --- /dev/null +++ b/crates/core/machine/include/sr.hpp @@ -0,0 +1,106 @@ +#pragma once + +#include + +#include "prelude.hpp" +#include "utils.hpp" + +namespace sp1_core_machine_sys::sr { +template +__SP1_HOSTDEV__ void event_to_row(const AluEvent& event, ShiftRightCols& cols) { + // Initialize cols with basic operands and flags derived from the current event. + { + cols.shard = F::from_canonical_u32(event.shard).val; + write_word_from_u32(cols.a, event.a); + write_word_from_u32(cols.b, event.b); + write_word_from_u32(cols.c, event.c); + cols.b_msb = F::from_canonical_u32((event.b >> 31) & 1).val; + cols.is_srl = F::from_bool(event.opcode == Opcode::SRL).val; + cols.is_sra = F::from_bool(event.opcode == Opcode::SRA).val; + cols.is_real = F::one().val; + + for (uintptr_t i = 0; i < BYTE_SIZE; ++i) { + cols.c_least_sig_byte[i] = F::from_canonical_u32((event.c >> i) & 1).val; + } + + // // Insert the MSB lookup event. + // let most_significant_byte = event.b.to_le_bytes()[WORD_SIZE - 1]; + // blu.add_byte_lookup_events(vec![ByteLookupEvent { + // shard: event.shard, + // opcode: ByteOpcode::MSB, + // a1: ((most_significant_byte >> 7) & 1) as u16, + // a2: 0, + // b: most_significant_byte, + // c: 0, + // }]); + } + + // Note that we take the least significant 5 bits per the RISC-V spec. + const uintptr_t num_bytes_to_shift = (event.c % 32) / BYTE_SIZE; + const uintptr_t num_bits_to_shift = (event.c % 32) % BYTE_SIZE; + + // Byte shifting. + // Zero-initialize the array. + array_t byte_shift_result {}; + { + for (uintptr_t i = 0; i < WORD_SIZE; ++i) { + cols.shift_by_n_bytes[i] = F::from_bool(num_bytes_to_shift == i).val; + } + // Sign extension is necessary only for arithmetic right shift. + array_t sign_extended_b = event.opcode == Opcode::SRA + ? u64_to_le_bytes((int64_t)(int32_t)event.b) + : u64_to_le_bytes((uint64_t)event.b); + + for (uintptr_t i = 0; i < LONG_WORD_SIZE - num_bytes_to_shift; ++i) { + byte_shift_result[i] = sign_extended_b[i + num_bytes_to_shift]; + cols.byte_shift_result[i] = + F::from_canonical_u8(sign_extended_b[i + num_bytes_to_shift]).val; + } + } + + // Bit shifting. 
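+    // Worked example (illustrative): shr_carry(0xB6, 3) from utils.hpp yields
+    // (shift, carry) = (0x16, 0x06); the low three bits 0b110 are the carry, which the
+    // loop below scales by carry_multiplier and folds into the next lower byte's result.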
+ { + for (uintptr_t i = 0; i < BYTE_SIZE; ++i) { + cols.shift_by_n_bits[i] = F::from_bool(num_bits_to_shift == i).val; + } + const uint32_t carry_multiplier = 1 << (8 - num_bits_to_shift); + uint32_t last_carry = 0; + array_t bit_shift_result; + array_t shr_carry_output_carry; + array_t shr_carry_output_shifted_byte; + for (intptr_t i = LONG_WORD_SIZE - 1; i >= 0; --i) { + auto [shift, carry] = shr_carry(byte_shift_result[i], num_bits_to_shift); + + // let byte_event = ByteLookupEvent { + // shard: event.shard, + // opcode: ByteOpcode::ShrCarry, + // a1: shift as u16, + // a2: carry, + // b: byte_shift_result[i], + // c: num_bits_to_shift as u8, + // }; + // blu.add_byte_lookup_event(byte_event); + + shr_carry_output_carry[i] = carry; + cols.shr_carry_output_carry[i] = F::from_canonical_u8(carry).val; + + shr_carry_output_shifted_byte[i] = shift; + cols.shr_carry_output_shifted_byte[i] = F::from_canonical_u8(shift).val; + + uint8_t res = (uint8_t)(((uint32_t)shift + last_carry * carry_multiplier) & 0xFF); + bit_shift_result[i] = res; + cols.bit_shift_result[i] = F::from_canonical_u8(res).val; + last_carry = (uint32_t)carry; + } + // for (uintptr_t i = 0; i < WORD_SIZE; ++i) + // { + // assert(cols.a[i] == cols.bit_shift_result[i]); + // } + // // Range checks. + // blu.add_u8_range_checks(event.shard, &byte_shift_result); + // blu.add_u8_range_checks(event.shard, &bit_shift_result); + // blu.add_u8_range_checks(event.shard, &shr_carry_output_carry); + // blu.add_u8_range_checks(event.shard, &shr_carry_output_shifted_byte); + } +} +} // namespace sp1::sr \ No newline at end of file diff --git a/crates/core/machine/include/sys.hpp b/crates/core/machine/include/sys.hpp new file mode 100644 index 0000000000..2a6f56166b --- /dev/null +++ b/crates/core/machine/include/sys.hpp @@ -0,0 +1,14 @@ +#pragma once + +#include "add_sub.hpp" +#include "bitwise.hpp" +#include "cpu.hpp" +#include "lt.hpp" +#include "memory.hpp" +#include "mul.hpp" +#include "sll.hpp" +#include "sp1-core-machine-sys-cbindgen.hpp" +#include "sr.hpp" +#include "memory_local.hpp" +#include "memory_global.hpp" +#include "syscall.hpp" diff --git a/crates/core/machine/include/syscall.hpp b/crates/core/machine/include/syscall.hpp new file mode 100644 index 0000000000..caa91b32f8 --- /dev/null +++ b/crates/core/machine/include/syscall.hpp @@ -0,0 +1,77 @@ +#pragma once + +#include "prelude.hpp" +#include "utils.hpp" +#include "bb31_septic_extension_t.hpp" + +namespace sp1_core_machine_sys::syscall { + template __SP1_HOSTDEV__ void populate_syscall(GlobalInteractionOperation* cols, const SyscallEvent* event, bool is_receive) { + EF7 x_start; + + { + x_start.value[0] = F::from_canonical_u32(event->shard + 8 * (1 << 24)); + x_start.value[1] = F::from_canonical_u32(event->clk & ((1 << 16) - 1)); + x_start.value[2] = F::from_canonical_u32((event->clk) >> 16); + x_start.value[3] = F::from_canonical_u32(event->syscall_id); + x_start.value[4] = F::from_canonical_u32(event->arg1); + x_start.value[5] = F::from_canonical_u32(event->arg2); + x_start.value[6] = F::zero(); + } + + #pragma unroll(1) + for(uint32_t offset = 0 ; offset < 256 ; offset++) { + EF7 x_trial = x_start.universal_hash(); + EF7 y_sq = x_trial.curve_formula(); + F y_sq_pow_r = y_sq.pow_r(); + F is_square = y_sq_pow_r ^ 1006632960; + if(is_square == F::one()) { + EF7 y = y_sq.sqrt(y_sq_pow_r); + if (y.is_exception()) { + x_start += F::from_canonical_u32(1 << 16); + continue; + } + if (y.is_receive() != is_receive) { + y = EF7::zero() - y; + } + // x_trial, y + for(uint32_t 
idx = 0 ; idx < 8 ; idx++ ) { + cols->offset_bits[idx] = F::from_canonical_u32((offset >> idx) & 1); + } + for(uintptr_t i = 0 ; i < 7 ; i++) { + cols->x_coordinate._0[i] = x_trial.value[i]; + cols->y_coordinate._0[i] = y.value[i]; + } + uint32_t range_check_value; + if (is_receive) { + range_check_value = y.value[6].as_canonical_u32() - 1; + } else { + range_check_value = y.value[6].as_canonical_u32() - (F::MOD + 1) / 2; + } + F top_4_bits = F::zero(); + for(uint32_t idx = 0 ; idx < 30 ; idx++) { + cols->y6_bit_decomp[idx] = F::from_canonical_u32((range_check_value >> idx) & 1); + if (idx >= 26) { + top_4_bits += cols->y6_bit_decomp[idx]; + } + } + top_4_bits -= F::from_canonical_u32(4); + cols->range_check_witness = top_4_bits.reciprocal(); + return; + } + x_start += F::from_canonical_u32(1 << 16); + } + assert(false); + } + + template + __SP1_HOSTDEV__ void event_to_row(const SyscallEvent* event, const bool is_receive, SyscallCols* cols) { + populate_syscall(&cols->global_interaction_cols, event, is_receive); + cols->shard = F::from_canonical_u32(event->shard); + cols->clk_16 = F::from_canonical_u32(event->clk & ((1 << 16) - 1)); + cols->clk_8 = F::from_canonical_u32((event->clk) >> 16); + cols->syscall_id = F::from_canonical_u32(event->syscall_id); + cols->arg1 = F::from_canonical_u32(event->arg1); + cols->arg2 = F::from_canonical_u32(event->arg2); + cols->is_real = F::one(); + } +} // namespace sp1::memory_local diff --git a/crates/core/machine/include/utils.hpp b/crates/core/machine/include/utils.hpp new file mode 100644 index 0000000000..7f798ce314 --- /dev/null +++ b/crates/core/machine/include/utils.hpp @@ -0,0 +1,134 @@ +#pragma once + +#include +#include + +#include "prelude.hpp" + +namespace sp1_core_machine_sys { + +// Compiles to a no-op with -O3 and the like. +__SP1_HOSTDEV__ __SP1_INLINE__ array_t u32_to_le_bytes(uint32_t n) { + return { + (uint8_t)(n >> 8 * 0), + (uint8_t)(n >> 8 * 1), + (uint8_t)(n >> 8 * 2), + (uint8_t)(n >> 8 * 3), + }; +} + +__SP1_HOSTDEV__ __SP1_INLINE__ array_t u64_to_le_bytes(uint64_t n) { + return { + (uint8_t)(n >> 8 * 0), + (uint8_t)(n >> 8 * 1), + (uint8_t)(n >> 8 * 2), + (uint8_t)(n >> 8 * 3), + (uint8_t)(n >> 8 * 4), + (uint8_t)(n >> 8 * 5), + (uint8_t)(n >> 8 * 6), + (uint8_t)(n >> 8 * 7), + }; +} + +/// Shifts a byte to the right and returns both the shifted byte and the bits that carried. +__SP1_HOSTDEV__ __SP1_INLINE__ std::tuple +shr_carry(uint8_t input, uint8_t rotation) { + uint8_t c_mod = rotation & 0x7; + if (c_mod != 0) { + uint8_t res = input >> c_mod; + uint8_t c_mod_comp = 8 - c_mod; + uint8_t carry = (uint8_t)(input << c_mod_comp) >> c_mod_comp; + return {res, carry}; + } else { + return {input, 0}; + } +} + +template +__SP1_HOSTDEV__ __SP1_INLINE__ void +write_word_from_u32(Word& word, const uint32_t value) { + // Coercion to `uint8_t` truncates the number. 
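+    // Worked example (illustrative): value = 0x12345678 is stored little-endian as
+    // word._0 = { 0x78, 0x56, 0x34, 0x12 }, each byte lifted into the field.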
+ word._0[0] = F::from_canonical_u8(value).val; + word._0[1] = F::from_canonical_u8(value >> 8).val; + word._0[2] = F::from_canonical_u8(value >> 16).val; + word._0[3] = F::from_canonical_u8(value >> 24).val; +} + +template +__SP1_HOSTDEV__ __SP1_INLINE__ void +write_word_from_u32_v2(Word& word, const uint32_t value) { + word._0[0] = F::from_canonical_u8(value); + word._0[1] = F::from_canonical_u8(value >> 8); + word._0[2] = F::from_canonical_u8(value >> 16); + word._0[3] = F::from_canonical_u8(value >> 24); +} + +template +__SP1_HOSTDEV__ __SP1_INLINE__ uint32_t +word_to_u32(const Word& word) { + return ((uint8_t)F(word._0[0]).as_canonical_u32()) + + ((uint8_t)F(word._0[1]).as_canonical_u32() << 8) + + ((uint8_t)F(word._0[1]).as_canonical_u32() << 16) + + ((uint8_t)F(word._0[1]).as_canonical_u32() << 24); +} + +template +__SP1_HOSTDEV__ __SP1_INLINE__ void word_from_le_bytes( + Word& word, + const array_t bytes +) { + // Coercion to `uint8_t` truncates the number. + word._0[0] = F::from_canonical_u8(bytes[0]).val; + word._0[1] = F::from_canonical_u8(bytes[1]).val; + word._0[2] = F::from_canonical_u8(bytes[2]).val; + word._0[3] = F::from_canonical_u8(bytes[3]).val; +} + +__SP1_HOSTDEV__ __SP1_INLINE__ uint8_t +get_msb(const array_t a) { + return (a[WORD_SIZE - 1] >> (BYTE_SIZE - 1)) & 1; +} + +namespace opcode_utils { + __SP1_HOSTDEV__ __SP1_INLINE__ bool is_memory(Opcode opcode) { + switch (opcode) { + case Opcode::LB: + case Opcode::LH: + case Opcode::LW: + case Opcode::LBU: + case Opcode::LHU: + case Opcode::SB: + case Opcode::SH: + case Opcode::SW: + return true; + default: + return false; + } + } + + __SP1_HOSTDEV__ __SP1_INLINE__ bool is_branch(Opcode opcode) { + switch (opcode) { + case Opcode::BEQ: + case Opcode::BNE: + case Opcode::BLT: + case Opcode::BGE: + case Opcode::BLTU: + case Opcode::BGEU: + return true; + default: + return false; + } + } + + __SP1_HOSTDEV__ __SP1_INLINE__ bool is_jump(Opcode opcode) { + switch (opcode) { + case Opcode::JAL: + case Opcode::JALR: + return true; + default: + return false; + } + } + +} // namespace opcode_utils +} // namespace sp1_core_machine_sys diff --git a/crates/core/machine/src/alu/add_sub/mod.rs b/crates/core/machine/src/alu/add_sub/mod.rs index d276820755..357aca4d6e 100644 --- a/crates/core/machine/src/alu/add_sub/mod.rs +++ b/crates/core/machine/src/alu/add_sub/mod.rs @@ -5,8 +5,8 @@ use core::{ use hashbrown::HashMap; use itertools::Itertools; -use p3_air::{Air, AirBuilder, BaseAir}; -use p3_field::{AbstractField, PrimeField}; +use p3_air::{Air, BaseAir}; +use p3_field::PrimeField; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use p3_maybe_rayon::prelude::{ParallelBridge, ParallelIterator}; use sp1_core_executor::{ @@ -43,9 +43,6 @@ pub struct AddSubCols { /// The shard number, used for byte lookup table. pub shard: T, - /// The nonce of the operation. - pub nonce: T, - /// Instance of `AddOperation` to handle addition logic in `AddSubChip`'s ALU operations. /// It's result will be `a` for the add operation and `b` for the sub operation. 
pub add_operation: AddOperation, @@ -98,7 +95,6 @@ impl MachineAir for AddSubChip { let event = &merged_events[idx]; self.event_to_row(event, cols, &mut byte_lookup_events); } - cols.nonce = F::from_canonical_usize(idx); }); }, ); @@ -137,6 +133,10 @@ impl MachineAir for AddSubChip { !shard.add_events.is_empty() } } + + fn local_only(&self) -> bool { + true + } } impl AddSubChip { @@ -175,12 +175,6 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &AddSubCols = (*local).borrow(); - let next = main.row_slice(1); - let next: &AddSubCols = (*next).borrow(); - - // Constrain the incrementing nonce. - builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); // Evaluate the addition operation. AddOperation::::eval( @@ -199,7 +193,6 @@ where local.operand_1, local.operand_2, local.shard, - local.nonce, local.is_add, ); @@ -210,7 +203,6 @@ where local.add_operation.value, local.operand_2, local.shard, - local.nonce, local.is_sub, ); @@ -224,14 +216,45 @@ where #[cfg(test)] mod tests { use p3_baby_bear::BabyBear; + use p3_field::AbstractField; use p3_matrix::dense::RowMajorMatrix; + use p3_maybe_rayon::prelude::ParallelSlice; use rand::{thread_rng, Rng}; use sp1_core_executor::{events::AluEvent, ExecutionRecord, Opcode}; use sp1_stark::{air::MachineAir, baby_bear_poseidon2::BabyBearPoseidon2, StarkGenericConfig}; + use std::borrow::BorrowMut; + use std::sync::LazyLock; - use super::AddSubChip; + use super::*; + use crate::utils::pad_rows_fixed; use crate::utils::{uni_stark_prove as prove, uni_stark_verify as verify}; + /// Lazily initialized record for use across multiple tests. + /// Consists of random `ADD` and `SUB` instructions. + static SHARD: LazyLock = LazyLock::new(|| { + let add_events = (0..1) + .flat_map(|i| { + [{ + let operand_1 = 1u32; + let operand_2 = 2u32; + let result = operand_1.wrapping_add(operand_2); + AluEvent::new(i % 2, 0, Opcode::ADD, result, operand_1, operand_2) + }] + }) + .collect::>(); + let _sub_events = (0..255) + .flat_map(|i| { + [{ + let operand_1 = thread_rng().gen_range(0..u32::MAX); + let operand_2 = thread_rng().gen_range(0..u32::MAX); + let result = operand_1.wrapping_add(operand_2); + AluEvent::new(i % 2, 0, Opcode::SUB, result, operand_1, operand_2) + }] + }) + .collect::>(); + ExecutionRecord { add_events, ..Default::default() } + }); + #[test] fn generate_trace() { let mut shard = ExecutionRecord::default(); @@ -248,7 +271,7 @@ mod tests { let mut challenger = config.challenger(); let mut shard = ExecutionRecord::default(); - for i in 0..255 { + for i in 0..1 { let operand_1 = thread_rng().gen_range(0..u32::MAX); let operand_2 = thread_rng().gen_range(0..u32::MAX); let result = operand_1.wrapping_add(operand_2); @@ -283,4 +306,54 @@ mod tests { let mut challenger = config.challenger(); verify(&config, &chip, &mut challenger, &proof).unwrap(); } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_trace_ffi_eq_rust() { + let shard = LazyLock::force(&SHARD); + + let chip = AddSubChip::default(); + let trace: RowMajorMatrix = + chip.generate_trace(shard, &mut ExecutionRecord::default()); + let trace_ffi = generate_trace_ffi(shard); + + assert_eq!(trace_ffi, trace); + } + + #[cfg(feature = "sys")] + fn generate_trace_ffi(input: &ExecutionRecord) -> RowMajorMatrix { + type F = BabyBear; + + let chunk_size = + std::cmp::max((input.add_events.len() + input.sub_events.len()) / num_cpus::get(), 1); + + let events = 
input.add_events.iter().chain(input.sub_events.iter()).collect::>(); + let row_batches = events + .par_chunks(chunk_size) + .map(|events| { + let rows = events + .iter() + .map(|event| { + let mut row = [F::zero(); NUM_ADD_SUB_COLS]; + let cols: &mut AddSubCols = row.as_mut_slice().borrow_mut(); + unsafe { + crate::sys::add_sub_event_to_row_babybear(event, cols); + } + row + }) + .collect::>(); + rows + }) + .collect::>(); + + let mut rows: Vec<[F; NUM_ADD_SUB_COLS]> = vec![]; + for row_batch in row_batches { + rows.extend(row_batch); + } + + pad_rows_fixed(&mut rows, || [F::zero(); NUM_ADD_SUB_COLS], None); + + // Convert the trace to a row major matrix. + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_ADD_SUB_COLS) + } } diff --git a/crates/core/machine/src/alu/bitwise/mod.rs b/crates/core/machine/src/alu/bitwise/mod.rs index 88156e286c..1cb3605dd9 100644 --- a/crates/core/machine/src/alu/bitwise/mod.rs +++ b/crates/core/machine/src/alu/bitwise/mod.rs @@ -5,8 +5,8 @@ use core::{ use hashbrown::HashMap; use itertools::Itertools; -use p3_air::{Air, AirBuilder, BaseAir}; -use p3_field::{AbstractField, PrimeField}; +use p3_air::{Air, BaseAir}; +use p3_field::PrimeField; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use p3_maybe_rayon::prelude::{IntoParallelRefIterator, ParallelIterator, ParallelSlice}; use sp1_core_executor::{ @@ -35,9 +35,6 @@ pub struct BitwiseCols { /// The shard number, used for byte lookup table. pub shard: T, - /// The nonce of the operation. - pub nonce: T, - /// The output operand. pub a: Word, @@ -91,16 +88,7 @@ impl MachineAir for BitwiseChip { ); // Convert the trace to a row major matrix. - let mut trace = - RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_BITWISE_COLS); - - for i in 0..trace.height() { - let cols: &mut BitwiseCols = - trace.values[i * NUM_BITWISE_COLS..(i + 1) * NUM_BITWISE_COLS].borrow_mut(); - cols.nonce = F::from_canonical_usize(i); - } - - trace + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_BITWISE_COLS) } fn generate_dependencies(&self, input: &Self::Record, output: &mut Self::Record) { @@ -130,6 +118,10 @@ impl MachineAir for BitwiseChip { !shard.bitwise_events.is_empty() } } + + fn local_only(&self) -> bool { + true + } } impl BitwiseChip { @@ -181,12 +173,6 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &BitwiseCols = (*local).borrow(); - let next = main.row_slice(1); - let next: &BitwiseCols = (*next).borrow(); - - // Constrain the incrementing nonce. - builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); // Get the opcode for the operation. let opcode = local.is_xor * ByteOpcode::XOR.as_field::() @@ -211,7 +197,6 @@ where local.b, local.c, local.shard, - local.nonce, local.is_xor + local.is_or + local.is_and, ); diff --git a/crates/core/machine/src/alu/divrem/mod.rs b/crates/core/machine/src/alu/divrem/mod.rs index 1d4d539fc4..28a7e07798 100644 --- a/crates/core/machine/src/alu/divrem/mod.rs +++ b/crates/core/machine/src/alu/divrem/mod.rs @@ -103,9 +103,6 @@ pub struct DivRemCols { /// The shard number, used for byte lookup table. pub shard: T, - /// The nonce of the operation. - pub nonce: T, - /// The output operand. pub a: Word, @@ -185,22 +182,11 @@ pub struct DivRemCols { /// Flag to indicate whether `c` is negative. pub c_neg: T, - /// The lower nonce of the operation. - pub lower_nonce: T, - - /// The upper nonce of the operation. 
- pub upper_nonce: T, - - /// The absolute nonce of the operation. - pub abs_nonce: T, - /// Selector to determine whether an ALU Event is sent for absolute value computation of `c`. pub abs_c_alu_event: T, - pub abs_c_alu_event_nonce: T, /// Selector to determine whether an ALU Event is sent for absolute value computation of `rem`. pub abs_rem_alu_event: T, - pub abs_rem_alu_event_nonce: T, /// Selector to know whether this row is enabled. pub is_real: T, @@ -278,21 +264,7 @@ impl MachineAir for DivRemChip { // Set the `alu_event` flags. cols.abs_c_alu_event = cols.c_neg * cols.is_real; - cols.abs_c_alu_event_nonce = F::from_canonical_u32( - input - .nonce_lookup - .get(event.sub_lookups[4].0 as usize) - .copied() - .unwrap_or_default(), - ); cols.abs_rem_alu_event = cols.rem_neg * cols.is_real; - cols.abs_rem_alu_event_nonce = F::from_canonical_u32( - input - .nonce_lookup - .get(event.sub_lookups[5].0 as usize) - .copied() - .unwrap_or_default(), - ); // Insert the MSB lookup events. { @@ -349,41 +321,6 @@ impl MachineAir for DivRemChip { cols.carry[i] = F::from_canonical_u32(carry[i]); } - // Insert the necessary multiplication & LT events. - { - cols.lower_nonce = F::from_canonical_u32( - input - .nonce_lookup - .get(event.sub_lookups[0].0 as usize) - .copied() - .unwrap_or_default(), - ); - cols.upper_nonce = F::from_canonical_u32( - input - .nonce_lookup - .get(event.sub_lookups[1].0 as usize) - .copied() - .unwrap_or_default(), - ); - if is_signed_operation(event.opcode) { - cols.abs_nonce = F::from_canonical_u32( - input - .nonce_lookup - .get(event.sub_lookups[2].0 as usize) - .copied() - .unwrap_or_default(), - ); - } else { - cols.abs_nonce = F::from_canonical_u32( - input - .nonce_lookup - .get(event.sub_lookups[3].0 as usize) - .copied() - .unwrap_or_default(), - ); - }; - } - // Range check. { output.add_u8_range_checks(event.shard, "ient.to_le_bytes()); @@ -426,13 +363,6 @@ impl MachineAir for DivRemChip { trace.values[i] = padded_row_template[i % NUM_DIVREM_COLS]; } - // Write the nonces to the trace. - for i in 0..trace.height() { - let cols: &mut DivRemCols = - trace.values[i * NUM_DIVREM_COLS..(i + 1) * NUM_DIVREM_COLS].borrow_mut(); - cols.nonce = F::from_canonical_usize(i); - } - trace } @@ -443,6 +373,10 @@ impl MachineAir for DivRemChip { !shard.divrem_events.is_empty() } } + + fn local_only(&self) -> bool { + true + } } impl BaseAir for DivRemChip { @@ -459,16 +393,10 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &DivRemCols = (*local).borrow(); - let next = main.row_slice(1); - let next: &DivRemCols = (*next).borrow(); let base = AB::F::from_canonical_u32(1 << 8); let one: AB::Expr = AB::F::one().into(); let zero: AB::Expr = AB::F::zero().into(); - // Constrain the incrementing nonce. - builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); - // Calculate whether b, remainder, and c are negative. { // Negative if and only if op code is signed & MSB = 1. 
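            // Illustrative example: for REM with b = -7 (0xFFFF_FFF9) the opcode is signed
            // and the MSB of b is 1, so b_neg must be 1, while for DIVU b_neg stays 0
            // regardless of the MSB.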
@@ -502,7 +430,6 @@ where local.quotient, local.c, local.shard, - local.lower_nonce, local.is_real, ); @@ -527,7 +454,6 @@ where local.quotient, local.c, local.shard, - local.upper_nonce, local.is_real, ); } @@ -685,7 +611,6 @@ where local.c, local.abs_c, local.shard, - local.abs_c_alu_event_nonce, local.abs_c_alu_event, ); builder.send_alu( @@ -694,7 +619,6 @@ where local.remainder, local.abs_remainder, local.shard, - local.abs_rem_alu_event_nonce, local.abs_rem_alu_event, ); @@ -740,7 +664,6 @@ where local.abs_remainder, local.max_abs_c_or_1, local.shard, - local.abs_nonce, local.remainder_check_multiplicity, ); } @@ -816,15 +739,7 @@ where + local.is_rem * rem }; - builder.receive_alu( - opcode, - local.a, - local.b, - local.c, - local.shard, - local.nonce, - local.is_real, - ); + builder.receive_alu(opcode, local.a, local.b, local.c, local.shard, local.is_real); } } } diff --git a/crates/core/machine/src/alu/lt/mod.rs b/crates/core/machine/src/alu/lt/mod.rs index 876fdaaf8f..48280ae993 100644 --- a/crates/core/machine/src/alu/lt/mod.rs +++ b/crates/core/machine/src/alu/lt/mod.rs @@ -35,9 +35,6 @@ pub struct LtCols { /// The shard number, used for byte lookup table. pub shard: T, - /// The nonce of the operation. - pub nonce: T, - /// If the opcode is SLT. pub is_slt: T, @@ -124,7 +121,6 @@ impl MachineAir for LtChip { let event = &input.lt_events[idx]; self.event_to_row(event, cols, &mut byte_lookup_events); } - cols.nonce = F::from_canonical_usize(idx); }); }, ); @@ -161,6 +157,10 @@ impl MachineAir for LtChip { !shard.lt_events.is_empty() } } + + fn local_only(&self) -> bool { + true + } } impl LtChip { @@ -269,12 +269,6 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &LtCols = (*local).borrow(); - let next = main.row_slice(1); - let next: &LtCols = (*next).borrow(); - - // Constrain the incrementing nonce. - builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); let is_real = local.is_slt + local.is_sltu; @@ -448,7 +442,6 @@ where local.b, local.c, local.shard, - local.nonce, is_real, ); } diff --git a/crates/core/machine/src/alu/mul/mod.rs b/crates/core/machine/src/alu/mul/mod.rs index 6a1ce272fe..38cd34adf8 100644 --- a/crates/core/machine/src/alu/mul/mod.rs +++ b/crates/core/machine/src/alu/mul/mod.rs @@ -45,7 +45,7 @@ use sp1_core_executor::{ ByteOpcode, ExecutionRecord, Opcode, Program, }; use sp1_derive::AlignedBorrow; -use sp1_primitives::consts::WORD_SIZE; +use sp1_primitives::consts::{BYTE_SIZE, LONG_WORD_SIZE, WORD_SIZE}; use sp1_stark::{air::MachineAir, Word}; use crate::{ @@ -57,13 +57,6 @@ use crate::{ /// The number of main trace columns for `MulChip`. pub const NUM_MUL_COLS: usize = size_of::>(); -/// The number of digits in the product is at most the sum of the number of digits in the -/// multiplicands. -const PRODUCT_SIZE: usize = 2 * WORD_SIZE; - -/// The number of bits in a byte. -const BYTE_SIZE: usize = 8; - /// The mask for a byte. const BYTE_MASK: u8 = 0xff; @@ -78,9 +71,6 @@ pub struct MulCols { /// The shard number, used for byte lookup table. pub shard: T, - /// The nonce of the operation. - pub nonce: T, - /// The output operand. pub a: Word, @@ -91,10 +81,10 @@ pub struct MulCols { pub c: Word, /// Trace. - pub carry: [T; PRODUCT_SIZE], + pub carry: [T; LONG_WORD_SIZE], /// An array storing the product of `b * c` after the carry propagation. - pub product: [T; PRODUCT_SIZE], + pub product: [T; LONG_WORD_SIZE], /// The most significant bit of `b`. 
pub b_msb: T, @@ -156,13 +146,11 @@ impl MachineAir for MulChip { let event = &input.mul_events[idx]; self.event_to_row(event, cols, &mut byte_lookup_events); } - cols.nonce = F::from_canonical_usize(idx); }); }, ); // Convert the trace to a row major matrix. - RowMajorMatrix::new(values, NUM_MUL_COLS) } @@ -193,6 +181,10 @@ impl MachineAir for MulChip { !shard.mul_events.is_empty() } } + + fn local_only(&self) -> bool { + true + } } impl MulChip { @@ -220,13 +212,13 @@ impl MulChip { // If b is signed and it is negative, sign extend b. if (event.opcode == Opcode::MULH || event.opcode == Opcode::MULHSU) && b_msb == 1 { cols.b_sign_extend = F::one(); - b.resize(PRODUCT_SIZE, BYTE_MASK); + b.resize(LONG_WORD_SIZE, BYTE_MASK); } // If c is signed and it is negative, sign extend c. if event.opcode == Opcode::MULH && c_msb == 1 { cols.c_sign_extend = F::one(); - c.resize(PRODUCT_SIZE, BYTE_MASK); + c.resize(LONG_WORD_SIZE, BYTE_MASK); } // Insert the MSB lookup events. @@ -248,10 +240,10 @@ impl MulChip { } } - let mut product = [0u32; PRODUCT_SIZE]; + let mut product = [0u32; LONG_WORD_SIZE]; for i in 0..b.len() { for j in 0..c.len() { - if i + j < PRODUCT_SIZE { + if i + j < LONG_WORD_SIZE { product[i + j] += (b[i] as u32) * (c[j] as u32); } } @@ -260,11 +252,11 @@ impl MulChip { // Calculate the correct product using the `product` array. We store the // correct carry value for verification. let base = (1 << BYTE_SIZE) as u32; - let mut carry = [0u32; PRODUCT_SIZE]; - for i in 0..PRODUCT_SIZE { + let mut carry = [0u32; LONG_WORD_SIZE]; + for i in 0..LONG_WORD_SIZE { carry[i] = product[i] / base; product[i] %= base; - if i + 1 < PRODUCT_SIZE { + if i + 1 < LONG_WORD_SIZE { product[i + 1] += carry[i]; } cols.carry[i] = F::from_canonical_u32(carry[i]); @@ -303,18 +295,12 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &MulCols = (*local).borrow(); - let next = main.row_slice(1); - let next: &MulCols = (*next).borrow(); let base = AB::F::from_canonical_u32(1 << 8); let zero: AB::Expr = AB::F::zero().into(); let one: AB::Expr = AB::F::one().into(); let byte_mask = AB::F::from_canonical_u8(BYTE_MASK); - // Constrain the incrementing nonce. - builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); - // Calculate the MSBs. let (b_msb, c_msb) = { let msb_pairs = @@ -342,9 +328,9 @@ where // Sign extend local.b and local.c whenever appropriate. let (b, c) = { - let mut b: Vec = vec![AB::F::zero().into(); PRODUCT_SIZE]; - let mut c: Vec = vec![AB::F::zero().into(); PRODUCT_SIZE]; - for i in 0..PRODUCT_SIZE { + let mut b: Vec = vec![AB::F::zero().into(); LONG_WORD_SIZE]; + let mut c: Vec = vec![AB::F::zero().into(); LONG_WORD_SIZE]; + for i in 0..LONG_WORD_SIZE { if i < WORD_SIZE { b[i] = local.b[i].into(); c[i] = local.c[i].into(); @@ -357,10 +343,10 @@ where }; // Compute the uncarried product b(x) * c(x) = m(x). - let mut m: Vec = vec![AB::F::zero().into(); PRODUCT_SIZE]; - for i in 0..PRODUCT_SIZE { - for j in 0..PRODUCT_SIZE { - if i + j < PRODUCT_SIZE { + let mut m: Vec = vec![AB::F::zero().into(); LONG_WORD_SIZE]; + for i in 0..LONG_WORD_SIZE { + for j in 0..LONG_WORD_SIZE { + if i + j < LONG_WORD_SIZE { m[i + j] = m[i + j].clone() + b[i].clone() * c[j].clone(); } } @@ -368,7 +354,7 @@ where // Propagate carry. 
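        // Restating the constraint below (illustrative): for every limb i, the builder
        // enforces m[i] + carry[i - 1] == product[i] + carry[i] * 2^8, with carry[-1]
        // treated as zero on the first limb.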
let product = { - for i in 0..PRODUCT_SIZE { + for i in 0..LONG_WORD_SIZE { if i == 0 { builder.assert_eq(local.product[i], m[i].clone() - local.carry[i] * base); } else { @@ -441,15 +427,7 @@ where } // Receive the arguments. - builder.receive_alu( - opcode, - local.a, - local.b, - local.c, - local.shard, - local.nonce, - local.is_real, - ); + builder.receive_alu(opcode, local.a, local.b, local.c, local.shard, local.is_real); } } diff --git a/crates/core/machine/src/alu/sll/mod.rs b/crates/core/machine/src/alu/sll/mod.rs index af00dd47a5..fcd84a98c2 100644 --- a/crates/core/machine/src/alu/sll/mod.rs +++ b/crates/core/machine/src/alu/sll/mod.rs @@ -68,9 +68,6 @@ pub struct ShiftLeftCols { /// The shard number, used for byte lookup table. pub shard: T, - /// The nonce of the operation. - pub nonce: T, - /// The output operand. pub a: Word, @@ -154,12 +151,6 @@ impl MachineAir for ShiftLeft { trace.values[i] = padded_row_template[i % NUM_SHIFT_LEFT_COLS]; } - for i in 0..trace.height() { - let cols: &mut ShiftLeftCols = - trace.values[i * NUM_SHIFT_LEFT_COLS..(i + 1) * NUM_SHIFT_LEFT_COLS].borrow_mut(); - cols.nonce = F::from_canonical_usize(i); - } - trace } @@ -190,6 +181,10 @@ impl MachineAir for ShiftLeft { !shard.shift_left_events.is_empty() } } + + fn local_only(&self) -> bool { + true + } } impl ShiftLeft { @@ -270,17 +265,11 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &ShiftLeftCols = (*local).borrow(); - let next = main.row_slice(1); - let next: &ShiftLeftCols = (*next).borrow(); let zero: AB::Expr = AB::F::zero().into(); let one: AB::Expr = AB::F::one().into(); let base: AB::Expr = AB::F::from_canonical_u32(1 << BYTE_SIZE).into(); - // Constrain the incrementing nonce. - builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); - // We first "bit shift" and next we "byte shift". Then we compare the results with a. // Finally, we perform some misc checks. @@ -392,7 +381,6 @@ where local.b, local.c, local.shard, - local.nonce, local.is_real, ); } diff --git a/crates/core/machine/src/alu/sr/mod.rs b/crates/core/machine/src/alu/sr/mod.rs index b26c949945..916755a310 100644 --- a/crates/core/machine/src/alu/sr/mod.rs +++ b/crates/core/machine/src/alu/sr/mod.rs @@ -88,9 +88,6 @@ pub struct ShiftRightCols { /// The shard number, used for byte lookup table. pub shard: T, - /// The nonce of the operation. - pub nonce: T, - /// The output operand. pub a: Word, @@ -169,7 +166,6 @@ impl MachineAir for ShiftRightChip { cols.shift_by_n_bits[0] = F::one(); cols.shift_by_n_bytes[0] = F::one(); } - cols.nonce = F::from_canonical_usize(idx); }); }, ); @@ -205,6 +201,10 @@ impl MachineAir for ShiftRightChip { !shard.shift_right_events.is_empty() } } + + fn local_only(&self) -> bool { + true + } } impl ShiftRightChip { @@ -329,15 +329,9 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &ShiftRightCols = (*local).borrow(); - let next = main.row_slice(1); - let next: &ShiftRightCols = (*next).borrow(); let zero: AB::Expr = AB::F::zero().into(); let one: AB::Expr = AB::F::one().into(); - // Constrain the incrementing nonce. - builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); - // Check that the MSB of most_significant_byte matches local.b_msb using lookup. 
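        // Illustrative example: if b = 0x8000_0000 its most significant byte is 0x80, so
        // the MSB byte lookup pins local.b_msb to 1; for b = 0x7FFF_FFFF it pins it to 0.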
{ let byte = local.b[WORD_SIZE - 1]; @@ -513,7 +507,6 @@ where local.b, local.c, local.shard, - local.nonce, local.is_real, ); } diff --git a/crates/core/machine/src/bytes/trace.rs b/crates/core/machine/src/bytes/trace.rs index 6cfb81bbfc..d62374ceb5 100644 --- a/crates/core/machine/src/bytes/trace.rs +++ b/crates/core/machine/src/bytes/trace.rs @@ -1,6 +1,6 @@ use std::borrow::BorrowMut; -use p3_field::Field; +use p3_field::PrimeField; use p3_matrix::dense::RowMajorMatrix; use sp1_core_executor::{ByteOpcode, ExecutionRecord, Program}; use sp1_stark::air::MachineAir; @@ -14,7 +14,7 @@ use super::{ pub const NUM_ROWS: usize = 1 << 16; -impl MachineAir for ByteChip { +impl MachineAir for ByteChip { type Record = ExecutionRecord; type Program = Program; diff --git a/crates/core/machine/src/cpu/air/branch.rs b/crates/core/machine/src/cpu/air/branch.rs index d8c615682f..176f316da2 100644 --- a/crates/core/machine/src/cpu/air/branch.rs +++ b/crates/core/machine/src/cpu/air/branch.rs @@ -88,7 +88,6 @@ impl CpuChip { branch_cols.pc, local.op_c_val(), local.shard, - branch_cols.next_pc_nonce, local.branching, ); @@ -185,7 +184,6 @@ impl CpuChip { local.op_a_val(), local.op_b_val(), local.shard, - branch_cols.a_lt_b_nonce, is_branch_instruction.clone(), ); @@ -197,7 +195,6 @@ impl CpuChip { local.op_b_val(), local.op_a_val(), local.shard, - branch_cols.a_gt_b_nonce, is_branch_instruction.clone(), ); } diff --git a/crates/core/machine/src/cpu/air/ecall.rs b/crates/core/machine/src/cpu/air/ecall.rs index 59785123fb..3d8b70b7c1 100644 --- a/crates/core/machine/src/cpu/air/ecall.rs +++ b/crates/core/machine/src/cpu/air/ecall.rs @@ -54,7 +54,6 @@ impl CpuChip { builder.send_syscall( local.shard, local.clk, - ecall_cols.syscall_nonce, syscall_id, local.op_b_val().reduce::(), local.op_c_val().reduce::(), diff --git a/crates/core/machine/src/cpu/air/memory.rs b/crates/core/machine/src/cpu/air/memory.rs index 79054d1abf..70eede3b80 100644 --- a/crates/core/machine/src/cpu/air/memory.rs +++ b/crates/core/machine/src/cpu/air/memory.rs @@ -72,7 +72,6 @@ impl CpuChip { local.op_b_val(), local.op_c_val(), local.shard, - memory_columns.addr_word_nonce, is_memory_instruction.clone(), ); @@ -174,7 +173,6 @@ impl CpuChip { local.unsigned_mem_val, signed_value, local.shard, - local.unsigned_mem_val_nonce, local.mem_value_is_neg_not_x0, ); diff --git a/crates/core/machine/src/cpu/air/mod.rs b/crates/core/machine/src/cpu/air/mod.rs index 35b6dc18af..38d40e8437 100644 --- a/crates/core/machine/src/cpu/air/mod.rs +++ b/crates/core/machine/src/cpu/air/mod.rs @@ -66,7 +66,6 @@ where local.op_b_val(), local.op_c_val(), local.shard, - local.nonce, is_alu_instruction, ); @@ -196,7 +195,6 @@ impl CpuChip { jump_columns.pc, local.op_b_val(), local.shard, - jump_columns.jal_nonce, local.selectors.is_jal, ); @@ -207,7 +205,6 @@ impl CpuChip { local.op_b_val(), local.op_c_val(), local.shard, - jump_columns.jalr_nonce, local.selectors.is_jalr, ); } @@ -235,7 +232,6 @@ impl CpuChip { auipc_columns.pc, local.op_b_val(), local.shard, - auipc_columns.auipc_nonce, local.selectors.is_auipc, ); } diff --git a/crates/core/machine/src/cpu/columns/auipc.rs b/crates/core/machine/src/cpu/columns/auipc.rs index 5505f213b3..0e71ff59d4 100644 --- a/crates/core/machine/src/cpu/columns/auipc.rs +++ b/crates/core/machine/src/cpu/columns/auipc.rs @@ -12,5 +12,4 @@ pub struct AuipcCols { /// The current program counter. 
pub pc: Word, pub pc_range_checker: BabyBearWordRangeChecker, - pub auipc_nonce: T, } diff --git a/crates/core/machine/src/cpu/columns/branch.rs b/crates/core/machine/src/cpu/columns/branch.rs index 6f12f5a675..7b67394894 100644 --- a/crates/core/machine/src/cpu/columns/branch.rs +++ b/crates/core/machine/src/cpu/columns/branch.rs @@ -26,13 +26,4 @@ pub struct BranchCols { /// Whether a is less than b. pub a_lt_b: T, - - /// The nonce of the operation to compute `a_lt_b`. - pub a_lt_b_nonce: T, - - /// The nonce of the operation to compute `a_gt_b`. - pub a_gt_b_nonce: T, - - /// The nonce of the operation to compute `next_pc`. - pub next_pc_nonce: T, } diff --git a/crates/core/machine/src/cpu/columns/ecall.rs b/crates/core/machine/src/cpu/columns/ecall.rs index ea737c169e..0158940e24 100644 --- a/crates/core/machine/src/cpu/columns/ecall.rs +++ b/crates/core/machine/src/cpu/columns/ecall.rs @@ -28,9 +28,6 @@ pub struct EcallCols { /// should be set to 1 and everything else set to 0. pub index_bitmap: [T; PV_DIGEST_NUM_WORDS], - /// The nonce of the syscall operation. - pub syscall_nonce: T, - /// Columns to babybear range check the halt/commit_deferred_proofs operand. pub operand_range_check_cols: BabyBearWordRangeChecker, diff --git a/crates/core/machine/src/cpu/columns/jump.rs b/crates/core/machine/src/cpu/columns/jump.rs index 579f2b5160..f5c2b51917 100644 --- a/crates/core/machine/src/cpu/columns/jump.rs +++ b/crates/core/machine/src/cpu/columns/jump.rs @@ -19,7 +19,4 @@ pub struct JumpCols { // A range checker for `op_a` which may contain `pc + 4`. pub op_a_range_checker: BabyBearWordRangeChecker, - - pub jal_nonce: T, - pub jalr_nonce: T, } diff --git a/crates/core/machine/src/cpu/columns/memory.rs b/crates/core/machine/src/cpu/columns/memory.rs index 3eb52337ab..b4123714f8 100644 --- a/crates/core/machine/src/cpu/columns/memory.rs +++ b/crates/core/machine/src/cpu/columns/memory.rs @@ -33,7 +33,4 @@ pub struct MemoryColumns { // LE bit decomposition for the most significant byte of memory value. This is used to // determine the sign for that value (used for LB and LH). pub most_sig_byte_decomp: [T; 8], - - pub addr_word_nonce: T, - pub unsigned_mem_val_nonce: T, } diff --git a/crates/core/machine/src/cpu/columns/mod.rs b/crates/core/machine/src/cpu/columns/mod.rs index 7a32b03db5..be820ab67d 100644 --- a/crates/core/machine/src/cpu/columns/mod.rs +++ b/crates/core/machine/src/cpu/columns/mod.rs @@ -34,8 +34,6 @@ pub struct CpuCols { /// The current shard. pub shard: T, - pub nonce: T, - /// The clock cycle value. This should be within 24 bits. pub clk: T, /// The least significant 16 bit limb of clk. @@ -101,8 +99,6 @@ pub struct CpuCols { /// memory opcodes (i.e. LB, LH, LW, LBU, and LHU). pub unsigned_mem_val: Word, - pub unsigned_mem_val_nonce: T, - /// The result of selectors.is_ecall * the send_to_table column for the ECALL opcode. 
pub ecall_mul_send_to_table: T, diff --git a/crates/core/machine/src/cpu/trace.rs b/crates/core/machine/src/cpu/trace.rs index 5a43202608..7b246c0307 100644 --- a/crates/core/machine/src/cpu/trace.rs +++ b/crates/core/machine/src/cpu/trace.rs @@ -58,14 +58,7 @@ impl MachineAir for CpuChip { let mut byte_lookup_events = Vec::new(); let event = &input.cpu_events[idx]; let instruction = &input.program.fetch(event.pc); - self.event_to_row( - event, - &input.nonce_lookup, - cols, - &mut byte_lookup_events, - shard, - instruction, - ); + self.event_to_row(event, cols, &mut byte_lookup_events, shard, instruction); } }); }, @@ -91,14 +84,7 @@ impl MachineAir for CpuChip { let mut row = [F::zero(); NUM_CPU_COLS]; let cols: &mut CpuCols = row.as_mut_slice().borrow_mut(); let instruction = &input.program.fetch(op.pc); - self.event_to_row::( - op, - &input.nonce_lookup, - cols, - &mut blu, - shard, - instruction, - ); + self.event_to_row::(op, cols, &mut blu, shard, instruction); }); blu }) @@ -121,7 +107,6 @@ impl CpuChip { fn event_to_row( &self, event: &CpuEvent, - nonce_lookup: &[u32], cols: &mut CpuCols, blu_events: &mut impl ByteRecord, shard: u32, @@ -130,11 +115,6 @@ impl CpuChip { // Populate shard and clk columns. self.populate_shard_clk(cols, event, blu_events, shard); - // Populate the nonce. - cols.nonce = F::from_canonical_u32( - nonce_lookup.get(event.alu_lookup_id.0 as usize).copied().unwrap_or_default(), - ); - // Populate basic fields. cols.pc = F::from_canonical_u32(event.pc); cols.next_pc = F::from_canonical_u32(event.next_pc); @@ -188,11 +168,11 @@ impl CpuChip { } // Populate memory, branch, jump, and auipc specific fields. - self.populate_memory(cols, event, blu_events, nonce_lookup, shard, instruction); - self.populate_branch(cols, event, nonce_lookup, instruction); - self.populate_jump(cols, event, nonce_lookup, instruction); - self.populate_auipc(cols, event, nonce_lookup, instruction); - let is_halt = self.populate_ecall(cols, event, nonce_lookup); + self.populate_memory(cols, event, blu_events, shard, instruction); + self.populate_branch(cols, event, instruction); + self.populate_jump(cols, event, instruction); + self.populate_auipc(cols, event, instruction); + let is_halt = self.populate_ecall(cols, event); cols.is_sequential_instr = F::from_bool( !instruction.is_branch_instruction() && !instruction.is_jump_instruction() && !is_halt, @@ -250,7 +230,6 @@ impl CpuChip { cols: &mut CpuCols, event: &CpuEvent, blu_events: &mut impl ByteRecord, - nonce_lookup: &[u32], shard: u32, instruction: &Instruction, ) { @@ -281,9 +260,6 @@ impl CpuChip { let aligned_addr_ls_byte = (aligned_addr & 0x000000FF) as u8; let bits: [bool; 8] = array::from_fn(|i| aligned_addr_ls_byte & (1 << i) != 0); memory_columns.aa_least_sig_byte_decomp = array::from_fn(|i| F::from_bool(bits[i + 2])); - memory_columns.addr_word_nonce = F::from_canonical_u32( - nonce_lookup.get(event.memory_add_lookup_id.0 as usize).copied().unwrap_or_default(), - ); // Populate memory offsets. 
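        // Illustrative example: for memory_addr = 0x1000_0006 (WORD_SIZE = 4), the aligned
        // address is 0x1000_0004 and addr_offset = 2, i.e. the access touches the third
        // byte of the aligned word.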
let addr_offset = (memory_addr % WORD_SIZE as u32) as u8; @@ -331,12 +307,6 @@ impl CpuChip { } if memory_columns.most_sig_byte_decomp[7] == F::one() { cols.mem_value_is_neg_not_x0 = F::from_bool(instruction.op_a != (X0 as u8)); - cols.unsigned_mem_val_nonce = F::from_canonical_u32( - nonce_lookup - .get(event.memory_sub_lookup_id.0 as usize) - .copied() - .unwrap_or_default(), - ); } } @@ -368,7 +338,6 @@ impl CpuChip { &self, cols: &mut CpuCols, event: &CpuEvent, - nonce_lookup: &[u32], instruction: &Instruction, ) { if instruction.is_branch_instruction() { @@ -389,14 +358,6 @@ impl CpuChip { event.a > event.b }; - branch_columns.a_lt_b_nonce = F::from_canonical_u32( - nonce_lookup.get(event.branch_lt_lookup_id.0 as usize).copied().unwrap_or_default(), - ); - - branch_columns.a_gt_b_nonce = F::from_canonical_u32( - nonce_lookup.get(event.branch_gt_lookup_id.0 as usize).copied().unwrap_or_default(), - ); - branch_columns.a_eq_b = F::from_bool(a_eq_b); branch_columns.a_lt_b = F::from_bool(a_lt_b); branch_columns.a_gt_b = F::from_bool(a_gt_b); @@ -417,12 +378,6 @@ impl CpuChip { if branching { cols.branching = F::one(); - branch_columns.next_pc_nonce = F::from_canonical_u32( - nonce_lookup - .get(event.branch_add_lookup_id.0 as usize) - .copied() - .unwrap_or_default(), - ); } else { cols.not_branching = F::one(); } @@ -434,7 +389,6 @@ impl CpuChip { &self, cols: &mut CpuCols, event: &CpuEvent, - nonce_lookup: &[u32], instruction: &Instruction, ) { if instruction.is_jump_instruction() { @@ -448,24 +402,12 @@ impl CpuChip { jump_columns.pc_range_checker.populate(event.pc); jump_columns.next_pc = Word::from(next_pc); jump_columns.next_pc_range_checker.populate(next_pc); - jump_columns.jal_nonce = F::from_canonical_u32( - nonce_lookup - .get(event.jump_jal_lookup_id.0 as usize) - .copied() - .unwrap_or_default(), - ); } Opcode::JALR => { let next_pc = event.b.wrapping_add(event.c); jump_columns.op_a_range_checker.populate(event.a); jump_columns.next_pc = Word::from(next_pc); jump_columns.next_pc_range_checker.populate(next_pc); - jump_columns.jalr_nonce = F::from_canonical_u32( - nonce_lookup - .get(event.jump_jalr_lookup_id.0 as usize) - .copied() - .unwrap_or_default(), - ); } _ => unreachable!(), } @@ -477,7 +419,6 @@ impl CpuChip { &self, cols: &mut CpuCols, event: &CpuEvent, - nonce_lookup: &[u32], instruction: &Instruction, ) { if matches!(instruction.opcode, Opcode::AUIPC) { @@ -485,19 +426,11 @@ impl CpuChip { auipc_columns.pc = Word::from(event.pc); auipc_columns.pc_range_checker.populate(event.pc); - auipc_columns.auipc_nonce = F::from_canonical_u32( - nonce_lookup.get(event.auipc_lookup_id.0 as usize).copied().unwrap_or_default(), - ); } } /// Populate columns related to ECALL. - fn populate_ecall( - &self, - cols: &mut CpuCols, - event: &CpuEvent, - nonce_lookup: &[u32], - ) -> bool { + fn populate_ecall(&self, cols: &mut CpuCols, event: &CpuEvent) -> bool { let mut is_halt = false; if cols.selectors.is_ecall == F::one() { @@ -548,10 +481,6 @@ impl CpuChip { ecall_cols.index_bitmap[digest_idx] = F::one(); } - // Write the syscall nonce. 
- ecall_cols.syscall_nonce = - F::from_canonical_u32(nonce_lookup[event.syscall_lookup_id.0 as usize]); - is_halt = syscall_id == F::from_canonical_u32(SyscallCode::HALT.syscall_id()); // For halt and commit deferred proofs syscalls, we need to baby bear range check one of diff --git a/crates/core/machine/src/lib.rs b/crates/core/machine/src/lib.rs index 168be94eac..097700da68 100644 --- a/crates/core/machine/src/lib.rs +++ b/crates/core/machine/src/lib.rs @@ -23,6 +23,8 @@ pub mod memory; pub mod operations; pub mod program; pub mod riscv; +#[cfg(feature = "sys")] +pub mod sys; pub mod syscall; pub mod utils; @@ -31,7 +33,7 @@ pub mod utils; /// This string should be updated whenever any step in verifying an SP1 proof changes, including /// core, recursion, and plonk-bn254. This string is used to download SP1 artifacts and the gnark /// docker image. -pub const SP1_CIRCUIT_VERSION: &str = "v3.0.0"; +pub const SP1_CIRCUIT_VERSION: &str = "v4.0.0-rc.1-alpha"; // Re-export the `SP1ReduceProof` struct from sp1_core_machine. // diff --git a/crates/core/machine/src/memory/global.rs b/crates/core/machine/src/memory/global.rs index db58e9c351..615b769bfb 100644 --- a/crates/core/machine/src/memory/global.rs +++ b/crates/core/machine/src/memory/global.rs @@ -1,28 +1,35 @@ +use super::MemoryChipType; +use crate::{ + operations::GlobalAccumulationOperation, + operations::GlobalInteractionOperation, + operations::{AssertLtColsBits, BabyBearBitDecomposition, IsZeroOperation}, + utils::pad_rows_fixed, +}; use core::{ borrow::{Borrow, BorrowMut}, mem::size_of, }; -use std::array; - +use hashbrown::HashMap; +use itertools::Itertools; use p3_air::{Air, AirBuilder, BaseAir}; use p3_field::{AbstractField, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; -use sp1_core_executor::{events::MemoryInitializeFinalizeEvent, ExecutionRecord, Program}; +use p3_maybe_rayon::prelude::{IntoParallelRefIterator, ParallelIterator, ParallelSlice}; +use sp1_core_executor::events::ByteLookupEvent; +use sp1_core_executor::{ + events::{ByteRecord, MemoryInitializeFinalizeEvent}, + ExecutionRecord, Program, +}; use sp1_derive::AlignedBorrow; use sp1_stark::{ air::{ - AirInteraction, BaseAirBuilder, InteractionScope, MachineAir, PublicValues, SP1AirBuilder, + BaseAirBuilder, InteractionScope, MachineAir, PublicValues, SP1AirBuilder, SP1_PROOF_NUM_PV_ELTS, }, - InteractionKind, Word, + septic_digest::SepticDigest, + Word, }; - -use crate::{ - operations::{AssertLtColsBits, BabyBearBitDecomposition, IsZeroOperation}, - utils::pad_rows_fixed, -}; - -use super::MemoryChipType; +use std::array; /// A memory chip that can initialize or finalize values in memory. pub struct MemoryGlobalChip { @@ -54,8 +61,46 @@ impl MachineAir for MemoryGlobalChip { } } - fn generate_dependencies(&self, _input: &ExecutionRecord, _output: &mut ExecutionRecord) { - // Do nothing since this chip has no dependencies. 
+ fn generate_dependencies(&self, input: &ExecutionRecord, output: &mut ExecutionRecord) { + let mut memory_events = match self.kind { + MemoryChipType::Initialize => input.global_memory_initialize_events.clone(), + MemoryChipType::Finalize => input.global_memory_finalize_events.clone(), + }; + + let is_receive = match self.kind { + MemoryChipType::Initialize => false, + MemoryChipType::Finalize => true, + }; + + memory_events.sort_by_key(|event| event.addr); + let chunk_size = std::cmp::max(memory_events.len() / num_cpus::get(), 1); + + let blu_batches = memory_events + .par_chunks(chunk_size) + .map(|events| { + let mut blu: HashMap> = HashMap::new(); + events.iter().for_each(|event| { + let MemoryInitializeFinalizeEvent { + addr: _addr, + value, + shard, + timestamp: _timestamp, + used, + } = event.to_owned(); + let interaction_shard = if is_receive { shard } else { 0 }; + let mut row = [F::zero(); NUM_MEMORY_INIT_COLS]; + let cols: &mut MemoryInitCols = row.as_mut_slice().borrow_mut(); + cols.global_interaction_cols.populate_memory_range_check_witness( + interaction_shard, + value, + used != 0, + &mut blu, + ); + }); + blu + }) + .collect::>(); + output.add_sharded_byte_lookup_events(blu_batches.iter().collect_vec()); } fn generate_trace( @@ -73,11 +118,19 @@ impl MachineAir for MemoryGlobalChip { MemoryChipType::Finalize => input.public_values.previous_finalize_addr_bits, }; + let is_receive = match self.kind { + MemoryChipType::Initialize => false, + MemoryChipType::Finalize => true, + }; + + let mut global_cumulative_sum = SepticDigest::::zero().0; + memory_events.sort_by_key(|event| event.addr); - let mut rows: Vec<[F; NUM_MEMORY_INIT_COLS]> = (0..memory_events.len()) // OPT: change this to par_iter - .map(|i| { + let mut rows: Vec<[F; NUM_MEMORY_INIT_COLS]> = memory_events + .par_iter() + .map(|event| { let MemoryInitializeFinalizeEvent { addr, value, shard, timestamp, used } = - memory_events[i]; + event.to_owned(); let mut row = [F::zero(); NUM_MEMORY_INIT_COLS]; let cols: &mut MemoryInitCols = row.as_mut_slice().borrow_mut(); @@ -88,39 +141,60 @@ impl MachineAir for MemoryGlobalChip { cols.value = array::from_fn(|i| F::from_canonical_u32((value >> i) & 1)); cols.is_real = F::from_canonical_u32(used); - if i == 0 { - let prev_addr = previous_addr_bits - .iter() - .enumerate() - .map(|(j, bit)| bit * (1 << j)) - .sum::(); - cols.is_prev_addr_zero.populate(prev_addr); - cols.is_first_comp = F::from_bool(prev_addr != 0); - if prev_addr != 0 { - debug_assert!(prev_addr < addr, "prev_addr {} < addr {}", prev_addr, addr); - let addr_bits: [_; 32] = array::from_fn(|i| (addr >> i) & 1); - cols.lt_cols.populate(&previous_addr_bits, &addr_bits); - } - } + let interaction_shard = if is_receive { shard } else { 0 }; + let interaction_clk = if is_receive { timestamp } else { 0 }; + + cols.global_interaction_cols.populate_memory( + interaction_shard, + interaction_clk, + addr, + value, + is_receive, + used != 0, + ); - if i != 0 { - let prev_is_real = memory_events[i - 1].used; - cols.is_next_comp = F::from_canonical_u32(prev_is_real); - let previous_addr = memory_events[i - 1].addr; - assert_ne!(previous_addr, addr); + row + }) + .collect::>(); + for i in 0..memory_events.len() { + let addr = memory_events[i].addr; + let cols: &mut MemoryInitCols = rows[i].as_mut_slice().borrow_mut(); + if i == 0 { + let prev_addr = previous_addr_bits + .iter() + .enumerate() + .map(|(j, bit)| bit * (1 << j)) + .sum::(); + cols.is_prev_addr_zero.populate(prev_addr); + cols.is_first_comp = 
F::from_bool(prev_addr != 0); + if prev_addr != 0 { + debug_assert!(prev_addr < addr, "prev_addr {} < addr {}", prev_addr, addr); let addr_bits: [_; 32] = array::from_fn(|i| (addr >> i) & 1); - let prev_addr_bits: [_; 32] = array::from_fn(|i| (previous_addr >> i) & 1); - cols.lt_cols.populate(&prev_addr_bits, &addr_bits); + cols.lt_cols.populate(&previous_addr_bits, &addr_bits); } + } + if i != 0 { + let prev_is_real = memory_events[i - 1].used; + cols.is_next_comp = F::from_canonical_u32(prev_is_real); + let previous_addr = memory_events[i - 1].addr; + assert_ne!(previous_addr, addr); + + let addr_bits: [_; 32] = array::from_fn(|i| (addr >> i) & 1); + let prev_addr_bits: [_; 32] = array::from_fn(|i| (previous_addr >> i) & 1); + cols.lt_cols.populate(&prev_addr_bits, &addr_bits); + } - if i == memory_events.len() - 1 { - cols.is_last_addr = F::one(); - } + if i == memory_events.len() - 1 { + cols.is_last_addr = F::one(); + } - row - }) - .collect::>(); + cols.global_accumulation_cols.populate( + &mut global_cumulative_sum, + [cols.global_interaction_cols], + [cols.is_real], + ); + } // Pad the trace to a power of two depending on the proof shape in `input`. pad_rows_fixed( @@ -129,7 +203,23 @@ impl MachineAir for MemoryGlobalChip { input.fixed_log2_rows::(self), ); - RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_MEMORY_INIT_COLS) + let mut trace = RowMajorMatrix::new( + rows.into_iter().flatten().collect::>(), + NUM_MEMORY_INIT_COLS, + ); + + for i in memory_events.len()..trace.height() { + let cols: &mut MemoryInitCols = + trace.values[i * NUM_MEMORY_INIT_COLS..(i + 1) * NUM_MEMORY_INIT_COLS].borrow_mut(); + cols.global_interaction_cols.populate_dummy(); + cols.global_accumulation_cols.populate( + &mut global_cumulative_sum, + [cols.global_interaction_cols], + [cols.is_real], + ); + } + + trace } fn included(&self, shard: &Self::Record) -> bool { @@ -148,6 +238,8 @@ impl MachineAir for MemoryGlobalChip { } } +pub const MEMORY_GLOBAL_INITIAL_DIGEST_POS_COPY: usize = 161; + #[derive(AlignedBorrow, Debug, Clone, Copy)] #[repr(C)] pub struct MemoryInitCols { @@ -172,6 +264,9 @@ pub struct MemoryInitCols { /// Whether the memory access is a real access. pub is_real: T, + /// The columns for sending a global interaction. + pub global_interaction_cols: GlobalInteractionOperation, + /// Whether or not we are making the assertion `addr < addr_next`. pub is_next_comp: T, @@ -183,6 +278,9 @@ pub struct MemoryInitCols { /// A flag to indicate the last non-padded address. An auxiliary column needed for degree 3. pub is_last_addr: T, + + /// The columns for accumulating the elliptic curve digests. 
+ pub global_accumulation_cols: GlobalAccumulationOperation, } pub(crate) const NUM_MEMORY_INIT_COLS: usize = size_of::>(); @@ -217,20 +315,41 @@ where if self.kind == MemoryChipType::Initialize { let mut values = vec![AB::Expr::zero(), AB::Expr::zero(), local.addr.into()]; - values.extend(value.map(Into::into)); - builder.send( - AirInteraction::new(values, local.is_real.into(), InteractionKind::Memory), - InteractionScope::Global, + values.extend(value.clone().map(Into::into)); + GlobalInteractionOperation::::eval_single_digest_memory( + builder, + AB::Expr::zero(), + AB::Expr::zero(), + local.addr.into(), + value, + local.global_interaction_cols, + false, + local.is_real, ); } else { let mut values = vec![local.shard.into(), local.timestamp.into(), local.addr.into()]; - values.extend(value); - builder.receive( - AirInteraction::new(values, local.is_real.into(), InteractionKind::Memory), - InteractionScope::Global, + values.extend(value.clone()); + GlobalInteractionOperation::::eval_single_digest_memory( + builder, + local.shard.into(), + local.timestamp.into(), + local.addr.into(), + value, + local.global_interaction_cols, + true, + local.is_real, ); } + GlobalAccumulationOperation::::eval_accumulation( + builder, + [local.global_interaction_cols], + [local.is_real], + [next.is_real], + local.global_accumulation_cols, + next.global_accumulation_cols, + ); + // Canonically decompose the address into bits so we can do comparisons. BabyBearBitDecomposition::::range_check( builder, @@ -369,6 +488,7 @@ mod tests { }; use p3_baby_bear::BabyBear; use sp1_core_executor::{programs::tests::simple_program, Executor}; + use sp1_stark::InteractionKind; use sp1_stark::{ baby_bear_poseidon2::BabyBearPoseidon2, debug_interactions_with_all_chips, SP1CoreOpts, StarkMachine, diff --git a/crates/core/machine/src/memory/local.rs b/crates/core/machine/src/memory/local.rs index 8be4377031..ba8043ff27 100644 --- a/crates/core/machine/src/memory/local.rs +++ b/crates/core/machine/src/memory/local.rs @@ -1,27 +1,58 @@ use std::{ borrow::{Borrow, BorrowMut}, - mem::size_of, + mem::{size_of, transmute}, }; -use crate::utils::{next_power_of_two, zeroed_f_vec}; +use crate::utils::{indices_arr, next_power_of_two, zeroed_f_vec}; +use crate::{operations::GlobalAccumulationOperation, operations::GlobalInteractionOperation}; +use hashbrown::HashMap; +use itertools::Itertools; use p3_air::{Air, BaseAir}; use p3_field::PrimeField32; use p3_matrix::{dense::RowMajorMatrix, Matrix}; -use p3_maybe_rayon::prelude::{ParallelBridge, ParallelIterator}; +use p3_maybe_rayon::prelude::IndexedParallelIterator; +use p3_maybe_rayon::prelude::IntoParallelIterator; +use p3_maybe_rayon::prelude::IntoParallelRefMutIterator; +use p3_maybe_rayon::prelude::{ParallelBridge, ParallelIterator, ParallelSlice}; +use rayon_scan::ScanParallelIterator; +use sp1_core_executor::events::ByteLookupEvent; +use sp1_core_executor::events::ByteRecord; use sp1_core_executor::{ExecutionRecord, Program}; use sp1_derive::AlignedBorrow; use sp1_stark::{ air::{AirInteraction, InteractionScope, MachineAir, SP1AirBuilder}, + septic_curve::SepticCurve, + septic_curve::SepticCurveComplete, + septic_digest::SepticDigest, + septic_extension::SepticExtension, InteractionKind, Word, }; +/// Creates the column map for the CPU. 
+const fn make_col_map() -> MemoryLocalCols { + let indices_arr = indices_arr::(); + unsafe { transmute::<[usize; NUM_MEMORY_LOCAL_INIT_COLS], MemoryLocalCols>(indices_arr) } +} + +const MEMORY_LOCAL_COL_MAP: MemoryLocalCols = make_col_map(); + +pub const MEMORY_LOCAL_INITIAL_DIGEST_POS: usize = + MEMORY_LOCAL_COL_MAP.global_accumulation_cols.initial_digest[0].0[0]; + +pub const MEMORY_LOCAL_INITIAL_DIGEST_POS_COPY: usize = 480; + +#[repr(C)] +pub struct Ghost { + pub v: [usize; MEMORY_LOCAL_INITIAL_DIGEST_POS_COPY], +} + pub const NUM_LOCAL_MEMORY_ENTRIES_PER_ROW: usize = 4; pub(crate) const NUM_MEMORY_LOCAL_INIT_COLS: usize = size_of::>(); #[derive(AlignedBorrow, Debug, Clone, Copy)] #[repr(C)] -struct SingleMemoryLocal { +pub struct SingleMemoryLocal { /// The address of the memory access. pub addr: T, @@ -43,6 +74,12 @@ struct SingleMemoryLocal { /// The final value of the memory access. pub final_value: Word, + /// The global interaction columns for initial access. + pub initial_global_interaction_cols: GlobalInteractionOperation, + + /// The global interaction columns for final access. + pub final_global_interaction_cols: GlobalInteractionOperation, + /// Whether the memory access is a real access. pub is_real: T, } @@ -51,6 +88,7 @@ struct SingleMemoryLocal { #[repr(C)] pub struct MemoryLocalCols { memory_local_entries: [SingleMemoryLocal; NUM_LOCAL_MEMORY_ENTRIES_PER_ROW], + pub global_accumulation_cols: GlobalAccumulationOperation, } pub struct MemoryLocalChip {} @@ -64,6 +102,7 @@ impl MemoryLocalChip { impl BaseAir for MemoryLocalChip { fn width(&self) -> usize { + assert_eq!(MEMORY_LOCAL_INITIAL_DIGEST_POS_COPY, MEMORY_LOCAL_INITIAL_DIGEST_POS); NUM_MEMORY_LOCAL_INIT_COLS } } @@ -77,8 +116,43 @@ impl MachineAir for MemoryLocalChip { "MemoryLocal".to_string() } - fn generate_dependencies(&self, _input: &ExecutionRecord, _output: &mut ExecutionRecord) { - // Do nothing since this chip has no dependencies. 
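+    // The global interaction columns perform u8/u16 range checks, so this chip now contributes
+    // the corresponding byte lookup events instead of being a no-op.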
+ fn generate_dependencies(&self, input: &ExecutionRecord, output: &mut ExecutionRecord) { + let events = input.get_local_mem_events().collect::>(); + let nb_rows = (events.len() + 3) / 4; + let chunk_size = std::cmp::max((nb_rows + 1) / num_cpus::get(), 1); + + let blu_batches = events + .par_chunks(chunk_size * NUM_LOCAL_MEMORY_ENTRIES_PER_ROW) + .map(|events| { + let mut blu: HashMap> = HashMap::new(); + events.chunks(NUM_LOCAL_MEMORY_ENTRIES_PER_ROW).for_each(|events| { + let mut row = [F::zero(); NUM_MEMORY_LOCAL_INIT_COLS]; + let cols: &mut MemoryLocalCols = row.as_mut_slice().borrow_mut(); + for k in 0..NUM_LOCAL_MEMORY_ENTRIES_PER_ROW { + let cols = &mut cols.memory_local_entries[k]; + if k < events.len() { + let event = events[k]; + cols.initial_global_interaction_cols + .populate_memory_range_check_witness( + event.initial_mem_access.shard, + event.initial_mem_access.value, + true, + &mut blu, + ); + cols.final_global_interaction_cols.populate_memory_range_check_witness( + event.final_mem_access.shard, + event.final_mem_access.value, + true, + &mut blu, + ); + } + } + }); + blu + }) + .collect::>(); + + output.add_sharded_byte_lookup_events(blu_batches.iter().collect_vec()); } fn generate_trace( @@ -92,13 +166,21 @@ impl MachineAir for MemoryLocalChip { let size_log2 = input.fixed_log2_rows::(self); let padded_nb_rows = next_power_of_two(nb_rows, size_log2); let mut values = zeroed_f_vec(padded_nb_rows * NUM_MEMORY_LOCAL_INIT_COLS); - let chunk_size = std::cmp::max((nb_rows + 1) / num_cpus::get(), 1); + let chunk_size = std::cmp::max(nb_rows / num_cpus::get(), 0) + 1; - values + let mut chunks = values[..nb_rows * NUM_MEMORY_LOCAL_INIT_COLS] .chunks_mut(chunk_size * NUM_MEMORY_LOCAL_INIT_COLS) + .collect::>(); + + let point_chunks = chunks + .par_iter_mut() .enumerate() - .par_bridge() - .for_each(|(i, rows)| { + .map(|(i, rows)| { + let mut point_chunks = + Vec::with_capacity(chunk_size * NUM_LOCAL_MEMORY_ENTRIES_PER_ROW * 2 + 1); + if i == 0 { + point_chunks.push(SepticCurveComplete::Affine(SepticDigest::::zero().0)); + } rows.chunks_mut(NUM_MEMORY_LOCAL_INIT_COLS).enumerate().for_each(|(j, row)| { let idx = (i * chunk_size + j) * NUM_LOCAL_MEMORY_ENTRIES_PER_ROW; @@ -118,9 +200,101 @@ impl MachineAir for MemoryLocalChip { cols.initial_value = event.initial_mem_access.value.into(); cols.final_value = event.final_mem_access.value.into(); cols.is_real = F::one(); + cols.initial_global_interaction_cols.populate_memory( + event.initial_mem_access.shard, + event.initial_mem_access.timestamp, + event.addr, + event.initial_mem_access.value, + true, + true, + ); + point_chunks.push(SepticCurveComplete::Affine(SepticCurve { + x: SepticExtension( + cols.initial_global_interaction_cols.x_coordinate.0, + ), + y: SepticExtension( + cols.initial_global_interaction_cols.y_coordinate.0, + ), + })); + cols.final_global_interaction_cols.populate_memory( + event.final_mem_access.shard, + event.final_mem_access.timestamp, + event.addr, + event.final_mem_access.value, + false, + true, + ); + point_chunks.push(SepticCurveComplete::Affine(SepticCurve { + x: SepticExtension( + cols.final_global_interaction_cols.x_coordinate.0, + ), + y: SepticExtension( + cols.final_global_interaction_cols.y_coordinate.0, + ), + })); + } else { + cols.initial_global_interaction_cols.populate_dummy(); + cols.final_global_interaction_cols.populate_dummy(); } } }); + point_chunks + }) + .collect::>(); + + let mut points = Vec::with_capacity(1 + events.len() * 2); + for mut point_chunk in point_chunks { + 
points.append(&mut point_chunk); + } + + if events.is_empty() { + points = vec![SepticCurveComplete::Affine(SepticDigest::::zero().0)]; + } + + let cumulative_sum = points + .into_par_iter() + .with_min_len(1 << 15) + .scan(|a, b| *a + *b, SepticCurveComplete::Infinity) + .collect::>>(); + + let final_digest = cumulative_sum.last().unwrap().point(); + let dummy = SepticCurve::::dummy(); + let final_sum_checker = SepticCurve::::sum_checker_x(final_digest, dummy, final_digest); + + let chunk_size = std::cmp::max(padded_nb_rows / num_cpus::get(), 0) + 1; + values + .chunks_mut(chunk_size * NUM_MEMORY_LOCAL_INIT_COLS) + .enumerate() + .par_bridge() + .for_each(|(i, rows)| { + rows.chunks_mut(NUM_MEMORY_LOCAL_INIT_COLS).enumerate().for_each(|(j, row)| { + let idx = i * chunk_size + j; + + let cols: &mut MemoryLocalCols = row.borrow_mut(); + if idx < nb_rows { + let start = NUM_LOCAL_MEMORY_ENTRIES_PER_ROW * 2 * idx; + let end = std::cmp::min( + NUM_LOCAL_MEMORY_ENTRIES_PER_ROW * 2 * (idx + 1) + 1, + cumulative_sum.len(), + ); + cols.global_accumulation_cols.populate_real( + &cumulative_sum[start..end], + final_digest, + final_sum_checker, + ); + } else { + for k in 0..NUM_LOCAL_MEMORY_ENTRIES_PER_ROW { + cols.memory_local_entries[k] + .initial_global_interaction_cols + .populate_dummy(); + cols.memory_local_entries[k] + .final_global_interaction_cols + .populate_dummy(); + } + cols.global_accumulation_cols + .populate_dummy(final_digest, final_sum_checker); + } + }) }); // Convert the trace to a row major matrix. @@ -148,6 +322,12 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &MemoryLocalCols = (*local).borrow(); + let next = main.row_slice(1); + let next: &MemoryLocalCols = (*next).borrow(); + + let mut global_interaction_cols = Vec::with_capacity(8); + let mut local_is_reals = Vec::with_capacity(8); + let mut next_is_reals = Vec::with_capacity(8); for local in local.memory_local_entries.iter() { builder.assert_eq( @@ -155,39 +335,77 @@ where local.is_real * local.is_real * local.is_real, ); - for scope in [InteractionScope::Global, InteractionScope::Local] { - let mut values = - vec![local.initial_shard.into(), local.initial_clk.into(), local.addr.into()]; - values.extend(local.initial_value.map(Into::into)); - builder.receive( - AirInteraction::new( - values.clone(), - local.is_real.into(), - InteractionKind::Memory, - ), - scope, - ); - - let mut values = - vec![local.final_shard.into(), local.final_clk.into(), local.addr.into()]; - values.extend(local.final_value.map(Into::into)); - builder.send( - AirInteraction::new( - values.clone(), - local.is_real.into(), - InteractionKind::Memory, - ), - scope, - ); - } + let mut values = + vec![local.initial_shard.into(), local.initial_clk.into(), local.addr.into()]; + values.extend(local.initial_value.map(Into::into)); + builder.receive( + AirInteraction::new(values.clone(), local.is_real.into(), InteractionKind::Memory), + InteractionScope::Local, + ); + + GlobalInteractionOperation::::eval_single_digest_memory( + builder, + local.initial_shard.into(), + local.initial_clk.into(), + local.addr.into(), + local.initial_value.map(Into::into).0, + local.initial_global_interaction_cols, + true, + local.is_real, + ); + + global_interaction_cols.push(local.initial_global_interaction_cols); + local_is_reals.push(local.is_real); + + let mut values = + vec![local.final_shard.into(), local.final_clk.into(), local.addr.into()]; + values.extend(local.final_value.map(Into::into)); + builder.send( + 
AirInteraction::new(values.clone(), local.is_real.into(), InteractionKind::Memory), + InteractionScope::Local, + ); + + GlobalInteractionOperation::::eval_single_digest_memory( + builder, + local.final_shard.into(), + local.final_clk.into(), + local.addr.into(), + local.final_value.map(Into::into).0, + local.final_global_interaction_cols, + false, + local.is_real, + ); + + global_interaction_cols.push(local.final_global_interaction_cols); + local_is_reals.push(local.is_real); + } + + for next in next.memory_local_entries.iter() { + next_is_reals.push(next.is_real); + next_is_reals.push(next.is_real); } + + GlobalAccumulationOperation::::eval_accumulation( + builder, + global_interaction_cols + .try_into() + .unwrap_or_else(|_| panic!("There should be 8 interactions")), + local_is_reals.try_into().unwrap_or_else(|_| panic!("There should be 8 interactions")), + next_is_reals.try_into().unwrap_or_else(|_| panic!("There should be 8 interactions")), + local.global_accumulation_cols, + next.global_accumulation_cols, + ); } } #[cfg(test)] mod tests { + use super::*; use p3_baby_bear::BabyBear; use p3_matrix::dense::RowMajorMatrix; + use rand::thread_rng; + use rand::Rng; + use sp1_core_executor::events::{MemoryLocalEvent, MemoryRecord}; use sp1_core_executor::{programs::tests::simple_program, ExecutionRecord, Executor}; use sp1_stark::{ air::{InteractionScope, MachineAir}, @@ -280,4 +498,167 @@ mod tests { InteractionScope::Global, ); } + + #[cfg(feature = "sys")] + fn get_test_execution_record() -> ExecutionRecord { + let cpu_local_memory_access = (0..=255) + .flat_map(|_| { + [{ + let addr = thread_rng().gen_range(0..BabyBear::ORDER_U32); + let init_value = thread_rng().gen_range(0..u32::MAX); + let init_shard = thread_rng().gen_range(0..(1u32 << 16)); + let init_timestamp = thread_rng().gen_range(0..(1u32 << 24)); + let final_value = thread_rng().gen_range(0..u32::MAX); + let final_timestamp = thread_rng().gen_range(0..(1u32 << 24)); + let final_shard = thread_rng().gen_range(0..(1u32 << 16)); + MemoryLocalEvent { + addr, + initial_mem_access: MemoryRecord { + shard: init_shard, + timestamp: init_timestamp, + value: init_value, + }, + final_mem_access: MemoryRecord { + shard: final_shard, + timestamp: final_timestamp, + value: final_value, + }, + } + }] + }) + .collect::>(); + ExecutionRecord { cpu_local_memory_access, ..Default::default() } + } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_trace_ffi_eq_rust() { + let record = get_test_execution_record(); + let chip = MemoryLocalChip::new(); + let trace: RowMajorMatrix = + chip.generate_trace(&record, &mut ExecutionRecord::default()); + let trace_ffi = generate_trace_ffi(&record, trace.height()); + + assert_eq!(trace_ffi, trace); + } + + #[cfg(feature = "sys")] + fn generate_trace_ffi(input: &ExecutionRecord, height: usize) -> RowMajorMatrix { + type F = BabyBear; + // Generate the trace rows for each event. 
+ let events = input.get_local_mem_events().collect::>(); + let nb_rows = (events.len() + 3) / 4; + let padded_nb_rows = height; + let mut values = zeroed_f_vec(padded_nb_rows * NUM_MEMORY_LOCAL_INIT_COLS); + let chunk_size = std::cmp::max(nb_rows / num_cpus::get(), 0) + 1; + + let mut chunks = values[..nb_rows * NUM_MEMORY_LOCAL_INIT_COLS] + .chunks_mut(chunk_size * NUM_MEMORY_LOCAL_INIT_COLS) + .collect::>(); + + let point_chunks = chunks + .par_iter_mut() + .enumerate() + .map(|(i, rows)| { + let mut point_chunks = + Vec::with_capacity(chunk_size * NUM_LOCAL_MEMORY_ENTRIES_PER_ROW * 2 + 1); + if i == 0 { + point_chunks.push(SepticCurveComplete::Affine(SepticDigest::::zero().0)); + } + rows.chunks_mut(NUM_MEMORY_LOCAL_INIT_COLS).enumerate().for_each(|(j, row)| { + let idx = (i * chunk_size + j) * NUM_LOCAL_MEMORY_ENTRIES_PER_ROW; + let cols: &mut MemoryLocalCols = row.borrow_mut(); + for k in 0..NUM_LOCAL_MEMORY_ENTRIES_PER_ROW { + let cols = &mut cols.memory_local_entries[k]; + if idx + k < events.len() { + unsafe { + crate::sys::memory_local_event_to_row_babybear( + events[idx + k], + cols, + ); + } + point_chunks.push(SepticCurveComplete::Affine(SepticCurve { + x: SepticExtension( + cols.initial_global_interaction_cols.x_coordinate.0, + ), + y: SepticExtension( + cols.initial_global_interaction_cols.y_coordinate.0, + ), + })); + point_chunks.push(SepticCurveComplete::Affine(SepticCurve { + x: SepticExtension( + cols.final_global_interaction_cols.x_coordinate.0, + ), + y: SepticExtension( + cols.final_global_interaction_cols.y_coordinate.0, + ), + })); + } else { + cols.initial_global_interaction_cols.populate_dummy(); + cols.final_global_interaction_cols.populate_dummy(); + } + } + }); + point_chunks + }) + .collect::>(); + + let mut points = Vec::with_capacity(1 + events.len() * 2); + for mut point_chunk in point_chunks { + points.append(&mut point_chunk); + } + + if events.is_empty() { + points = vec![SepticCurveComplete::Affine(SepticDigest::::zero().0)]; + } + + let cumulative_sum = points + .into_par_iter() + .with_min_len(1 << 15) + .scan(|a, b| *a + *b, SepticCurveComplete::Infinity) + .collect::>>(); + + let final_digest = cumulative_sum.last().unwrap().point(); + let dummy = SepticCurve::::dummy(); + let final_sum_checker = SepticCurve::::sum_checker_x(final_digest, dummy, final_digest); + + let chunk_size = std::cmp::max(padded_nb_rows / num_cpus::get(), 0) + 1; + values + .chunks_mut(chunk_size * NUM_MEMORY_LOCAL_INIT_COLS) + .enumerate() + .par_bridge() + .for_each(|(i, rows)| { + rows.chunks_mut(NUM_MEMORY_LOCAL_INIT_COLS).enumerate().for_each(|(j, row)| { + let idx = i * chunk_size + j; + + let cols: &mut MemoryLocalCols = row.borrow_mut(); + if idx < nb_rows { + let start = NUM_LOCAL_MEMORY_ENTRIES_PER_ROW * 2 * idx; + let end = std::cmp::min( + NUM_LOCAL_MEMORY_ENTRIES_PER_ROW * 2 * (idx + 1) + 1, + cumulative_sum.len(), + ); + cols.global_accumulation_cols.populate_real( + &cumulative_sum[start..end], + final_digest, + final_sum_checker, + ); + } else { + for k in 0..NUM_LOCAL_MEMORY_ENTRIES_PER_ROW { + cols.memory_local_entries[k] + .initial_global_interaction_cols + .populate_dummy(); + cols.memory_local_entries[k] + .final_global_interaction_cols + .populate_dummy(); + } + cols.global_accumulation_cols + .populate_dummy(final_digest, final_sum_checker); + } + }) + }); + + // Convert the trace to a row major matrix. 
+ RowMajorMatrix::new(values, NUM_MEMORY_LOCAL_INIT_COLS) + } } diff --git a/crates/core/machine/src/memory/program.rs b/crates/core/machine/src/memory/program.rs index 699e052c0d..3777330842 100644 --- a/crates/core/machine/src/memory/program.rs +++ b/crates/core/machine/src/memory/program.rs @@ -4,18 +4,21 @@ use core::{ }; use itertools::Itertools; use p3_air::{Air, AirBuilder, AirBuilderWithPublicValues, BaseAir, PairBuilder}; -use p3_field::{AbstractField, PrimeField}; +use p3_field::AbstractField; use p3_matrix::{dense::RowMajorMatrix, Matrix}; +use crate::{operations::GlobalAccumulationOperation, operations::GlobalInteractionOperation}; +use hashbrown::HashMap; +use p3_field::PrimeField32; use p3_maybe_rayon::prelude::{ParallelBridge, ParallelIterator}; +use sp1_core_executor::events::ByteLookupEvent; +use sp1_core_executor::events::ByteRecord; use sp1_core_executor::{ExecutionRecord, Program}; use sp1_derive::AlignedBorrow; use sp1_stark::{ - air::{ - AirInteraction, InteractionScope, MachineAir, PublicValues, SP1AirBuilder, - SP1_PROOF_NUM_PV_ELTS, - }, - InteractionKind, Word, + air::{InteractionScope, MachineAir, PublicValues, SP1AirBuilder, SP1_PROOF_NUM_PV_ELTS}, + septic_digest::SepticDigest, + Word, }; use crate::{ @@ -47,6 +50,12 @@ pub struct MemoryProgramMultCols { /// Whether the shard is the first shard. pub is_first_shard: IsZeroOperation, + + /// The columns for the global interaction. + pub global_interaction_cols: GlobalInteractionOperation, + + /// The columns for accumulating the elliptic curve digests. + pub global_accumulation_cols: GlobalAccumulationOperation, } /// Chip that initializes memory that is provided from the program. The table is preprocessed and @@ -61,7 +70,7 @@ impl MemoryProgramChip { } } -impl MachineAir for MemoryProgramChip { +impl MachineAir for MemoryProgramChip { type Record = ExecutionRecord; type Program = Program; @@ -82,7 +91,7 @@ impl MachineAir for MemoryProgramChip { let mut values = zeroed_f_vec(padded_nb_rows * NUM_MEMORY_PROGRAM_PREPROCESSED_COLS); let chunk_size = std::cmp::max((nb_rows + 1) / num_cpus::get(), 1); - let memory = program.memory_image.iter().collect::>(); + let memory = program.memory_image.iter().sorted().collect::>(); values .chunks_mut(chunk_size * NUM_MEMORY_PROGRAM_PREPROCESSED_COLS) .enumerate() @@ -107,8 +116,21 @@ impl MachineAir for MemoryProgramChip { Some(RowMajorMatrix::new(values, NUM_MEMORY_PROGRAM_PREPROCESSED_COLS)) } - fn generate_dependencies(&self, _input: &ExecutionRecord, _output: &mut ExecutionRecord) { - // Do nothing since this chip has no dependencies. 
+ fn generate_dependencies(&self, input: &ExecutionRecord, output: &mut ExecutionRecord) { + let program_memory = &input.program.memory_image; + + let mult_bool = input.public_values.shard == 1; + + let mut blu: HashMap> = HashMap::new(); + + program_memory.iter().for_each(|(&_addr, &word)| { + let mut row = [F::zero(); NUM_MEMORY_PROGRAM_MULT_COLS]; + let cols: &mut MemoryProgramMultCols = row.as_mut_slice().borrow_mut(); + cols.global_interaction_cols + .populate_memory_range_check_witness(0, word, mult_bool, &mut blu); + }); + + output.add_sharded_byte_lookup_events(vec![&blu]); } fn generate_trace( @@ -116,18 +138,26 @@ impl MachineAir for MemoryProgramChip { input: &ExecutionRecord, _output: &mut ExecutionRecord, ) -> RowMajorMatrix { - let program_memory_addrs = input.program.memory_image.keys().copied().sorted(); + let program_memory = &input.program.memory_image; - let mult = if input.public_values.shard == 1 { F::one() } else { F::zero() }; + let mult_bool = input.public_values.shard == 1; + let mult = F::from_bool(mult_bool); + let mut global_cumulative_sum = SepticDigest::::zero().0; // Generate the trace rows for each event. - let mut rows = program_memory_addrs - .into_iter() - .map(|_| { + let mut rows = program_memory + .iter() + .map(|(&addr, &word)| { let mut row = [F::zero(); NUM_MEMORY_PROGRAM_MULT_COLS]; let cols: &mut MemoryProgramMultCols = row.as_mut_slice().borrow_mut(); cols.multiplicity = mult; cols.is_first_shard.populate(input.public_values.shard - 1); + cols.global_interaction_cols.populate_memory(0, 0, addr, word, false, mult_bool); + cols.global_accumulation_cols.populate( + &mut global_cumulative_sum, + [cols.global_interaction_cols], + [cols.multiplicity], + ); row }) .collect::>(); @@ -140,15 +170,29 @@ impl MachineAir for MemoryProgramChip { ); // Convert the trace to a row major matrix. - - RowMajorMatrix::new( + let mut trace = RowMajorMatrix::new( rows.into_iter().flatten().collect::>(), NUM_MEMORY_PROGRAM_MULT_COLS, - ) + ); + + let len = input.program.memory_image.len(); + for i in len..trace.height() { + let cols: &mut MemoryProgramMultCols = trace.values + [i * NUM_MEMORY_PROGRAM_MULT_COLS..(i + 1) * NUM_MEMORY_PROGRAM_MULT_COLS] + .borrow_mut(); + cols.global_interaction_cols.populate_dummy(); + cols.global_accumulation_cols.populate( + &mut global_cumulative_sum, + [cols.global_interaction_cols], + [cols.multiplicity], + ); + } + + trace } fn included(&self, _: &Self::Record) -> bool { - true + false } fn commit_scope(&self) -> InteractionScope { @@ -176,6 +220,9 @@ where let mult_local = main.row_slice(0); let mult_local: &MemoryProgramMultCols = (*mult_local).borrow(); + let mult_next = main.row_slice(1); + let mult_next: &MemoryProgramMultCols = (*mult_next).borrow(); + // Get shard from public values and evaluate whether it is the first shard. 
let public_values_slice: [AB::Expr; SP1_PROOF_NUM_PV_ELTS] = core::array::from_fn(|i| builder.public_values()[i].into()); @@ -203,9 +250,24 @@ where let mut values = vec![AB::Expr::zero(), AB::Expr::zero(), prep_local.addr.into()]; values.extend(prep_local.value.map(Into::into)); - builder.send( - AirInteraction::new(values, mult_local.multiplicity.into(), InteractionKind::Memory), - InteractionScope::Global, + GlobalInteractionOperation::::eval_single_digest_memory( + builder, + AB::Expr::zero(), + AB::Expr::zero(), + prep_local.addr.into(), + prep_local.value.map(Into::into).0, + mult_local.global_interaction_cols, + false, + mult_local.multiplicity, + ); + + GlobalAccumulationOperation::::eval_accumulation( + builder, + [mult_local.global_interaction_cols], + [mult_local.multiplicity], + [mult_next.multiplicity], + mult_local.global_accumulation_cols, + mult_next.global_accumulation_cols, ); } } diff --git a/crates/core/machine/src/operations/field/field_den.rs b/crates/core/machine/src/operations/field/field_den.rs index b9bb80b306..10762c6574 100644 --- a/crates/core/machine/src/operations/field/field_den.rs +++ b/crates/core/machine/src/operations/field/field_den.rs @@ -153,9 +153,10 @@ mod tests { StarkGenericConfig, }; + use crate::utils::uni_stark::{uni_stark_prove, uni_stark_verify}; + use super::{FieldDenCols, Limbs}; - use crate::utils::{uni_stark_prove as prove, uni_stark_verify as verify}; use core::{ borrow::{Borrow, BorrowMut}, mem::size_of, @@ -287,9 +288,9 @@ mod tests { // This it to test that the proof DOESN'T work if messed up. // let row = trace.row_mut(0); // row[0] = BabyBear::from_canonical_u8(0); - let proof = prove::(&config, &chip, &mut challenger, trace); + let proof = uni_stark_prove::(&config, &chip, &mut challenger, trace); let mut challenger = config.challenger(); - verify(&config, &chip, &mut challenger, &proof).unwrap(); + uni_stark_verify(&config, &chip, &mut challenger, &proof).unwrap(); } } diff --git a/crates/core/machine/src/operations/field/field_inner_product.rs b/crates/core/machine/src/operations/field/field_inner_product.rs index 30f2610e74..41221c54e9 100644 --- a/crates/core/machine/src/operations/field/field_inner_product.rs +++ b/crates/core/machine/src/operations/field/field_inner_product.rs @@ -143,7 +143,10 @@ mod tests { use super::{FieldInnerProductCols, Limbs}; - use crate::utils::{pad_to_power_of_two, uni_stark_prove as prove, uni_stark_verify as verify}; + use crate::utils::{ + pad_to_power_of_two, + uni_stark::{uni_stark_prove, uni_stark_verify}, + }; use core::{ borrow::{Borrow, BorrowMut}, mem::size_of, @@ -271,9 +274,9 @@ mod tests { let chip: FieldIpChip = FieldIpChip::new(); let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); - let proof = prove::(&config, &chip, &mut challenger, trace); + let proof = uni_stark_prove::(&config, &chip, &mut challenger, trace); let mut challenger = config.challenger(); - verify(&config, &chip, &mut challenger, &proof).unwrap(); + uni_stark_verify(&config, &chip, &mut challenger, &proof).unwrap(); } } diff --git a/crates/core/machine/src/operations/field/field_op.rs b/crates/core/machine/src/operations/field/field_op.rs index 971b9f6f30..26d69da5a2 100644 --- a/crates/core/machine/src/operations/field/field_op.rs +++ b/crates/core/machine/src/operations/field/field_op.rs @@ -36,12 +36,68 @@ use typenum::Unsigned; pub struct FieldOpCols { /// The result of `a op b`, where a, b are field elements pub result: Limbs, - pub(crate) carry: Limbs, + pub carry: Limbs, 
pub(crate) witness_low: Limbs, pub(crate) witness_high: Limbs, } impl FieldOpCols { + #[allow(clippy::too_many_arguments)] + /// Populate result and carry columns from the equation (a*b + c) % modulus + pub fn populate_mul_and_carry( + &mut self, + record: &mut impl ByteRecord, + shard: u32, + a: &BigUint, + b: &BigUint, + c: &BigUint, + modulus: &BigUint, + ) -> (BigUint, BigUint) { + let p_a: Polynomial = P::to_limbs_field::(a).into(); + let p_b: Polynomial = P::to_limbs_field::(b).into(); + let p_c: Polynomial = P::to_limbs_field::(c).into(); + + let mul_add = a * b + c; + let result = &mul_add % modulus; + let carry = (mul_add - &result) / modulus; + debug_assert!(&result < modulus); + debug_assert!(&carry < modulus); + debug_assert_eq!(&carry * modulus, a * b + c - &result); + + let p_modulus_limbs = + modulus.to_bytes_le().iter().map(|x| F::from_canonical_u8(*x)).collect::>(); + let p_modulus: Polynomial = p_modulus_limbs.iter().into(); + let p_result: Polynomial = P::to_limbs_field::(&result).into(); + let p_carry: Polynomial = P::to_limbs_field::(&carry).into(); + + let p_op = &p_a * &p_b + &p_c; + let p_vanishing = &p_op - &p_result - &p_carry * &p_modulus; + + let p_witness = compute_root_quotient_and_shift( + &p_vanishing, + P::WITNESS_OFFSET, + P::NB_BITS_PER_LIMB as u32, + P::NB_WITNESS_LIMBS, + ); + + let (mut p_witness_low, mut p_witness_high) = split_u16_limbs_to_u8_limbs(&p_witness); + + self.result = p_result.into(); + self.carry = p_carry.into(); + + p_witness_low.resize(P::Witness::USIZE, F::zero()); + p_witness_high.resize(P::Witness::USIZE, F::zero()); + self.witness_low = Limbs(p_witness_low.try_into().unwrap()); + self.witness_high = Limbs(p_witness_high.try_into().unwrap()); + + record.add_u8_range_checks_field(shard, &self.result.0); + record.add_u8_range_checks_field(shard, &self.carry.0); + record.add_u8_range_checks_field(shard, &self.witness_low.0); + record.add_u8_range_checks_field(shard, &self.witness_high.0); + + (result, carry) + } + pub fn populate_carry_and_witness( &mut self, a: &BigUint, @@ -224,6 +280,29 @@ impl FieldOpCols { self.eval_with_polynomials(builder, p_op, modulus.clone(), p_result, is_real); } + #[allow(clippy::too_many_arguments)] + pub fn eval_mul_and_carry>( + &self, + builder: &mut AB, + a: &(impl Into> + Clone), + b: &(impl Into> + Clone), + c: &(impl Into> + Clone), + modulus: &(impl Into> + Clone), + is_real: impl Into + Clone, + ) where + V: Into, + Limbs: Copy, + { + let p_a: Polynomial = (a).clone().into(); + let p_b: Polynomial = (b).clone().into(); + let p_c: Polynomial = (c).clone().into(); + + let p_result: Polynomial<_> = self.result.into(); + let p_op = p_a * p_b + p_c; + + self.eval_with_polynomials(builder, p_op, modulus.clone(), p_result, is_real); + } + #[allow(clippy::too_many_arguments)] pub fn eval_with_modulus>( &self, @@ -311,7 +390,8 @@ mod tests { use super::{FieldOpCols, FieldOperation, Limbs}; - use crate::utils::{pad_to_power_of_two, uni_stark_prove as prove, uni_stark_verify as verify}; + use crate::utils::pad_to_power_of_two; + use crate::utils::uni_stark::{uni_stark_prove, uni_stark_verify}; use core::borrow::{Borrow, BorrowMut}; use num::bigint::RandBigInt; use p3_air::Air; @@ -456,10 +536,11 @@ mod tests { let shard = ExecutionRecord::default(); let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); - let proof = prove::(&config, &chip, &mut challenger, trace); + let proof = + uni_stark_prove::(&config, &chip, &mut challenger, trace); let mut challenger = 
config.challenger();
-        verify(&config, &chip, &mut challenger, &proof).unwrap();
+        uni_stark_verify(&config, &chip, &mut challenger, &proof).unwrap();
     }
 }
}
diff --git a/crates/core/machine/src/operations/field/field_sqrt.rs b/crates/core/machine/src/operations/field/field_sqrt.rs
index a0f40c6a48..7bc98a2b00 100644
--- a/crates/core/machine/src/operations/field/field_sqrt.rs
+++ b/crates/core/machine/src/operations/field/field_sqrt.rs
@@ -152,7 +152,10 @@ mod tests {
     use sp1_curves::params::{FieldParameters, Limbs};
     use sp1_stark::air::{MachineAir, SP1AirBuilder};
 
-    use crate::utils::{pad_to_power_of_two, uni_stark_prove as prove, uni_stark_verify as verify};
+    use crate::utils::{
+        pad_to_power_of_two,
+        uni_stark::{uni_stark_prove, uni_stark_verify},
+    };
     use core::{
         borrow::{Borrow, BorrowMut},
         mem::size_of,
@@ -283,9 +286,9 @@ mod tests {
         let shard = ExecutionRecord::default();
         let trace: RowMajorMatrix =
             chip.generate_trace(&shard, &mut ExecutionRecord::default());
-        let proof = prove::(&config, &chip, &mut challenger, trace);
+        let proof = uni_stark_prove::(&config, &chip, &mut challenger, trace);
         let mut challenger = config.challenger();
-        verify(&config, &chip, &mut challenger, &proof).unwrap();
+        uni_stark_verify(&config, &chip, &mut challenger, &proof).unwrap();
     }
 }
diff --git a/crates/core/machine/src/operations/global_accumulation.rs b/crates/core/machine/src/operations/global_accumulation.rs
new file mode 100644
index 0000000000..b041072e9e
--- /dev/null
+++ b/crates/core/machine/src/operations/global_accumulation.rs
@@ -0,0 +1,222 @@
+use crate::operations::GlobalInteractionOperation;
+use p3_air::AirBuilder;
+use p3_field::AbstractExtensionField;
+use p3_field::AbstractField;
+use p3_field::Field;
+use p3_field::PrimeField32;
+use sp1_derive::AlignedBorrow;
+use sp1_stark::air::BaseAirBuilder;
+use sp1_stark::air::SepticExtensionAirBuilder;
+use sp1_stark::septic_curve::SepticCurveComplete;
+use sp1_stark::{
+    air::SP1AirBuilder,
+    septic_curve::SepticCurve,
+    septic_digest::SepticDigest,
+    septic_extension::{SepticBlock, SepticExtension},
+};
+
+/// A set of columns needed to compute the global interaction elliptic curve digest.
+/// It is critical that this struct is at the end of the main trace, as the permutation constraints will be dependent on this fact.
+/// It is also critical that the cumulative sum is at the end of this struct, for the same reason.
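+///
+/// `initial_digest` holds the running digest (x, y) entering the row, `cumulative_sum[i]` is the
+/// digest after folding in the i-th interaction of the row, and `sum_checker[i]` witnesses the
+/// curve-addition check for that step, which must vanish whenever the interaction is real.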
+#[derive(AlignedBorrow, Debug, Clone, Copy)] +#[repr(C)] +pub struct GlobalAccumulationOperation { + pub initial_digest: [SepticBlock; 2], + pub sum_checker: [SepticBlock; N], + pub cumulative_sum: [[SepticBlock; 2]; N], +} + +impl Default for GlobalAccumulationOperation { + fn default() -> Self { + Self { + initial_digest: core::array::from_fn(|_| SepticBlock::::default()), + sum_checker: core::array::from_fn(|_| SepticBlock::::default()), + cumulative_sum: core::array::from_fn(|_| { + [SepticBlock::::default(), SepticBlock::::default()] + }), + } + } +} + +impl GlobalAccumulationOperation { + pub fn populate( + &mut self, + initial_digest: &mut SepticCurve, + global_interaction_cols: [GlobalInteractionOperation; N], + is_real: [F; N], + ) { + self.initial_digest[0] = SepticBlock::from(initial_digest.x.0); + self.initial_digest[1] = SepticBlock::from(initial_digest.y.0); + + for i in 0..N { + let point_cur = SepticCurve { + x: SepticExtension(global_interaction_cols[i].x_coordinate.0), + y: SepticExtension(global_interaction_cols[i].y_coordinate.0), + }; + assert!(is_real[i] == F::one() || is_real[i] == F::zero()); + let sum_point = if is_real[i] == F::one() { + point_cur.add_incomplete(*initial_digest) + } else { + *initial_digest + }; + let sum_checker = if is_real[i] == F::one() { + SepticExtension::::zero() + } else { + SepticCurve::::sum_checker_x(*initial_digest, point_cur, sum_point) + }; + self.sum_checker[i] = SepticBlock::from(sum_checker.0); + self.cumulative_sum[i][0] = SepticBlock::from(sum_point.x.0); + self.cumulative_sum[i][1] = SepticBlock::from(sum_point.y.0); + *initial_digest = sum_point; + } + } + + pub fn populate_dummy( + &mut self, + final_digest: SepticCurve, + final_sum_checker: SepticExtension, + ) { + self.initial_digest[0] = SepticBlock::from(final_digest.x.0); + self.initial_digest[1] = SepticBlock::from(final_digest.y.0); + for i in 0..N { + self.sum_checker[i] = SepticBlock::from(final_sum_checker.0); + self.cumulative_sum[i][0] = SepticBlock::from(final_digest.x.0); + self.cumulative_sum[i][1] = SepticBlock::from(final_digest.y.0); + } + } + + pub fn populate_real( + &mut self, + sums: &[SepticCurveComplete], + final_digest: SepticCurve, + final_sum_checker: SepticExtension, + ) { + let len = sums.len(); + let sums = sums.iter().map(|complete_point| complete_point.point()).collect::>(); + self.initial_digest[0] = SepticBlock::from(sums[0].x.0); + self.initial_digest[1] = SepticBlock::from(sums[0].y.0); + for i in 0..N { + if len >= i + 2 { + self.sum_checker[i] = SepticBlock([F::zero(); 7]); + self.cumulative_sum[i][0] = SepticBlock::from(sums[i + 1].x.0); + self.cumulative_sum[i][1] = SepticBlock::from(sums[i + 1].y.0); + } else { + self.sum_checker[i] = SepticBlock::from(final_sum_checker.0); + self.cumulative_sum[i][0] = SepticBlock::from(final_digest.x.0); + self.cumulative_sum[i][1] = SepticBlock::from(final_digest.y.0); + } + } + } +} + +impl GlobalAccumulationOperation { + pub fn eval_accumulation( + builder: &mut AB, + global_interaction_cols: [GlobalInteractionOperation; N], + local_is_real: [AB::Var; N], + next_is_real: [AB::Var; N], + local_accumulation: GlobalAccumulationOperation, + next_accumulation: GlobalAccumulationOperation, + ) { + // First, constrain the control flow regarding `is_real`. + // Constrain that all `is_real` values are boolean. + for i in 0..N { + builder.assert_bool(local_is_real[i]); + } + + // Constrain that `is_real = 0` implies the next `is_real` values are all zero. 
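+        // Together with the transition constraint below, this forces the real interactions to
+        // form a contiguous prefix: once a padding entry appears, everything after it is padding.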
+ for i in 0..N - 1 { + // `is_real[i] == 0` implies `is_real[i + 1] == 0`. + builder.when_not(local_is_real[i]).assert_zero(local_is_real[i + 1]); + } + + // Constrain that `is_real[N - 1] == 0` implies `next.is_real[0] == 0` + builder.when_transition().when_not(local_is_real[N - 1]).assert_zero(next_is_real[0]); + + // Next, constrain the accumulation. + // Constrain that the first `initial_digest` is the starting point. + let initial_digest = SepticCurve:: { + x: SepticExtension::::from_base_fn(|i| { + local_accumulation.initial_digest[0][i].into() + }), + y: SepticExtension::::from_base_fn(|i| { + local_accumulation.initial_digest[1][i].into() + }), + }; + + let ith_cumulative_sum = |idx: usize| SepticCurve:: { + x: SepticExtension::::from_base_fn(|i| { + local_accumulation.cumulative_sum[idx][0].0[i].into() + }), + y: SepticExtension::::from_base_fn(|i| { + local_accumulation.cumulative_sum[idx][1].0[i].into() + }), + }; + + let ith_point_to_add = |idx: usize| SepticCurve:: { + x: SepticExtension::::from_base_fn(|i| { + global_interaction_cols[idx].x_coordinate.0[i].into() + }), + y: SepticExtension::::from_base_fn(|i| { + global_interaction_cols[idx].y_coordinate.0[i].into() + }), + }; + + let starting_digest = SepticDigest::::zero().0; + + builder.when_first_row().assert_septic_ext_eq(initial_digest.x.clone(), starting_digest.x); + builder.when_first_row().assert_septic_ext_eq(initial_digest.y.clone(), starting_digest.y); + + // Constrain that when `is_real = 1`, addition is being carried out, and when `is_real = 0`, the sum remains the same. + for i in 0..N { + let current_sum = + if i == 0 { initial_digest.clone() } else { ith_cumulative_sum(i - 1) }; + let point_to_add = ith_point_to_add(i); + let next_sum = ith_cumulative_sum(i); + // If `is_real == 1`, current_sum + point_to_add == next_sum must hold. + let sum_checker_x = SepticCurve::::sum_checker_x( + current_sum.clone(), + point_to_add.clone(), + next_sum.clone(), + ); + let sum_checker_y = SepticCurve::::sum_checker_y( + current_sum.clone(), + point_to_add, + next_sum.clone(), + ); + let witnessed_sum_checker_x = SepticExtension::::from_base_fn(|idx| { + local_accumulation.sum_checker[i].0[idx].into() + }); + builder.assert_septic_ext_eq(sum_checker_x, witnessed_sum_checker_x.clone()); + builder + .when(local_is_real[i]) + .assert_septic_ext_eq(witnessed_sum_checker_x, SepticExtension::::zero()); + builder + .when(local_is_real[i]) + .assert_septic_ext_eq(sum_checker_y, SepticExtension::::zero()); + + // If `is_real == 0`, current_sum == next_sum must hold. + builder + .when_not(local_is_real[i]) + .assert_septic_ext_eq(current_sum.x.clone(), next_sum.x.clone()); + builder.when_not(local_is_real[i]).assert_septic_ext_eq(current_sum.y, next_sum.y); + } + + // Constrain that the final digest is the next row's initial_digest. 
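+        // This chains the per-row running sums into a single cumulative digest across the trace,
+        // so the last row's `cumulative_sum` is the chip's contribution to the global sum.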
+ let final_digest = ith_cumulative_sum(N - 1); + + let next_initial_digest = SepticCurve:: { + x: SepticExtension::::from_base_fn(|i| { + next_accumulation.initial_digest[0][i].into() + }), + y: SepticExtension::::from_base_fn(|i| { + next_accumulation.initial_digest[1][i].into() + }), + }; + + builder + .when_transition() + .assert_septic_ext_eq(final_digest.x.clone(), next_initial_digest.x.clone()); + builder.when_transition().assert_septic_ext_eq(final_digest.y, next_initial_digest.y); + } +} diff --git a/crates/core/machine/src/operations/global_interaction.rs b/crates/core/machine/src/operations/global_interaction.rs new file mode 100644 index 0000000000..5e9a7d789a --- /dev/null +++ b/crates/core/machine/src/operations/global_interaction.rs @@ -0,0 +1,337 @@ +use crate::air::WordAirBuilder; +use p3_air::AirBuilder; +use p3_field::AbstractExtensionField; +use p3_field::AbstractField; +use p3_field::Field; +use p3_field::PrimeField32; +use sp1_core_executor::events::ByteRecord; +use sp1_core_executor::ByteOpcode; +use sp1_derive::AlignedBorrow; +use sp1_stark::air::SepticExtensionAirBuilder; +use sp1_stark::{ + air::SP1AirBuilder, + septic_curve::{SepticCurve, CURVE_WITNESS_DUMMY_POINT_X, CURVE_WITNESS_DUMMY_POINT_Y}, + septic_extension::{SepticBlock, SepticExtension}, + InteractionKind, +}; + +/// A set of columns needed to compute the global interaction elliptic curve digest. +#[derive(AlignedBorrow, Default, Debug, Clone, Copy)] +#[repr(C)] +pub struct GlobalInteractionOperation { + pub offset_bits: [T; 8], + pub x_coordinate: SepticBlock, + pub y_coordinate: SepticBlock, + pub y6_bit_decomp: [T; 30], + pub range_check_witness: T, +} + +impl GlobalInteractionOperation { + pub fn get_digest( + values: SepticBlock, + is_receive: bool, + kind: InteractionKind, + ) -> (SepticCurve, u8) { + let x_start = SepticExtension::::from_base_fn(|i| F::from_canonical_u32(values.0[i])) + + SepticExtension::from_base(F::from_canonical_u32((kind as u32) << 24)); + let (point, offset) = SepticCurve::::lift_x(x_start); + if !is_receive { + return (point.neg(), offset); + } + (point, offset) + } + + pub fn populate( + &mut self, + values: SepticBlock, + is_receive: bool, + is_real: bool, + kind: InteractionKind, + ) { + if is_real { + let (point, offset) = Self::get_digest(values, is_receive, kind); + for i in 0..8 { + self.offset_bits[i] = F::from_canonical_u8((offset >> i) & 1); + } + self.x_coordinate = SepticBlock::::from(point.x.0); + self.y_coordinate = SepticBlock::::from(point.y.0); + let range_check_value = if is_receive { + point.y.0[6].as_canonical_u32() - 1 + } else { + point.y.0[6].as_canonical_u32() - (F::ORDER_U32 + 1) / 2 + }; + let mut top_4_bits = F::zero(); + for i in 0..30 { + self.y6_bit_decomp[i] = F::from_canonical_u32((range_check_value >> i) & 1); + if i >= 26 { + top_4_bits += self.y6_bit_decomp[i]; + } + } + top_4_bits -= F::from_canonical_u32(4); + self.range_check_witness = top_4_bits.inverse(); + } else { + self.populate_dummy(); + } + } + + #[allow(clippy::too_many_arguments)] + pub fn populate_memory_range_check_witness( + &self, + shard: u32, + value: u32, + is_real: bool, + blu: &mut impl ByteRecord, + ) { + if is_real { + blu.add_u8_range_checks(shard, &value.to_le_bytes()); + blu.add_u16_range_check(shard, shard as u16); + } + } + + #[allow(clippy::too_many_arguments)] + pub fn populate_memory( + &mut self, + shard: u32, + clk: u32, + addr: u32, + value: u32, + is_receive: bool, + is_real: bool, + ) { + self.populate( + SepticBlock([ + shard, + clk, + addr, + value & 
255, + (value >> 8) & 255, + (value >> 16) & 255, + (value >> 24) & 255, + ]), + is_receive, + is_real, + InteractionKind::Memory, + ); + } + + #[allow(clippy::too_many_arguments)] + pub fn populate_syscall_range_check_witness( + &self, + shard: u32, + clk_16: u16, + clk_8: u8, + syscall_id: u32, + is_real: bool, + blu: &mut impl ByteRecord, + ) { + if is_real { + blu.add_u16_range_checks(shard, &[shard as u16, clk_16]); + blu.add_u8_range_checks(shard, &[clk_8, syscall_id as u8]); + } + } + + #[allow(clippy::too_many_arguments)] + pub fn populate_syscall( + &mut self, + shard: u32, + clk_16: u16, + clk_8: u8, + syscall_id: u32, + arg1: u32, + arg2: u32, + is_receive: bool, + is_real: bool, + ) { + self.populate( + SepticBlock([shard, clk_16.into(), clk_8.into(), syscall_id, arg1, arg2, 0]), + is_receive, + is_real, + InteractionKind::Syscall, + ); + } + + pub fn populate_dummy(&mut self) { + for i in 0..8 { + self.offset_bits[i] = F::zero(); + } + self.x_coordinate = SepticBlock::::from_base_fn(|i| { + F::from_canonical_u32(CURVE_WITNESS_DUMMY_POINT_X[i]) + }); + self.y_coordinate = SepticBlock::::from_base_fn(|i| { + F::from_canonical_u32(CURVE_WITNESS_DUMMY_POINT_Y[i]) + }); + for i in 0..30 { + self.y6_bit_decomp[i] = F::zero(); + } + self.range_check_witness = F::zero(); + } +} + +impl GlobalInteractionOperation { + /// Constrain that the y coordinate is correct decompression, and send the resulting digest coordinate to the permutation trace. + /// The first value in `values` must be a value range checked to u16. + fn eval_single_digest( + builder: &mut AB, + values: [AB::Expr; 7], + cols: GlobalInteractionOperation, + is_receive: bool, + is_real: AB::Var, + kind: InteractionKind, + ) { + // Constrain that the `is_real` is boolean. + builder.assert_bool(is_real); + + // Compute the offset and range check each bits, ensuring that the offset is a byte. + let mut offset = AB::Expr::zero(); + for i in 0..8 { + builder.assert_bool(cols.offset_bits[i]); + offset = offset.clone() + cols.offset_bits[i] * AB::F::from_canonical_u32(1 << i); + } + + // Compute the message. + let message = SepticExtension(values) + + SepticExtension::::from_base( + offset * AB::F::from_canonical_u32(1 << 16) + + AB::F::from_canonical_u32(kind as u32) * AB::F::from_canonical_u32(1 << 24), + ); + + // Compute a * m + b. + let am_plus_b = SepticCurve::::universal_hash(message); + + let x = SepticExtension::::from_base_fn(|i| cols.x_coordinate[i].into()); + + // Constrain that when `is_real` is true, then `x == a * m + b`. + builder.when(is_real).assert_septic_ext_eq(x.clone(), am_plus_b); + + // Constrain that y is a valid y-coordinate. + let y = SepticExtension::::from_base_fn(|i| cols.y_coordinate[i].into()); + + // Constrain that `(x, y)` is a valid point on the curve. + let y2 = y.square(); + let x3_2x_26z5 = SepticCurve::::curve_formula(x); + + builder.assert_septic_ext_eq(y2, x3_2x_26z5); + + let mut y6_value = AB::Expr::zero(); + let mut top_4_bits = AB::Expr::zero(); + for i in 0..30 { + builder.assert_bool(cols.y6_bit_decomp[i]); + y6_value = y6_value.clone() + cols.y6_bit_decomp[i] * AB::F::from_canonical_u32(1 << i); + if i >= 26 { + top_4_bits = top_4_bits.clone() + cols.y6_bit_decomp[i]; + } + } + top_4_bits = top_4_bits.clone() - AB::Expr::from_canonical_u32(4); + builder.when(is_real).assert_eq(cols.range_check_witness * top_4_bits, AB::Expr::one()); + + // Constrain that y has correct sign. 
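+        // A send commits to the negation of the receive's point (see `get_digest`), so pinning
+        // y_6 to the correct half of the field fixes which square root was taken and lets a
+        // matched send/receive pair cancel in the final sum.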
+ // If it's a receive: 0 <= y_6 - 1 < (p - 1) / 2 = 2^30 - 2^26 + // If it's a send: 0 <= y_6 - (p + 1) / 2 < (p - 1) / 2 = 2^30 - 2^26 + if is_receive { + builder.when(is_real).assert_eq(y.0[6].clone(), AB::Expr::one() + y6_value); + } else { + builder.when(is_real).assert_eq( + y.0[6].clone(), + AB::Expr::from_canonical_u32((1 << 30) - (1 << 26) + 1) + y6_value, + ); + } + } + + #[allow(clippy::too_many_arguments)] + pub fn eval_single_digest_memory( + builder: &mut AB, + shard: AB::Expr, + clk: AB::Expr, + addr: AB::Expr, + value: [AB::Expr; 4], + cols: GlobalInteractionOperation, + is_receive: bool, + is_real: AB::Var, + ) { + let values = [ + shard.clone(), + clk.clone(), + addr.clone(), + value[0].clone(), + value[1].clone(), + value[2].clone(), + value[3].clone(), + ]; + + Self::eval_single_digest( + builder, + values, + cols, + is_receive, + is_real, + InteractionKind::Memory, + ); + + // Range check for message space. + // Range check shard to be a valid u16. + builder.send_byte( + AB::Expr::from_canonical_u8(ByteOpcode::U16Range as u8), + shard, + AB::Expr::zero(), + AB::Expr::zero(), + is_real, + ); + // Range check the word value to be valid u8 word. + builder.slice_range_check_u8(&value, is_real); + } + + #[allow(clippy::too_many_arguments)] + pub fn eval_single_digest_syscall( + builder: &mut AB, + shard: AB::Expr, + clk_16: AB::Expr, + clk_8: AB::Expr, + syscall_id: AB::Expr, + arg1: AB::Expr, + arg2: AB::Expr, + cols: GlobalInteractionOperation, + is_receive: bool, + is_real: AB::Var, + ) { + let values = [ + shard.clone(), + clk_16.clone(), + clk_8.clone(), + syscall_id.clone(), + arg1.clone(), + arg2.clone(), + AB::Expr::zero(), + ]; + + Self::eval_single_digest( + builder, + values, + cols, + is_receive, + is_real, + InteractionKind::Syscall, + ); + + // Range check for message space. + // Range check shard to be a valid u16. + builder.send_byte( + AB::Expr::from_canonical_u8(ByteOpcode::U16Range as u8), + shard, + AB::Expr::zero(), + AB::Expr::zero(), + is_real, + ); + + // Range check clk_8 and syscall_id to be u8. + builder.slice_range_check_u8(&[clk_8, syscall_id], is_real); + + // Range check clk_16 to be u16. 
+ builder.send_byte( + AB::Expr::from_canonical_u8(ByteOpcode::U16Range as u8), + clk_16, + AB::Expr::zero(), + AB::Expr::zero(), + is_real, + ); + } +} diff --git a/crates/core/machine/src/operations/mod.rs b/crates/core/machine/src/operations/mod.rs index 394daf906b..4dfe2791ee 100644 --- a/crates/core/machine/src/operations/mod.rs +++ b/crates/core/machine/src/operations/mod.rs @@ -13,6 +13,8 @@ mod baby_bear_word; pub mod field; mod fixed_rotate_right; mod fixed_shift_right; +mod global_accumulation; +mod global_interaction; mod is_equal_word; mod is_zero; mod is_zero_word; @@ -29,6 +31,8 @@ pub use baby_bear_range::*; pub use baby_bear_word::*; pub use fixed_rotate_right::*; pub use fixed_shift_right::*; +pub use global_accumulation::*; +pub use global_interaction::*; pub use is_equal_word::*; pub use is_zero::*; pub use is_zero_word::*; diff --git a/crates/core/machine/src/riscv/cost.rs b/crates/core/machine/src/riscv/cost.rs index db8f464c2f..b260954097 100644 --- a/crates/core/machine/src/riscv/cost.rs +++ b/crates/core/machine/src/riscv/cost.rs @@ -26,7 +26,7 @@ pub trait CostEstimator { impl CostEstimator for ExecutionReport { fn estimate_area(&self) -> u64 { let mut total_area = 0; - let mut total_chips = 3; + let mut total_chips = 2; let (chips, costs) = RiscvAir::::get_chips_and_costs(); let cpu_events = self.total_instruction_count(); @@ -103,6 +103,10 @@ impl CostEstimator for ExecutionReport { total_area += (uint256_mul_events as u64) * costs[&RiscvAirDiscriminants::Uint256Mul]; total_chips += 1; + let u256xu2048_mul_events = self.syscall_counts[SyscallCode::U256XU2048_MUL]; + total_area += (u256xu2048_mul_events as u64) * costs[&RiscvAirDiscriminants::U256x2048Mul]; + total_chips += 1; + let bls12381_fp_events = self.syscall_counts[SyscallCode::BLS12381_FP_ADD] + self.syscall_counts[SyscallCode::BLS12381_FP_SUB] + self.syscall_counts[SyscallCode::BLS12381_FP_MUL]; diff --git a/crates/core/machine/src/riscv/mod.rs b/crates/core/machine/src/riscv/mod.rs index 6812eadbbe..b544995d4a 100644 --- a/crates/core/machine/src/riscv/mod.rs +++ b/crates/core/machine/src/riscv/mod.rs @@ -9,9 +9,7 @@ use sp1_core_executor::{ }; use crate::{ - memory::{ - MemoryChipType, MemoryLocalChip, MemoryProgramChip, NUM_LOCAL_MEMORY_ENTRIES_PER_ROW, - }, + memory::{MemoryChipType, MemoryLocalChip, NUM_LOCAL_MEMORY_ENTRIES_PER_ROW}, riscv::MemoryChipType::{Finalize, Initialize}, syscall::precompiles::fptower::{Fp2AddSubAssignChip, Fp2MulAssignChip, FpOpChip}, }; @@ -43,6 +41,7 @@ pub(crate) mod riscv_chips { edwards::{EdAddAssignChip, EdDecompressChip}, keccak256::KeccakPermuteChip, sha256::{ShaCompressChip, ShaExtendChip}, + u256x2048_mul::U256x2048MulChip, uint256::Uint256MulChip, weierstrass::{ WeierstrassAddAssignChip, WeierstrassDecompressChip, @@ -95,7 +94,7 @@ pub enum RiscvAir { /// A table for the local memory state. MemoryLocal(MemoryLocalChip), /// A table for initializing the program memory. - ProgramMemory(MemoryProgramChip), + // ProgramMemory(MemoryProgramChip), /// A table for all the syscall invocations. SyscallCore(SyscallChip), /// A table for all the precompile invocations. @@ -132,6 +131,8 @@ pub enum RiscvAir { Bls12381Double(WeierstrassDoubleAssignChip>), /// A precompile for uint256 mul. Uint256Mul(Uint256MulChip), + /// A precompile for u256x2048 mul. + U256x2048Mul(U256x2048MulChip), /// A precompile for decompressing a point on the BLS12-381 curve. Bls12381Decompress(WeierstrassDecompressChip>), /// A precompile for BLS12-381 fp operation. 
@@ -275,6 +276,10 @@ impl RiscvAir { costs.insert(RiscvAirDiscriminants::Uint256Mul, uint256_mul.cost()); chips.push(uint256_mul); + let u256x2048_mul = Chip::new(RiscvAir::U256x2048Mul(U256x2048MulChip::default())); + costs.insert(RiscvAirDiscriminants::U256x2048Mul, u256x2048_mul.cost()); + chips.push(u256x2048_mul); + let bls12381_fp = Chip::new(RiscvAir::Bls12381Fp(FpOpChip::::new())); costs.insert(RiscvAirDiscriminants::Bls12381Fp, bls12381_fp.cost()); chips.push(bls12381_fp); @@ -361,9 +366,9 @@ impl RiscvAir { costs.insert(RiscvAirDiscriminants::MemoryLocal, memory_local.cost()); chips.push(memory_local); - let memory_program = Chip::new(RiscvAir::ProgramMemory(MemoryProgramChip::default())); - costs.insert(RiscvAirDiscriminants::ProgramMemory, memory_program.cost()); - chips.push(memory_program); + // let memory_program = Chip::new(RiscvAir::ProgramMemory(MemoryProgramChip::default())); + // costs.insert(RiscvAirDiscriminants::ProgramMemory, memory_program.cost()); + // chips.push(memory_program); let byte = Chip::new(RiscvAir::ByteLookup(ByteChip::default())); costs.insert(RiscvAirDiscriminants::ByteLookup, byte.cost()); @@ -376,7 +381,7 @@ impl RiscvAir { pub(crate) fn preprocessed_heights(program: &Program) -> Vec<(Self, usize)> { vec![ (RiscvAir::Program(ProgramChip::default()), program.instructions.len()), - (RiscvAir::ProgramMemory(MemoryProgramChip::default()), program.memory_image.len()), + // (RiscvAir::ProgramMemory(MemoryProgramChip::default()), program.memory_image.len()), (RiscvAir::ByteLookup(ByteChip::default()), 1 << 16), ] } @@ -454,7 +459,7 @@ impl RiscvAir { // Remove the preprocessed chips. airs.remove(&Self::Program(ProgramChip::default())); - airs.remove(&Self::ProgramMemory(MemoryProgramChip::default())); + // airs.remove(&Self::ProgramMemory(MemoryProgramChip::default())); airs.remove(&Self::ByteLookup(ByteChip::default())); airs.into_iter() @@ -502,6 +507,7 @@ impl RiscvAir { Self::Sha256Compress(_) => SyscallCode::SHA_COMPRESS, Self::Sha256Extend(_) => SyscallCode::SHA_EXTEND, Self::Uint256Mul(_) => SyscallCode::UINT256_MUL, + Self::U256x2048Mul(_) => SyscallCode::U256XU2048_MUL, Self::Bls12381Decompress(_) => SyscallCode::BLS12381_DECOMPRESS, Self::K256Decompress(_) => SyscallCode::SECP256K1_DECOMPRESS, Self::P256Decompress(_) => SyscallCode::SECP256R1_DECOMPRESS, @@ -516,7 +522,7 @@ impl RiscvAir { Self::MemoryGlobalInit(_) => unreachable!("Invalid for memory init/final"), Self::MemoryGlobalFinal(_) => unreachable!("Invalid for memory init/final"), Self::MemoryLocal(_) => unreachable!("Invalid for memory local"), - Self::ProgramMemory(_) => unreachable!("Invalid for memory program"), + // Self::ProgramMemory(_) => unreachable!("Invalid for memory program"), Self::Program(_) => unreachable!("Invalid for core chip"), Self::Mul(_) => unreachable!("Invalid for core chip"), Self::Lt(_) => unreachable!("Invalid for core chip"), @@ -571,26 +577,26 @@ pub mod tests { use crate::{ io::SP1Stdin, riscv::RiscvAir, - utils, - utils::{prove, run_test, setup_logger}, + utils::{self, prove_core, run_test, setup_logger}, }; use sp1_core_executor::{ programs::tests::{ fibonacci_program, simple_memory_program, simple_program, ssz_withdrawals_program, }, - Instruction, Opcode, Program, + Instruction, Opcode, Program, SP1Context, }; use sp1_stark::{ - baby_bear_poseidon2::BabyBearPoseidon2, CpuProver, SP1CoreOpts, StarkProvingKey, - StarkVerifyingKey, + baby_bear_poseidon2::BabyBearPoseidon2, CpuProver, MachineProver, SP1CoreOpts, + StarkProvingKey, StarkVerifyingKey, }; 
#[test] fn test_simple_prove() { utils::setup_logger(); let program = simple_program(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] @@ -607,7 +613,8 @@ pub mod tests { Instruction::new(*shift_op, 31, 29, 3, false, false), ]; let program = Program::new(instructions, 0, 0); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } } } @@ -621,7 +628,8 @@ pub mod tests { Instruction::new(Opcode::SUB, 31, 30, 29, false, false), ]; let program = Program::new(instructions, 0, 0); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] @@ -633,7 +641,8 @@ pub mod tests { Instruction::new(Opcode::ADD, 31, 30, 29, false, false), ]; let program = Program::new(instructions, 0, 0); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] @@ -650,7 +659,8 @@ pub mod tests { Instruction::new(*mul_op, 31, 30, 29, false, false), ]; let program = Program::new(instructions, 0, 0); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } } } @@ -666,7 +676,8 @@ pub mod tests { Instruction::new(*lt_op, 31, 30, 29, false, false), ]; let program = Program::new(instructions, 0, 0); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } } @@ -682,7 +693,8 @@ pub mod tests { Instruction::new(*bitwise_op, 31, 30, 29, false, false), ]; let program = Program::new(instructions, 0, 0); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } } @@ -705,7 +717,8 @@ pub mod tests { Instruction::new(*div_rem_op, 31, 29, 30, false, false), ]; let program = Program::new(instructions, 0, 0); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } } } @@ -714,7 +727,8 @@ pub mod tests { fn test_fibonacci_prove_simple() { setup_logger(); let program = fibonacci_program(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] @@ -726,7 +740,13 @@ pub mod tests { let mut opts = SP1CoreOpts::default(); opts.shard_size = 1024; opts.shard_batch_size = 2; - prove::<_, CpuProver<_, _>>(program, &stdin, BabyBearPoseidon2::new(), opts, None).unwrap(); + + let config = BabyBearPoseidon2::new(); + let machine = RiscvAir::machine(config); + let prover = CpuProver::new(machine); + let (pk, vk) = prover.setup(&program); + prove_core::<_, _>(&prover, &pk, &vk, program, &stdin, opts, SP1Context::default(), None) + .unwrap(); } #[test] @@ -734,28 +754,30 @@ pub mod tests { setup_logger(); let program = fibonacci_program(); let stdin = SP1Stdin::new(); - prove::<_, CpuProver<_, _>>( - program, - &stdin, - BabyBearPoseidon2::new(), - SP1CoreOpts::default(), - None, - ) - .unwrap(); + + let opts = SP1CoreOpts::default(); + let config = BabyBearPoseidon2::new(); + let machine = RiscvAir::machine(config); + let prover = CpuProver::new(machine); + let (pk, vk) = prover.setup(&program); + prove_core::<_, _>(&prover, &pk, &vk, program, &stdin, opts, SP1Context::default(), None) + .unwrap(); } #[test] fn test_simple_memory_program_prove() { setup_logger(); let program = simple_memory_program(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] fn test_ssz_withdrawal() { setup_logger(); let program = 
ssz_withdrawals_program();
-        run_test::>(program).unwrap();
+        let stdin = SP1Stdin::new();
+        run_test::>(program, stdin).unwrap();
     }
 
     #[test]
diff --git a/crates/core/machine/src/riscv/shape.rs b/crates/core/machine/src/riscv/shape.rs
index ac8f887d85..0cd57759d3 100644
--- a/crates/core/machine/src/riscv/shape.rs
+++ b/crates/core/machine/src/riscv/shape.rs
@@ -9,7 +9,7 @@ use sp1_stark::{air::MachineAir, MachineRecord, ProofShape};
 use thiserror::Error;
 
 use crate::{
-    memory::{MemoryLocalChip, MemoryProgramChip, NUM_LOCAL_MEMORY_ENTRIES_PER_ROW},
+    memory::{MemoryLocalChip, NUM_LOCAL_MEMORY_ENTRIES_PER_ROW},
     riscv::MemoryChipType::{Finalize, Initialize},
 };
 
@@ -37,6 +37,8 @@ pub enum CoreShapeError {
 /// A structure that enables fixing the shape of an executionrecord.
 pub struct CoreShapeConfig {
     included_shapes: Vec>,
+    // Shapes for shards with a CPU chip and memory initialize/finalize events.
+    shapes_with_cpu_and_memory_finalize: Vec, Vec>>>,
     allowed_preprocessed_log_heights: HashMap, Vec>>,
     allowed_core_log_heights: Vec, Vec>>>,
     maximal_core_log_heights_mask: Vec,
@@ -119,10 +121,21 @@ impl CoreShapeConfig {
 
         // If cpu is included, try to fix the shape as a core.
         // Get the heights of the core airs in the record.
-        let heights = RiscvAir::::core_heights(record);
+        let mut heights = RiscvAir::::core_heights(record);
 
-        // Try to find a shape within the included shapes.
-        for (i, allowed_log_heights) in self.allowed_core_log_heights.iter().enumerate() {
+        let mut shape_candidates = self.allowed_core_log_heights.iter().collect::>();
+
+        // If the record has global memory init/finalize events, replace the candidates with
+        // shapes that include the memory initialize/finalize chip.
+        if !record.global_memory_finalize_events.is_empty()
+            || !record.global_memory_initialize_events.is_empty()
+        {
+            heights.extend(RiscvAir::::get_memory_init_final_heights(record));
+            shape_candidates = self.shapes_with_cpu_and_memory_finalize.iter().collect();
+        }
+
+        // Try to find a shape fitting within at least one of the candidate shapes.
+        for (i, allowed_log_heights) in shape_candidates.iter().enumerate() {
             if let Some(shape) =
                 Self::find_shape_from_allowed_heights(&heights, allowed_log_heights)
             {
@@ -151,7 +164,8 @@ impl CoreShapeConfig {
             return Err(CoreShapeError::ShapeError(record.stats()));
         }
 
-        // If the record is a global memory init/finalize record, try to fix the shape as such.
+        // If the record does not have the CPU chip and is a global memory init/finalize
+        // record, try to fix the shape as such.
         if !record.global_memory_initialize_events.is_empty()
             || !record.global_memory_finalize_events.is_empty()
         {
@@ -204,7 +218,8 @@ impl CoreShapeConfig {
         mem_events_per_row: usize,
         allowed_log_height: usize,
     ) -> Vec<[(String, usize); 3]> {
-        (1..=air.rows_per_event())
+        // TODO: this is a temporary fix to the shape, concretely fix this
+        (1..=4 * air.rows_per_event())
            .rev()
            .map(|rows_per_event| {
                [
@@ -347,11 +362,11 @@ impl Default for CoreShapeConfig {
     fn default() -> Self {
         // Preprocessed chip heights.
let program_heights = vec![Some(19), Some(20), Some(21), Some(22)]; - let program_memory_heights = vec![Some(19), Some(20), Some(21), Some(22)]; + // let program_memory_heights = vec![Some(19), Some(20), Some(21), Some(22)]; let allowed_preprocessed_log_heights = HashMap::from([ (RiscvAir::Program(ProgramChip::default()), program_heights), - (RiscvAir::ProgramMemory(MemoryProgramChip::default()), program_memory_heights), + // (RiscvAir::ProgramMemory(MemoryProgramChip::default()), program_memory_heights), (RiscvAir::ByteLookup(ByteChip::default()), vec![Some(16)]), ]); @@ -647,6 +662,20 @@ impl Default for CoreShapeConfig { divrem_height: vec![Some(10), Some(16), Some(17)], is_potentially_maximal: true, }, + // Shards with mainly arithmetic, few memory accesses, and no division. + CoreShapeSpec { + cpu_height: vec![Some(21)], + add_sub_height: vec![Some(21)], + lt_height: vec![Some(19)], + bitwise_height: vec![Some(6)], + shift_right_height: vec![Some(19)], + shift_left_height: vec![Some(6)], + syscall_core_height: vec![Some(0)], + memory_local_height: vec![Some(6)], + mul_height: vec![Some(19)], + divrem_height: vec![Some(0)], + is_potentially_maximal: true, + }, // Shards with basic arithmetic and branching. CoreShapeSpec { cpu_height: vec![Some(21)], @@ -713,6 +742,147 @@ impl Default for CoreShapeConfig { .insert(air, (mem_events_per_row, precompile_heights.clone())); } + // Shapes for shards with a CPU chip and memory initialize/finalize events. + let shapes_with_cpu_and_memory_finalize = vec![ + // Small shape with few Muls and LTs. + HashMap::from([ + (RiscvAir::::Cpu(CpuChip::default()), vec![Some(13)]), + (RiscvAir::::Add(AddSubChip::default()), vec![Some(12)]), + (RiscvAir::::Bitwise(BitwiseChip::default()), vec![Some(11)]), + (RiscvAir::::Mul(MulChip::default()), vec![Some(4)]), + (RiscvAir::::ShiftRight(ShiftRightChip::default()), vec![Some(10)]), + (RiscvAir::::ShiftLeft(ShiftLeft::default()), vec![Some(10)]), + (RiscvAir::::Lt(LtChip::default()), vec![Some(8)]), + (RiscvAir::::MemoryLocal(MemoryLocalChip::new()), vec![Some(6)]), + (RiscvAir::::SyscallCore(SyscallChip::core()), vec![None]), + (RiscvAir::::DivRem(DivRemChip::default()), vec![None]), + (RiscvAir::::MemoryGlobalInit(MemoryGlobalChip::new(Initialize)), vec![Some(8)]), + (RiscvAir::::MemoryGlobalFinal(MemoryGlobalChip::new(Finalize)), vec![Some(15)]), + ]), + // Small shape with few Muls. + HashMap::from([ + (RiscvAir::::Cpu(CpuChip::default()), vec![Some(14)]), + (RiscvAir::::Add(AddSubChip::default()), vec![Some(14)]), + (RiscvAir::::Bitwise(BitwiseChip::default()), vec![Some(11)]), + (RiscvAir::::Mul(MulChip::default()), vec![Some(4)]), + (RiscvAir::::ShiftRight(ShiftRightChip::default()), vec![Some(10)]), + (RiscvAir::::ShiftLeft(ShiftLeft::default()), vec![Some(10)]), + (RiscvAir::::Lt(LtChip::default()), vec![Some(13)]), + (RiscvAir::::MemoryLocal(MemoryLocalChip::new()), vec![Some(6)]), + (RiscvAir::::SyscallCore(SyscallChip::core()), vec![None]), + (RiscvAir::::DivRem(DivRemChip::default()), vec![None]), + (RiscvAir::::MemoryGlobalInit(MemoryGlobalChip::new(Initialize)), vec![Some(8)]), + (RiscvAir::::MemoryGlobalFinal(MemoryGlobalChip::new(Finalize)), vec![Some(15)]), + ]), + + // Small shape with many Muls. 
+ HashMap::from([ + (RiscvAir::::Cpu(CpuChip::default()), vec![Some(15)]), + (RiscvAir::::Add(AddSubChip::default()), vec![Some(14)]), + (RiscvAir::::Bitwise(BitwiseChip::default()), vec![Some(11)]), + (RiscvAir::::Mul(MulChip::default()), vec![Some(12)]), + (RiscvAir::::ShiftRight(ShiftRightChip::default()), vec![Some(12)]), + (RiscvAir::::ShiftLeft(ShiftLeft::default()), vec![Some(10)]), + (RiscvAir::::Lt(LtChip::default()), vec![Some(12)]), + (RiscvAir::::MemoryLocal(MemoryLocalChip::new()), vec![Some(7)]), + (RiscvAir::::SyscallCore(SyscallChip::core()), vec![None]), + (RiscvAir::::DivRem(DivRemChip::default()), vec![None]), + (RiscvAir::::MemoryGlobalInit(MemoryGlobalChip::new(Initialize)), vec![Some(8)]), + (RiscvAir::::MemoryGlobalFinal(MemoryGlobalChip::new(Finalize)), vec![Some(15)]), + ]), + // Medium shape with few muls. + HashMap::from([ + (RiscvAir::::Cpu(CpuChip::default()), vec![Some(17)]), + (RiscvAir::::Add(AddSubChip::default()), vec![Some(17)]), + (RiscvAir::::Bitwise(BitwiseChip::default()), vec![Some(11)]), + (RiscvAir::::Mul(MulChip::default()), vec![Some(4)]), + (RiscvAir::::ShiftRight(ShiftRightChip::default()), vec![Some(10)]), + (RiscvAir::::ShiftLeft(ShiftLeft::default()), vec![Some(10)]), + (RiscvAir::::Lt(LtChip::default()), vec![Some(16)]), + (RiscvAir::::MemoryLocal(MemoryLocalChip::new()), vec![Some(6)]), + (RiscvAir::::SyscallCore(SyscallChip::core()), vec![None]), + (RiscvAir::::DivRem(DivRemChip::default()), vec![None]), + (RiscvAir::::MemoryGlobalInit(MemoryGlobalChip::new(Initialize)), vec![Some(8)]), + (RiscvAir::::MemoryGlobalFinal(MemoryGlobalChip::new(Finalize)), vec![Some(15)]), + ]), + // Medium shape with many Muls. + HashMap::from([ + (RiscvAir::::Cpu(CpuChip::default()), vec![Some(18)]), + (RiscvAir::::Add(AddSubChip::default()), vec![Some(17)]), + (RiscvAir::::Bitwise(BitwiseChip::default()), vec![Some(11)]), + (RiscvAir::::Mul(MulChip::default()), vec![Some(15)]), + (RiscvAir::::ShiftRight(ShiftRightChip::default()), vec![Some(15)]), + (RiscvAir::::ShiftLeft(ShiftLeft::default()), vec![Some(10)]), + (RiscvAir::::Lt(LtChip::default()), vec![Some(15)]), + (RiscvAir::::MemoryLocal(MemoryLocalChip::new()), vec![Some(7)]), + (RiscvAir::::SyscallCore(SyscallChip::core()), vec![None]), + (RiscvAir::::DivRem(DivRemChip::default()), vec![None]), + (RiscvAir::::MemoryGlobalInit(MemoryGlobalChip::new(Initialize)), vec![Some(8)]), + (RiscvAir::::MemoryGlobalFinal(MemoryGlobalChip::new(Finalize)), vec![Some(15)]), + ]), + // Large shapes + HashMap::from([ + (RiscvAir::::Cpu(CpuChip::default()), vec![Some(20)]), + (RiscvAir::::Add(AddSubChip::default()), vec![Some(20)]), + (RiscvAir::::Bitwise(BitwiseChip::default()), vec![Some(11)]), + (RiscvAir::::Mul(MulChip::default()), vec![Some(4)]), + (RiscvAir::::ShiftRight(ShiftRightChip::default()), vec![Some(10)]), + (RiscvAir::::ShiftLeft(ShiftLeft::default()), vec![Some(10)]), + (RiscvAir::::Lt(LtChip::default()), vec![Some(19)]), + (RiscvAir::::MemoryLocal(MemoryLocalChip::new()), vec![Some(6)]), + (RiscvAir::::SyscallCore(SyscallChip::core()), vec![None]), + (RiscvAir::::DivRem(DivRemChip::default()), vec![None]), + (RiscvAir::::MemoryGlobalInit(MemoryGlobalChip::new(Initialize)), vec![Some(8)]), + (RiscvAir::::MemoryGlobalFinal(MemoryGlobalChip::new(Finalize)), vec![Some(15)]), + ]), + HashMap::from([ + (RiscvAir::::Cpu(CpuChip::default()), vec![Some(20)]), + (RiscvAir::::Add(AddSubChip::default()), vec![Some(20)]), + (RiscvAir::::Bitwise(BitwiseChip::default()), vec![Some(11)]), + 
(RiscvAir::::Mul(MulChip::default()), vec![Some(4)]), + (RiscvAir::::ShiftRight(ShiftRightChip::default()), vec![Some(11)]), + (RiscvAir::::ShiftLeft(ShiftLeft::default()), vec![Some(10)]), + (RiscvAir::::Lt(LtChip::default()), vec![Some(19)]), + (RiscvAir::::MemoryLocal(MemoryLocalChip::new()), vec![Some(6)]), + (RiscvAir::::SyscallCore(SyscallChip::core()), vec![Some(1)]), + (RiscvAir::::DivRem(DivRemChip::default()), vec![Some(1)]), + (RiscvAir::::MemoryGlobalInit(MemoryGlobalChip::new(Initialize)), vec![Some(8)]), + (RiscvAir::::MemoryGlobalFinal(MemoryGlobalChip::new(Finalize)), vec![Some(15)]), + ]), + HashMap::from([ + (RiscvAir::::Cpu(CpuChip::default()), vec![Some(21)]), + (RiscvAir::::Add(AddSubChip::default()), vec![Some(21)]), + (RiscvAir::::Bitwise(BitwiseChip::default()), vec![Some(11)]), + (RiscvAir::::Mul(MulChip::default()), vec![Some(19)]), + (RiscvAir::::ShiftRight(ShiftRightChip::default()), vec![Some(19)]), + (RiscvAir::::ShiftLeft(ShiftLeft::default()), vec![Some(10)]), + (RiscvAir::::Lt(LtChip::default()), vec![Some(19)]), + (RiscvAir::::MemoryLocal(MemoryLocalChip::new()), vec![Some(7)]), + (RiscvAir::::SyscallCore(SyscallChip::core()), vec![None]), + (RiscvAir::::DivRem(DivRemChip::default()), vec![None]), + (RiscvAir::::MemoryGlobalInit(MemoryGlobalChip::new(Initialize)), vec![Some(8)]), + (RiscvAir::::MemoryGlobalFinal(MemoryGlobalChip::new(Finalize)), vec![Some(15)]), + ]), + // Catchall shape. + HashMap::from([ + (RiscvAir::::Cpu(CpuChip::default()), vec![Some(21)]), + (RiscvAir::::Add(AddSubChip::default()), vec![Some(21)]), + (RiscvAir::::Bitwise(BitwiseChip::default()), vec![Some(19)]), + (RiscvAir::::Mul(MulChip::default()), vec![Some(19)]), + (RiscvAir::::ShiftRight(ShiftRightChip::default()), vec![Some(19)]), + (RiscvAir::::ShiftLeft(ShiftLeft::default()), vec![Some(19)]), + (RiscvAir::::Lt(LtChip::default()), vec![Some(20)]), + (RiscvAir::::MemoryLocal(MemoryLocalChip::new()), vec![Some(19)]), + (RiscvAir::::SyscallCore(SyscallChip::core()), vec![Some(19)]), + (RiscvAir::::DivRem(DivRemChip::default()), vec![Some(21)]), + ( + RiscvAir::::MemoryGlobalInit(MemoryGlobalChip::new(Initialize)), + vec![Some(19)], + ), + (RiscvAir::::MemoryGlobalFinal(MemoryGlobalChip::new(Finalize)), vec![Some(19)]), + ]), + ]; + Self { included_shapes: vec![], allowed_preprocessed_log_heights, @@ -720,16 +890,16 @@ impl Default for CoreShapeConfig { maximal_core_log_heights_mask, memory_allowed_log_heights, precompile_allowed_log_heights, + shapes_with_cpu_and_memory_finalize, } } } -#[cfg(any(test, feature = "programs"))] +#[cfg(test)] pub mod tests { use std::fmt::Debug; - use p3_challenger::{CanObserve, FieldChallenger}; - use sp1_stark::{air::InteractionScope, Dom, MachineProver, StarkGenericConfig}; + use sp1_stark::{Dom, MachineProver, StarkGenericConfig}; use super::*; @@ -750,30 +920,15 @@ pub mod tests { let (pk, _) = prover.setup(&program); // Try to generate traces. - let global_traces = prover.generate_traces(&record, InteractionScope::Global); - let local_traces = prover.generate_traces(&record, InteractionScope::Local); + let main_traces = prover.generate_traces(&record); // Try to commit the traces. 
- let global_data = prover.commit(&record, global_traces); - let local_data = prover.commit(&record, local_traces); + let main_data = prover.commit(&record, main_traces); let mut challenger = prover.machine().config().challenger(); - challenger.observe(global_data.main_commit.clone()); - challenger.observe(local_data.main_commit.clone()); - - let global_permutation_challenges: [::Challenge; 2] = - [challenger.sample_ext_element(), challenger.sample_ext_element()]; // Try to "open". - prover - .open( - &pk, - Some(global_data), - local_data, - &mut challenger, - &global_permutation_challenges, - ) - .unwrap(); + prover.open(&pk, main_data, &mut challenger).unwrap(); } #[test] @@ -790,8 +945,7 @@ pub mod tests { fn test_dummy_record() { use crate::utils::setup_logger; use p3_baby_bear::BabyBear; - use sp1_stark::baby_bear_poseidon2::BabyBearPoseidon2; - use sp1_stark::CpuProver; + use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, CpuProver}; type SC = BabyBearPoseidon2; type A = RiscvAir; @@ -800,7 +954,7 @@ pub mod tests { let preprocessed_log_heights = [ (RiscvAir::::Program(ProgramChip::default()), 10), - (RiscvAir::::ProgramMemory(MemoryProgramChip::default()), 10), + // (RiscvAir::::ProgramMemory(MemoryProgramChip::default()), 10), (RiscvAir::::ByteLookup(ByteChip::default()), 16), ]; diff --git a/crates/core/machine/src/runtime/syscall.rs b/crates/core/machine/src/runtime/syscall.rs index 1d15ddf727..18a748da32 100644 --- a/crates/core/machine/src/runtime/syscall.rs +++ b/crates/core/machine/src/runtime/syscall.rs @@ -12,6 +12,7 @@ use crate::syscall::precompiles::edwards::EdDecompressChip; use crate::syscall::precompiles::fptower::{Fp2AddSubSyscall, Fp2MulAssignChip, FpOpSyscall}; use crate::syscall::precompiles::keccak256::KeccakPermuteChip; use crate::syscall::precompiles::sha256::{ShaCompressChip, ShaExtendChip}; +use crate::syscall::precompiles::u256x2048_mul::U256x2048MulChip; use crate::syscall::precompiles::uint256::Uint256MulChip; use crate::syscall::precompiles::weierstrass::WeierstrassAddAssignChip; use crate::syscall::precompiles::weierstrass::WeierstrassDecompressChip; @@ -109,6 +110,9 @@ pub enum SyscallCode { /// Executes the `UINT256_MUL` precompile. UINT256_MUL = 0x00_01_01_1D, + /// Executes the `U256X2048_MUL` precompile. + U256X2048_MUL = 0x00_01_01_2F, + /// Executes the `BLS12381_ADD` precompile. 
BLS12381_ADD = 0x00_01_01_1E, @@ -181,6 +185,7 @@ impl SyscallCode { 0x00_00_00_F0 => SyscallCode::HINT_LEN, 0x00_00_00_F1 => SyscallCode::HINT_READ, 0x00_01_01_1D => SyscallCode::UINT256_MUL, + 0x00_01_01_2F => SyscallCode::U256X2048_MUL, 0x00_01_01_20 => SyscallCode::BLS12381_FP_ADD, 0x00_01_01_21 => SyscallCode::BLS12381_FP_SUB, 0x00_01_01_22 => SyscallCode::BLS12381_FP_MUL, @@ -382,6 +387,7 @@ pub fn default_syscall_map() -> HashMap> { Arc::new(WeierstrassDoubleAssignChip::::new()), ); syscall_map.insert(SyscallCode::UINT256_MUL, Arc::new(Uint256MulChip::new())); + syscall_map.insert(SyscallCode::U256X2048_MUL, Arc::new(U256x2048MulChip::new())); syscall_map.insert( SyscallCode::BLS12381_FP_ADD, Arc::new(FpOpSyscall::::new(FieldOperation::Add)), @@ -529,6 +535,9 @@ mod tests { SyscallCode::UINT256_MUL => { assert_eq!(code as u32, sp1_zkvm::syscalls::UINT256_MUL) } + SyscallCode::U256X2048_MUL => { + assert_eq!(code as u32, sp1_zkvm::syscalls::U256X2048_MUL) + } SyscallCode::COMMIT => assert_eq!(code as u32, sp1_zkvm::syscalls::COMMIT), SyscallCode::COMMIT_DEFERRED_PROOFS => { assert_eq!(code as u32, sp1_zkvm::syscalls::COMMIT_DEFERRED_PROOFS) diff --git a/crates/core/machine/src/sys.rs b/crates/core/machine/src/sys.rs new file mode 100644 index 0000000000..aa14f63d71 --- /dev/null +++ b/crates/core/machine/src/sys.rs @@ -0,0 +1,163 @@ +use crate::{ + alu::{AddSubCols, BitwiseCols, LtCols, MulCols, ShiftLeftCols, ShiftRightCols}, + memory::MemoryInitCols, + memory::SingleMemoryLocal, + syscall::chip::SyscallCols, +}; +use hashbrown::HashMap; +use p3_baby_bear::BabyBear; + +use sp1_core_executor::events::{ + AluEvent, CpuEvent, LookupId, MemoryInitializeFinalizeEvent, MemoryLocalEvent, + MemoryReadRecord, MemoryRecordEnum, MemoryWriteRecord, SyscallEvent, +}; + +#[link(name = "sp1-core-machine-sys", kind = "static")] +extern "C-unwind" { + pub fn add_sub_event_to_row_babybear(event: &AluEvent, cols: &mut AddSubCols); + pub fn mul_event_to_row_babybear(event: &AluEvent, cols: &mut MulCols); + pub fn bitwise_event_to_row_babybear(event: &AluEvent, cols: &mut BitwiseCols); + pub fn lt_event_to_row_babybear(event: &AluEvent, cols: &mut LtCols); + pub fn sll_event_to_row_babybear(event: &AluEvent, cols: &mut ShiftLeftCols); + pub fn sr_event_to_row_babybear(event: &AluEvent, cols: &mut ShiftRightCols); + pub fn memory_local_event_to_row_babybear( + event: &MemoryLocalEvent, + cols: &mut SingleMemoryLocal, + ); + pub fn memory_global_event_to_row_babybear( + event: &MemoryInitializeFinalizeEvent, + is_receive: bool, + cols: &mut MemoryInitCols, + ); + pub fn syscall_event_to_row_babybear( + event: &SyscallEvent, + is_receive: bool, + cols: &mut SyscallCols, + ); +} + +/// An alternative to `Option` that is FFI-safe. +/// +/// See [`MemoryRecordEnum`]. +#[derive(Debug, Copy, Clone)] +#[repr(C)] +pub enum OptionMemoryRecordEnum { + /// Read. + Read(MemoryReadRecord), + /// Write. 
+ Write(MemoryWriteRecord), + None, +} + +impl From> for OptionMemoryRecordEnum { + fn from(value: Option) -> Self { + match value { + Some(MemoryRecordEnum::Read(r)) => Self::Read(r), + Some(MemoryRecordEnum::Write(r)) => Self::Write(r), + None => Self::None, + } + } +} + +impl From for Option { + fn from(value: OptionMemoryRecordEnum) -> Self { + match value { + OptionMemoryRecordEnum::Read(r) => Some(MemoryRecordEnum::Read(r)), + OptionMemoryRecordEnum::Write(r) => Some(MemoryRecordEnum::Write(r)), + OptionMemoryRecordEnum::None => None, + } + } +} + +/// An FFI-safe version of [`CpuEvent`] that also looks up nonces ahead of time. +#[derive(Debug, Clone, Copy)] +#[repr(C)] +pub struct CpuEventFfi { + /// The clock cycle. + pub clk: u32, + /// The program counter. + pub pc: u32, + /// The next program counter. + pub next_pc: u32, + /// The first operand. + pub a: u32, + /// The first operand memory record. + pub a_record: OptionMemoryRecordEnum, + /// The second operand. + pub b: u32, + /// The second operand memory record. + pub b_record: OptionMemoryRecordEnum, + /// The third operand. + pub c: u32, + /// The third operand memory record. + pub c_record: OptionMemoryRecordEnum, + // Seems to be vestigial. Verify before completely removing this. + // /// The memory value. + // pub memory: Option<&'a u32>, + /// The memory record. + pub memory_record: OptionMemoryRecordEnum, + /// The exit code. + pub exit_code: u32, + + pub alu_nonce: u32, + pub syscall_nonce: u32, + pub memory_add_nonce: u32, + pub memory_sub_nonce: u32, + pub branch_gt_nonce: u32, + pub branch_lt_nonce: u32, + pub branch_add_nonce: u32, + pub jump_jal_nonce: u32, + pub jump_jalr_nonce: u32, + pub auipc_nonce: u32, +} + +impl CpuEventFfi { + pub fn new(event: &CpuEvent, nonce_lookup: &HashMap) -> Self { + let &CpuEvent { + clk, + pc, + next_pc, + a, + a_record, + b, + b_record, + c, + c_record, + memory_record, + exit_code, + ref alu_lookup_id, + ref syscall_lookup_id, + ref memory_add_lookup_id, + ref memory_sub_lookup_id, + ref branch_gt_lookup_id, + ref branch_lt_lookup_id, + ref branch_add_lookup_id, + ref jump_jal_lookup_id, + ref jump_jalr_lookup_id, + ref auipc_lookup_id, + } = event; + Self { + clk, + pc, + next_pc, + a, + a_record: a_record.into(), + b, + b_record: b_record.into(), + c, + c_record: c_record.into(), + memory_record: memory_record.into(), + exit_code, + alu_nonce: nonce_lookup.get(alu_lookup_id).copied().unwrap_or_default(), + syscall_nonce: nonce_lookup.get(syscall_lookup_id).copied().unwrap_or_default(), + memory_add_nonce: nonce_lookup.get(memory_add_lookup_id).copied().unwrap_or_default(), + memory_sub_nonce: nonce_lookup.get(memory_sub_lookup_id).copied().unwrap_or_default(), + branch_gt_nonce: nonce_lookup.get(branch_gt_lookup_id).copied().unwrap_or_default(), + branch_lt_nonce: nonce_lookup.get(branch_lt_lookup_id).copied().unwrap_or_default(), + branch_add_nonce: nonce_lookup.get(branch_add_lookup_id).copied().unwrap_or_default(), + jump_jal_nonce: nonce_lookup.get(jump_jal_lookup_id).copied().unwrap_or_default(), + jump_jalr_nonce: nonce_lookup.get(jump_jalr_lookup_id).copied().unwrap_or_default(), + auipc_nonce: nonce_lookup.get(auipc_lookup_id).copied().unwrap_or_default(), + } + } +} diff --git a/crates/core/machine/src/syscall/chip.rs b/crates/core/machine/src/syscall/chip.rs index 00257d46aa..6ced3d79d8 100644 --- a/crates/core/machine/src/syscall/chip.rs +++ b/crates/core/machine/src/syscall/chip.rs @@ -1,18 +1,28 @@ -use core::fmt; -use std::{ - borrow::{Borrow, BorrowMut}, - 
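`OptionMemoryRecordEnum` exists because `Option<T>` has no guaranteed layout across the `extern "C-unwind"` boundary declared above, so an FFI-safe `#[repr(C)]` enum with an explicit `None` variant stands in for it. A minimal sketch of the same pattern with a stand-in record type (the real code converts `Option<MemoryRecordEnum>`):

```rust
/// Stand-in for the real memory record types; only the pattern matters here.
#[derive(Debug, Clone, Copy, PartialEq)]
#[repr(C)]
struct Record {
    value: u32,
    timestamp: u32,
}

/// FFI-safe replacement for `Option<Record>`.
#[derive(Debug, Clone, Copy, PartialEq)]
#[repr(C)]
enum OptionRecord {
    Some(Record),
    None,
}

impl From<Option<Record>> for OptionRecord {
    fn from(value: Option<Record>) -> Self {
        match value {
            Some(r) => Self::Some(r),
            None => Self::None,
        }
    }
}

impl From<OptionRecord> for Option<Record> {
    fn from(value: OptionRecord) -> Self {
        match value {
            OptionRecord::Some(r) => Some(r),
            OptionRecord::None => None,
        }
    }
}

fn main() {
    let record = Some(Record { value: 7, timestamp: 42 });
    let ffi: OptionRecord = record.into();
    // The conversions round-trip losslessly, so either side of the FFI boundary
    // can use its preferred representation.
    assert_eq!(Option::<Record>::from(ffi), record);
}
```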
mem::size_of, +use crate::{ + operations::GlobalAccumulationOperation, operations::GlobalInteractionOperation, + utils::pad_rows_fixed, }; - +use core::fmt; +use hashbrown::HashMap; +use itertools::Itertools; use p3_air::{Air, BaseAir}; +use p3_field::AbstractField; use p3_field::PrimeField32; use p3_matrix::{dense::RowMajorMatrix, Matrix}; +use p3_maybe_rayon::prelude::IntoParallelRefIterator; +use p3_maybe_rayon::prelude::ParallelBridge; +use p3_maybe_rayon::prelude::ParallelIterator; +use p3_maybe_rayon::prelude::ParallelSlice; +use sp1_core_executor::events::ByteLookupEvent; +use sp1_core_executor::events::ByteRecord; use sp1_core_executor::{events::SyscallEvent, ExecutionRecord, Program}; use sp1_derive::AlignedBorrow; use sp1_stark::air::{InteractionScope, MachineAir, SP1AirBuilder}; - -use crate::utils::pad_rows_fixed; - +use sp1_stark::septic_digest::SepticDigest; +use std::{ + borrow::{Borrow, BorrowMut}, + mem::size_of, +}; /// The number of main trace columns for `SyscallChip`. pub const NUM_SYSCALL_COLS: usize = size_of::>(); @@ -39,8 +49,14 @@ impl SyscallChip { pub const fn precompile() -> Self { Self::new(SyscallShardKind::Precompile) } + + pub fn shard_kind(&self) -> SyscallShardKind { + self.shard_kind + } } +pub const SYSCALL_INITIAL_DIGEST_POS_COPY: usize = 60; + /// The column layout for the chip. #[derive(AlignedBorrow, Default, Clone, Copy)] #[repr(C)] @@ -48,10 +64,11 @@ pub struct SyscallCols { /// The shard number of the syscall. pub shard: T, - /// The clk of the syscall. - pub clk: T, + /// The bottom 16 bits of clk of the syscall. + pub clk_16: T, - pub nonce: T, + /// The top 8 bits of clk of the syscall. + pub clk_8: T, /// The syscall_id of the syscall. pub syscall_id: T, @@ -63,6 +80,12 @@ pub struct SyscallCols { pub arg2: T, pub is_real: T, + + /// The global interaction columns. + pub global_interaction_cols: GlobalInteractionOperation, + + /// The columns for accumulating the elliptic curve digests. + pub global_accumulation_cols: GlobalAccumulationOperation, } impl MachineAir for SyscallChip { @@ -74,8 +97,38 @@ impl MachineAir for SyscallChip { format!("Syscall{}", self.shard_kind).to_string() } - fn generate_dependencies(&self, _input: &ExecutionRecord, _output: &mut ExecutionRecord) { - // Do nothing since this chip has no dependencies. 
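The new `generate_dependencies` below follows a map-reduce pattern: split the events into roughly one chunk per core, build an independent byte-lookup map for each chunk in parallel, then hand all the per-chunk maps to the record at once. A self-contained sketch of that pattern, assuming the `rayon` and `num_cpus` crates the patch already uses, with a stand-in event type and counting logic in place of the real witness population:

```rust
use rayon::prelude::*;
use std::collections::HashMap;

/// Stand-in event type; the real code iterates `SyscallEvent`s and populates
/// byte-lookup events instead of simple counts.
type Event = u32;

fn per_chunk_lookup_counts(events: &[Event]) -> Vec<HashMap<Event, usize>> {
    // Roughly one chunk per available core, but never a zero chunk size.
    let chunk_size = std::cmp::max(events.len() / num_cpus::get(), 1);
    events
        .par_chunks(chunk_size)
        .map(|chunk| {
            let mut counts: HashMap<Event, usize> = HashMap::new();
            for &event in chunk {
                // Stand-in for populating range-check / byte-lookup witnesses.
                *counts.entry(event & 0xFF).or_default() += 1;
            }
            counts
        })
        .collect()
}

fn main() {
    let events: Vec<Event> = (0..10_000).collect();
    let batches = per_chunk_lookup_counts(&events);
    // Merged, the per-chunk maps account for every event exactly once.
    let total: usize = batches.iter().map(|m| m.values().sum::<usize>()).sum();
    assert_eq!(total, events.len());
}
```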
+ fn generate_dependencies(&self, input: &ExecutionRecord, output: &mut ExecutionRecord) { + let events = match self.shard_kind { + SyscallShardKind::Core => &input.syscall_events, + SyscallShardKind::Precompile => &input + .precompile_events + .all_events() + .map(|(event, _)| event.to_owned()) + .collect::>(), + }; + let chunk_size = std::cmp::max(events.len() / num_cpus::get(), 1); + let blu_batches = events + .par_chunks(chunk_size) + .map(|events| { + let mut blu: HashMap> = HashMap::new(); + events.iter().for_each(|event| { + let mut row = [F::zero(); NUM_SYSCALL_COLS]; + let cols: &mut SyscallCols = row.as_mut_slice().borrow_mut(); + let clk_16 = (event.clk & 65535) as u16; + let clk_8 = (event.clk >> 16) as u8; + cols.global_interaction_cols.populate_syscall_range_check_witness( + event.shard, + clk_16, + clk_8, + event.syscall_id, + true, + &mut blu, + ); + }); + blu + }) + .collect::>(); + output.add_sharded_byte_lookup_events(blu_batches.iter().collect_vec()); } fn generate_trace( @@ -83,37 +136,62 @@ impl MachineAir for SyscallChip { input: &ExecutionRecord, _output: &mut ExecutionRecord, ) -> RowMajorMatrix { - let mut rows = Vec::new(); + let mut global_cumulative_sum = SepticDigest::::zero().0; - let row_fn = |syscall_event: &SyscallEvent| { + let row_fn = |syscall_event: &SyscallEvent, is_receive: bool| { let mut row = [F::zero(); NUM_SYSCALL_COLS]; let cols: &mut SyscallCols = row.as_mut_slice().borrow_mut(); + debug_assert!(syscall_event.clk < (1 << 24)); + let clk_16 = (syscall_event.clk & 65535) as u16; + let clk_8 = (syscall_event.clk >> 16) as u8; + cols.shard = F::from_canonical_u32(syscall_event.shard); - cols.clk = F::from_canonical_u32(syscall_event.clk); + cols.clk_16 = F::from_canonical_u16(clk_16); + cols.clk_8 = F::from_canonical_u8(clk_8); cols.syscall_id = F::from_canonical_u32(syscall_event.syscall_id); - cols.nonce = F::from_canonical_u32(syscall_event.nonce); cols.arg1 = F::from_canonical_u32(syscall_event.arg1); cols.arg2 = F::from_canonical_u32(syscall_event.arg2); cols.is_real = F::one(); + cols.global_interaction_cols.populate_syscall( + syscall_event.shard, + clk_16, + clk_8, + syscall_event.syscall_id, + syscall_event.arg1, + syscall_event.arg2, + is_receive, + true, + ); row }; - match self.shard_kind { - SyscallShardKind::Core => { - for event in input.syscall_events.iter() { - let row = row_fn(event); - rows.push(row); - } - } - SyscallShardKind::Precompile => { - for event in input.precompile_events.all_events().map(|(event, _)| event) { - let row = row_fn(event); - rows.push(row); - } - } + let mut rows = match self.shard_kind { + SyscallShardKind::Core => input + .syscall_events + .par_iter() + .map(|event| row_fn(event, false)) + .collect::>(), + SyscallShardKind::Precompile => input + .precompile_events + .all_events() + .map(|(event, _)| event) + .par_bridge() + .map(|event| row_fn(event, true)) + .collect::>(), }; + let num_events = rows.len(); + + for i in 0..num_events { + let cols: &mut SyscallCols = rows[i].as_mut_slice().borrow_mut(); + cols.global_accumulation_cols.populate( + &mut global_cumulative_sum, + [cols.global_interaction_cols], + [cols.is_real], + ); + } + // Pad the trace to a power of two depending on the proof shape in `input`. 
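The trace code above assumes `clk < 2^24` and commits it as two limbs, the low 16 bits (`clk_16`) and the high 8 bits (`clk_8`); the constraints then reconstruct it as `clk_16 + clk_8 * 2^16`. A small sketch of that decomposition over plain integers (helper names are illustrative):

```rust
/// Split a clock value (assumed < 2^24) into its low 16 bits and high 8 bits.
fn split_clk(clk: u32) -> (u16, u8) {
    debug_assert!(clk < (1 << 24));
    ((clk & 0xFFFF) as u16, (clk >> 16) as u8)
}

/// Recombine the limbs the same way the AIR does: clk_16 + clk_8 * 2^16.
fn recombine_clk(clk_16: u16, clk_8: u8) -> u32 {
    clk_16 as u32 + (clk_8 as u32) * (1 << 16)
}

fn main() {
    let clk = 0x00AB_CDEF;
    let (clk_16, clk_8) = split_clk(clk);
    assert_eq!((clk_16, clk_8), (0xCDEF, 0xAB));
    assert_eq!(recombine_clk(clk_16, clk_8), clk);
}
```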
pad_rows_fixed( &mut rows, @@ -121,7 +199,21 @@ impl MachineAir for SyscallChip { input.fixed_log2_rows::(self), ); - RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_SYSCALL_COLS) + let mut trace = + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_SYSCALL_COLS); + + for i in num_events..trace.height() { + let cols: &mut SyscallCols = + trace.values[i * NUM_SYSCALL_COLS..(i + 1) * NUM_SYSCALL_COLS].borrow_mut(); + cols.global_interaction_cols.populate_dummy(); + cols.global_accumulation_cols.populate( + &mut global_cumulative_sum, + [cols.global_interaction_cols], + [cols.is_real], + ); + } + + trace } fn included(&self, shard: &Self::Record) -> bool { @@ -153,6 +245,8 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &SyscallCols = (*local).borrow(); + let next = main.row_slice(1); + let next: &SyscallCols = (*next).borrow(); builder.assert_eq( local.is_real * local.is_real * local.is_real, @@ -163,8 +257,7 @@ where SyscallShardKind::Core => { builder.receive_syscall( local.shard, - local.clk, - local.nonce, + local.clk_16 + local.clk_8 * AB::Expr::from_canonical_u32(1 << 16), local.syscall_id, local.arg1, local.arg2, @@ -173,22 +266,23 @@ where ); // Send the call to the global bus to/from the precompile chips. - builder.send_syscall( - local.shard, - local.clk, - local.nonce, - local.syscall_id, - local.arg1, - local.arg2, + GlobalInteractionOperation::::eval_single_digest_syscall( + builder, + local.shard.into(), + local.clk_16.into(), + local.clk_8.into(), + local.syscall_id.into(), + local.arg1.into(), + local.arg2.into(), + local.global_interaction_cols, + false, local.is_real, - InteractionScope::Global, ); } SyscallShardKind::Precompile => { builder.send_syscall( local.shard, - local.clk, - local.nonce, + local.clk_16 + local.clk_8 * AB::Expr::from_canonical_u32(1 << 16), local.syscall_id, local.arg1, local.arg2, @@ -196,19 +290,29 @@ where InteractionScope::Local, ); - // Send the call to the global bus to/from the precompile chips. - builder.receive_syscall( - local.shard, - local.clk, - local.nonce, - local.syscall_id, - local.arg1, - local.arg2, + GlobalInteractionOperation::::eval_single_digest_syscall( + builder, + local.shard.into(), + local.clk_16.into(), + local.clk_8.into(), + local.syscall_id.into(), + local.arg1.into(), + local.arg2.into(), + local.global_interaction_cols, + true, local.is_real, - InteractionScope::Global, ); } } + + GlobalAccumulationOperation::::eval_accumulation( + builder, + [local.global_interaction_cols], + [local.is_real], + [next.is_real], + local.global_accumulation_cols, + next.global_accumulation_cols, + ); } } diff --git a/crates/core/machine/src/syscall/precompiles/README.md b/crates/core/machine/src/syscall/precompiles/README.md index 315d78915a..a6d26bd3fe 100644 --- a/crates/core/machine/src/syscall/precompiles/README.md +++ b/crates/core/machine/src/syscall/precompiles/README.md @@ -188,9 +188,9 @@ pub fn default_syscall_map() -> HashMap> { ## Write Unit Tests for the New Precompile ### Create a New SP1 Test Package -Create a new SP1 crate for your custom precompile test package inside the directory `sp1/tests`. An example `Cargo.toml` for this may look like +Create a new SP1 crate for your custom precompile test package inside the directory +`sp1/crates/test-artifacts/programs`. 
An example `Cargo.toml` for this may look like: ```toml -[workspace] [package] name = "custom-precompile-test" version = "1.0.0" @@ -198,18 +198,17 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../zkvm/entrypoint" } -sp1-derive = { path = "../../derive" } +sp1-zkvm = { path = "../../../../zkvm/entrypoint" } +sp1-derive = { path = "../../../../derive" } num-bigint = "0.4.6" rand = "0.8.5" ``` -Then implement the tests and run `cargo prove build` to generate an ELF file. +Don't forget to include your crate to the workspace at `crates/test-artifacts/programs/Cargo.toml`. Then implement the tests and run `cargo prove build` to generate an ELF file. -### Include the ELF File in `program.rs` -In your main SP1 project, include the generated ELF file by updating `program.rs`. +### Include the ELF File in `test-artifacts` crate `lib.rs` +In your main SP1 project, include the generated ELF file by updating `crates/test-artifacts/src/lib.rs`. ```rust -pub const CUSTOM_PRECOMPILE_ELF: &[u8] = - include_bytes!("path/to/generated/elf/file"); +pub const CUSTOM_PRECOMPILE_ELF: &[u8] = include_elf!("your-test-crate-name"); // Other ELF files... ``` @@ -230,10 +229,11 @@ mod tests { utils::{ self, run_test_io, - tests::CUSTOM_PRECOMPILE_ELF, }, }; + use test_artifacts::CUSTOM_PRECOMPILE_ELF; + #[test] fn test_custom_precompile() { utils::setup_logger(); diff --git a/crates/core/machine/src/syscall/precompiles/edwards/ed_add.rs b/crates/core/machine/src/syscall/precompiles/edwards/ed_add.rs index 94e81e20cb..d29e3c0488 100644 --- a/crates/core/machine/src/syscall/precompiles/edwards/ed_add.rs +++ b/crates/core/machine/src/syscall/precompiles/edwards/ed_add.rs @@ -9,7 +9,7 @@ use itertools::Itertools; use num::{BigUint, Zero}; use crate::air::MemoryAirBuilder; -use p3_air::{Air, AirBuilder, BaseAir}; +use p3_air::{Air, BaseAir}; use p3_field::{AbstractField, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use p3_maybe_rayon::prelude::{IntoParallelRefIterator, ParallelIterator, ParallelSlice}; @@ -45,7 +45,6 @@ pub struct EdAddAssignCols { pub is_real: T, pub shard: T, pub clk: T, - pub nonce: T, pub p_ptr: T, pub q_ptr: T, pub p_access: [MemoryWriteCols; WORDS_CURVE_POINT], @@ -158,17 +157,7 @@ impl MachineAir for Ed ); // Convert the trace to a row major matrix. - let mut trace = - RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_ED_ADD_COLS); - - // Write the nonces to the trace. - for i in 0..trace.height() { - let cols: &mut EdAddAssignCols = - trace.values[i * NUM_ED_ADD_COLS..(i + 1) * NUM_ED_ADD_COLS].borrow_mut(); - cols.nonce = F::from_canonical_usize(i); - } - - trace + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_ED_ADD_COLS) } fn generate_dependencies(&self, input: &Self::Record, output: &mut Self::Record) { @@ -204,6 +193,10 @@ impl MachineAir for Ed !shard.get_precompile_events(SyscallCode::ED_ADD).is_empty() } } + + fn local_only(&self) -> bool { + true + } } impl EdAddAssignChip { @@ -255,12 +248,6 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &EdAddAssignCols = (*local).borrow(); - let next = main.row_slice(1); - let next: &EdAddAssignCols = (*next).borrow(); - - // Constrain the incrementing nonce. 
- builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); let x1: Limbs::Limbs> = limbs_from_prev_access(&local.p_access[0..8]); @@ -328,7 +315,6 @@ where builder.receive_syscall( local.shard, local.clk, - local.nonce, AB::F::from_canonical_u32(SyscallCode::ED_ADD.syscall_id()), local.p_ptr, local.q_ptr, @@ -342,23 +328,23 @@ where mod tests { use sp1_core_executor::Program; use sp1_stark::CpuProver; + use test_artifacts::{ED25519_ELF, ED_ADD_ELF}; - use crate::{ - utils, - utils::tests::{ED25519_ELF, ED_ADD_ELF}, - }; + use crate::{io::SP1Stdin, utils}; #[test] fn test_ed_add_simple() { utils::setup_logger(); let program = Program::from(ED_ADD_ELF).unwrap(); - utils::run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + utils::run_test::>(program, stdin).unwrap(); } #[test] fn test_ed25519_program() { utils::setup_logger(); let program = Program::from(ED25519_ELF).unwrap(); - utils::run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + utils::run_test::>(program, stdin).unwrap(); } } diff --git a/crates/core/machine/src/syscall/precompiles/edwards/ed_decompress.rs b/crates/core/machine/src/syscall/precompiles/edwards/ed_decompress.rs index 97d0e526fe..534d4fde4f 100644 --- a/crates/core/machine/src/syscall/precompiles/edwards/ed_decompress.rs +++ b/crates/core/machine/src/syscall/precompiles/edwards/ed_decompress.rs @@ -45,7 +45,6 @@ pub struct EdDecompressCols { pub is_real: T, pub shard: T, pub clk: T, - pub nonce: T, pub ptr: T, pub sign: T, pub x_access: GenericArray, WordsFieldElement>, @@ -71,9 +70,6 @@ impl EdDecompressCols { self.shard = F::from_canonical_u32(event.shard); self.clk = F::from_canonical_u32(event.clk); self.ptr = F::from_canonical_u32(event.ptr); - self.nonce = F::from_canonical_u32( - record.nonce_lookup.get(event.lookup_id.0 as usize).copied().unwrap_or_default(), - ); self.sign = F::from_bool(event.sign); for i in 0..8 { self.x_access[i].populate(event.x_memory_records[i], &mut new_byte_lookup_events); @@ -181,7 +177,6 @@ impl EdDecompressCols { builder.receive_syscall( self.shard, self.clk, - self.nonce, AB::F::from_canonical_u32(SyscallCode::ED_DECOMPRESS.syscall_id()), self.ptr, self.sign, @@ -244,20 +239,7 @@ impl MachineAir for EdDecompressChip(self), ); - let mut trace = RowMajorMatrix::new( - rows.into_iter().flatten().collect::>(), - NUM_ED_DECOMPRESS_COLS, - ); - - // Write the nonces to the trace. - for i in 0..trace.height() { - let cols: &mut EdDecompressCols = trace.values - [i * NUM_ED_DECOMPRESS_COLS..(i + 1) * NUM_ED_DECOMPRESS_COLS] - .borrow_mut(); - cols.nonce = F::from_canonical_usize(i); - } - - trace + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_ED_DECOMPRESS_COLS) } fn included(&self, shard: &Self::Record) -> bool { @@ -267,6 +249,10 @@ impl MachineAir for EdDecompressChip bool { + true + } } impl BaseAir for EdDecompressChip { @@ -283,12 +269,6 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &EdDecompressCols = (*local).borrow(); - let next = main.row_slice(1); - let next: &EdDecompressCols = (*next).borrow(); - - // Constrain the incrementing nonce. 
- builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); local.eval::(builder); } @@ -298,13 +278,15 @@ where pub mod tests { use sp1_core_executor::Program; use sp1_stark::CpuProver; + use test_artifacts::ED_DECOMPRESS_ELF; - use crate::utils::{self, tests::ED_DECOMPRESS_ELF}; + use crate::{io::SP1Stdin, utils}; #[test] fn test_ed_decompress() { utils::setup_logger(); let program = Program::from(ED_DECOMPRESS_ELF).unwrap(); - utils::run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + utils::run_test::>(program, stdin).unwrap(); } } diff --git a/crates/core/machine/src/syscall/precompiles/fptower/fp.rs b/crates/core/machine/src/syscall/precompiles/fptower/fp.rs index a9c21016c7..d74b682adc 100644 --- a/crates/core/machine/src/syscall/precompiles/fptower/fp.rs +++ b/crates/core/machine/src/syscall/precompiles/fptower/fp.rs @@ -8,7 +8,7 @@ use crate::{air::MemoryAirBuilder, utils::zeroed_f_vec}; use generic_array::GenericArray; use itertools::Itertools; use num::{BigUint, Zero}; -use p3_air::{Air, AirBuilder, BaseAir}; +use p3_air::{Air, BaseAir}; use p3_field::{AbstractField, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use sp1_core_executor::{ @@ -43,7 +43,6 @@ pub struct FpOpChip

{
 pub struct FpOpCols {
     pub is_real: T,
     pub shard: T,
-    pub nonce: T,
     pub clk: T,
     pub is_add: T,
     pub is_sub: T,
@@ -88,8 +87,8 @@ impl MachineAir for FpOpChip

{ } fn generate_trace(&self, input: &Self::Record, output: &mut Self::Record) -> RowMajorMatrix { - // All the fp events for a given curve are coalesce to the curve's Add operation. Only retrieve - // precompile events for that operation. + // All the fp events for a given curve are coalesce to the curve's Add operation. Only + // retrieve precompile events for that operation. // TODO: Fix this. let events = match P::FIELD_TYPE { @@ -165,17 +164,7 @@ impl MachineAir for FpOpChip

{ ); // Convert the trace to a row major matrix. - let mut trace = - RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), num_fp_cols::

()); - - // Write the nonces to the trace. - for i in 0..trace.height() { - let cols: &mut FpOpCols = - trace.values[i * num_fp_cols::

()..(i + 1) * num_fp_cols::

()].borrow_mut(); - cols.nonce = F::from_canonical_usize(i); - } - - trace + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), num_fp_cols::

()) } fn included(&self, shard: &Self::Record) -> bool { @@ -202,6 +191,10 @@ impl MachineAir for FpOpChip

{
             }
         }
     }
+
+    fn local_only(&self) -> bool {
+        true
+    }
 }

{ @@ -219,12 +212,6 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &FpOpCols = (*local).borrow(); - let next = main.row_slice(1); - let next: &FpOpCols = (*next).borrow(); - - // Check that nonce is incremented. - builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); // Check that operations flags are boolean. builder.assert_bool(local.is_add); @@ -295,7 +282,6 @@ where builder.receive_syscall( local.shard, local.clk, - local.nonce, syscall_id_felt, local.x_ptr, local.y_ptr, diff --git a/crates/core/machine/src/syscall/precompiles/fptower/fp2_addsub.rs b/crates/core/machine/src/syscall/precompiles/fptower/fp2_addsub.rs index 7f28309597..ee47aef714 100644 --- a/crates/core/machine/src/syscall/precompiles/fptower/fp2_addsub.rs +++ b/crates/core/machine/src/syscall/precompiles/fptower/fp2_addsub.rs @@ -8,7 +8,7 @@ use crate::{air::MemoryAirBuilder, utils::zeroed_f_vec}; use generic_array::GenericArray; use itertools::Itertools; use num::{BigUint, Zero}; -use p3_air::{Air, AirBuilder, BaseAir}; +use p3_air::{Air, BaseAir}; use p3_field::{AbstractField, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use sp1_core_executor::{ @@ -40,7 +40,6 @@ pub const fn num_fp2_addsub_cols() -> usize { pub struct Fp2AddSubAssignCols { pub is_real: T, pub shard: T, - pub nonce: T, pub clk: T, pub is_add: T, pub x_ptr: T, @@ -91,8 +90,8 @@ impl MachineAir for Fp2AddSubAssignChip

{ } fn generate_trace(&self, input: &Self::Record, output: &mut Self::Record) -> RowMajorMatrix { - // All the fp2 sub and add events for a given curve are coalesce to the curve's Add operation. Only retrieve - // precompile events for that operation. + // All the fp2 sub and add events for a given curve are coalesce to the curve's Add + // operation. Only retrieve precompile events for that operation. // TODO: Fix this. let events = match P::FIELD_TYPE { @@ -175,25 +174,15 @@ impl MachineAir for Fp2AddSubAssignChip

{ ); // Convert the trace to a row major matrix. - let mut trace = RowMajorMatrix::new( + RowMajorMatrix::new( rows.into_iter().flatten().collect::>(), num_fp2_addsub_cols::

(), - ); - - // Write the nonces to the trace. - for i in 0..trace.height() { - let cols: &mut Fp2AddSubAssignCols = trace.values - [i * num_fp2_addsub_cols::

()..(i + 1) * num_fp2_addsub_cols::

()] - .borrow_mut(); - cols.nonce = F::from_canonical_usize(i); - } - - trace + ) } fn included(&self, shard: &Self::Record) -> bool { - // All the fp2 sub and add events for a given curve are coalesce to the curve's Add operation. Only retrieve - // precompile events for that operation. + // All the fp2 sub and add events for a given curve are coalesce to the curve's Add + // operation. Only retrieve precompile events for that operation. // TODO: Fix this. assert!( @@ -214,6 +203,10 @@ impl MachineAir for Fp2AddSubAssignChip

{ } } } + + fn local_only(&self) -> bool { + true + } } impl BaseAir for Fp2AddSubAssignChip

{ @@ -231,14 +224,10 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &Fp2AddSubAssignCols = (*local).borrow(); - let next = main.row_slice(1); - let next: &Fp2AddSubAssignCols = (*next).borrow(); // Constrain the `is_add` flag to be boolean. builder.assert_bool(local.is_add); - builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); let num_words_field_element =

::Limbs::USIZE / 4; let p_x = limbs_from_prev_access(&local.x_access[0..num_words_field_element]); @@ -318,7 +307,6 @@ where builder.receive_syscall( local.shard, local.clk, - local.nonce, syscall_id_felt, local.x_ptr, local.y_ptr, diff --git a/crates/core/machine/src/syscall/precompiles/fptower/fp2_mul.rs b/crates/core/machine/src/syscall/precompiles/fptower/fp2_mul.rs index 95a624cc2f..9c26bab1db 100644 --- a/crates/core/machine/src/syscall/precompiles/fptower/fp2_mul.rs +++ b/crates/core/machine/src/syscall/precompiles/fptower/fp2_mul.rs @@ -7,7 +7,7 @@ use crate::{air::MemoryAirBuilder, utils::zeroed_f_vec}; use generic_array::GenericArray; use itertools::Itertools; use num::{BigUint, Zero}; -use p3_air::{Air, AirBuilder, BaseAir}; +use p3_air::{Air, BaseAir}; use p3_field::{AbstractField, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use sp1_core_executor::{ @@ -40,7 +40,6 @@ pub const fn num_fp2_mul_cols() -> usize { pub struct Fp2MulAssignCols { pub is_real: T, pub shard: T, - pub nonce: T, pub clk: T, pub x_ptr: T, pub y_ptr: T, @@ -214,20 +213,7 @@ impl MachineAir for Fp2MulAssignChip

{ ); // Convert the trace to a row major matrix. - let mut trace = RowMajorMatrix::new( - rows.into_iter().flatten().collect::>(), - num_fp2_mul_cols::

(), - ); - - // Write the nonces to the trace. - for i in 0..trace.height() { - let cols: &mut Fp2MulAssignCols = trace.values - [i * num_fp2_mul_cols::

()..(i + 1) * num_fp2_mul_cols::

()] - .borrow_mut(); - cols.nonce = F::from_canonical_usize(i); - } - - trace + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), num_fp2_mul_cols::

()) } fn included(&self, shard: &Self::Record) -> bool { @@ -244,6 +230,10 @@ impl MachineAir for Fp2MulAssignChip

{ } } } + + fn local_only(&self) -> bool { + true + } } impl BaseAir for Fp2MulAssignChip

{ @@ -261,11 +251,7 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &Fp2MulAssignCols = (*local).borrow(); - let next = main.row_slice(1); - let next: &Fp2MulAssignCols = (*next).borrow(); - builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); let num_words_field_element =

::Limbs::USIZE / 4; let p_x = limbs_from_prev_access(&local.x_access[0..num_words_field_element]); @@ -371,7 +357,6 @@ where builder.receive_syscall( local.shard, local.clk, - local.nonce, syscall_id_felt, local.x_ptr, local.y_ptr, diff --git a/crates/core/machine/src/syscall/precompiles/fptower/mod.rs b/crates/core/machine/src/syscall/precompiles/fptower/mod.rs index c0d63d3ab4..752b5ae422 100644 --- a/crates/core/machine/src/syscall/precompiles/fptower/mod.rs +++ b/crates/core/machine/src/syscall/precompiles/fptower/mod.rs @@ -10,55 +10,59 @@ pub use fp2_mul::*; mod tests { use sp1_stark::CpuProver; - use sp1_core_executor::{ - programs::tests::{ - BLS12381_FP2_ADDSUB_ELF, BLS12381_FP2_MUL_ELF, BLS12381_FP_ELF, BN254_FP2_ADDSUB_ELF, - BN254_FP2_MUL_ELF, BN254_FP_ELF, - }, - Program, + use sp1_core_executor::Program; + use test_artifacts::{ + BLS12381_FP2_ADDSUB_ELF, BLS12381_FP2_MUL_ELF, BLS12381_FP_ELF, BN254_FP2_ADDSUB_ELF, + BN254_FP2_MUL_ELF, BN254_FP_ELF, }; - use crate::utils; + use crate::{io::SP1Stdin, utils}; #[test] fn test_bls12381_fp_ops() { utils::setup_logger(); let program = Program::from(BLS12381_FP_ELF).unwrap(); - utils::run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + utils::run_test::>(program, stdin).unwrap(); } #[test] fn test_bls12381_fp2_addsub() { utils::setup_logger(); let program = Program::from(BLS12381_FP2_ADDSUB_ELF).unwrap(); - utils::run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + utils::run_test::>(program, stdin).unwrap(); } #[test] fn test_bls12381_fp2_mul() { utils::setup_logger(); let program = Program::from(BLS12381_FP2_MUL_ELF).unwrap(); - utils::run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + utils::run_test::>(program, stdin).unwrap(); } #[test] fn test_bn254_fp_ops() { utils::setup_logger(); let program = Program::from(BN254_FP_ELF).unwrap(); - utils::run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + utils::run_test::>(program, stdin).unwrap(); } #[test] fn test_bn254_fp2_addsub() { utils::setup_logger(); let program = Program::from(BN254_FP2_ADDSUB_ELF).unwrap(); - utils::run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + utils::run_test::>(program, stdin).unwrap(); } #[test] fn test_bn254_fp2_mul() { utils::setup_logger(); let program = Program::from(BN254_FP2_MUL_ELF).unwrap(); - utils::run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + utils::run_test::>(program, stdin).unwrap(); } } diff --git a/crates/core/machine/src/syscall/precompiles/keccak256/air.rs b/crates/core/machine/src/syscall/precompiles/keccak256/air.rs index a5925fb6c8..4e299ec7ae 100644 --- a/crates/core/machine/src/syscall/precompiles/keccak256/air.rs +++ b/crates/core/machine/src/syscall/precompiles/keccak256/air.rs @@ -33,10 +33,6 @@ where let local: &KeccakMemCols = (*local).borrow(); let next: &KeccakMemCols = (*next).borrow(); - // Constrain the incrementing nonce. 
- builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); - let first_step = local.keccak.step_flags[0]; let final_step = local.keccak.step_flags[NUM_ROUNDS - 1]; let not_final_step = AB::Expr::one() - final_step; @@ -66,7 +62,6 @@ where builder.receive_syscall( local.shard, local.clk, - local.nonce, AB::F::from_canonical_u32(SyscallCode::KECCAK_PERMUTE.syscall_id()), local.state_addr, AB::Expr::zero(), @@ -140,15 +135,17 @@ mod test { use crate::{ io::SP1Stdin, riscv::RiscvAir, - utils::{prove, setup_logger, tests::KECCAK256_ELF}, + utils::{prove_core, setup_logger}, }; use sp1_primitives::io::SP1PublicValues; use rand::{Rng, SeedableRng}; - use sp1_core_executor::Program; + use sp1_core_executor::{Program, SP1Context}; use sp1_stark::{ - baby_bear_poseidon2::BabyBearPoseidon2, CpuProver, SP1CoreOpts, StarkGenericConfig, + baby_bear_poseidon2::BabyBearPoseidon2, CpuProver, MachineProver, SP1CoreOpts, + StarkGenericConfig, }; + use test_artifacts::KECCAK256_ELF; use tiny_keccak::Hasher; const NUM_TEST_CASES: usize = 45; @@ -180,9 +177,21 @@ mod test { let config = BabyBearPoseidon2::new(); let program = Program::from(KECCAK256_ELF).unwrap(); - let (proof, public_values, _) = - prove::<_, CpuProver<_, _>>(program, &stdin, config, SP1CoreOpts::default(), None) - .unwrap(); + let opts = SP1CoreOpts::default(); + let machine = RiscvAir::machine(config); + let prover = CpuProver::new(machine); + let (pk, vk) = prover.setup(&program); + let (proof, public_values, _) = prove_core::<_, _>( + &prover, + &pk, + &vk, + program, + &stdin, + opts, + SP1Context::default(), + None, + ) + .unwrap(); let mut public_values = SP1PublicValues::from(&public_values); let config = BabyBearPoseidon2::new(); diff --git a/crates/core/machine/src/syscall/precompiles/keccak256/columns.rs b/crates/core/machine/src/syscall/precompiles/keccak256/columns.rs index 7b622b3bc1..68e4035d18 100644 --- a/crates/core/machine/src/syscall/precompiles/keccak256/columns.rs +++ b/crates/core/machine/src/syscall/precompiles/keccak256/columns.rs @@ -19,7 +19,6 @@ pub(crate) struct KeccakMemCols { pub shard: T, pub clk: T, - pub nonce: T, pub state_addr: T, /// Memory columns for the state. diff --git a/crates/core/machine/src/syscall/precompiles/keccak256/mod.rs b/crates/core/machine/src/syscall/precompiles/keccak256/mod.rs index 2f53e23c77..ba9b76b022 100644 --- a/crates/core/machine/src/syscall/precompiles/keccak256/mod.rs +++ b/crates/core/machine/src/syscall/precompiles/keccak256/mod.rs @@ -4,7 +4,7 @@ mod trace; use p3_keccak_air::KeccakAir; -pub(crate) const STATE_SIZE: usize = 25; +pub const STATE_SIZE: usize = 25; // The permutation state is 25 u64's. Our word size is 32 bits, so it is 50 words. 
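Spelling out the arithmetic in the comment above: 25 lanes of 64 bits is 1600 bits, i.e. 50 of the zkVM's 32-bit words, which is what `STATE_NUM_WORDS = STATE_SIZE * 2` encodes just below. A trivial check:

```rust
const STATE_SIZE: usize = 25; // Keccak-f[1600]: 25 lanes of 64 bits each.
const WORD_SIZE_BITS: usize = 32; // The zkVM word size.
const STATE_NUM_WORDS: usize = STATE_SIZE * 64 / WORD_SIZE_BITS;

fn main() {
    // 25 * 64 / 32 = 50, matching STATE_SIZE * 2 in the chip.
    assert_eq!(STATE_NUM_WORDS, 50);
}
```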
pub const STATE_NUM_WORDS: usize = STATE_SIZE * 2; @@ -23,8 +23,12 @@ impl KeccakPermuteChip { pub mod permute_tests { use sp1_core_executor::{syscalls::SyscallCode, Executor, Instruction, Opcode, Program}; use sp1_stark::{CpuProver, SP1CoreOpts}; + use test_artifacts::KECCAK_PERMUTE_ELF; - use crate::utils::{self, run_test, tests::KECCAK_PERMUTE_ELF}; + use crate::{ + io::SP1Stdin, + utils::{self}, + }; pub fn keccak_permute_program() -> Program { let digest_ptr = 100; @@ -57,13 +61,15 @@ pub mod permute_tests { utils::setup_logger(); let program = keccak_permute_program(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + utils::run_test::>(program, stdin).unwrap(); } #[test] fn test_keccak_permute_program_prove() { utils::setup_logger(); let program = Program::from(KECCAK_PERMUTE_ELF).unwrap(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + utils::run_test::>(program, stdin).unwrap(); } } diff --git a/crates/core/machine/src/syscall/precompiles/keccak256/trace.rs b/crates/core/machine/src/syscall/precompiles/keccak256/trace.rs index 020b28c9f0..900ff55878 100644 --- a/crates/core/machine/src/syscall/precompiles/keccak256/trace.rs +++ b/crates/core/machine/src/syscall/precompiles/keccak256/trace.rs @@ -96,16 +96,7 @@ impl MachineAir for KeccakPermuteChip { }); // Convert the trace to a row major matrix. - let mut trace = RowMajorMatrix::new(values, NUM_KECCAK_MEM_COLS); - - // Write the nonce to the trace. - for i in 0..trace.height() { - let cols: &mut KeccakMemCols = - trace.values[i * NUM_KECCAK_MEM_COLS..(i + 1) * NUM_KECCAK_MEM_COLS].borrow_mut(); - cols.nonce = F::from_canonical_usize(i); - } - - trace + RowMajorMatrix::new(values, NUM_KECCAK_MEM_COLS) } fn included(&self, shard: &Self::Record) -> bool { diff --git a/crates/core/machine/src/syscall/precompiles/mod.rs b/crates/core/machine/src/syscall/precompiles/mod.rs index f07da94609..4b06dd3c12 100644 --- a/crates/core/machine/src/syscall/precompiles/mod.rs +++ b/crates/core/machine/src/syscall/precompiles/mod.rs @@ -2,5 +2,6 @@ pub mod edwards; pub mod fptower; pub mod keccak256; pub mod sha256; +pub mod u256x2048_mul; pub mod uint256; pub mod weierstrass; diff --git a/crates/core/machine/src/syscall/precompiles/sha256/compress/air.rs b/crates/core/machine/src/syscall/precompiles/sha256/compress/air.rs index 2ecb8deb37..066a5db297 100644 --- a/crates/core/machine/src/syscall/precompiles/sha256/compress/air.rs +++ b/crates/core/machine/src/syscall/precompiles/sha256/compress/air.rs @@ -39,10 +39,6 @@ where let local: &ShaCompressCols = (*local).borrow(); let next: &ShaCompressCols = (*next).borrow(); - // Constrain the incrementing nonce. - builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); - self.eval_control_flow_flags(builder, local, next); self.eval_memory(builder, local); @@ -55,7 +51,6 @@ where builder.receive_syscall( local.shard, local.clk, - local.nonce, AB::F::from_canonical_u32(SyscallCode::SHA_COMPRESS.syscall_id()), local.w_ptr, local.h_ptr, diff --git a/crates/core/machine/src/syscall/precompiles/sha256/compress/columns.rs b/crates/core/machine/src/syscall/precompiles/sha256/compress/columns.rs index 5d48b9edcc..c5510142df 100644 --- a/crates/core/machine/src/syscall/precompiles/sha256/compress/columns.rs +++ b/crates/core/machine/src/syscall/precompiles/sha256/compress/columns.rs @@ -25,7 +25,6 @@ pub const NUM_SHA_COMPRESS_COLS: usize = size_of::>(); pub struct ShaCompressCols { /// Inputs. 
pub shard: T, - pub nonce: T, pub clk: T, pub w_ptr: T, pub h_ptr: T, diff --git a/crates/core/machine/src/syscall/precompiles/sha256/compress/mod.rs b/crates/core/machine/src/syscall/precompiles/sha256/compress/mod.rs index 539dbe885b..08e58cca31 100644 --- a/crates/core/machine/src/syscall/precompiles/sha256/compress/mod.rs +++ b/crates/core/machine/src/syscall/precompiles/sha256/compress/mod.rs @@ -34,8 +34,12 @@ pub mod compress_tests { use sp1_core_executor::{syscalls::SyscallCode, Instruction, Opcode, Program}; use sp1_stark::CpuProver; + use test_artifacts::SHA_COMPRESS_ELF; - use crate::utils::{run_test, setup_logger, tests::SHA_COMPRESS_ELF}; + use crate::{ + io::SP1Stdin, + utils::{run_test, setup_logger}, + }; pub fn sha_compress_program() -> Program { let w_ptr = 100; @@ -66,13 +70,15 @@ pub mod compress_tests { fn prove_babybear() { setup_logger(); let program = sha_compress_program(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] fn test_sha_compress_program() { setup_logger(); let program = Program::from(SHA_COMPRESS_ELF).unwrap(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } } diff --git a/crates/core/machine/src/syscall/precompiles/sha256/compress/trace.rs b/crates/core/machine/src/syscall/precompiles/sha256/compress/trace.rs index d6b61b67f2..5333de82cb 100644 --- a/crates/core/machine/src/syscall/precompiles/sha256/compress/trace.rs +++ b/crates/core/machine/src/syscall/precompiles/sha256/compress/trace.rs @@ -3,7 +3,7 @@ use std::borrow::BorrowMut; use hashbrown::HashMap; use itertools::Itertools; use p3_field::PrimeField32; -use p3_matrix::{dense::RowMajorMatrix, Matrix}; +use p3_matrix::dense::RowMajorMatrix; use p3_maybe_rayon::prelude::{ParallelIterator, ParallelSlice}; use sp1_core_executor::{ events::{ByteLookupEvent, ByteRecord, PrecompileEvent, ShaCompressEvent}, @@ -77,20 +77,7 @@ impl MachineAir for ShaCompressChip { } // Convert the trace to a row major matrix. - let mut trace = RowMajorMatrix::new( - rows.into_iter().flatten().collect::>(), - NUM_SHA_COMPRESS_COLS, - ); - - // Write the nonces to the trace. - for i in 0..trace.height() { - let cols: &mut ShaCompressCols = trace.values - [i * NUM_SHA_COMPRESS_COLS..(i + 1) * NUM_SHA_COMPRESS_COLS] - .borrow_mut(); - cols.nonce = F::from_canonical_usize(i); - } - - trace + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_SHA_COMPRESS_COLS) } fn generate_dependencies(&self, input: &Self::Record, output: &mut Self::Record) { diff --git a/crates/core/machine/src/syscall/precompiles/sha256/extend/air.rs b/crates/core/machine/src/syscall/precompiles/sha256/extend/air.rs index f5da0f247a..17e8648918 100644 --- a/crates/core/machine/src/syscall/precompiles/sha256/extend/air.rs +++ b/crates/core/machine/src/syscall/precompiles/sha256/extend/air.rs @@ -33,10 +33,6 @@ where let local: &ShaExtendCols = (*local).borrow(); let next: &ShaExtendCols = (*next).borrow(); - // Constrain the incrementing nonce. 
- builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); - let i_start = AB::F::from_canonical_u32(16); let nb_bytes_in_word = AB::F::from_canonical_u32(4); @@ -203,7 +199,6 @@ where builder.receive_syscall( local.shard, local.clk, - local.nonce, AB::F::from_canonical_u32(SyscallCode::SHA_EXTEND.syscall_id()), local.w_ptr, AB::Expr::zero(), diff --git a/crates/core/machine/src/syscall/precompiles/sha256/extend/columns.rs b/crates/core/machine/src/syscall/precompiles/sha256/extend/columns.rs index ff7a5f5f7c..69b5fcd2a9 100644 --- a/crates/core/machine/src/syscall/precompiles/sha256/extend/columns.rs +++ b/crates/core/machine/src/syscall/precompiles/sha256/extend/columns.rs @@ -17,7 +17,6 @@ pub const NUM_SHA_EXTEND_COLS: usize = size_of::>(); pub struct ShaExtendCols { /// Inputs. pub shard: T, - pub nonce: T, pub clk: T, pub w_ptr: T, diff --git a/crates/core/machine/src/syscall/precompiles/sha256/extend/mod.rs b/crates/core/machine/src/syscall/precompiles/sha256/extend/mod.rs index cb3aea1bbb..53eae24694 100644 --- a/crates/core/machine/src/syscall/precompiles/sha256/extend/mod.rs +++ b/crates/core/machine/src/syscall/precompiles/sha256/extend/mod.rs @@ -37,10 +37,11 @@ pub mod extend_tests { events::AluEvent, syscalls::SyscallCode, ExecutionRecord, Instruction, Opcode, Program, }; use sp1_stark::{air::MachineAir, CpuProver}; + use test_artifacts::{SHA2_ELF, SHA_EXTEND_ELF}; - use crate::utils::{ - self, run_test, - tests::{SHA2_ELF, SHA_EXTEND_ELF}, + use crate::{ + io::SP1Stdin, + utils::{self, run_test}, }; use super::ShaExtendChip; @@ -77,20 +78,23 @@ pub mod extend_tests { fn test_sha_prove() { utils::setup_logger(); let program = sha_extend_program(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] fn test_sha256_program() { utils::setup_logger(); let program = Program::from(SHA2_ELF).unwrap(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] fn test_sha_extend_program() { utils::setup_logger(); let program = Program::from(SHA_EXTEND_ELF).unwrap(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } } diff --git a/crates/core/machine/src/syscall/precompiles/sha256/extend/trace.rs b/crates/core/machine/src/syscall/precompiles/sha256/extend/trace.rs index 75e1a16533..ca61b642c3 100644 --- a/crates/core/machine/src/syscall/precompiles/sha256/extend/trace.rs +++ b/crates/core/machine/src/syscall/precompiles/sha256/extend/trace.rs @@ -1,7 +1,7 @@ use hashbrown::HashMap; use itertools::Itertools; use p3_field::PrimeField32; -use p3_matrix::{dense::RowMajorMatrix, Matrix}; +use p3_matrix::dense::RowMajorMatrix; use p3_maybe_rayon::prelude::{ParallelIterator, ParallelSlice}; use sp1_core_executor::{ events::{ByteLookupEvent, ByteRecord, PrecompileEvent, ShaExtendEvent}, @@ -51,19 +51,7 @@ impl MachineAir for ShaExtendChip { } // Convert the trace to a row major matrix. - let mut trace = RowMajorMatrix::new( - rows.into_iter().flatten().collect::>(), - NUM_SHA_EXTEND_COLS, - ); - - // Write the nonces to the trace. 
- for i in 0..trace.height() { - let cols: &mut ShaExtendCols = - trace.values[i * NUM_SHA_EXTEND_COLS..(i + 1) * NUM_SHA_EXTEND_COLS].borrow_mut(); - cols.nonce = F::from_canonical_usize(i); - } - - trace + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_SHA_EXTEND_COLS) } fn generate_dependencies(&self, input: &Self::Record, output: &mut Self::Record) { diff --git a/crates/core/machine/src/syscall/precompiles/u256x2048_mul/air.rs b/crates/core/machine/src/syscall/precompiles/u256x2048_mul/air.rs new file mode 100644 index 0000000000..054bf4f4fe --- /dev/null +++ b/crates/core/machine/src/syscall/precompiles/u256x2048_mul/air.rs @@ -0,0 +1,415 @@ +use crate::{ + air::MemoryAirBuilder, + memory::{value_as_limbs, MemoryCols, MemoryReadCols, MemoryWriteCols}, + operations::field::field_op::FieldOpCols, + utils::{limbs_from_access, pad_rows_fixed, words_to_bytes_le}, +}; + +use num::{BigUint, One, Zero}; +use p3_air::{Air, AirBuilder, BaseAir}; +use p3_field::{AbstractField, PrimeField32}; +use p3_matrix::{dense::RowMajorMatrix, Matrix}; +use sp1_core_executor::{ + events::{ByteRecord, FieldOperation, PrecompileEvent}, + syscalls::SyscallCode, + ExecutionRecord, Program, Register, +}; +use sp1_curves::{ + params::{NumLimbs, NumWords}, + uint256::U256Field, +}; +use sp1_derive::AlignedBorrow; +use sp1_stark::{ + air::{BaseAirBuilder, InteractionScope, MachineAir, Polynomial, SP1AirBuilder}, + MachineRecord, +}; +use std::{ + borrow::{Borrow, BorrowMut}, + mem::size_of, +}; +use typenum::Unsigned; + +/// The number of columns in the U256x2048MulCols. +const NUM_COLS: usize = size_of::>(); + +#[derive(Default)] +pub struct U256x2048MulChip; + +impl U256x2048MulChip { + pub const fn new() -> Self { + Self + } +} +type WordsFieldElement = ::WordsFieldElement; +const WORDS_FIELD_ELEMENT: usize = WordsFieldElement::USIZE; +const LO_REGISTER: u32 = Register::X12 as u32; +const HI_REGISTER: u32 = Register::X13 as u32; + +/// A set of columns for the U256x2048Mul operation. +#[derive(Debug, Clone, AlignedBorrow)] +#[repr(C)] +pub struct U256x2048MulCols { + /// The shard number of the syscall. + pub shard: T, + + /// The clock cycle of the syscall. + pub clk: T, + + /// The nonce of the operation. + pub nonce: T, + + /// The pointer to the first input. + pub a_ptr: T, + + /// The pointer to the second input. + pub b_ptr: T, + + pub lo_ptr: T, + pub hi_ptr: T, + + pub lo_ptr_memory: MemoryReadCols, + pub hi_ptr_memory: MemoryReadCols, + + // Memory columns. + pub a_memory: [MemoryReadCols; WORDS_FIELD_ELEMENT], + pub b_memory: [MemoryReadCols; WORDS_FIELD_ELEMENT * 8], + pub lo_memory: [MemoryWriteCols; WORDS_FIELD_ELEMENT * 8], + pub hi_memory: [MemoryWriteCols; WORDS_FIELD_ELEMENT], + + // Output values. We compute (x * y) % 2^2048 and (x * y) / 2^2048. + pub a_mul_b1: FieldOpCols, + pub ab2_plus_carry: FieldOpCols, + pub ab3_plus_carry: FieldOpCols, + pub ab4_plus_carry: FieldOpCols, + pub ab5_plus_carry: FieldOpCols, + pub ab6_plus_carry: FieldOpCols, + pub ab7_plus_carry: FieldOpCols, + pub ab8_plus_carry: FieldOpCols, + pub is_real: T, +} + +impl MachineAir for U256x2048MulChip { + type Record = ExecutionRecord; + type Program = Program; + + fn name(&self) -> String { + "U256XU2048Mul".to_string() + } + + fn generate_trace( + &self, + input: &ExecutionRecord, + output: &mut ExecutionRecord, + ) -> RowMajorMatrix { + // Implement trace generation logic. 
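The `a_mul_b1` and `ab{2..8}_plus_carry` columns above encode a schoolbook product of the 256-bit input with the eight 256-bit limbs of the 2048-bit input, carrying 256 bits from one step to the next. A standalone sketch of that arithmetic follows, using `num`'s `BigUint` rather than the chip's `FieldOpCols`; the function name and signature are illustrative only and are not part of this patch.

    use num::{BigUint, Integer, One, Zero};

    // At step i the chip checks a * b[i] + carry_in = lo[i] + carry_out * 2^256.
    // After eight steps, lo[0..8] holds (a * b) mod 2^2048 as little-endian
    // 256-bit limbs and the final carry is (a * b) >> 2048.
    fn u256_x_u2048(a: &BigUint, b_limbs: &[BigUint]) -> (Vec<BigUint>, BigUint) {
        let two_pow_256 = BigUint::one() << 256u32;
        let mut carry = BigUint::zero();
        let mut lo = Vec::with_capacity(b_limbs.len());
        for b_i in b_limbs {
            let t = a * b_i + &carry;                        // a * b_i + carry_in
            let (carry_out, lo_i) = t.div_rem(&two_pow_256); // (quotient, remainder)
            lo.push(lo_i);                                   // low 256 bits of this step
            carry = carry_out;                               // carried into the next step
        }
        (lo, carry)
    }

Each intermediate carry stays below 2^256, so a single 256-bit carry per step suffices, matching the eight 32-bit words written to `hi_memory`.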
+ let rows_and_records = input + .get_precompile_events(SyscallCode::U256XU2048_MUL) + .chunks(1) + .map(|events| { + let mut records = ExecutionRecord::default(); + let mut new_byte_lookup_events = Vec::new(); + + let rows = events + .iter() + .map(|(_, event)| { + let event = if let PrecompileEvent::U256xU2048Mul(event) = event { + event + } else { + unreachable!() + }; + let mut row: [F; NUM_COLS] = [F::zero(); NUM_COLS]; + let cols: &mut U256x2048MulCols = row.as_mut_slice().borrow_mut(); + + // Assign basic values to the columns. + cols.is_real = F::one(); + cols.shard = F::from_canonical_u32(event.shard); + cols.clk = F::from_canonical_u32(event.clk); + cols.a_ptr = F::from_canonical_u32(event.a_ptr); + cols.b_ptr = F::from_canonical_u32(event.b_ptr); + cols.lo_ptr = F::from_canonical_u32(event.lo_ptr); + cols.hi_ptr = F::from_canonical_u32(event.hi_ptr); + + // Populate memory accesses for lo_ptr and hi_ptr. + cols.lo_ptr_memory + .populate(event.lo_ptr_memory, &mut new_byte_lookup_events); + cols.hi_ptr_memory + .populate(event.hi_ptr_memory, &mut new_byte_lookup_events); + + // Populate memory columns. + for i in 0..WORDS_FIELD_ELEMENT { + cols.a_memory[i] + .populate(event.a_memory_records[i], &mut new_byte_lookup_events); + } + + for i in 0..WORDS_FIELD_ELEMENT * 8 { + cols.b_memory[i] + .populate(event.b_memory_records[i], &mut new_byte_lookup_events); + } + + for i in 0..WORDS_FIELD_ELEMENT * 8 { + cols.lo_memory[i] + .populate(event.lo_memory_records[i], &mut new_byte_lookup_events); + } + + for i in 0..WORDS_FIELD_ELEMENT { + cols.hi_memory[i] + .populate(event.hi_memory_records[i], &mut new_byte_lookup_events); + } + + let a = BigUint::from_bytes_le(&words_to_bytes_le::<32>(&event.a)); + let b_array: [BigUint; 8] = event + .b + .chunks(8) + .map(|chunk| BigUint::from_bytes_le(&words_to_bytes_le::<32>(chunk))) + .collect::>() + .try_into() + .unwrap(); + + let effective_modulus = BigUint::one() << 256; + + let mut carries = vec![BigUint::zero(); 9]; + let mut ab_plus_carry_cols = [ + &mut cols.a_mul_b1, + &mut cols.ab2_plus_carry, + &mut cols.ab3_plus_carry, + &mut cols.ab4_plus_carry, + &mut cols.ab5_plus_carry, + &mut cols.ab6_plus_carry, + &mut cols.ab7_plus_carry, + &mut cols.ab8_plus_carry, + ]; + + for (i, col) in ab_plus_carry_cols.iter_mut().enumerate() { + let (_, carry) = col.populate_mul_and_carry( + &mut new_byte_lookup_events, + event.shard, + &a, + &b_array[i], + &carries[i], + &effective_modulus, + ); + carries[i + 1] = carry; + } + row + }) + .collect::>(); + records.add_byte_lookup_events(new_byte_lookup_events); + (rows, records) + }) + .collect::>(); + + // Generate the trace rows for each event. + let mut rows = Vec::new(); + for (row, mut record) in rows_and_records { + rows.extend(row); + output.append(&mut record); + } + + pad_rows_fixed( + &mut rows, + || { + let mut row: [F; NUM_COLS] = [F::zero(); NUM_COLS]; + let cols: &mut U256x2048MulCols = row.as_mut_slice().borrow_mut(); + + let x = BigUint::zero(); + let y = BigUint::zero(); + let z = BigUint::zero(); + let modulus = BigUint::one() << 256; + + // Populate all the mul and carry columns with zero values. 
+ cols.a_mul_b1.populate(&mut vec![], 0, &x, &y, FieldOperation::Mul); + cols.ab2_plus_carry.populate_mul_and_carry(&mut vec![], 0, &x, &y, &z, &modulus); + cols.ab3_plus_carry.populate_mul_and_carry(&mut vec![], 0, &x, &y, &z, &modulus); + cols.ab4_plus_carry.populate_mul_and_carry(&mut vec![], 0, &x, &y, &z, &modulus); + cols.ab5_plus_carry.populate_mul_and_carry(&mut vec![], 0, &x, &y, &z, &modulus); + cols.ab6_plus_carry.populate_mul_and_carry(&mut vec![], 0, &x, &y, &z, &modulus); + cols.ab7_plus_carry.populate_mul_and_carry(&mut vec![], 0, &x, &y, &z, &modulus); + cols.ab8_plus_carry.populate_mul_and_carry(&mut vec![], 0, &x, &y, &z, &modulus); + + row + }, + input.fixed_log2_rows::(self), + ); + + // Convert the trace to a row major matrix. + let mut trace = + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_COLS); + + // Write the nonces to the trace. + for i in 0..trace.height() { + let cols: &mut U256x2048MulCols = + trace.values[i * NUM_COLS..(i + 1) * NUM_COLS].borrow_mut(); + cols.nonce = F::from_canonical_usize(i); + } + + trace + } + + fn included(&self, shard: &Self::Record) -> bool { + if let Some(shape) = shard.shape.as_ref() { + shape.included::(self) + } else { + !shard.get_precompile_events(SyscallCode::U256XU2048_MUL).is_empty() + } + } +} + +impl BaseAir for U256x2048MulChip { + fn width(&self) -> usize { + NUM_COLS + } +} + +impl Air for U256x2048MulChip +where + AB: SP1AirBuilder, +{ + fn eval(&self, builder: &mut AB) { + let main = builder.main(); + let local = main.row_slice(0); + let local: &U256x2048MulCols = (*local).borrow(); + let next = main.row_slice(1); + let next: &U256x2048MulCols = (*next).borrow(); + + // Assert that is_real is a boolean. + builder.assert_bool(local.is_real); + + // Receive the arguments. + builder.receive_syscall( + local.shard, + local.clk, + AB::F::from_canonical_u32(SyscallCode::U256XU2048_MUL.syscall_id()), + local.a_ptr, + local.b_ptr, + local.is_real, + InteractionScope::Local, + ); + + // Evaluate that the lo_ptr and hi_ptr are read from the correct memory locations. + builder.eval_memory_access( + local.shard, + local.clk.into(), + AB::Expr::from_canonical_u32(LO_REGISTER), + &local.lo_ptr_memory, + local.is_real, + ); + + builder.eval_memory_access( + local.shard, + local.clk.into(), + AB::Expr::from_canonical_u32(HI_REGISTER), + &local.hi_ptr_memory, + local.is_real, + ); + + // Evaluate the memory accesses for a_memory and b_memory. + builder.eval_memory_access_slice( + local.shard, + local.clk.into(), + local.a_ptr, + &local.a_memory, + local.is_real, + ); + + builder.eval_memory_access_slice( + local.shard, + local.clk.into(), + local.b_ptr, + &local.b_memory, + local.is_real, + ); + + // Evaluate the memory accesses for lo_memory and hi_memory. + builder.eval_memory_access_slice( + local.shard, + local.clk.into() + AB::Expr::one(), + local.lo_ptr, + &local.lo_memory, + local.is_real, + ); + + builder.eval_memory_access_slice( + local.shard, + local.clk.into() + AB::Expr::one(), + local.hi_ptr, + &local.hi_memory, + local.is_real, + ); + + // Constrain the incrementing nonce. + builder.when_first_row().assert_zero(local.nonce); + builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); + + let a_limbs = + limbs_from_access::::Limbs, _>(&local.a_memory); + + // Iterate through chunks of 8 for b_memory and convert each chunk to its limbs. 
+ let b_limb_array = local + .b_memory + .chunks(8) + .map(limbs_from_access::::Limbs, _>) + .collect::>(); + + let mut coeff_2_256 = Vec::new(); + coeff_2_256.resize(32, AB::Expr::zero()); + coeff_2_256.push(AB::Expr::one()); + let modulus_polynomial: Polynomial = Polynomial::from_coefficients(&coeff_2_256); + + // Evaluate that each of the mul and carry columns are valid. + let outputs = [ + &local.a_mul_b1, + &local.ab2_plus_carry, + &local.ab3_plus_carry, + &local.ab4_plus_carry, + &local.ab5_plus_carry, + &local.ab6_plus_carry, + &local.ab7_plus_carry, + &local.ab8_plus_carry, + ]; + + outputs[0].eval_mul_and_carry( + builder, + &a_limbs, + &b_limb_array[0], + &Polynomial::from_coefficients(&[AB::Expr::zero()]), // Zero polynomial for no previous carry + &modulus_polynomial, + local.is_real, + ); + + for i in 1..outputs.len() { + outputs[i].eval_mul_and_carry( + builder, + &a_limbs, + &b_limb_array[i], + &outputs[i - 1].carry, + &modulus_polynomial, + local.is_real, + ); + } + + // Assert that the correct result is being written to hi_memory. + builder + .when(local.is_real) + .assert_all_eq(outputs[outputs.len() - 1].carry, value_as_limbs(&local.hi_memory)); + + // Loop through chunks of 8 for lo_memory and assert that each chunk is equal to corresponding result of outputs. + for i in 0..8 { + builder.when(local.is_real).assert_all_eq( + outputs[i].result, + value_as_limbs( + &local.lo_memory[i * WORDS_FIELD_ELEMENT..(i + 1) * WORDS_FIELD_ELEMENT], + ), + ); + } + + // Constrain that the lo_ptr is the value of lo_ptr_memory. + builder + .when(local.is_real) + .assert_eq(local.lo_ptr, local.lo_ptr_memory.value().reduce::()); + + // Constrain that the hi_ptr is the value of hi_ptr_memory. + builder + .when(local.is_real) + .assert_eq(local.hi_ptr, local.hi_ptr_memory.value().reduce::()); + } +} diff --git a/crates/core/machine/src/syscall/precompiles/u256x2048_mul/mod.rs b/crates/core/machine/src/syscall/precompiles/u256x2048_mul/mod.rs new file mode 100644 index 0000000000..75a64dd9b3 --- /dev/null +++ b/crates/core/machine/src/syscall/precompiles/u256x2048_mul/mod.rs @@ -0,0 +1,212 @@ +mod air; + +pub use air::*; + +#[cfg(test)] +mod tests { + use num::{BigUint, Integer, One}; + use p3_baby_bear::BabyBear; + use p3_matrix::dense::RowMajorMatrix; + use rand::Rng; + use sp1_core_executor::{ + events::{ + LookupId, MemoryReadRecord, MemoryWriteRecord, PrecompileEvent, SyscallEvent, + U256xU2048MulEvent, + }, + syscalls::SyscallCode, + ExecutionRecord, Program, + }; + use sp1_primitives::consts::bytes_to_words_le; + use sp1_stark::{ + air::MachineAir, baby_bear_poseidon2::BabyBearPoseidon2, CpuProver, StarkGenericConfig, + }; + use test_artifacts::U256XU2048_MUL_ELF; + + use crate::{ + io::SP1Stdin, + utils::{ + self, run_test, + uni_stark::{uni_stark_prove, uni_stark_verify}, + }, + }; + use crate::{ + syscall::precompiles::u256x2048_mul::air::U256x2048MulChip, utils::words_to_bytes_le_vec, + }; + + fn generate_test_execution_record(pass: bool) -> ExecutionRecord { + let mut execution_record = ExecutionRecord::default(); + + let rng = &mut rand::thread_rng(); + let a_ptr: u32 = 0u32; + let b_ptr: u32 = 1u32; + let lo_ptr: u32 = 2u32; + let hi_ptr: u32 = 3u32; + + let lo_ts = 1u32; + let hi_ts = lo_ts + 1; + + let a: Vec = (0..8).map(|_| rng.gen()).collect(); + let b: Vec = (0..64).map(|_| rng.gen()).collect(); + + let uint256_a = BigUint::from_bytes_le(&words_to_bytes_le_vec(&a)); + let uint2048_b = BigUint::from_bytes_le(&words_to_bytes_le_vec(&b)); + + let result = uint256_a * 
uint2048_b; + + let two_to_2048 = BigUint::one() << 2048; + + let (hi_big, lo_big) = result.div_rem(&two_to_2048); + + let mut a_memory_records = Vec::new(); + for i in 0..8 { + a_memory_records.push(MemoryReadRecord { + value: a[i], + shard: 0u32, + timestamp: hi_ts, + prev_shard: 0u32, + prev_timestamp: lo_ts, + }); + } + let mut b_memory_records = Vec::new(); + for i in 0..64 { + b_memory_records.push(MemoryReadRecord { + value: b[i], + shard: 0u32, + timestamp: hi_ts, + prev_shard: 0u32, + prev_timestamp: lo_ts, + }); + } + let lo_ptr_memory = MemoryReadRecord { + value: lo_ptr, + shard: 0u32, + timestamp: hi_ts, + prev_shard: 0u32, + prev_timestamp: lo_ts, + }; + let hi_ptr_memory = MemoryReadRecord { + value: hi_ptr, + shard: 0u32, + timestamp: hi_ts, + prev_shard: 0u32, + prev_timestamp: lo_ts, + }; + + let (lo, hi) = if pass { + let mut lo_bytes = lo_big.to_bytes_le(); + lo_bytes.resize(256, 0u8); + let lo_words = bytes_to_words_le::<64>(&lo_bytes); + + let mut hi_bytes = hi_big.to_bytes_le(); + hi_bytes.resize(32, 0u8); + let hi_words = bytes_to_words_le::<8>(&hi_bytes); + (lo_words.to_vec(), hi_words.to_vec()) + } else { + let lo: Vec = (0..64).map(|_| rng.gen()).collect(); + let hi: Vec = (0..8).map(|_| rng.gen()).collect(); + (lo, hi) + }; + let mut lo_memory_records = Vec::new(); + for i in 0..64 { + lo_memory_records.push(MemoryWriteRecord { + value: lo[i], + shard: 0u32, + timestamp: hi_ts + 1, + prev_value: 0u32, + prev_shard: 0u32, + prev_timestamp: hi_ts, + }); + } + let mut hi_memory_records = Vec::new(); + for i in 0..8 { + hi_memory_records.push(MemoryWriteRecord { + value: hi[i], + shard: 0u32, + timestamp: hi_ts + 1, + prev_value: 0u32, + prev_shard: 0u32, + prev_timestamp: hi_ts, + }); + } + + let lookup_id = LookupId(rng.gen()); + + let event = PrecompileEvent::U256xU2048Mul(U256xU2048MulEvent { + lookup_id, + shard: 0u32, + clk: hi_ts, + a_ptr, + a, + b_ptr, + b, + lo_ptr, + lo, + hi_ptr, + hi, + lo_ptr_memory, + hi_ptr_memory, + a_memory_records, + b_memory_records, + lo_memory_records, + hi_memory_records, + local_mem_access: Vec::new(), + }); + + let syscall_code = SyscallCode::U256XU2048_MUL; + let syscall_event = SyscallEvent { + shard: 0u32, + clk: hi_ts, + lookup_id, + syscall_id: syscall_code as u32, + arg1: a_ptr, + arg2: b_ptr, + nonce: 0u32, + }; + + execution_record.precompile_events.add_event(syscall_code, syscall_event, event); + + execution_record + } + + #[test] + fn test_uint256_mul() { + utils::setup_logger(); + let program = Program::from(U256XU2048_MUL_ELF).unwrap(); + run_test::>(program, SP1Stdin::new()).unwrap(); + } + + #[test] + fn test_u256x2048_mul_pass() { + let config = BabyBearPoseidon2::new(); + let execution_record = generate_test_execution_record(true); + let chip = U256x2048MulChip::new(); + let trace: RowMajorMatrix = + chip.generate_trace(&execution_record, &mut ExecutionRecord::default()); + let proof = uni_stark_prove::( + &config, + &chip, + &mut config.challenger(), + trace, + ); + uni_stark_verify(&config, &chip, &mut config.challenger(), &proof).unwrap(); + } + + #[test] + fn test_u256x2048_mul_failure() { + for _ in 0..10 { + let config = BabyBearPoseidon2::new(); + let execution_record = generate_test_execution_record(false); + let chip = U256x2048MulChip::new(); + let trace: RowMajorMatrix = + chip.generate_trace(&execution_record, &mut ExecutionRecord::default()); + let proof = uni_stark_prove::( + &config, + &chip, + &mut config.challenger(), + trace, + ); + let result = uni_stark_verify(&config, &chip, &mut 
config.challenger(), &proof); + assert!(result.is_err()); + } + } +} diff --git a/crates/core/machine/src/syscall/precompiles/uint256/air.rs b/crates/core/machine/src/syscall/precompiles/uint256/air.rs index 54e0925f9e..3e10f9c4f3 100644 --- a/crates/core/machine/src/syscall/precompiles/uint256/air.rs +++ b/crates/core/machine/src/syscall/precompiles/uint256/air.rs @@ -14,7 +14,7 @@ use crate::{ use generic_array::GenericArray; use num::{BigUint, One, Zero}; -use p3_air::{Air, AirBuilder, BaseAir}; +use p3_air::{Air, BaseAir}; use p3_field::{AbstractField, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use sp1_core_executor::{ @@ -62,9 +62,6 @@ pub struct Uint256MulCols { /// The clock cycle of the syscall. pub clk: T, - /// The nonce of the operation. - pub nonce: T, - /// The pointer to the first input. pub x_ptr: T, @@ -207,17 +204,7 @@ impl MachineAir for Uint256MulChip { ); // Convert the trace to a row major matrix. - let mut trace = - RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_COLS); - - // Write the nonces to the trace. - for i in 0..trace.height() { - let cols: &mut Uint256MulCols = - trace.values[i * NUM_COLS..(i + 1) * NUM_COLS].borrow_mut(); - cols.nonce = F::from_canonical_usize(i); - } - - trace + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), NUM_COLS) } fn included(&self, shard: &Self::Record) -> bool { @@ -227,6 +214,10 @@ impl MachineAir for Uint256MulChip { !shard.get_precompile_events(SyscallCode::UINT256_MUL).is_empty() } } + + fn local_only(&self) -> bool { + true + } } impl BaseAir for Uint256MulChip { @@ -244,12 +235,6 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &Uint256MulCols = (*local).borrow(); - let next = main.row_slice(1); - let next: &Uint256MulCols = (*next).borrow(); - - // Constrain the incrementing nonce. - builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); // We are computing (x * y) % modulus. The value of x is stored in the "prev_value" of // the x_memory, since we write to it later. 
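For reference, the semantics this chip proves are a plain modular multiplication whose result is written back over `x` (hence the read of `prev_value`). A minimal `BigUint` sketch is below; treating a zero modulus as 2^256 is an assumption made for this sketch and is not stated in this hunk.

    use num::{BigUint, One, Zero};

    // Reference semantics for UINT256_MUL: read x (its pre-write value), y, and
    // the modulus, then write (x * y) % modulus back over x. The zero-modulus
    // convention below is an assumption, not something shown in this hunk.
    fn uint256_mulmod(x: &BigUint, y: &BigUint, modulus: &BigUint) -> BigUint {
        let modulus =
            if modulus.is_zero() { BigUint::one() << 256u32 } else { modulus.clone() };
        (x * y) % modulus
    }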
@@ -331,7 +316,6 @@ where builder.receive_syscall( local.shard, local.clk, - local.nonce, AB::F::from_canonical_u32(SyscallCode::UINT256_MUL.syscall_id()), local.x_ptr, local.y_ptr, diff --git a/crates/core/machine/src/syscall/precompiles/uint256/mod.rs b/crates/core/machine/src/syscall/precompiles/uint256/mod.rs index ce50c01cae..fb2cb787be 100644 --- a/crates/core/machine/src/syscall/precompiles/uint256/mod.rs +++ b/crates/core/machine/src/syscall/precompiles/uint256/mod.rs @@ -8,17 +8,18 @@ mod tests { use sp1_core_executor::Program; use sp1_curves::{params::FieldParameters, uint256::U256Field, utils::biguint_from_limbs}; use sp1_stark::CpuProver; + use test_artifacts::UINT256_MUL_ELF; use crate::{ io::SP1Stdin, - utils::{self, run_test_io, tests::UINT256_MUL_ELF}, + utils::{self, run_test}, }; #[test] fn test_uint256_mul() { utils::setup_logger(); let program = Program::from(UINT256_MUL_ELF).unwrap(); - run_test_io::>(program, SP1Stdin::new()).unwrap(); + run_test::>(program, SP1Stdin::new()).unwrap(); } #[test] diff --git a/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_add.rs b/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_add.rs index d678f93086..ea03a08587 100644 --- a/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_add.rs +++ b/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_add.rs @@ -47,7 +47,6 @@ pub const fn num_weierstrass_add_cols() -> usize pub struct WeierstrassAddAssignCols { pub is_real: T, pub shard: T, - pub nonce: T, pub clk: T, pub p_ptr: T, pub q_ptr: T, @@ -252,18 +251,7 @@ impl MachineAir }); // Convert the trace to a row major matrix. - let mut trace = RowMajorMatrix::new(values, num_weierstrass_add_cols::()); - - // Write the nonces to the trace. - for i in 0..trace.height() { - let cols: &mut WeierstrassAddAssignCols = trace.values[i - * num_weierstrass_add_cols::() - ..(i + 1) * num_weierstrass_add_cols::()] - .borrow_mut(); - cols.nonce = F::from_canonical_usize(i); - } - - trace + RowMajorMatrix::new(values, num_weierstrass_add_cols::()) } fn included(&self, shard: &Self::Record) -> bool { @@ -285,6 +273,10 @@ impl MachineAir } } } + + fn local_only(&self) -> bool { + true + } } impl BaseAir for WeierstrassAddAssignChip { @@ -302,12 +294,6 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &WeierstrassAddAssignCols = (*local).borrow(); - let next = main.row_slice(1); - let next: &WeierstrassAddAssignCols = (*next).borrow(); - - // Constrain the incrementing nonce. 
- builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); let num_words_field_element = ::Limbs::USIZE / 4; @@ -418,7 +404,6 @@ where builder.receive_syscall( local.shard, local.clk, - local.nonce, syscall_id_felt, local.p_ptr, local.q_ptr, @@ -466,68 +451,77 @@ mod tests { use sp1_core_executor::Program; use sp1_stark::CpuProver; + use test_artifacts::{ + BLS12381_ADD_ELF, BLS12381_DOUBLE_ELF, BLS12381_MUL_ELF, BN254_ADD_ELF, BN254_MUL_ELF, + SECP256K1_ADD_ELF, SECP256K1_MUL_ELF, SECP256R1_ADD_ELF, + }; - use crate::utils::{ - run_test, setup_logger, - tests::{ - BLS12381_ADD_ELF, BLS12381_DOUBLE_ELF, BLS12381_MUL_ELF, BN254_ADD_ELF, BN254_MUL_ELF, - SECP256K1_ADD_ELF, SECP256K1_MUL_ELF, SECP256R1_ADD_ELF, - }, + use crate::{ + io::SP1Stdin, + utils::{run_test, setup_logger}, }; #[test] fn test_secp256k1_add_simple() { setup_logger(); let program = Program::from(SECP256K1_ADD_ELF).unwrap(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] fn test_secp256r1_add_simple() { setup_logger(); let program = Program::from(SECP256R1_ADD_ELF).unwrap(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] fn test_bn254_add_simple() { setup_logger(); let program = Program::from(BN254_ADD_ELF).unwrap(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] fn test_bn254_mul_simple() { setup_logger(); let program = Program::from(BN254_MUL_ELF).unwrap(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] fn test_secp256k1_mul_simple() { setup_logger(); let program = Program::from(SECP256K1_MUL_ELF).unwrap(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] fn test_bls12381_add_simple() { setup_logger(); let program = Program::from(BLS12381_ADD_ELF).unwrap(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] fn test_bls12381_double_simple() { setup_logger(); let program = Program::from(BLS12381_DOUBLE_ELF).unwrap(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] fn test_bls12381_mul_simple() { setup_logger(); let program = Program::from(BLS12381_MUL_ELF).unwrap(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } } diff --git a/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_decompress.rs b/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_decompress.rs index 1edc1440f9..e428259011 100644 --- a/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_decompress.rs +++ b/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_decompress.rs @@ -49,7 +49,6 @@ pub struct WeierstrassDecompressCols { pub is_real: T, pub shard: T, pub clk: T, - pub nonce: T, pub ptr: T, pub sign_bit: T, pub x_access: GenericArray, P::WordsFieldElement>, @@ -278,16 +277,7 @@ impl MachineAir input.fixed_log2_rows::(self), ); - let mut trace = RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), width); - - // Write the nonces to the trace. 
- for i in 0..trace.height() { - let cols: &mut WeierstrassDecompressCols = - trace.values[i * width..i * width + weierstrass_width].borrow_mut(); - cols.nonce = F::from_canonical_usize(i); - } - - trace + RowMajorMatrix::new(rows.into_iter().flatten().collect::>(), width) } fn included(&self, shard: &Self::Record) -> bool { @@ -308,6 +298,10 @@ impl MachineAir } } } + + fn local_only(&self) -> bool { + true + } } impl BaseAir for WeierstrassDecompressChip { @@ -334,13 +328,6 @@ where let local_slice = main.row_slice(0); let local: &WeierstrassDecompressCols = (*local_slice)[0..weierstrass_cols].borrow(); - let next = main.row_slice(1); - let next: &WeierstrassDecompressCols = - (*next)[0..weierstrass_cols].borrow(); - - // Constrain the incrementing nonce. - builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); let num_limbs = ::Limbs::USIZE; let num_words_field_element = num_limbs / 4; @@ -528,7 +515,6 @@ where builder.receive_syscall( local.shard, local.clk, - local.nonce, syscall_id, local.ptr, local.sign_bit, @@ -542,7 +528,7 @@ where mod tests { use crate::{ io::SP1Stdin, - utils::{self, tests::BLS12381_DECOMPRESS_ELF}, + utils::{self, run_test}, }; use amcl::{ bls381::bls381::{basic::key_pair_generate_g2, utils::deserialize_g1}, @@ -552,9 +538,8 @@ mod tests { use rand::{thread_rng, Rng}; use sp1_core_executor::Program; use sp1_stark::CpuProver; - - use crate::utils::{ - run_test_io, tests::SECP256K1_DECOMPRESS_ELF, tests::SECP256R1_DECOMPRESS_ELF, + use test_artifacts::{ + BLS12381_DECOMPRESS_ELF, SECP256K1_DECOMPRESS_ELF, SECP256R1_DECOMPRESS_ELF, }; #[test] @@ -572,11 +557,9 @@ mod tests { let (_, compressed) = key_pair_generate_g2(&mut rand); let stdin = SP1Stdin::from(&compressed); - let mut public_values = run_test_io::>( - Program::from(BLS12381_DECOMPRESS_ELF).unwrap(), - stdin, - ) - .unwrap(); + let mut public_values = + run_test::>(Program::from(BLS12381_DECOMPRESS_ELF).unwrap(), stdin) + .unwrap(); let mut result = [0; 96]; public_values.read_slice(&mut result); @@ -606,7 +589,7 @@ mod tests { let inputs = SP1Stdin::from(&compressed); - let mut public_values = run_test_io::>( + let mut public_values = run_test::>( Program::from(SECP256K1_DECOMPRESS_ELF).unwrap(), inputs, ) @@ -635,7 +618,7 @@ mod tests { let inputs = SP1Stdin::from(compressed); - let mut public_values = run_test_io::>( + let mut public_values = run_test::>( Program::from(SECP256R1_DECOMPRESS_ELF).unwrap(), inputs, ) diff --git a/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_double.rs b/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_double.rs index 9e18fc60f1..34f3fbfc55 100644 --- a/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_double.rs +++ b/crates/core/machine/src/syscall/precompiles/weierstrass/weierstrass_double.rs @@ -46,7 +46,6 @@ pub const fn num_weierstrass_double_cols() -> usi pub struct WeierstrassDoubleAssignCols { pub is_real: T, pub shard: T, - pub nonce: T, pub clk: T, pub p_ptr: T, pub p_access: GenericArray, P::WordsCurvePoint>, @@ -274,18 +273,7 @@ impl MachineAir }); // Convert the trace to a row major matrix. - let mut trace = RowMajorMatrix::new(values, num_weierstrass_double_cols::()); - - // Write the nonces to the trace. 
- for i in 0..trace.height() { - let cols: &mut WeierstrassDoubleAssignCols = trace.values[i - * num_weierstrass_double_cols::() - ..(i + 1) * num_weierstrass_double_cols::()] - .borrow_mut(); - cols.nonce = F::from_canonical_usize(i); - } - - trace + RowMajorMatrix::new(values, num_weierstrass_double_cols::()) } fn included(&self, shard: &Self::Record) -> bool { @@ -309,6 +297,10 @@ impl MachineAir } } } + + fn local_only(&self) -> bool { + true + } } impl WeierstrassDoubleAssignChip { @@ -352,12 +344,6 @@ where let main = builder.main(); let local = main.row_slice(0); let local: &WeierstrassDoubleAssignCols = (*local).borrow(); - let next = main.row_slice(1); - let next: &WeierstrassDoubleAssignCols = (*next).borrow(); - - // Constrain the incrementing nonce. - builder.when_first_row().assert_zero(local.nonce); - builder.when_transition().assert_eq(local.nonce + AB::Expr::one(), next.nonce); let num_words_field_element = E::BaseField::NB_LIMBS / 4; let p_x = limbs_from_prev_access(&local.p_access[0..num_words_field_element]); @@ -480,7 +466,6 @@ where builder.receive_syscall( local.shard, local.clk, - local.nonce, syscall_id_felt, local.p_ptr, AB::Expr::zero(), @@ -494,39 +479,44 @@ where pub mod tests { use sp1_core_executor::Program; use sp1_stark::CpuProver; + use test_artifacts::{ + BLS12381_DOUBLE_ELF, BN254_DOUBLE_ELF, SECP256K1_DOUBLE_ELF, SECP256R1_DOUBLE_ELF, + }; - use crate::utils::{ - run_test, setup_logger, - tests::{ - BLS12381_DOUBLE_ELF, BN254_DOUBLE_ELF, SECP256K1_DOUBLE_ELF, SECP256R1_DOUBLE_ELF, - }, + use crate::{ + io::SP1Stdin, + utils::{run_test, setup_logger}, }; #[test] fn test_secp256k1_double_simple() { setup_logger(); let program = Program::from(SECP256K1_DOUBLE_ELF).unwrap(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] fn test_secp256r1_double_simple() { setup_logger(); let program = Program::from(SECP256R1_DOUBLE_ELF).unwrap(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] fn test_bn254_double_simple() { setup_logger(); let program = Program::from(BN254_DOUBLE_ELF).unwrap(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } #[test] fn test_bls12381_double_simple() { setup_logger(); let program = Program::from(BLS12381_DOUBLE_ELF).unwrap(); - run_test::>(program).unwrap(); + let stdin = SP1Stdin::new(); + run_test::>(program, stdin).unwrap(); } } diff --git a/crates/core/machine/src/utils/logger.rs b/crates/core/machine/src/utils/logger.rs index c460a31628..e10dd47c0d 100644 --- a/crates/core/machine/src/utils/logger.rs +++ b/crates/core/machine/src/utils/logger.rs @@ -12,14 +12,14 @@ static INIT: Once = Once::new(); /// Set the `RUST_LOG` environment variable to be set to `info` or `debug`. 
pub fn setup_logger() { INIT.call_once(|| { - let default_filter = "off"; let env_filter = EnvFilter::try_from_default_env() - .unwrap_or_else(|_| EnvFilter::new(default_filter)) + .unwrap_or_else(|_| EnvFilter::new("off")) .add_directive("hyper=off".parse().unwrap()) .add_directive("p3_keccak_air=off".parse().unwrap()) .add_directive("p3_fri=off".parse().unwrap()) .add_directive("p3_dft=off".parse().unwrap()) - .add_directive("p3_challenger=off".parse().unwrap()); + .add_directive("p3_challenger=off".parse().unwrap()) + .add_directive("sp1_cuda=off".parse().unwrap()); // if the RUST_LOGGER environment variable is set, use it to determine which logger to // configure (tracing_forest or tracing_subscriber) diff --git a/crates/core/machine/src/utils/mod.rs b/crates/core/machine/src/utils/mod.rs index 124cd402f9..a16298fe2c 100644 --- a/crates/core/machine/src/utils/mod.rs +++ b/crates/core/machine/src/utils/mod.rs @@ -1,22 +1,20 @@ pub mod concurrency; mod logger; -#[cfg(any(test, feature = "programs"))] -mod programs; mod prove; mod span; -mod tracer; +mod test; +pub mod uni_stark; pub use logger::*; use p3_field::Field; pub use prove::*; use sp1_curves::params::Limbs; pub use span::*; -pub use tracer::*; - -#[cfg(any(test, feature = "programs"))] -pub use programs::*; +pub use test::*; +pub use uni_stark::*; use crate::memory::MemoryCols; + use generic_array::ArrayLength; use p3_maybe_rayon::prelude::{ParallelBridge, ParallelIterator}; diff --git a/crates/core/machine/src/utils/programs.rs b/crates/core/machine/src/utils/programs.rs deleted file mode 100644 index 4ee29bb7ad..0000000000 --- a/crates/core/machine/src/utils/programs.rs +++ /dev/null @@ -1,112 +0,0 @@ -pub mod tests { - /// Demos. - - pub const CHESS_ELF: &[u8] = - include_bytes!("../../../../../examples/chess/program/elf/riscv32im-succinct-zkvm-elf"); - - pub const FIBONACCI_IO_ELF: &[u8] = - include_bytes!("../../../../../examples/fibonacci/program/elf/riscv32im-succinct-zkvm-elf"); - - pub const IO_ELF: &[u8] = - include_bytes!("../../../../../examples/io/program/elf/riscv32im-succinct-zkvm-elf"); - - pub const JSON_ELF: &[u8] = - include_bytes!("../../../../../examples/json/program/elf/riscv32im-succinct-zkvm-elf"); - - pub const REGEX_ELF: &[u8] = - include_bytes!("../../../../../examples/regex/program/elf/riscv32im-succinct-zkvm-elf"); - - pub const RSA_ELF: &[u8] = - include_bytes!("../../../../../examples/rsa/program/elf/riscv32im-succinct-zkvm-elf"); - - pub const SSZ_WITHDRAWALS_ELF: &[u8] = include_bytes!( - "../../../../../examples/ssz-withdrawals/program/elf/riscv32im-succinct-zkvm-elf" - ); - - pub const TENDERMINT_ELF: &[u8] = include_bytes!( - "../../../../../examples/tendermint/program/elf/riscv32im-succinct-zkvm-elf" - ); - - /// Tests. 
- - pub const FIBONACCI_ELF: &[u8] = - include_bytes!("../../../../../tests/fibonacci/elf/riscv32im-succinct-zkvm-elf"); - - pub const ED25519_ELF: &[u8] = - include_bytes!("../../../../../tests/ed25519/elf/riscv32im-succinct-zkvm-elf"); - - pub const CYCLE_TRACKER_ELF: &[u8] = - include_bytes!("../../../../../tests/cycle-tracker/elf/riscv32im-succinct-zkvm-elf"); - - pub const ED_ADD_ELF: &[u8] = - include_bytes!("../../../../../tests/ed-add/elf/riscv32im-succinct-zkvm-elf"); - - pub const ED_DECOMPRESS_ELF: &[u8] = - include_bytes!("../../../../../tests/ed-decompress/elf/riscv32im-succinct-zkvm-elf"); - - pub const KECCAK_PERMUTE_ELF: &[u8] = - include_bytes!("../../../../../tests/keccak-permute/elf/riscv32im-succinct-zkvm-elf"); - - pub const KECCAK256_ELF: &[u8] = - include_bytes!("../../../../../tests/keccak256/elf/riscv32im-succinct-zkvm-elf"); - - pub const SECP256K1_ADD_ELF: &[u8] = - include_bytes!("../../../../../tests/secp256k1-add/elf/riscv32im-succinct-zkvm-elf"); - - pub const SECP256K1_DECOMPRESS_ELF: &[u8] = - include_bytes!("../../../../../tests/secp256k1-decompress/elf/riscv32im-succinct-zkvm-elf"); - - pub const SECP256K1_DOUBLE_ELF: &[u8] = - include_bytes!("../../../../../tests/secp256k1-double/elf/riscv32im-succinct-zkvm-elf"); - - pub const SECP256R1_ADD_ELF: &[u8] = - include_bytes!("../../../../../tests/secp256r1-add/elf/riscv32im-succinct-zkvm-elf"); - - pub const SECP256R1_DECOMPRESS_ELF: &[u8] = - include_bytes!("../../../../../tests/secp256r1-decompress/elf/riscv32im-succinct-zkvm-elf"); - - pub const SECP256R1_DOUBLE_ELF: &[u8] = - include_bytes!("../../../../../tests/secp256r1-double/elf/riscv32im-succinct-zkvm-elf"); - - pub const SHA_COMPRESS_ELF: &[u8] = - include_bytes!("../../../../../tests/sha-compress/elf/riscv32im-succinct-zkvm-elf"); - - pub const SHA_EXTEND_ELF: &[u8] = - include_bytes!("../../../../../tests/sha-extend/elf/riscv32im-succinct-zkvm-elf"); - - pub const SHA2_ELF: &[u8] = - include_bytes!("../../../../../tests/sha2/elf/riscv32im-succinct-zkvm-elf"); - - pub const BN254_ADD_ELF: &[u8] = - include_bytes!("../../../../../tests/bn254-add/elf/riscv32im-succinct-zkvm-elf"); - - pub const BN254_DOUBLE_ELF: &[u8] = - include_bytes!("../../../../../tests/bn254-double/elf/riscv32im-succinct-zkvm-elf"); - - pub const BN254_MUL_ELF: &[u8] = - include_bytes!("../../../../../tests/bn254-mul/elf/riscv32im-succinct-zkvm-elf"); - - pub const SECP256K1_MUL_ELF: &[u8] = - include_bytes!("../../../../../tests/secp256k1-mul/elf/riscv32im-succinct-zkvm-elf"); - - pub const BLS12381_ADD_ELF: &[u8] = - include_bytes!("../../../../../tests/bls12381-add/elf/riscv32im-succinct-zkvm-elf"); - - pub const BLS12381_DOUBLE_ELF: &[u8] = - include_bytes!("../../../../../tests/bls12381-double/elf/riscv32im-succinct-zkvm-elf"); - - pub const BLS12381_MUL_ELF: &[u8] = - include_bytes!("../../../../../tests/bls12381-mul/elf/riscv32im-succinct-zkvm-elf"); - - pub const UINT256_MUL_ELF: &[u8] = - include_bytes!("../../../../../tests/uint256-mul/elf/riscv32im-succinct-zkvm-elf"); - - pub const BLS12381_DECOMPRESS_ELF: &[u8] = - include_bytes!("../../../../../tests/bls12381-decompress/elf/riscv32im-succinct-zkvm-elf"); - - pub const VERIFY_PROOF_ELF: &[u8] = - include_bytes!("../../../../../tests/verify-proof/elf/riscv32im-succinct-zkvm-elf"); - - pub const PANIC_ELF: &[u8] = - include_bytes!("../../../../../tests/panic/elf/riscv32im-succinct-zkvm-elf"); -} diff --git a/crates/core/machine/src/utils/prove.rs b/crates/core/machine/src/utils/prove.rs index 62b6d0916e..fefbc4b839 
100644 --- a/crates/core/machine/src/utils/prove.rs +++ b/crates/core/machine/src/utils/prove.rs @@ -1,129 +1,83 @@ +use p3_matrix::dense::RowMajorMatrix; use std::{ - collections::VecDeque, fs::File, - io::{ - Seek, {self}, + io::{self, Seek, SeekFrom}, + sync::{ + mpsc::{channel, sync_channel, Sender}, + Arc, Mutex, }, - sync::{mpsc::sync_channel, Arc, Mutex}, + thread::ScopedJoinHandle, }; use web_time::Instant; use crate::riscv::{CoreShapeConfig, RiscvAir}; -use p3_challenger::FieldChallenger; use p3_maybe_rayon::prelude::*; -use serde::{de::DeserializeOwned, Serialize}; -use size::Size; -use sp1_stark::{ - air::InteractionScope, baby_bear_poseidon2::BabyBearPoseidon2, MachineProvingKey, - MachineVerificationError, -}; -use std::thread::ScopedJoinHandle; +use sp1_stark::MachineProvingKey; +use sp1_stark::StarkVerifyingKey; use thiserror::Error; -use p3_baby_bear::BabyBear; use p3_field::PrimeField32; -use p3_matrix::Matrix; +use sp1_stark::air::MachineAir; use crate::{ io::SP1Stdin, utils::{chunk_vec, concurrency::TurnBasedSync}, }; -use sp1_core_executor::{events::sorted_table_lines, ExecutionState}; -use sp1_primitives::io::SP1PublicValues; +use sp1_core_executor::{ + events::{format_table_line, sorted_table_lines}, + ExecutionState, +}; use sp1_core_executor::{ subproof::NoOpSubproofVerifier, ExecutionError, ExecutionRecord, ExecutionReport, Executor, Program, SP1Context, }; use sp1_stark::{ - air::{MachineAir, PublicValues}, - Com, CpuProver, DebugConstraintBuilder, InteractionBuilder, MachineProof, MachineProver, - MachineRecord, OpeningProof, PcsProverData, ProverConstraintFolder, SP1CoreOpts, - StarkGenericConfig, StarkMachine, StarkProvingKey, StarkVerifyingKey, UniConfig, Val, - VerifierConstraintFolder, + air::PublicValues, Com, MachineProof, MachineProver, MachineRecord, OpeningProof, + PcsProverData, ProofShape, SP1CoreOpts, ShardProof, StarkGenericConfig, Val, }; -#[derive(Error, Debug)] -pub enum SP1CoreProverError { - #[error("failed to execute program: {0}")] - ExecutionError(ExecutionError), - #[error("io error: {0}")] - IoError(io::Error), - #[error("serialization error: {0}")] - SerializationError(bincode::Error), -} - -pub fn prove_simple>>( - config: SC, - mut runtime: Executor, -) -> Result<(MachineProof, u64), SP1CoreProverError> -where - SC::Challenger: Clone, - OpeningProof: Send + Sync, - Com: Send + Sync, - PcsProverData: Send + Sync, - // ShardMainData: Serialize + DeserializeOwned, - ::Val: PrimeField32, -{ - // Setup the machine. - let machine = RiscvAir::machine(config); - let prover = P::new(machine); - let (pk, _) = prover.setup(runtime.program.as_ref()); - - // Set the shard numbers. - runtime.records.iter_mut().enumerate().for_each(|(i, shard)| { - shard.public_values.shard = (i + 1) as u32; - }); - - // Prove the program. - let mut challenger = prover.config().challenger(); - let proving_start = Instant::now(); - let proof = - prover.prove(&pk, runtime.records, &mut challenger, SP1CoreOpts::default()).unwrap(); - let proving_duration = proving_start.elapsed().as_millis(); - let nb_bytes = bincode::serialize(&proof).unwrap().len(); - - // Print the summary. 
- tracing::info!( - "summary: cycles={}, e2e={}, khz={:.2}, proofSize={}", - runtime.state.global_clk, - proving_duration, - (runtime.state.global_clk as f64 / proving_duration as f64), - Size::from_bytes(nb_bytes), - ); - - Ok((proof, runtime.state.global_clk)) -} - -pub fn prove>>( +#[allow(clippy::too_many_arguments)] +pub fn prove_core>>( + prover: &P, + pk: &P::DeviceProvingKey, + _: &StarkVerifyingKey, program: Program, stdin: &SP1Stdin, - config: SC, opts: SP1CoreOpts, + context: SP1Context, shape_config: Option<&CoreShapeConfig>, ) -> Result<(MachineProof, Vec, u64), SP1CoreProverError> where + SC::Val: PrimeField32, SC::Challenger: 'static + Clone + Send, - ::Val: PrimeField32, OpeningProof: Send, Com: Send + Sync, PcsProverData: Send + Sync, { - let machine = RiscvAir::machine(config); - let prover = P::new(machine); - let (pk, _) = prover.setup(&program); - prove_with_context::( - &prover, - &pk, + let (proof_tx, proof_rx) = channel(); + let (shape_tx, shape_rx) = channel(); + let (public_values, cycles) = prove_core_stream( + prover, + pk, program, stdin, opts, - Default::default(), + context, shape_config, - ) + proof_tx, + shape_tx, + )?; + + let _: Vec<_> = shape_rx.iter().collect(); + let shard_proofs: Vec> = proof_rx.iter().collect(); + let proof = MachineProof { shard_proofs }; + + Ok((proof, public_values, cycles)) } -pub fn prove_with_context>>( +#[allow(clippy::too_many_arguments)] +pub fn prove_core_stream>>( prover: &P, pk: &P::DeviceProvingKey, program: Program, @@ -131,7 +85,9 @@ pub fn prove_with_context>, -) -> Result<(MachineProof, Vec, u64), SP1CoreProverError> + proof_tx: Sender>, + shape_and_done_tx: Sender<(ProofShape, bool)>, +) -> Result<(Vec, u64), SP1CoreProverError> where SC::Val: PrimeField32, SC::Challenger: 'static + Clone + Send, @@ -161,7 +117,7 @@ where // Spawn the checkpoint generator thread. let checkpoint_generator_span = tracing::Span::current().clone(); let (checkpoints_tx, checkpoints_rx) = - sync_channel::<(usize, File, bool)>(opts.checkpoints_channel_capacity); + sync_channel::<(usize, File, bool, u64)>(opts.checkpoints_channel_capacity); let checkpoint_generator_handle: ScopedJoinHandle> = s.spawn(move || { let _span = checkpoint_generator_span.enter(); @@ -173,7 +129,7 @@ where let _span = span.enter(); // Execute the runtime until we reach a checkpoint. - let (checkpoint, done) = runtime + let (checkpoint, _, done) = runtime .execute_state(false) .map_err(SP1CoreProverError::ExecutionError)?; @@ -185,7 +141,9 @@ where .map_err(SP1CoreProverError::IoError)?; // Send the checkpoint. - checkpoints_tx.send((index, checkpoint_file, done)).unwrap(); + checkpoints_tx + .send((index, checkpoint_file, done, runtime.state.global_clk)) + .unwrap(); // If we've reached the final checkpoint, break out of the loop. if done { @@ -198,263 +156,36 @@ where }) }); - // Spawn the workers for phase 1 record generation. 
- let p1_record_gen_sync = Arc::new(TurnBasedSync::new()); - let p1_trace_gen_sync = Arc::new(TurnBasedSync::new()); - let (p1_records_and_traces_tx, p1_records_and_traces_rx) = - sync_channel::<(Vec, Vec>)>>)>( - opts.records_and_traces_channel_capacity, - ); - let p1_records_and_traces_tx = Arc::new(Mutex::new(p1_records_and_traces_tx)); - let checkpoints_rx = Arc::new(Mutex::new(checkpoints_rx)); - - let checkpoints = Arc::new(Mutex::new(VecDeque::new())); - let state = Arc::new(Mutex::new(PublicValues::::default().reset())); - let deferred = Arc::new(Mutex::new(ExecutionRecord::new(program.clone().into()))); - let mut p1_record_and_trace_gen_handles = Vec::new(); - for _ in 0..opts.trace_gen_workers { - let record_gen_sync = Arc::clone(&p1_record_gen_sync); - let trace_gen_sync = Arc::clone(&p1_trace_gen_sync); - let checkpoints_rx = Arc::clone(&checkpoints_rx); - let records_and_traces_tx = Arc::clone(&p1_records_and_traces_tx); - - let checkpoints = Arc::clone(&checkpoints); - let state = Arc::clone(&state); - let deferred = Arc::clone(&deferred); - let program = program.clone(); - - let span = tracing::Span::current().clone(); - - let handle = s.spawn(move || { - let _span = span.enter(); - tracing::debug_span!("phase 1 trace generation").in_scope(|| { - loop { - // Receive the latest checkpoint. - let received = { checkpoints_rx.lock().unwrap().recv() }; - - if let Ok((index, mut checkpoint, done)) = received { - // Trace the checkpoint and reconstruct the execution records. - let (mut records, _) = tracing::debug_span!("trace checkpoint") - .in_scope(|| { - trace_checkpoint::( - program.clone(), - &checkpoint, - opts, - shape_config, - ) - }); - log::info!("generated {} records", records.len()); - reset_seek(&mut checkpoint); - - // Wait for our turn to update the state. - log::info!("waiting for turn {}", index); - record_gen_sync.wait_for_turn(index); - - // Update the public values & prover state for the shards which contain - // "cpu events". - let mut state = state.lock().unwrap(); - for record in records.iter_mut() { - state.shard += 1; - state.execution_shard = record.public_values.execution_shard; - state.start_pc = record.public_values.start_pc; - state.next_pc = record.public_values.next_pc; - state.committed_value_digest = - record.public_values.committed_value_digest; - state.deferred_proofs_digest = - record.public_values.deferred_proofs_digest; - record.public_values = *state; - } - - // Defer events that are too expensive to include in every shard. - let mut deferred = deferred.lock().unwrap(); - for record in records.iter_mut() { - deferred.append(&mut record.defer()); - } - - // See if any deferred shards are ready to be committed to. - let mut deferred = deferred.split(done, opts.split_opts); - log::info!("deferred {} records", deferred.len()); - - // Update the public values & prover state for the shards which do not - // contain "cpu events" before committing to them. 
- if !done { - state.execution_shard += 1; - } - for record in deferred.iter_mut() { - state.shard += 1; - state.previous_init_addr_bits = - record.public_values.previous_init_addr_bits; - state.last_init_addr_bits = - record.public_values.last_init_addr_bits; - state.previous_finalize_addr_bits = - record.public_values.previous_finalize_addr_bits; - state.last_finalize_addr_bits = - record.public_values.last_finalize_addr_bits; - state.start_pc = state.next_pc; - record.public_values = *state; - } - records.append(&mut deferred); - - // Collect the checkpoints to be used again in the phase 2 prover. - log::info!("collecting checkpoints"); - let mut checkpoints = checkpoints.lock().unwrap(); - checkpoints.push_back((index, checkpoint, done)); - - // Let another worker update the state. - record_gen_sync.advance_turn(); - - // Fix the shape of the records. - if let Some(shape_config) = shape_config { - for record in records.iter_mut() { - tracing::info!("fixing shape"); - shape_config.fix_shape(record).unwrap(); - } - } - - // Generate the traces. - let mut traces = vec![]; - tracing::debug_span!("generate traces", index).in_scope(|| { - traces = records - .par_iter() - .map(|record| { - prover.generate_traces(record, InteractionScope::Global) - }) - .collect::>(); - }); - - // Wait for our turn. - trace_gen_sync.wait_for_turn(index); - - // Send the records to the phase 1 prover. - let chunked_records = chunk_vec(records, opts.shard_batch_size); - let chunked_traces = chunk_vec(traces, opts.shard_batch_size); - chunked_records.into_iter().zip(chunked_traces).for_each( - |(records, traces)| { - records_and_traces_tx - .lock() - .unwrap() - .send((records, traces)) - .unwrap(); - }, - ); - - trace_gen_sync.advance_turn(); - } else { - break; - } - } - }) - }); - p1_record_and_trace_gen_handles.push(handle); - } - drop(p1_records_and_traces_tx); - // Create the challenger and observe the verifying key. let mut challenger = prover.config().challenger(); pk.observe_into(&mut challenger); - // Spawn the phase 1 prover thread. - let phase_1_prover_span = tracing::Span::current().clone(); - let phase_1_prover_handle = s.spawn(move || { - let _span = phase_1_prover_span.enter(); - tracing::debug_span!("phase 1 prover").in_scope(|| { - for (records, traces) in p1_records_and_traces_rx.iter() { - tracing::debug_span!("batch").in_scope(|| { - let span = tracing::Span::current().clone(); - - // Collect the public values. - let public_values = records - .iter() - .map(|record| { - record.public_values::()[0..prover.machine().num_pv_elts()] - .to_vec() - }) - .collect::>(); - - // Commit to each shard. - let commitments = records - .into_par_iter() - .zip(traces.into_par_iter()) - .map(|(record, traces)| { - let _span = span.enter(); - - for (name, trace) in traces.clone() { - let trace_width = trace.width(); - let trace_height = trace.height(); - tracing::debug!( - "Phase 1 area: {:<15} | Main Cols = {:<5} | Rows = {:<5} | Cells = {:<10}", - name, - trace_width, - trace_height, - trace_width * trace_height, - ); - - } - - let data = prover.commit(&record, traces); - let phase1_main_commit = data.main_commit.clone(); - drop(data); - phase1_main_commit - }) - .collect::>(); - - // the commitments. - for (commit, public_values) in - commitments.into_iter().zip(public_values.into_iter()) - { - prover.observe(&mut challenger, commit.clone(), &public_values); - } - }); - } - }); - - challenger - }); - - // Wait until the checkpoint generator handle has fully finished. 
- let public_values_stream = checkpoint_generator_handle.join().unwrap().unwrap(); - - // Wait until the records and traces have been fully generated. - p1_record_and_trace_gen_handles.into_iter().for_each(|handle| handle.join().unwrap()); - - // Wait until the phase 1 prover has completely finished. - let mut challenger = phase_1_prover_handle.join().unwrap(); - - // Sample for the global permutation challenges. - // Obtain the challenges used for the global permutation argument. - let mut global_permutation_challenges: Vec = Vec::new(); - for _ in 0..2 { - global_permutation_challenges.push(challenger.sample_ext_element()); - } - // Spawn the phase 2 record generator thread. let p2_record_gen_sync = Arc::new(TurnBasedSync::new()); let p2_trace_gen_sync = Arc::new(TurnBasedSync::new()); let (p2_records_and_traces_tx, p2_records_and_traces_rx) = - sync_channel::<( - Vec, - ( - Vec>)>>, - Vec>)>>, - ), - )>(opts.records_and_traces_channel_capacity); + sync_channel::<(Vec, Vec>)>>)>( + opts.records_and_traces_channel_capacity, + ); let p2_records_and_traces_tx = Arc::new(Mutex::new(p2_records_and_traces_tx)); + let shape_tx = Arc::new(Mutex::new(shape_and_done_tx)); let report_aggregate = Arc::new(Mutex::new(ExecutionReport::default())); let state = Arc::new(Mutex::new(PublicValues::::default().reset())); let deferred = Arc::new(Mutex::new(ExecutionRecord::new(program.clone().into()))); let mut p2_record_and_trace_gen_handles = Vec::new(); + let checkpoints_rx = Arc::new(Mutex::new(checkpoints_rx)); for _ in 0..opts.trace_gen_workers { let record_gen_sync = Arc::clone(&p2_record_gen_sync); let trace_gen_sync = Arc::clone(&p2_trace_gen_sync); let records_and_traces_tx = Arc::clone(&p2_records_and_traces_tx); + let checkpoints_rx = Arc::clone(&checkpoints_rx); + let shape_tx = Arc::clone(&shape_tx); let report_aggregate = Arc::clone(&report_aggregate); - let checkpoints = Arc::clone(&checkpoints); let state = Arc::clone(&state); let deferred = Arc::clone(&deferred); let program = program.clone(); - let span = tracing::Span::current().clone(); #[cfg(feature = "debug")] @@ -464,10 +195,8 @@ where let _span = span.enter(); tracing::debug_span!("phase 2 trace generation").in_scope(|| { loop { - // Receive the latest checkpoint. - let received = { checkpoints.lock().unwrap().pop_front() }; - if let Some((index, mut checkpoint, done)) = received { - // Trace the checkpoint and reconstruct the execution records. + let received = { checkpoints_rx.lock().unwrap().recv() }; + if let Ok((index, mut checkpoint, done, num_cycles)) = received { let (mut records, report) = tracing::debug_span!("trace checkpoint") .in_scope(|| { trace_checkpoint::( @@ -477,9 +206,13 @@ where shape_config, ) }); + + // Trace the checkpoint and reconstruct the execution records. log::info!("generated {} records", records.len()); *report_aggregate.lock().unwrap() += report; - reset_seek(&mut checkpoint); + checkpoint + .seek(SeekFrom::Start(0)) + .expect("failed to seek to start of tempfile"); // Wait for our turn to update the state. record_gen_sync.wait_for_turn(index); @@ -499,14 +232,33 @@ where record.public_values = *state; } + tracing::info!("Records length:{}, done: {}", records.len(), done); + // Defer events that are too expensive to include in every shard. 
let mut deferred = deferred.lock().unwrap(); for record in records.iter_mut() { deferred.append(&mut record.defer()); } + // tracing::info!("Deferred length: {}", deferred.len()); + + let last_record = if done + && num_cycles < 1 << 26 + && deferred.global_memory_initialize_events.len() + < opts.split_opts.memory / 4 + && deferred.global_memory_finalize_events.len() + < opts.split_opts.memory / 4 + { + tracing::info!("Number of cycles: {}", num_cycles); + records.last_mut() + } else { + None + }; + + tracing::info!("Last record is some: {:?}", last_record.is_some()); + // See if any deferred shards are ready to be committed to. - let mut deferred = deferred.split(done, opts.split_opts); + let mut deferred = deferred.split(done, last_record, opts.split_opts); log::info!("deferred {} records", deferred.len()); // Update the public values & prover state for the shards which do not @@ -544,27 +296,31 @@ where } } + // Send the shapes to the channel, if necessary. + for record in records.iter() { + let mut heights = vec![]; + let chips = prover.shard_chips(record).collect::>(); + if let Some(shape) = record.shape.as_ref() { + for chip in chips.iter() { + let height = shape.inner[&chip.name()]; + heights.push((chip.name().clone(), height)); + } + shape_tx + .lock() + .unwrap() + .send((ProofShape::from_log2_heights(&heights), done)) + .unwrap(); + } + } + #[cfg(feature = "debug")] all_records_tx.send(records.clone()).unwrap(); - // Generate the traces. - let mut local_traces = Vec::new(); - tracing::debug_span!("generate local traces", index).in_scope(|| { - local_traces = records - .par_iter() - .map(|record| { - prover.generate_traces(record, InteractionScope::Local) - }) - .collect::>(); - }); - - let mut global_traces = Vec::new(); - tracing::debug_span!("generate global traces", index).in_scope(|| { - global_traces = records + let mut main_traces = Vec::new(); + tracing::debug_span!("generate main traces", index).in_scope(|| { + main_traces = records .par_iter() - .map(|record| { - prover.generate_traces(record, InteractionScope::Global) - }) + .map(|record| prover.generate_traces(record)) .collect::>(); }); @@ -572,19 +328,15 @@ where // Send the records to the phase 2 prover. let chunked_records = chunk_vec(records, opts.shard_batch_size); - let chunked_global_traces = - chunk_vec(global_traces, opts.shard_batch_size); - let chunked_local_traces = - chunk_vec(local_traces, opts.shard_batch_size); + let chunked_main_traces = chunk_vec(main_traces, opts.shard_batch_size); chunked_records .into_iter() - .zip(chunked_global_traces.into_iter()) - .zip(chunked_local_traces.into_iter()) - .for_each(|((records, global_traces), local_traces)| { + .zip(chunked_main_traces.into_iter()) + .for_each(|(records, main_traces)| { records_and_traces_tx .lock() .unwrap() - .send((records, (global_traces, local_traces))) + .send((records, main_traces)) .unwrap(); }); @@ -603,68 +355,62 @@ where // Spawn the phase 2 prover thread. 
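The `last_record` branch above decides whether the deferred global memory events can be folded into the final core shard instead of being split into separate memory shards. Restated as a standalone predicate with illustrative names (the thresholds mirror the code above):

    // Fold deferred global memory init/finalize events into the last core shard
    // only when execution is done, the run is small (< 2^26 cycles), and both
    // deferred event counts are well under the configured split threshold.
    fn fold_memory_into_last_shard(
        done: bool,
        num_cycles: u64,
        init_events: usize,
        finalize_events: usize,
        split_threshold: usize,
    ) -> bool {
        done
            && num_cycles < (1 << 26)
            && init_events < split_threshold / 4
            && finalize_events < split_threshold / 4
    }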
let p2_prover_span = tracing::Span::current().clone(); + let proof_tx = Arc::new(Mutex::new(proof_tx)); let p2_prover_handle = s.spawn(move || { let _span = p2_prover_span.enter(); - let mut shard_proofs = Vec::new(); tracing::debug_span!("phase 2 prover").in_scope(|| { for (records, traces) in p2_records_and_traces_rx.into_iter() { tracing::debug_span!("batch").in_scope(|| { let span = tracing::Span::current().clone(); - shard_proofs.par_extend( - records.into_par_iter().zip(traces.into_par_iter()).map( - |(record, (global_traces, local_traces))| { - let _span = span.enter(); - - let global_commit_span = - tracing::debug_span!("commit to global traces").entered(); - let global_data = prover.commit(&record, global_traces); - global_commit_span.exit(); - let local_commit_span = - tracing::debug_span!("commit to local traces").entered(); - let local_data = prover.commit(&record, local_traces); - local_commit_span.exit(); - - let opening_span = tracing::debug_span!("opening").entered(); - let proof = prover - .open( - pk, - Some(global_data), - local_data, - &mut challenger.clone(), - &global_permutation_challenges, - ) - .unwrap(); - opening_span.exit(); - - #[cfg(debug_assertions)] - { - if let Some(shape) = record.shape.as_ref() { - assert_eq!( - proof.shape(), - shape.clone().into_iter().collect(), - ); - } + let proofs = records + .into_par_iter() + .zip(traces.into_par_iter()) + .map(|(record, main_traces)| { + let _span = span.enter(); + + let main_data = prover.commit(&record, main_traces); + + let opening_span = tracing::debug_span!("opening").entered(); + let proof = + prover.open(pk, main_data, &mut challenger.clone()).unwrap(); + opening_span.exit(); + + #[cfg(debug_assertions)] + { + if let Some(shape) = record.shape.as_ref() { + assert_eq!( + proof.shape(), + shape.clone().into_iter().collect(), + ); } + } - rayon::spawn(move || { - drop(record); - }); + rayon::spawn(move || { + drop(record); + }); - proof - }, - ), - ); + proof + }) + .collect::>(); + + // Send the batch of proofs to the channel. + let proof_tx = proof_tx.lock().unwrap(); + for proof in proofs { + proof_tx.send(proof).unwrap(); + } }); } }); - shard_proofs }); + // Wait until the checkpoint generator handle has fully finished. + let public_values_stream = checkpoint_generator_handle.join().unwrap().unwrap(); + // Wait until the records and traces have been fully generated for phase 2. p2_record_and_trace_gen_handles.into_iter().for_each(|handle| handle.join().unwrap()); // Wait until the phase 2 prover has finished. - let shard_proofs = p2_prover_handle.join().unwrap(); + p2_prover_handle.join().unwrap(); // Log some of the `ExecutionReport` information. let report_aggregate = report_aggregate.lock().unwrap(); @@ -678,25 +424,34 @@ where // Print the opcode and syscall count tables like `du`: sorted by count (descending) and // with the count in the first column. 
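With shard proofs now pushed through `proof_tx` as they are opened (and shapes through the shape channel), a caller of `prove_core_stream` can act on each proof as it arrives instead of waiting for a complete `MachineProof`, which is what the `prove_core` wrapper does by draining the receivers. Below is a minimal, self-contained illustration of that producer/consumer pattern; the `String` payload is a stand-in for `ShardProof`, not the real type.

    use std::{sync::mpsc::channel, thread};

    fn main() {
        // Stand-in for the proof channel created by the caller of prove_core_stream.
        let (proof_tx, proof_rx) = channel::<String>();

        let producer = thread::spawn(move || {
            for i in 0..3 {
                // Stand-in for committing and opening one shard.
                proof_tx.send(format!("shard proof {i}")).unwrap();
            }
            // Dropping the sender closes the channel and ends the consumer loop.
        });

        for proof in proof_rx.iter() {
            // Stand-in for verifying, persisting, or forwarding each shard proof.
            println!("received {proof}");
        }
        producer.join().unwrap();
    }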
tracing::info!("execution report (opcode counts):"); - for line in sorted_table_lines(report_aggregate.opcode_counts.as_ref()) { - tracing::info!(" {line}"); + let (width, lines) = sorted_table_lines(report_aggregate.opcode_counts.as_ref()); + for (label, count) in lines { + if *count > 0 { + tracing::info!(" {}", format_table_line(&width, &label, count)); + } else { + tracing::debug!(" {}", format_table_line(&width, &label, count)); + } } + tracing::info!("execution report (syscall counts):"); - for line in sorted_table_lines(report_aggregate.syscall_counts.as_ref()) { - tracing::info!(" {line}"); + let (width, lines) = sorted_table_lines(report_aggregate.syscall_counts.as_ref()); + for (label, count) in lines { + if *count > 0 { + tracing::info!(" {}", format_table_line(&width, &label, count)); + } else { + tracing::debug!(" {}", format_table_line(&width, &label, count)); + } } - let proof = MachineProof:: { shard_proofs }; let cycles = report_aggregate.total_instruction_count(); // Print the summary. let proving_time = proving_start.elapsed().as_secs_f64(); tracing::info!( - "summary: cycles={}, e2e={}s, khz={:.2}, proofSize={}", + "summary: cycles={}, e2e={}s, khz={:.2}", cycles, proving_time, (cycles as f64 / (proving_time * 1000.0) as f64), - bincode::serialize(&proof).unwrap().len(), ); #[cfg(feature = "debug")] @@ -707,143 +462,11 @@ where prover.machine().debug_constraints(&pk_host, all_records, &mut challenger); } - Ok((proof, public_values_stream, cycles)) + Ok((public_values_stream, cycles)) }) } -/// Runs a program and returns the public values stream. -pub fn run_test_io>>( - mut program: Program, - inputs: SP1Stdin, -) -> Result> { - let shape_config = CoreShapeConfig::::default(); - shape_config.fix_preprocessed_shape(&mut program).unwrap(); - let runtime = tracing::debug_span!("runtime.run(...)").in_scope(|| { - let mut runtime = Executor::new(program, SP1CoreOpts::default()); - runtime.maximal_shapes = - Some(shape_config.maximal_core_shapes().into_iter().map(|s| s.inner).collect()); - runtime.write_vecs(&inputs.buffer); - runtime.run().unwrap(); - runtime - }); - let public_values = SP1PublicValues::from(&runtime.state.public_values_stream); - - let _ = run_test_core::

(runtime, inputs, Some(&shape_config))?; - Ok(public_values) -} - -pub fn run_test>>( - mut program: Program, -) -> Result, MachineVerificationError> { - let shape_config = CoreShapeConfig::default(); - shape_config.fix_preprocessed_shape(&mut program).unwrap(); - let runtime = tracing::debug_span!("runtime.run(...)").in_scope(|| { - let mut runtime = Executor::new(program, SP1CoreOpts::default()); - runtime.maximal_shapes = - Some(shape_config.maximal_core_shapes().into_iter().map(|s| s.inner).collect()); - runtime.run().unwrap(); - runtime - }); - run_test_core::

(runtime, SP1Stdin::new(), Some(&shape_config)) -} - -#[allow(unused_variables)] -pub fn run_test_core>>( - runtime: Executor, - inputs: SP1Stdin, - shape_config: Option<&CoreShapeConfig>, -) -> Result, MachineVerificationError> { - let config = BabyBearPoseidon2::new(); - let machine = RiscvAir::machine(config); - let prover = P::new(machine); - - let (pk, _) = prover.setup(runtime.program.as_ref()); - let (proof, output, _) = prove_with_context( - &prover, - &pk, - Program::clone(&runtime.program), - &inputs, - SP1CoreOpts::default(), - SP1Context::default(), - shape_config, - ) - .unwrap(); - - let config = BabyBearPoseidon2::new(); - let machine = RiscvAir::machine(config); - let (pk, vk) = machine.setup(runtime.program.as_ref()); - let mut challenger = machine.config().challenger(); - machine.verify(&vk, &proof, &mut challenger).unwrap(); - - Ok(proof) -} - -#[allow(unused_variables)] -pub fn run_test_machine_with_prover>( - prover: &P, - records: Vec, - pk: P::DeviceProvingKey, - vk: StarkVerifyingKey, -) -> Result, MachineVerificationError> -where - A: MachineAir - + Air>> - + for<'a> Air> - + for<'a> Air, SC::Challenge>>, - A::Record: MachineRecord, - SC: StarkGenericConfig, - SC::Val: p3_field::PrimeField32, - SC::Challenger: Clone, - Com: Send + Sync, - PcsProverData: Send + Sync + Serialize + DeserializeOwned, - OpeningProof: Send + Sync, -{ - let mut challenger = prover.config().challenger(); - let prove_span = tracing::debug_span!("prove").entered(); - - #[cfg(feature = "debug")] - prover.machine().debug_constraints( - &prover.pk_to_host(&pk), - records.clone(), - &mut challenger.clone(), - ); - - let proof = prover.prove(&pk, records, &mut challenger, SP1CoreOpts::default()).unwrap(); - prove_span.exit(); - let nb_bytes = bincode::serialize(&proof).unwrap().len(); - - let mut challenger = prover.config().challenger(); - prover.machine().verify(&vk, &proof, &mut challenger)?; - - Ok(proof) -} - -#[allow(unused_variables)] -pub fn run_test_machine( - records: Vec, - machine: StarkMachine, - pk: StarkProvingKey, - vk: StarkVerifyingKey, -) -> Result, MachineVerificationError> -where - A: MachineAir - + for<'a> Air> - + Air>> - + for<'a> Air> - + for<'a> Air, SC::Challenge>>, - A::Record: MachineRecord, - SC: StarkGenericConfig, - SC::Val: p3_field::PrimeField32, - SC::Challenger: Clone, - Com: Send + Sync, - PcsProverData: Send + Sync + Serialize + DeserializeOwned, - OpeningProof: Send + Sync, -{ - let prover = CpuProver::new(machine); - run_test_machine_with_prover::>(&prover, records, pk, vk) -} - -fn trace_checkpoint( +pub fn trace_checkpoint( program: Program, file: &File, opts: SP1CoreOpts, @@ -855,7 +478,7 @@ where let mut reader = std::io::BufReader::new(file); let state: ExecutionState = bincode::deserialize_from(&mut reader).expect("failed to deserialize state"); - let mut runtime = Executor::recover(program.clone(), state.clone(), opts); + let mut runtime = Executor::recover(program, state, opts); runtime.maximal_shapes = shape_config .map(|config| config.maximal_core_shapes().into_iter().map(|s| s.inner).collect()); @@ -869,74 +492,12 @@ where (records, runtime.report) } -fn reset_seek(file: &mut File) { - file.seek(std::io::SeekFrom::Start(0)).expect("failed to seek to start of tempfile"); -} - -#[cfg(debug_assertions)] -#[cfg(not(doctest))] -pub fn uni_stark_prove( - config: &SC, - air: &A, - challenger: &mut SC::Challenger, - trace: RowMajorMatrix, -) -> Proof> -where - SC: StarkGenericConfig, - A: Air> - + for<'a> Air>> - + for<'a> Air>, -{ - 
p3_uni_stark::prove(&UniConfig(config.clone()), air, challenger, trace, &vec![]) -} - -#[cfg(not(debug_assertions))] -pub fn uni_stark_prove( - config: &SC, - air: &A, - challenger: &mut SC::Challenger, - trace: RowMajorMatrix, -) -> Proof> -where - SC: StarkGenericConfig, - A: Air> - + for<'a> Air>>, -{ - p3_uni_stark::prove(&UniConfig(config.clone()), air, challenger, trace, &vec![]) -} - -#[cfg(debug_assertions)] -#[cfg(not(doctest))] -pub fn uni_stark_verify( - config: &SC, - air: &A, - challenger: &mut SC::Challenger, - proof: &Proof>, -) -> Result<(), p3_uni_stark::VerificationError> -where - SC: StarkGenericConfig, - A: Air> - + for<'a> Air>> - + for<'a> Air>, -{ - p3_uni_stark::verify(&UniConfig(config.clone()), air, challenger, proof, &vec![]) -} - -#[cfg(not(debug_assertions))] -pub fn uni_stark_verify( - config: &SC, - air: &A, - challenger: &mut SC::Challenger, - proof: &Proof>, -) -> Result<(), p3_uni_stark::VerificationError> -where - SC: StarkGenericConfig, - A: Air> - + for<'a> Air>>, -{ - p3_uni_stark::verify(&UniConfig(config.clone()), air, challenger, proof, &vec![]) +#[derive(Error, Debug)] +pub enum SP1CoreProverError { + #[error("failed to execute program: {0}")] + ExecutionError(ExecutionError), + #[error("io error: {0}")] + IoError(io::Error), + #[error("serialization error: {0}")] + SerializationError(bincode::Error), } - -use p3_air::Air; -use p3_matrix::dense::RowMajorMatrix; -use p3_uni_stark::Proof; diff --git a/crates/core/machine/src/utils/span.rs b/crates/core/machine/src/utils/span.rs index 8e8ce69933..c5c02b4ab1 100644 --- a/crates/core/machine/src/utils/span.rs +++ b/crates/core/machine/src/utils/span.rs @@ -1,6 +1,6 @@ use std::{collections::HashMap, fmt::Display, hash::Hash, iter::once}; -use sp1_core_executor::events::sorted_table_lines; +use sp1_core_executor::events::{format_table_line, sorted_table_lines}; use thiserror::Error; /// A builder to create a [`Span`]. @@ -116,13 +116,15 @@ where /// Format and yield lines describing this span. Appropriate for logging. pub fn lines(&self) -> Vec { let Self { name, cts: instr_cts, children } = self; + let (width, lines) = sorted_table_lines(instr_cts); + let lines = lines.map(|(label, count)| format_table_line(&width, &label, count)); once(format!("{}", name)) .chain( children .iter() .flat_map(|c| c.lines()) - .chain(sorted_table_lines(instr_cts)) + .chain(lines) .map(|line| format!("│ {line}")), ) .chain(once(format!("└╴ {} total", self.total()))) diff --git a/crates/core/machine/src/utils/test.rs b/crates/core/machine/src/utils/test.rs new file mode 100644 index 0000000000..f18509a601 --- /dev/null +++ b/crates/core/machine/src/utils/test.rs @@ -0,0 +1,138 @@ +use p3_air::Air; +use p3_baby_bear::BabyBear; +use serde::{de::DeserializeOwned, Serialize}; +use sp1_core_executor::{Executor, Program, SP1Context}; +use sp1_primitives::io::SP1PublicValues; +use sp1_stark::{ + air::MachineAir, baby_bear_poseidon2::BabyBearPoseidon2, Com, CpuProver, + DebugConstraintBuilder, InteractionBuilder, MachineProof, MachineProver, MachineRecord, + MachineVerificationError, OpeningProof, PcsProverData, ProverConstraintFolder, SP1CoreOpts, + StarkGenericConfig, StarkMachine, StarkProvingKey, StarkVerifyingKey, Val, + VerifierConstraintFolder, +}; + +use crate::{ + io::SP1Stdin, + riscv::{CoreShapeConfig, RiscvAir}, +}; + +use super::prove_core; + +/// The canonical entry point for testing a [`Program`] and [`SP1Stdin`] with a [`MachineProver`]. 
+pub fn run_test>>( + mut program: Program, + inputs: SP1Stdin, +) -> Result> { + let shape_config = CoreShapeConfig::::default(); + shape_config.fix_preprocessed_shape(&mut program).unwrap(); + + let runtime = tracing::debug_span!("runtime.run(...)").in_scope(|| { + let mut runtime = Executor::new(program, SP1CoreOpts::default()); + runtime.maximal_shapes = + Some(shape_config.maximal_core_shapes().into_iter().map(|s| s.inner).collect()); + runtime.write_vecs(&inputs.buffer); + runtime.run().unwrap(); + runtime + }); + let public_values = SP1PublicValues::from(&runtime.state.public_values_stream); + + let _ = run_test_core::

(runtime, inputs, Some(&shape_config))?; + Ok(public_values) +} + +#[allow(unused_variables)] +pub fn run_test_core>>( + runtime: Executor, + inputs: SP1Stdin, + shape_config: Option<&CoreShapeConfig>, +) -> Result, MachineVerificationError> { + let config = BabyBearPoseidon2::new(); + let machine = RiscvAir::machine(config); + let prover = P::new(machine); + + let (pk, vk) = prover.setup(runtime.program.as_ref()); + let (proof, output, _) = prove_core( + &prover, + &pk, + &vk, + Program::clone(&runtime.program), + &inputs, + SP1CoreOpts::default(), + SP1Context::default(), + shape_config, + ) + .unwrap(); + + let config = BabyBearPoseidon2::new(); + let machine = RiscvAir::machine(config); + let (pk, vk) = machine.setup(runtime.program.as_ref()); + let mut challenger = machine.config().challenger(); + machine.verify(&vk, &proof, &mut challenger).unwrap(); + + Ok(proof) +} + +#[allow(unused_variables)] +pub fn run_test_machine_with_prover>( + prover: &P, + records: Vec, + pk: P::DeviceProvingKey, + vk: StarkVerifyingKey, +) -> Result, MachineVerificationError> +where + A: MachineAir + + Air>> + + for<'a> Air> + + for<'a> Air, SC::Challenge>>, + A::Record: MachineRecord, + SC: StarkGenericConfig, + SC::Val: p3_field::PrimeField32, + SC::Challenger: Clone, + Com: Send + Sync, + PcsProverData: Send + Sync + Serialize + DeserializeOwned, + OpeningProof: Send + Sync, +{ + let mut challenger = prover.config().challenger(); + let prove_span = tracing::debug_span!("prove").entered(); + + #[cfg(feature = "debug")] + prover.machine().debug_constraints( + &prover.pk_to_host(&pk), + records.clone(), + &mut challenger.clone(), + ); + + let proof = prover.prove(&pk, records, &mut challenger, SP1CoreOpts::default()).unwrap(); + prove_span.exit(); + let nb_bytes = bincode::serialize(&proof).unwrap().len(); + + let mut challenger = prover.config().challenger(); + prover.machine().verify(&vk, &proof, &mut challenger)?; + + Ok(proof) +} + +#[allow(unused_variables)] +pub fn run_test_machine( + records: Vec, + machine: StarkMachine, + pk: StarkProvingKey, + vk: StarkVerifyingKey, +) -> Result, MachineVerificationError> +where + A: MachineAir + + for<'a> Air> + + Air>> + + for<'a> Air> + + for<'a> Air, SC::Challenge>>, + A::Record: MachineRecord, + SC: StarkGenericConfig, + SC::Val: p3_field::PrimeField32, + SC::Challenger: Clone, + Com: Send + Sync, + PcsProverData: Send + Sync + Serialize + DeserializeOwned, + OpeningProof: Send + Sync, +{ + let prover = CpuProver::new(machine); + run_test_machine_with_prover::>(&prover, records, pk, vk) +} diff --git a/crates/core/machine/src/utils/tracer.rs b/crates/core/machine/src/utils/tracer.rs deleted file mode 100644 index 88a4c7f6ac..0000000000 --- a/crates/core/machine/src/utils/tracer.rs +++ /dev/null @@ -1,23 +0,0 @@ -use std::env; - -use tracing::level_filters::LevelFilter; -use tracing_forest::ForestLayer; -use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Registry}; - -/// A tracer to benchmark the performance of the vm. -/// -/// Set the `RUST_TRACER` environment variable to be set to `info` or `debug`. -/// ! DEPRECATED: don't use this function, use `setup_logger` instead. 
-pub fn setup_tracer() { - let tracer_config = env::var("RUST_TRACER").unwrap_or_else(|_| "none".to_string()); - let mut env_filter = EnvFilter::builder() - .with_default_directive(LevelFilter::OFF.into()) - .with_default_directive("log::=off".parse().unwrap()) - .from_env_lossy(); - if tracer_config == "info" { - env_filter = env_filter.add_directive("sp1_core=info".parse().unwrap()); - } else if tracer_config == "debug" { - env_filter = env_filter.add_directive("sp1_core=debug".parse().unwrap()); - } - Registry::default().with(env_filter).with(ForestLayer::default()).init(); -} diff --git a/crates/core/machine/src/utils/uni_stark.rs b/crates/core/machine/src/utils/uni_stark.rs new file mode 100644 index 0000000000..c7d510c95d --- /dev/null +++ b/crates/core/machine/src/utils/uni_stark.rs @@ -0,0 +1,68 @@ +use p3_air::Air; +use p3_matrix::dense::RowMajorMatrix; +use p3_uni_stark::Proof; +use sp1_stark::{StarkGenericConfig, UniConfig}; + +#[cfg(debug_assertions)] +#[cfg(not(doctest))] +pub fn uni_stark_prove( + config: &SC, + air: &A, + challenger: &mut SC::Challenger, + trace: RowMajorMatrix, +) -> Proof> +where + SC: StarkGenericConfig, + A: Air> + + for<'a> Air>> + + for<'a> Air>, +{ + p3_uni_stark::prove(&UniConfig(config.clone()), air, challenger, trace, &vec![]) +} + +#[cfg(not(debug_assertions))] +pub fn uni_stark_prove( + config: &SC, + air: &A, + challenger: &mut SC::Challenger, + trace: RowMajorMatrix, +) -> Proof> +where + SC: StarkGenericConfig, + A: Air> + + for<'a> Air>>, +{ + p3_uni_stark::prove(&UniConfig(config.clone()), air, challenger, trace, &vec![]) +} + +#[cfg(debug_assertions)] +#[cfg(not(doctest))] +pub fn uni_stark_verify( + config: &SC, + air: &A, + challenger: &mut SC::Challenger, + proof: &Proof>, +) -> Result<(), p3_uni_stark::VerificationError> +where + SC: StarkGenericConfig, + A: Air> + + for<'a> Air>> + + for<'a> Air>, +{ + p3_uni_stark::verify(&UniConfig(config.clone()), air, challenger, proof, &vec![]) +} + +#[cfg(not(debug_assertions))] +pub fn uni_stark_verify( + config: &SC, + air: &A, + challenger: &mut SC::Challenger, + proof: &Proof>, +) -> Result<(), p3_uni_stark::VerificationError> +where + SC: StarkGenericConfig, + A: Air> + + for<'a> Air>>, +{ + p3_uni_stark::verify(&UniConfig(config.clone()), air, challenger, proof, &vec![]) +} diff --git a/crates/cuda/Cargo.toml b/crates/cuda/Cargo.toml index 851d6ee8c4..a94f88c11b 100644 --- a/crates/cuda/Cargo.toml +++ b/crates/cuda/Cargo.toml @@ -14,19 +14,20 @@ sp1-core-machine = { workspace = true } sp1-prover = { workspace = true } prost = "0.13" bincode = "1.3.3" -serde = { version = "1.0.197", features = ["derive"] } +serde = { workspace = true, features = ["derive"] } tokio = { version = "^1.38.0", features = ["full"] } -tracing = "0.1.40" +tracing = { workspace = true } twirp = { package = "twirp-rs", version = "0.13.0-succinct" } ctrlc = "3.4.4" +test-artifacts = { workspace = true, optional = true } [build-dependencies] prost-build = { version = "0.13", optional = true } twirp-build = { package = "twirp-build-rs", version = "0.13.0-succinct", optional = true } [dev-dependencies] -sp1-core-machine = { workspace = true, features = ["programs"] } +sp1-core-machine = { workspace = true } [features] default = [] -protobuf = ["dep:prost-build", "dep:twirp-build"] +protobuf = ["dep:prost-build", "dep:twirp-build", "dep:test-artifacts"] diff --git a/crates/cuda/proto/api.proto b/crates/cuda/proto/api.proto index c5e078676b..ec93419428 100644 --- a/crates/cuda/proto/api.proto +++ 
b/crates/cuda/proto/api.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package api; service ProverService { + rpc Setup(SetupRequest) returns (SetupResponse) {} rpc Ready(ReadyRequest) returns (ReadyResponse) {} rpc ProveCore(ProveCoreRequest) returns (ProveCoreResponse) {} rpc Compress(CompressRequest) returns (CompressResponse) {} @@ -16,6 +17,14 @@ message ReadyResponse { bool ready = 1; } +message SetupRequest { + bytes data = 1; +} + +message SetupResponse { + bytes result = 1; +} + message ProveCoreRequest { bytes data = 1; } diff --git a/crates/cuda/src/lib.rs b/crates/cuda/src/lib.rs index f63a65c744..b01aac664a 100644 --- a/crates/cuda/src/lib.rs +++ b/crates/cuda/src/lib.rs @@ -17,7 +17,7 @@ use reqwest::{Request, Response}; use serde::{Deserialize, Serialize}; use sp1_core_machine::{io::SP1Stdin, reduce::SP1ReduceProof, utils::SP1CoreProverError}; use sp1_prover::{ - types::SP1ProvingKey, InnerSC, OuterSC, SP1CoreProof, SP1RecursionProverError, SP1VerifyingKey, + InnerSC, OuterSC, SP1CoreProof, SP1ProvingKey, SP1RecursionProverError, SP1VerifyingKey, }; use tokio::task::block_in_place; use twirp::{ @@ -46,13 +46,28 @@ pub struct SP1CudaProver { cleaned_up: Arc, } +/// The payload for the [sp1_prover::SP1Prover::setup] method. +/// +/// We use this object to serialize and deserialize the payload from the client to the server. +#[derive(Serialize, Deserialize)] +pub struct SetupRequestPayload { + pub elf: Vec, +} + +/// The payload for the [sp1_prover::SP1Prover::setup] method response. +/// +/// We use this object to serialize and deserialize the payload from the server to the client. +#[derive(Serialize, Deserialize)] +pub struct SetupResponsePayload { + pub pk: SP1ProvingKey, + pub vk: SP1VerifyingKey, +} + /// The payload for the [sp1_prover::SP1Prover::prove_core] method. /// /// We use this object to serialize and deserialize the payload from the client to the server. #[derive(Serialize, Deserialize)] pub struct ProveCoreRequestPayload { - /// The proving key. - pub pk: SP1ProvingKey, /// The input stream. pub stdin: SP1Stdin, } @@ -91,7 +106,8 @@ impl SP1CudaProver { /// [SP1ProverClient] that can be used to communicate with the container. pub fn new() -> Result> { let container_name = "sp1-gpu"; - let image_name = "public.ecr.aws/succinct-labs/sp1-gpu:7e66232"; + let image_name = std::env::var("SP1_GPU_IMAGE") + .unwrap_or_else(|_| "jtguibas/sp1-gpu:v4.0.0-rc1".to_string()); let cleaned_up = Arc::new(AtomicBool::new(false)); let cleanup_name = container_name; @@ -103,7 +119,7 @@ impl SP1CudaProver { } // Pull the docker image if it's not present - if let Err(e) = Command::new("docker").args(["pull", image_name]).output() { + if let Err(e) = Command::new("docker").args(["pull", &image_name]).output() { return Err(format!("Failed to pull Docker image: {}. Please check your internet connection and Docker permissions.", e).into()); } @@ -121,7 +137,7 @@ impl SP1CudaProver { "all", "--name", container_name, - image_name, + &image_name, ]) .stdout(Stdio::piped()) .stderr(Stdio::piped()) @@ -229,17 +245,21 @@ impl SP1CudaProver { } } + /// Executes the [sp1_prover::SP1Prover::setup] method inside the container. 
+ pub fn setup(&self, elf: &[u8]) -> Result<(SP1ProvingKey, SP1VerifyingKey), Box> { + let payload = SetupRequestPayload { elf: elf.to_vec() }; + let request = + crate::proto::api::SetupRequest { data: bincode::serialize(&payload).unwrap() }; + let response = block_on(async { self.client.setup(request).await }).unwrap(); + let payload: SetupResponsePayload = bincode::deserialize(&response.result).unwrap(); + Ok((payload.pk, payload.vk)) + } + /// Executes the [sp1_prover::SP1Prover::prove_core] method inside the container. /// /// You will need at least 24GB of VRAM to run this method. - /// - /// **WARNING**: This is an experimental feature and may not work as expected. - pub fn prove_core( - &self, - pk: &SP1ProvingKey, - stdin: &SP1Stdin, - ) -> Result { - let payload = ProveCoreRequestPayload { pk: pk.clone(), stdin: stdin.clone() }; + pub fn prove_core(&self, stdin: &SP1Stdin) -> Result { + let payload = ProveCoreRequestPayload { stdin: stdin.clone() }; let request = crate::proto::api::ProveCoreRequest { data: bincode::serialize(&payload).unwrap() }; let response = block_on(async { self.client.prove_core(request).await }).unwrap(); @@ -250,8 +270,6 @@ impl SP1CudaProver { /// Executes the [sp1_prover::SP1Prover::compress] method inside the container. /// /// You will need at least 24GB of VRAM to run this method. - /// - /// **WARNING**: This is an experimental feature and may not work as expected. pub fn compress( &self, vk: &SP1VerifyingKey, @@ -269,9 +287,7 @@ impl SP1CudaProver { /// Executes the [sp1_prover::SP1Prover::shrink] method inside the container. /// - /// You will need at least 40GB of VRAM to run this method. - /// - /// **WARNING**: This is an experimental feature and may not work as expected. + /// You will need at least 24GB of VRAM to run this method. pub fn shrink( &self, reduced_proof: SP1ReduceProof, @@ -287,9 +303,7 @@ impl SP1CudaProver { /// Executes the [sp1_prover::SP1Prover::wrap_bn254] method inside the container. /// - /// You will need at least 40GB of VRAM to run this method. - /// - /// **WARNING**: This is an experimental feature and may not work as expected. + /// You will need at least 24GB of VRAM to run this method. 
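Taken together, the changes in this file move key generation into the GPU container: `setup(elf)` is called once on the server, and `prove_core` no longer takes a proving key. A usage sketch of the new client flow, mirroring the commented-out integration test further down (the ELF bytes, opts, and exact error conversions are elided or assumed):

    use sp1_core_machine::io::SP1Stdin;
    use sp1_cuda::SP1CudaProver;
    use sp1_prover::{components::DefaultProverComponents, SP1Prover};

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        // Guest ELF bytes; FIBONACCI_ELF from `test-artifacts` is one option.
        let elf: &[u8] = &[];

        // The CPU-side prover is still used for verification.
        let prover = SP1Prover::<DefaultProverComponents>::new();
        let client = SP1CudaProver::new()?;

        // New flow: the proving key is created and kept inside the container...
        let (_pk, vk) = client.setup(elf)?;
        // ...so `prove_core` only needs the stdin.
        let core_proof = client.prove_core(&SP1Stdin::new())?;
        prover.verify(&core_proof.proof, &vk).expect("core verification failed");

        // The recursion steps are unchanged apart from the pk plumbing.
        let compressed = client.compress(&vk, core_proof, vec![]).expect("compress failed");
        prover.verify_compressed(&compressed, &vk).expect("compress verification failed");
        Ok(())
    }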
pub fn wrap_bn254( &self, reduced_proof: SP1ReduceProof, @@ -363,77 +377,77 @@ impl Middleware for LoggingMiddleware { } } -#[cfg(feature = "protobuf")] -#[cfg(test)] -mod tests { - use sp1_core_machine::{ - reduce::SP1ReduceProof, - utils::{setup_logger, tests::FIBONACCI_ELF}, - }; - use sp1_prover::{components::DefaultProverComponents, InnerSC, SP1CoreProof, SP1Prover}; - use twirp::{url::Url, Client}; - - use crate::{ - proto::api::ProverServiceClient, CompressRequestPayload, ProveCoreRequestPayload, - SP1CudaProver, SP1Stdin, - }; - - #[test] - fn test_client() { - setup_logger(); - - let prover = SP1Prover::::new(); - let client = SP1CudaProver::new().expect("Failed to create SP1CudaProver"); - let (pk, vk) = prover.setup(FIBONACCI_ELF); - - println!("proving core"); - let proof = client.prove_core(&pk, &SP1Stdin::new()).unwrap(); - - println!("verifying core"); - prover.verify(&proof.proof, &vk).unwrap(); - - println!("proving compress"); - let proof = client.compress(&vk, proof, vec![]).unwrap(); - - println!("verifying compress"); - prover.verify_compressed(&proof, &vk).unwrap(); - - println!("proving shrink"); - let proof = client.shrink(proof).unwrap(); - - println!("verifying shrink"); - prover.verify_shrink(&proof, &vk).unwrap(); - - println!("proving wrap_bn254"); - let proof = client.wrap_bn254(proof).unwrap(); - - println!("verifying wrap_bn254"); - prover.verify_wrap_bn254(&proof, &vk).unwrap(); - } - - #[tokio::test] - async fn test_prove_core() { - let client = - Client::from_base_url(Url::parse("http://localhost:3000/twirp/").unwrap()).unwrap(); - - let prover = SP1Prover::::new(); - let (pk, vk) = prover.setup(FIBONACCI_ELF); - let payload = ProveCoreRequestPayload { pk, stdin: SP1Stdin::new() }; - let request = - crate::proto::api::ProveCoreRequest { data: bincode::serialize(&payload).unwrap() }; - let proof = client.prove_core(request).await.unwrap(); - let proof: SP1CoreProof = bincode::deserialize(&proof.result).unwrap(); - prover.verify(&proof.proof, &vk).unwrap(); - - tracing::info!("compress"); - let payload = CompressRequestPayload { vk: vk.clone(), proof, deferred_proofs: vec![] }; - let request = - crate::proto::api::CompressRequest { data: bincode::serialize(&payload).unwrap() }; - let compressed_proof = client.compress(request).await.unwrap(); - let compressed_proof: SP1ReduceProof = - bincode::deserialize(&compressed_proof.result).unwrap(); - - tracing::info!("verify compressed"); - prover.verify_compressed(&compressed_proof, &vk).unwrap(); - } -} +// #[cfg(feature = "protobuf")] +// #[cfg(test)] +// mod tests { +// use sp1_core_machine::{ +// reduce::SP1ReduceProof, +// utils::{setup_logger, tests::FIBONACCI_ELF}, +// }; +// use sp1_prover::{components::DefaultProverComponents, InnerSC, SP1CoreProof, SP1Prover}; +// use twirp::{url::Url, Client}; + +// use crate::{ +// proto::api::ProverServiceClient, CompressRequestPayload, ProveCoreRequestPayload, +// SP1CudaProver, SP1Stdin, +// }; + +// #[test] +// fn test_client() { +// setup_logger(); + +// let prover = SP1Prover::::new(); +// let client = SP1CudaProver::new().expect("Failed to create SP1CudaProver"); +// let (pk, vk) = prover.setup(FIBONACCI_ELF); + +// println!("proving core"); +// let proof = client.prove_core(&pk, &SP1Stdin::new()).unwrap(); + +// println!("verifying core"); +// prover.verify(&proof.proof, &vk).unwrap(); + +// println!("proving compress"); +// let proof = client.compress(&vk, proof, vec![]).unwrap(); + +// println!("verifying compress"); +// prover.verify_compressed(&proof, 
&vk).unwrap(); + +// println!("proving shrink"); +// let proof = client.shrink(proof).unwrap(); + +// println!("verifying shrink"); +// prover.verify_shrink(&proof, &vk).unwrap(); + +// println!("proving wrap_bn254"); +// let proof = client.wrap_bn254(proof).unwrap(); + +// println!("verifying wrap_bn254"); +// prover.verify_wrap_bn254(&proof, &vk).unwrap(); +// } + +// #[tokio::test] +// async fn test_prove_core() { +// let client = +// Client::from_base_url(Url::parse("http://localhost:3000/twirp/").unwrap()).unwrap(); + +// let prover = SP1Prover::::new(); +// let (pk, vk) = prover.setup(FIBONACCI_ELF); +// let payload = ProveCoreRequestPayload { pk, stdin: SP1Stdin::new() }; +// let request = +// crate::proto::api::ProveCoreRequest { data: bincode::serialize(&payload).unwrap() }; +// let proof = client.prove_core(request).await.unwrap(); +// let proof: SP1CoreProof = bincode::deserialize(&proof.result).unwrap(); +// prover.verify(&proof.proof, &vk).unwrap(); + +// tracing::info!("compress"); +// let payload = CompressRequestPayload { vk: vk.clone(), proof, deferred_proofs: vec![] }; +// let request = +// crate::proto::api::CompressRequest { data: bincode::serialize(&payload).unwrap() }; +// let compressed_proof = client.compress(request).await.unwrap(); +// let compressed_proof: SP1ReduceProof = +// bincode::deserialize(&compressed_proof.result).unwrap(); + +// tracing::info!("verify compressed"); +// prover.verify_compressed(&compressed_proof, &vk).unwrap(); +// } +// } diff --git a/crates/cuda/src/proto/api.rs b/crates/cuda/src/proto/api.rs index 54aea9d77d..840055f181 100644 --- a/crates/cuda/src/proto/api.rs +++ b/crates/cuda/src/proto/api.rs @@ -10,6 +10,18 @@ pub struct ReadyResponse { } #[derive(serde::Serialize, serde::Deserialize)] #[derive(Clone, PartialEq, ::prost::Message)] +pub struct SetupRequest { + #[prost(bytes = "vec", tag = "1")] + pub data: ::prost::alloc::vec::Vec, +} +#[derive(serde::Serialize, serde::Deserialize)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SetupResponse { + #[prost(bytes = "vec", tag = "1")] + pub result: ::prost::alloc::vec::Vec, +} +#[derive(serde::Serialize, serde::Deserialize)] +#[derive(Clone, PartialEq, ::prost::Message)] pub struct ProveCoreRequest { #[prost(bytes = "vec", tag = "1")] pub data: ::prost::alloc::vec::Vec, @@ -60,6 +72,11 @@ pub use twirp; pub const SERVICE_FQN: &str = "/api.ProverService"; #[twirp::async_trait::async_trait] pub trait ProverService { + async fn setup( + &self, + ctx: twirp::Context, + req: SetupRequest, + ) -> Result; async fn ready( &self, ctx: twirp::Context, @@ -91,6 +108,13 @@ impl ProverService for std::sync::Arc where T: ProverService + Sync + Send, { + async fn setup( + &self, + ctx: twirp::Context, + req: SetupRequest, + ) -> Result { + T::setup(&*self, ctx, req).await + } async fn ready( &self, ctx: twirp::Context, @@ -132,6 +156,12 @@ where T: ProverService + Clone + Send + Sync + 'static, { twirp::details::TwirpRouterBuilder::new(api) + .route( + "/Setup", + |api: T, ctx: twirp::Context, req: SetupRequest| async move { + api.setup(ctx, req).await + }, + ) .route( "/Ready", |api: T, ctx: twirp::Context, req: ReadyRequest| async move { @@ -166,6 +196,10 @@ where } #[twirp::async_trait::async_trait] pub trait ProverServiceClient: Send + Sync + std::fmt::Debug { + async fn setup( + &self, + req: SetupRequest, + ) -> Result; async fn ready( &self, req: ReadyRequest, @@ -186,6 +220,12 @@ pub trait ProverServiceClient: Send + Sync + std::fmt::Debug { } #[twirp::async_trait::async_trait] 
impl ProverServiceClient for twirp::client::Client { + async fn setup( + &self, + req: SetupRequest, + ) -> Result { + self.request("api.ProverService/Setup", req).await + } async fn ready( &self, req: ReadyRequest, diff --git a/crates/curves/Cargo.toml b/crates/curves/Cargo.toml index 5e6d2b8420..c656dbd02b 100644 --- a/crates/curves/Cargo.toml +++ b/crates/curves/Cargo.toml @@ -11,7 +11,7 @@ categories = { workspace = true } [dependencies] num = "0.4.3" -serde = { version = "1.0.207", features = ["derive"] } +serde = { workspace = true, features = ["derive"] } typenum = "1.17.0" curve25519-dalek = { version = "4.1.2" } k256 = { version = "0.13.3", features = ["expose-field"] } @@ -27,7 +27,7 @@ sp1-stark = { workspace = true } sp1-primitives = { workspace = true } p3-field = { workspace = true } -itertools = "0.13.0" +itertools = { workspace = true } rug = { version = "1.26.1", optional = true } cfg-if = "1.0.0" diff --git a/crates/eval/Cargo.toml b/crates/eval/Cargo.toml index e986c88112..2e9ebd42ca 100644 --- a/crates/eval/Cargo.toml +++ b/crates/eval/Cargo.toml @@ -16,10 +16,10 @@ sp1-stark = { workspace = true } anyhow = "1.0.83" clap = { version = "4.5.9", features = ["derive"] } -serde = "1.0.204" +serde = { workspace = true } bincode = "1.3.3" time = "0.3.26" slack-rust = { package = "slack-rust-rs", version = "0.0.1" } tokio = { version = "1.39.0", features = ["full"] } reqwest = { version = "0.12.4", features = ["json"] } -serde_json = "1.0.104" +serde_json = { workspace = true } diff --git a/crates/eval/src/lib.rs b/crates/eval/src/lib.rs index 06f85a0a68..7032d2f58a 100644 --- a/crates/eval/src/lib.rs +++ b/crates/eval/src/lib.rs @@ -3,8 +3,10 @@ use clap::{command, Parser}; use reqwest::Client; use serde::Serialize; use serde_json::json; -use slack_rust::chat::post_message::{post_message, PostMessageRequest}; -use slack_rust::http_client::default_client; +use slack_rust::{ + chat::post_message::{post_message, PostMessageRequest}, + http_client::default_client, +}; use sp1_prover::{components::SP1ProverComponents, utils::get_cycles, SP1Prover}; use sp1_sdk::{SP1Context, SP1Stdin}; use sp1_stark::SP1ProverOpts; @@ -19,7 +21,8 @@ mod program; #[derive(Parser, Clone)] #[command(about = "Evaluate the performance of SP1 on programs.")] struct EvalArgs { - /// The programs to evaluate, specified by name. If not specified, all programs will be evaluated. + /// The programs to evaluate, specified by name. If not specified, all programs will be + /// evaluated. 
#[arg(long, use_value_delimiter = true, value_delimiter = ',')] pub programs: Vec, @@ -169,14 +172,14 @@ fn run_evaluation( let cycles = get_cycles(elf, stdin); let prover = SP1Prover::::new(); - let (pk, vk) = prover.setup(elf); + let (_, pk_d, program, vk) = prover.setup(elf); let context = SP1Context::default(); let (_, exec_duration) = time_operation(|| prover.execute(elf, stdin, context.clone())); let (core_proof, core_duration) = - time_operation(|| prover.prove_core(&pk, stdin, opts, context).unwrap()); + time_operation(|| prover.prove_core(&pk_d, program, stdin, opts, context).unwrap()); let (_, compress_duration) = time_operation(|| prover.compress(&vk, core_proof, vec![], opts).unwrap()); diff --git a/crates/perf/Cargo.toml b/crates/perf/Cargo.toml index 93ae716949..38300783fe 100644 --- a/crates/perf/Cargo.toml +++ b/crates/perf/Cargo.toml @@ -16,6 +16,7 @@ sp1-sdk = { workspace = true } p3-baby-bear = { workspace = true } sp1-stark = { workspace = true } sp1-cuda = { workspace = true } +test-artifacts = { workspace = true } clap = { version = "4.5.9", features = ["derive"] } bincode = "1.3.3" diff --git a/crates/perf/src/main.rs b/crates/perf/src/main.rs index 493ab5aa0c..e493ce2c01 100644 --- a/crates/perf/src/main.rs +++ b/crates/perf/src/main.rs @@ -1,12 +1,16 @@ -use std::time::{Duration, Instant}; +use std::{ + env, + time::{Duration, Instant}, +}; -use clap::{command, Parser, ValueEnum}; -use sp1_core_executor::programs::tests::VERIFY_PROOF_ELF; +use clap::ValueEnum; +use clap::{command, Parser}; use sp1_cuda::SP1CudaProver; -use sp1_prover::components::DefaultProverComponents; use sp1_prover::HashableKey; +use sp1_prover::{components::DefaultProverComponents, ProverMode}; use sp1_sdk::{self, ProverClient, SP1Context, SP1Prover, SP1Stdin}; use sp1_stark::SP1ProverOpts; +use test_artifacts::VERIFY_PROOF_ELF; #[derive(Parser, Clone)] #[command(about = "Evaluate the performance of SP1 on programs.")] @@ -36,13 +40,6 @@ struct PerfResult { pub verify_wrap_duration: Duration, } -#[derive(Debug, Clone, ValueEnum, PartialEq, Eq)] -enum ProverMode { - Cpu, - Cuda, - Network, -} - #[derive(Debug, Clone, ValueEnum, PartialEq, Eq)] enum Stage { Execute, @@ -65,7 +62,7 @@ fn main() { let stdin: SP1Stdin = bincode::deserialize(&stdin).expect("failed to deserialize stdin"); let prover = SP1Prover::::new(); - let (pk, vk) = prover.setup(&elf); + let (pk, pk_d, program, vk) = prover.setup(&elf); let cycles = sp1_prover::utils::get_cycles(&elf, &stdin); let stage = args.stage; if stage == Stage::Execute { @@ -80,8 +77,9 @@ fn main() { let (_, execution_duration) = time_operation(|| prover.execute(&elf, &stdin, context.clone())); - let (core_proof, prove_core_duration) = - time_operation(|| prover.prove_core(&pk, &stdin, opts, context).unwrap()); + let (core_proof, prove_core_duration) = time_operation(|| { + prover.prove_core(&pk_d, program, &stdin, opts, context).unwrap() + }); let (_, verify_core_duration) = time_operation(|| prover.verify(&core_proof.proof, &vk)); @@ -106,7 +104,8 @@ fn main() { time_operation(|| prover.verify_wrap_bn254(&wrapped_bn254_proof, &vk)); // Generate a proof that verifies two deferred proofs from the proof above. 
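Both call sites above follow the same new prover convention: `setup` now also returns the device proving key and the `Program`, and `prove_core` consumes those instead of the host `SP1ProvingKey`. A condensed sketch of that calling pattern, taken directly from the signatures used in this patch (default opts/context, panics instead of error handling):

    use sp1_prover::{components::DefaultProverComponents, SP1Prover};
    use sp1_sdk::{SP1Context, SP1Stdin};
    use sp1_stark::SP1ProverOpts;

    fn prove_elf(elf: &[u8], stdin: &SP1Stdin) {
        let prover = SP1Prover::<DefaultProverComponents>::new();

        // `setup` now returns (host pk, device pk, program, vk).
        let (_pk, pk_d, program, vk) = prover.setup(elf);

        // `prove_core` takes the device key and the program directly.
        let core_proof = prover
            .prove_core(&pk_d, program, stdin, SP1ProverOpts::default(), SP1Context::default())
            .expect("core proving failed");

        prover.verify(&core_proof.proof, &vk).expect("core proof verification failed");
    }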
- let (pk_verify_proof, vk_verify_proof) = prover.setup(VERIFY_PROOF_ELF); + let (_, pk_verify_proof_d, pk_verify_program, vk_verify_proof) = + prover.setup(VERIFY_PROOF_ELF); let pv = core_proof.public_values.to_vec(); let mut stdin = SP1Stdin::new(); @@ -118,7 +117,9 @@ fn main() { let context = SP1Context::default(); let (core_proof, _) = time_operation(|| { - prover.prove_core(&pk_verify_proof, &stdin, opts, context).unwrap() + prover + .prove_core(&pk_verify_proof_d, pk_verify_program, &stdin, opts, context) + .unwrap() }); let deferred_proofs = stdin.proofs.into_iter().map(|(proof, _)| proof).collect::>(); @@ -151,8 +152,10 @@ fn main() { let (_, execution_duration) = time_operation(|| prover.execute(&elf, &stdin, context.clone())); + let (_, _) = time_operation(|| server.setup(&elf).unwrap()); + let (core_proof, prove_core_duration) = - time_operation(|| server.prove_core(&pk, &stdin).unwrap()); + time_operation(|| server.prove_core(&stdin).unwrap()); let (_, verify_core_duration) = time_operation(|| { prover.verify(&core_proof.proof, &vk).expect("Proof verification failed") @@ -193,7 +196,23 @@ fn main() { println!("{:?}", result); } ProverMode::Network => { - let prover = ProverClient::network(); + let private_key = env::var("SP1_PRIVATE_KEY") + .expect("SP1_PRIVATE_KEY must be set for remote proving"); + let rpc_url = env::var("PROVER_NETWORK_RPC").ok(); + let skip_simulation = + env::var("SKIP_SIMULATION").map(|val| val == "true").unwrap_or_default(); + + let mut prover_builder = ProverClient::builder().mode(ProverMode::Network); + + if let Some(rpc_url) = rpc_url { + prover_builder = prover_builder.rpc_url(rpc_url); + } + + if skip_simulation { + prover_builder = prover_builder.skip_simulation(); + } + + let prover = prover_builder.private_key(private_key).build(); let (_, _) = time_operation(|| prover.execute(&elf, stdin.clone())); let (proof, _) = @@ -205,5 +224,6 @@ fn main() { let (_, _) = time_operation(|| prover.verify(&proof, &vk)); } + ProverMode::Mock => unreachable!(), }; } diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 56ea6e4178..433ad1b5af 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -18,5 +18,5 @@ p3-field = { workspace = true } p3-baby-bear = { workspace = true } p3-poseidon2 = { workspace = true } p3-symmetric = { workspace = true } -serde = { version = "1.0.207", features = ["derive"] } +serde = { workspace = true, features = ["derive"] } sha2 = "0.10.8" diff --git a/crates/primitives/src/consts.rs b/crates/primitives/src/consts.rs index 396905274b..0e497bd01e 100644 --- a/crates/primitives/src/consts.rs +++ b/crates/primitives/src/consts.rs @@ -1,9 +1,15 @@ /// The maximum size of the memory in bytes. pub const MAXIMUM_MEMORY_SIZE: u32 = u32::MAX; +/// The number of bits in a byte. +pub const BYTE_SIZE: usize = 8; + /// The size of a word in bytes. pub const WORD_SIZE: usize = 4; +/// The number of bytes necessary to represent a 64-bit integer. +pub const LONG_WORD_SIZE: usize = 2 * WORD_SIZE; + /// Converts a slice of words to a byte vector in little endian. 
pub fn words_to_bytes_le_vec(words: &[u32]) -> Vec { words.iter().flat_map(|word| word.to_le_bytes().to_vec()).collect::>() diff --git a/crates/prover/Cargo.toml b/crates/prover/Cargo.toml index ee27d7df69..c2d8583e20 100644 --- a/crates/prover/Cargo.toml +++ b/crates/prover/Cargo.toml @@ -26,11 +26,12 @@ p3-baby-bear = { workspace = true } p3-bn254-fr = { workspace = true } p3-commit = { workspace = true } bincode = "1.3.3" -serde = { version = "1.0", features = ["derive", "rc"] } -itertools = "0.13.0" -tracing = "0.1.40" -tracing-subscriber = "0.3.18" -serde_json = "1.0.121" +serde = { workspace = true, features = ["derive", "rc"] } +itertools = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } +tracing-appender = "0.2.3" +serde_json = { workspace = true } clap = { version = "4.5.9", features = ["derive", "env"] } anyhow = "1.0.83" dirs = "5.0.1" @@ -39,6 +40,10 @@ num-bigint = "0.4.6" thiserror = "1.0.63" lru = "0.12.4" eyre = "0.6.12" +test-artifacts = { workspace = true, optional = true } + +[dev-dependencies] +test-artifacts = { workspace = true } [[bin]] name = "build_plonk_bn254" @@ -66,5 +71,5 @@ path = "scripts/find_minimal_large_recursion_shape.rs" [features] native-gnark = ["sp1-recursion-gnark-ffi/native"] -export-tests = [] +export-tests = ["dep:test-artifacts"] debug = ["sp1-core-machine/debug"] diff --git a/crates/prover/scripts/e2e.rs b/crates/prover/scripts/e2e.rs index 535e846a9e..9d08502e53 100644 --- a/crates/prover/scripts/e2e.rs +++ b/crates/prover/scripts/e2e.rs @@ -116,4 +116,8 @@ // println!("groth16 proof: {:?}", // String::from_utf8(hex::encode(proof.encoded_proof)).unwrap()); } -pub fn main() {} +use sp1_prover::{components::DefaultProverComponents, SP1Prover}; + +pub fn main() { + let prover = SP1Prover::::new(); +} diff --git a/crates/prover/scripts/fibonacci_groth16.rs b/crates/prover/scripts/fibonacci_groth16.rs deleted file mode 100644 index 2082b0587a..0000000000 --- a/crates/prover/scripts/fibonacci_groth16.rs +++ /dev/null @@ -1,77 +0,0 @@ -//! Tests end-to-end performance of wrapping a recursion proof to PLONK. - -use std::time::Instant; - -use itertools::iproduct; -use sp1_core_machine::{ - io::SP1Stdin, - utils::{SP1ProverOpts, SP1ProverOpts}, -}; -use sp1_prover::SP1Prover; -use tracing_subscriber::EnvFilter; -use tracing_subscriber::{fmt::format::FmtSpan, util::SubscriberInitExt}; - -fn main() { - // Setup tracer. - let default_filter = "off"; - let log_appender = tracing_appender::rolling::never("scripts/results", "fibonacci_groth16.log"); - let env_filter = EnvFilter::try_from_default_env() - .unwrap_or_else(|_| EnvFilter::new(default_filter)) - .add_directive("p3_keccak_air=off".parse().unwrap()) - .add_directive("p3_fri=off".parse().unwrap()) - .add_directive("p3_challenger=off".parse().unwrap()) - .add_directive("p3_dft=off".parse().unwrap()) - .add_directive("sp1_core=off".parse().unwrap()); - tracing_subscriber::fmt::Subscriber::builder() - .with_ansi(false) - .with_file(false) - .with_target(false) - .with_thread_names(false) - .with_env_filter(env_filter) - .with_span_events(FmtSpan::CLOSE) - .with_writer(log_appender) - .finish() - .init(); - - // Setup environment variables. - std::env::set_var("RECONSTRUCT_COMMITMENTS", "false"); - - // Initialize prover. - let prover = SP1Prover::new(); - - // Setup sweep. 
- let iterations = [480000u32]; - let shard_sizes = [1 << 22]; - let batch_sizes = [2]; - let elf = include_bytes!("../../examples/fibonacci/program/elf/riscv32im-succinct-zkvm-elf"); - let (pk, vk) = prover.setup(elf); - - for (shard_size, iterations, batch_size) in iproduct!(shard_sizes, iterations, batch_sizes) { - tracing::info!( - "running: shard_size={}, iterations={}, batch_size={}", - shard_size, - iterations, - batch_size - ); - std::env::set_var("SHARD_SIZE", shard_size.to_string()); - - tracing::info!("proving leaves"); - let stdin = SP1Stdin { - buffer: vec![bincode::serialize::(&iterations).unwrap()], - ptr: 0, - proofs: vec![], - }; - let leaf_proving_start = Instant::now(); - let proof = prover - .prove_core(&pk, &stdin, SP1ProverOpts::default(), SP1Context::default()) - .unwrap(); - let leaf_proving_duration = leaf_proving_start.elapsed().as_secs_f64(); - tracing::info!("leaf_proving_duration={}", leaf_proving_duration); - - tracing::info!("proving inner"); - let recursion_proving_start = Instant::now(); - let _ = prover.compress(&vk, proof, vec![]); - let recursion_proving_duration = recursion_proving_start.elapsed().as_secs_f64(); - tracing::info!("recursion_proving_duration={}", recursion_proving_duration); - } -} diff --git a/crates/prover/scripts/fibonacci_sweep.rs b/crates/prover/scripts/fibonacci_sweep.rs deleted file mode 100644 index ca9eb93ce7..0000000000 --- a/crates/prover/scripts/fibonacci_sweep.rs +++ /dev/null @@ -1,90 +0,0 @@ -//! Sweeps end-to-end prover performance across a wide range of parameters for Fibonacci. - -use std::{fs::File, io::BufWriter, io::Write, time::Instant}; - -use itertools::iproduct; -use sp1_core_machine::{ - io::SP1Stdin, - utils::{SP1ProverOpts, SP1ProverOpts}, -}; -use sp1_prover::SP1Prover; -use tracing_subscriber::EnvFilter; -use tracing_subscriber::{fmt::format::FmtSpan, util::SubscriberInitExt}; - -fn main() { - // Setup tracer. - let default_filter = "off"; - let log_appender = tracing_appender::rolling::never("scripts/results", "fibonacci_sweep.log"); - let env_filter = EnvFilter::try_from_default_env() - .unwrap_or_else(|_| EnvFilter::new(default_filter)) - .add_directive("p3_keccak_air=off".parse().unwrap()) - .add_directive("p3_fri=off".parse().unwrap()) - .add_directive("p3_challenger=off".parse().unwrap()) - .add_directive("p3_dft=off".parse().unwrap()) - .add_directive("sp1_core=off".parse().unwrap()); - tracing_subscriber::fmt::Subscriber::builder() - .with_ansi(false) - .with_file(false) - .with_target(false) - .with_thread_names(false) - .with_env_filter(env_filter) - .with_span_events(FmtSpan::CLOSE) - .with_writer(log_appender) - .finish() - .init(); - - // Setup environment variables. - std::env::set_var("RECONSTRUCT_COMMITMENTS", "false"); - - // Initialize prover. - let prover = SP1Prover::new(); - - // Setup sweep. 
- let iterations = [480000u32]; - let shard_sizes = [1 << 19, 1 << 20, 1 << 21, 1 << 22]; - let batch_sizes = [2, 3, 4]; - let elf = include_bytes!("../../examples/fibonacci/program/elf/riscv32im-succinct-zkvm-elf"); - let (pk, vk) = prover.setup(elf); - - let mut lines = vec![ - "iterations,shard_size,batch_size,leaf_proving_duration,recursion_proving_duration" - .to_string(), - ]; - for (shard_size, iterations, batch_size) in iproduct!(shard_sizes, iterations, batch_sizes) { - tracing::info!( - "running: shard_size={}, iterations={}, batch_size={}", - shard_size, - iterations, - batch_size - ); - std::env::set_var("SHARD_SIZE", shard_size.to_string()); - - let stdin = SP1Stdin { - buffer: vec![bincode::serialize::(&iterations).unwrap()], - ptr: 0, - proofs: vec![], - }; - let leaf_proving_start = Instant::now(); - let proof = prover - .prove_core(&pk, &stdin, SP1ProverOpts::default(), SP1Context::default()) - .unwrap(); - let leaf_proving_duration = leaf_proving_start.elapsed().as_secs_f64(); - - let recursion_proving_start = Instant::now(); - let _ = prover.compress(&vk, proof, vec![]); - let recursion_proving_duration = recursion_proving_start.elapsed().as_secs_f64(); - - lines.push(format!( - "{},{},{},{},{}", - iterations, shard_size, batch_size, leaf_proving_duration, recursion_proving_duration - )); - } - - let file = File::create("scripts/results/fibonacci_sweep.csv").unwrap(); - let mut writer = BufWriter::new(file); - for line in lines.clone() { - writeln!(writer, "{}", line).unwrap(); - } - - println!("{:#?}", lines); -} diff --git a/crates/prover/scripts/find_minimal_large_recursion_shape.rs b/crates/prover/scripts/find_minimal_large_recursion_shape.rs index 7fc8aa0428..1c15f5e512 100644 --- a/crates/prover/scripts/find_minimal_large_recursion_shape.rs +++ b/crates/prover/scripts/find_minimal_large_recursion_shape.rs @@ -43,12 +43,14 @@ fn main() { prover.recursion_shape_config.as_ref().expect("recursion shape config not found"); // Create the maximal shape from all of the shapes in recursion_shape_config, then add 2 to - // all the log-heights of that shape. This is the starting candidate for the "minimal large shape". + // all the log-heights of that shape. This is the starting candidate for the "minimal large + // shape". let candidate = recursion_shape_config.union_config_with_extra_room().first().unwrap().clone(); prover.recursion_shape_config = Some(RecursionShapeConfig::from_hash_map(&candidate)); - // Check that this candidate is big enough for all core shapes, including those with precompiles. + // Check that this candidate is big enough for all core shapes, including those with + // precompiles. assert!(check_shapes(reduce_batch_size, false, num_compiler_workers, &prover,)); let mut answer = candidate.clone(); @@ -91,8 +93,8 @@ fn main() { // Repeat this process to tune the shrink shape. let mut shrink_shape = ShrinkAir::::shrink_shape().clone_into_hash_map(); - // First, check that the current shrink shape is compatible with the compress shape choice arising - // from the tuning process above. + // First, check that the current shrink shape is compatible with the compress shape choice + // arising from the tuning process above. 
assert!({ prover.recursion_shape_config = Some(RecursionShapeConfig::from_hash_map(&answer)); catch_unwind(AssertUnwindSafe(|| { diff --git a/crates/prover/scripts/tendermint_sweep.rs b/crates/prover/scripts/tendermint_sweep.rs deleted file mode 100644 index 467d3b2f8d..0000000000 --- a/crates/prover/scripts/tendermint_sweep.rs +++ /dev/null @@ -1,90 +0,0 @@ -//! Sweeps end-to-end prover performance across a wide range of parameters for Tendermint. - -use std::{fs::File, io::BufWriter, io::Write, time::Instant}; - -use itertools::iproduct; -use sp1_core_machine::{ - io::SP1Stdin, - utils::{SP1ProverOpts, SP1ProverOpts}, -}; -use sp1_prover::SP1Prover; -use tracing_subscriber::EnvFilter; -use tracing_subscriber::{fmt::format::FmtSpan, util::SubscriberInitExt}; - -fn main() { - // Setup tracer. - let default_filter = "off"; - let log_appender = tracing_appender::rolling::never("scripts/results", "tendermint_sweep.log"); - let env_filter = EnvFilter::try_from_default_env() - .unwrap_or_else(|_| EnvFilter::new(default_filter)) - .add_directive("p3_keccak_air=off".parse().unwrap()) - .add_directive("p3_fri=off".parse().unwrap()) - .add_directive("p3_challenger=off".parse().unwrap()) - .add_directive("p3_dft=off".parse().unwrap()) - .add_directive("sp1_core=off".parse().unwrap()); - tracing_subscriber::fmt::Subscriber::builder() - .with_ansi(false) - .with_file(false) - .with_target(false) - .with_thread_names(false) - .with_env_filter(env_filter) - .with_span_events(FmtSpan::CLOSE) - .with_writer(log_appender) - .finish() - .init(); - - // Setup environment variables. - std::env::set_var("RECONSTRUCT_COMMITMENTS", "false"); - - // Initialize prover. - let prover = SP1Prover::new(); - - // Setup sweep. - let iterations = [480000u32]; - let shard_sizes = [1 << 19, 1 << 20, 1 << 21, 1 << 22]; - let batch_sizes = [2]; - let elf = include_bytes!("../../tests/tendermint-benchmark/elf/riscv32im-succinct-zkvm-elf"); - let (pk, vk) = prover.setup(elf); - - let mut lines = vec![ - "iterations,shard_size,batch_size,leaf_proving_duration,recursion_proving_duration" - .to_string(), - ]; - for (shard_size, iterations, batch_size) in iproduct!(shard_sizes, iterations, batch_sizes) { - tracing::info!( - "running: shard_size={}, iterations={}, batch_size={}", - shard_size, - iterations, - batch_size - ); - std::env::set_var("SHARD_SIZE", shard_size.to_string()); - - let stdin = SP1Stdin { - buffer: vec![bincode::serialize::(&iterations).unwrap()], - ptr: 0, - proofs: vec![], - }; - let leaf_proving_start = Instant::now(); - let proof = prover - .prove_core(&pk, &stdin, SP1ProverOpts::default(), SP1Context::default()) - .unwrap(); - let leaf_proving_duration = leaf_proving_start.elapsed().as_secs_f64(); - - let recursion_proving_start = Instant::now(); - let _ = prover.compress(&vk, proof, vec![]); - let recursion_proving_duration = recursion_proving_start.elapsed().as_secs_f64(); - - lines.push(format!( - "{},{},{},{},{}", - iterations, shard_size, batch_size, leaf_proving_duration, recursion_proving_duration - )); - } - - let file = File::create("scripts/results/tendermint_sweep.csv").unwrap(); - let mut writer = BufWriter::new(file); - for line in lines.clone() { - writeln!(writer, "{}", line).unwrap(); - } - - println!("{:#?}", lines); -} diff --git a/crates/prover/src/build.rs b/crates/prover/src/build.rs index 7b5feb1118..9466f2bf26 100644 --- a/crates/prover/src/build.rs +++ b/crates/prover/src/build.rs @@ -157,12 +157,12 @@ pub fn dummy_proof() -> (StarkVerifyingKey, ShardProof) { let context = 
SP1Context::default(); tracing::info!("setup elf"); - let (pk, vk) = prover.setup(elf); + let (_, pk_d, program, vk) = prover.setup(elf); tracing::info!("prove core"); let mut stdin = SP1Stdin::new(); stdin.write(&500u32); - let core_proof = prover.prove_core(&pk, &stdin, opts, context).unwrap(); + let core_proof = prover.prove_core(&pk_d, program, &stdin, opts, context).unwrap(); tracing::info!("compress"); let compressed_proof = prover.compress(&vk, core_proof, vec![], opts).unwrap(); diff --git a/crates/prover/src/lib.rs b/crates/prover/src/lib.rs index 54ec6bd1f2..fb1dd412b1 100644 --- a/crates/prover/src/lib.rs +++ b/crates/prover/src/lib.rs @@ -26,15 +26,15 @@ use std::{ path::Path, sync::{ atomic::{AtomicUsize, Ordering}, - mpsc::sync_channel, + mpsc::{channel, sync_channel}, Arc, Mutex, OnceLock, }, thread, }; +use crate::shapes::SP1CompressProgramShape; use lru::LruCache; use p3_baby_bear::BabyBear; -use p3_challenger::CanObserve; use p3_field::{AbstractField, PrimeField, PrimeField32}; use p3_matrix::dense::RowMajorMatrix; use shapes::SP1ProofShape; @@ -74,12 +74,11 @@ use sp1_recursion_core::{ }; pub use sp1_recursion_gnark_ffi::proof::{Groth16Bn254Proof, PlonkBn254Proof}; use sp1_recursion_gnark_ffi::{groth16_bn254::Groth16Bn254Prover, plonk_bn254::PlonkBn254Prover}; -use sp1_stark::{air::InteractionScope, MachineProvingKey, ProofShape}; use sp1_stark::{ - air::PublicValues, baby_bear_poseidon2::BabyBearPoseidon2, Challenge, Challenger, - MachineProver, SP1CoreOpts, SP1ProverOpts, ShardProof, StarkGenericConfig, StarkVerifyingKey, - Val, Word, DIGEST_SIZE, + baby_bear_poseidon2::BabyBearPoseidon2, Challenge, MachineProver, SP1CoreOpts, SP1ProverOpts, + ShardProof, StarkGenericConfig, StarkVerifyingKey, Val, Word, DIGEST_SIZE, }; +use sp1_stark::{MachineProvingKey, ProofShape}; use tracing::instrument; pub use types::*; @@ -98,6 +97,11 @@ pub type InnerSC = BabyBearPoseidon2; /// The configuration for the outer prover. pub type OuterSC = BabyBearPoseidon2Outer; +pub type DeviceProvingKey = <::CoreProver as MachineProver< + BabyBearPoseidon2, + RiscvAir, +>>::DeviceProvingKey; + const COMPRESS_DEGREE: usize = 3; const SHRINK_DEGREE: usize = 3; const WRAP_DEGREE: usize = 9; @@ -105,14 +109,6 @@ const WRAP_DEGREE: usize = 9; const CORE_CACHE_SIZE: usize = 5; pub const REDUCE_BATCH_SIZE: usize = 2; -// TODO: FIX -// -// const SHAPES_URL_PREFIX: &str = "https://sp1-circuits.s3.us-east-2.amazonaws.com/shapes"; -// const SHAPES_VERSION: &str = "146079e0e"; -// lazy_static! { -// static ref SHAPES_INIT: Once = Once::new(); -// } - pub type CompressAir = RecursionAir; pub type ShrinkAir = RecursionAir; pub type WrapAir = RecursionAir; @@ -211,7 +207,6 @@ impl SP1Prover { let vk_verification = env::var("VERIFY_VK").map(|v| v.eq_ignore_ascii_case("true")).unwrap_or(false); - tracing::info!("vk verification: {}", vk_verification); // Read the shapes from the shapes directory and deserialize them into memory. @@ -265,13 +260,12 @@ impl SP1Prover { } } - /// Fully initializes the programs, proving keys, and verifying keys that are normally - /// lazily initialized. TODO: remove this. - pub fn initialize(&mut self) {} - /// Creates a proving key and a verifying key for a given RISC-V ELF. 
#[instrument(name = "setup", level = "debug", skip_all)] - pub fn setup(&self, elf: &[u8]) -> (SP1ProvingKey, SP1VerifyingKey) { + pub fn setup( + &self, + elf: &[u8], + ) -> (SP1ProvingKey, DeviceProvingKey, Program, SP1VerifyingKey) { let program = self.get_program(elf).unwrap(); let (pk, vk) = self.core_prover.setup(&program); let vk = SP1VerifyingKey { vk }; @@ -280,7 +274,8 @@ impl SP1Prover { elf: elf.to_vec(), vk: vk.clone(), }; - (pk, vk) + let pk_d = self.core_prover.pk_to_device(&pk.pk); + (pk, pk_d, program, vk) } /// Get a program with an allowed preprocessed shape. @@ -317,312 +312,71 @@ impl SP1Prover { #[instrument(name = "prove_core", level = "info", skip_all)] pub fn prove_core<'a>( &'a self, - pk: &SP1ProvingKey, + pk_d: &<::CoreProver as MachineProver< + BabyBearPoseidon2, + RiscvAir, + >>::DeviceProvingKey, + program: Program, stdin: &SP1Stdin, opts: SP1ProverOpts, mut context: SP1Context<'a>, ) -> Result { context.subproof_verifier.replace(Arc::new(self)); - let program = self.get_program(&pk.elf).unwrap(); - let pk = self.core_prover.pk_to_device(&pk.pk); - let (proof, public_values_stream, cycles) = - sp1_core_machine::utils::prove_with_context::<_, C::CoreProver>( - &self.core_prover, - &pk, - program, - stdin, - opts.core_opts, - context, - self.core_shape_config.as_ref(), - )?; - Self::check_for_high_cycles(cycles); - let public_values = SP1PublicValues::from(&public_values_stream); - Ok(SP1CoreProof { - proof: SP1CoreProofData(proof.shard_proofs), - stdin: stdin.clone(), - public_values, - cycles, - }) - } - - pub fn recursion_program( - &self, - input: &SP1RecursionWitnessValues, - ) -> Arc> { - let mut cache = self.recursion_programs.lock().unwrap_or_else(|e| e.into_inner()); - cache - .get_or_insert(input.shape(), || { - let misses = self.recursion_cache_misses.fetch_add(1, Ordering::Relaxed); - tracing::debug!("core cache miss, misses: {}", misses); - // Get the operations. - let builder_span = tracing::debug_span!("build recursion program").entered(); - let mut builder = Builder::::default(); - - let input = input.read(&mut builder); - SP1RecursiveVerifier::verify(&mut builder, self.core_prover.machine(), input); - let operations = builder.into_operations(); - builder_span.exit(); - - // Compile the program. - let compiler_span = tracing::debug_span!("compile recursion program").entered(); - let mut compiler = AsmCompiler::::default(); - let mut program = compiler.compile(operations); - if let Some(recursion_shape_config) = &self.recursion_shape_config { - recursion_shape_config.fix_shape(&mut program); - } - let program = Arc::new(program); - compiler_span.exit(); - program - }) - .clone() - } - pub fn compress_program( - &self, - shape_tuning: bool, - input: &SP1CompressWithVKeyWitnessValues, - ) -> Arc> { - if self.recursion_shape_config.is_some() && !shape_tuning { - self.compress_programs.get(&input.shape()).map(Clone::clone).unwrap() - } else { - // Get the operations. - Arc::new(compress_program_from_input::( - self.recursion_shape_config.as_ref(), - &self.compress_prover, - self.vk_verification, - input, - )) - } - } - - pub fn shrink_program( - &self, - shrink_shape: RecursionShape, - input: &SP1CompressWithVKeyWitnessValues, - ) -> Arc> { - // Get the operations. - let builder_span = tracing::debug_span!("build shrink program").entered(); - let mut builder = Builder::::default(); - let input = input.read(&mut builder); - // Verify the proof. 
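The rewritten `prove_core` above overlaps core proving with recursion-program compilation: a scoped worker thread streams shard proofs and shapes out of the core prover, while the parent thread compiles programs for the first few shapes it receives. A stripped-down, std-only sketch of that overlap pattern, with placeholder `Shape`/`Proof` types rather than the SP1 ones:

    use std::sync::mpsc::channel;
    use std::thread;

    #[derive(Debug, Clone)]
    struct Shape(u32);
    #[derive(Debug)]
    struct Proof(u32);

    fn compile_program_for(shape: &Shape) {
        // Stand-in for building and caching the recursion program for this shape.
        println!("compiled recursion program for shape {:?}", shape);
    }

    fn main() {
        thread::scope(|s| {
            let (proof_tx, proof_rx) = channel::<Proof>();
            let (shape_tx, shape_rx) = channel::<(Shape, bool)>();

            // Worker: "prove the core" and stream out proofs and shapes as shards finish.
            let handle = s.spawn(move || {
                for i in 0..5u32 {
                    shape_tx.send((Shape(i), i == 4)).unwrap();
                    proof_tx.send(Proof(i)).unwrap();
                }
            });

            // Parent: eagerly compile programs for the first few shapes while proving continues.
            for _ in 0..3 {
                if let Ok((shape, _is_complete)) = shape_rx.recv() {
                    compile_program_for(&shape);
                }
            }

            handle.join().unwrap();
            let proofs: Vec<Proof> = proof_rx.iter().collect();
            println!("collected {} shard proofs", proofs.len());
        });
    }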
- SP1CompressRootVerifierWithVKey::verify( - &mut builder, - self.compress_prover.machine(), - input, - self.vk_verification, - PublicValuesOutputDigest::Reduce, - ); - let operations = builder.into_operations(); - builder_span.exit(); - - // Compile the program. - let compiler_span = tracing::debug_span!("compile shrink program").entered(); - let mut compiler = AsmCompiler::::default(); - let mut program = compiler.compile(operations); - - program.shape = Some(shrink_shape); - let program = Arc::new(program); - compiler_span.exit(); - program - } - - pub fn wrap_program(&self) -> Arc> { - self.wrap_program - .get_or_init(|| { - // Get the operations. - let builder_span = tracing::debug_span!("build compress program").entered(); - let mut builder = Builder::::default(); - - let shrink_shape: ProofShape = ShrinkAir::::shrink_shape().into(); - let input_shape = SP1CompressShape::from(vec![shrink_shape]); - let shape = SP1CompressWithVkeyShape { - compress_shape: input_shape, - merkle_tree_height: self.vk_merkle_tree.height, - }; - let dummy_input = - SP1CompressWithVKeyWitnessValues::dummy(self.shrink_prover.machine(), &shape); + // Launch two threads to simultaneously prove the core and compile the first few + // recursion programs in parallel. + let span = tracing::Span::current().clone(); + std::thread::scope(|s| { + let _span = span.enter(); + let (proof_tx, proof_rx) = channel(); + let (shape_tx, shape_rx) = channel(); + + let span = tracing::Span::current().clone(); + let handle = s.spawn(move || { + let _span = span.enter(); + + // Copy the proving key to the device. + let pk = pk_d; + + // Prove the core and stream the proofs and shapes. + sp1_core_machine::utils::prove_core_stream::<_, C::CoreProver>( + &self.core_prover, + pk, + program, + stdin, + opts.core_opts, + context, + self.core_shape_config.as_ref(), + proof_tx, + shape_tx, + ) + }); - let input = dummy_input.read(&mut builder); + // Receive the first few shapes and comile the recursion programs. + for _ in 0..3 { + if let Ok((shape, is_complete)) = shape_rx.recv() { + let compress_shape = SP1CompressProgramShape::Recursion(SP1RecursionShape { + proof_shapes: vec![shape], + is_complete, + }); - // Attest that the merkle tree root is correct. - let root = input.merkle_var.root; - for (val, expected) in root.iter().zip(self.vk_root.iter()) { - builder.assert_felt_eq(*val, *expected); + // Insert the program into the cache. + self.program_from_shape(false, compress_shape, None); } - // Verify the proof. - SP1CompressRootVerifierWithVKey::verify( - &mut builder, - self.shrink_prover.machine(), - input, - self.vk_verification, - PublicValuesOutputDigest::Root, - ); - - let operations = builder.into_operations(); - builder_span.exit(); - - // Compile the program. - let compiler_span = tracing::debug_span!("compile compress program").entered(); - let mut compiler = AsmCompiler::::default(); - let program = Arc::new(compiler.compile(operations)); - compiler_span.exit(); - program - }) - .clone() - } - - pub fn deferred_program( - &self, - input: &SP1DeferredWitnessValues, - ) -> Arc> { - // Compile the program. - - // Get the operations. - let operations_span = - tracing::debug_span!("get operations for the deferred program").entered(); - let mut builder = Builder::::default(); - let input_read_span = tracing::debug_span!("Read input values").entered(); - let input = input.read(&mut builder); - input_read_span.exit(); - let verify_span = tracing::debug_span!("Verify deferred program").entered(); - - // Verify the proof. 
- SP1DeferredVerifier::verify( - &mut builder, - self.compress_prover.machine(), - input, - self.vk_verification, - ); - verify_span.exit(); - let operations = builder.into_operations(); - operations_span.exit(); - - let compiler_span = tracing::debug_span!("compile deferred program").entered(); - let mut compiler = AsmCompiler::::default(); - let mut program = compiler.compile(operations); - if let Some(recursion_shape_config) = &self.recursion_shape_config { - recursion_shape_config.fix_shape(&mut program); - } - let program = Arc::new(program); - compiler_span.exit(); - program - } - - pub fn get_recursion_core_inputs( - &self, - vk: &StarkVerifyingKey, - leaf_challenger: &Challenger, - shard_proofs: &[ShardProof], - batch_size: usize, - is_complete: bool, - ) -> Vec> { - let mut core_inputs = Vec::new(); - let mut reconstruct_challenger = self.core_prover.config().challenger(); - vk.observe_into(&mut reconstruct_challenger); - - // Prepare the inputs for the recursion programs. - for (batch_idx, batch) in shard_proofs.chunks(batch_size).enumerate() { - let proofs = batch.to_vec(); - - core_inputs.push(SP1RecursionWitnessValues { - vk: vk.clone(), - shard_proofs: proofs.clone(), - leaf_challenger: leaf_challenger.clone(), - initial_reconstruct_challenger: reconstruct_challenger.clone(), - is_complete, - is_first_shard: batch_idx == 0, - vk_root: self.vk_root, - }); - assert_eq!(reconstruct_challenger.input_buffer.len(), 0); - assert_eq!(reconstruct_challenger.sponge_state.len(), 16); - assert_eq!(reconstruct_challenger.output_buffer.len(), 16); - - for proof in batch.iter() { - reconstruct_challenger.observe(proof.commitment.global_main_commit); - reconstruct_challenger - .observe_slice(&proof.public_values[0..self.core_prover.num_pv_elts()]); } - } - - // Check that the leaf challenger is the same as the reconstruct challenger. - assert_eq!(reconstruct_challenger.sponge_state, leaf_challenger.sponge_state); - assert_eq!(reconstruct_challenger.input_buffer, leaf_challenger.input_buffer); - assert_eq!(reconstruct_challenger.output_buffer, leaf_challenger.output_buffer); - core_inputs - } - pub fn get_recursion_deferred_inputs<'a>( - &'a self, - vk: &'a StarkVerifyingKey, - leaf_challenger: &'a Challenger, - last_proof_pv: &PublicValues, BabyBear>, - deferred_proofs: &[SP1ReduceProof], - batch_size: usize, - ) -> Vec> { - // Prepare the inputs for the deferred proofs recursive verification. 
- let mut deferred_digest = [Val::::zero(); DIGEST_SIZE]; - let mut deferred_inputs = Vec::new(); - - for batch in deferred_proofs.chunks(batch_size) { - let vks_and_proofs = - batch.iter().cloned().map(|proof| (proof.vk, proof.proof)).collect::>(); - - let input = SP1CompressWitnessValues { vks_and_proofs, is_complete: true }; - let input = self.make_merkle_proofs(input); - let SP1CompressWithVKeyWitnessValues { compress_val, merkle_val } = input; - - deferred_inputs.push(SP1DeferredWitnessValues { - vks_and_proofs: compress_val.vks_and_proofs, - vk_merkle_data: merkle_val, - start_reconstruct_deferred_digest: deferred_digest, - is_complete: false, - sp1_vk_digest: vk.hash_babybear(), - end_pc: Val::::zero(), - end_shard: last_proof_pv.shard + BabyBear::one(), - end_execution_shard: last_proof_pv.execution_shard, - init_addr_bits: last_proof_pv.last_init_addr_bits, - finalize_addr_bits: last_proof_pv.last_finalize_addr_bits, - leaf_challenger: leaf_challenger.clone(), - committed_value_digest: last_proof_pv.committed_value_digest, - deferred_proofs_digest: last_proof_pv.deferred_proofs_digest, - }); - - deferred_digest = Self::hash_deferred_proofs(deferred_digest, batch); - } - deferred_inputs - } - - /// Generate the inputs for the first layer of recursive proofs. - #[allow(clippy::type_complexity)] - pub fn get_first_layer_inputs<'a>( - &'a self, - vk: &'a SP1VerifyingKey, - leaf_challenger: &'a Challenger, - shard_proofs: &[ShardProof], - deferred_proofs: &[SP1ReduceProof], - batch_size: usize, - ) -> Vec { - let is_complete = shard_proofs.len() == 1 && deferred_proofs.is_empty(); - let core_inputs = self.get_recursion_core_inputs( - &vk.vk, - leaf_challenger, - shard_proofs, - batch_size, - is_complete, - ); - let last_proof_pv = shard_proofs.last().unwrap().public_values.as_slice().borrow(); - let deferred_inputs = self.get_recursion_deferred_inputs( - &vk.vk, - leaf_challenger, - last_proof_pv, - deferred_proofs, - batch_size, - ); - - let mut inputs = Vec::new(); - inputs.extend(core_inputs.into_iter().map(SP1CircuitWitness::Core)); - inputs.extend(deferred_inputs.into_iter().map(SP1CircuitWitness::Deferred)); - inputs + // Collect the shard proofs and the public values stream. + let shard_proofs: Vec> = proof_rx.iter().collect(); + let (public_values_stream, cycles) = handle.join().unwrap().unwrap(); + let public_values = SP1PublicValues::from(&public_values_stream); + Self::check_for_high_cycles(cycles); + Ok(SP1CoreProof { + proof: SP1CoreProofData(shard_proofs), + stdin: stdin.clone(), + public_values, + cycles, + }) + }) } /// Reduce shards proofs to a single shard proof using the recursion prover. @@ -641,22 +395,9 @@ impl SP1Prover { let shard_proofs = &proof.proof.0; - // Get the leaf challenger. - let mut leaf_challenger = self.core_prover.config().challenger(); - vk.vk.observe_into(&mut leaf_challenger); - shard_proofs.iter().for_each(|proof| { - leaf_challenger.observe(proof.commitment.global_main_commit); - leaf_challenger.observe_slice(&proof.public_values[0..self.core_prover.num_pv_elts()]); - }); - // Generate the first layer inputs. - let first_layer_inputs = self.get_first_layer_inputs( - vk, - &leaf_challenger, - shard_proofs, - &deferred_proofs, - first_layer_batch_size, - ); + let first_layer_inputs = + self.get_first_layer_inputs(vk, shard_proofs, &deferred_proofs, first_layer_batch_size); // Calculate the expected height of the tree. 
let mut expected_height = if first_layer_inputs.len() == 1 { 0 } else { 1 }; @@ -770,10 +511,8 @@ impl SP1Prover { // Generate the traces. let record = records.into_iter().next().unwrap(); - let traces = tracing::debug_span!("generate traces").in_scope(|| { - self.compress_prover - .generate_traces(&record, InteractionScope::Local) - }); + let traces = tracing::debug_span!("generate traces") + .in_scope(|| self.compress_prover.generate_traces(&record)); // Wait for our turn to update the state. record_and_trace_sync.wait_for_turn(index); @@ -834,7 +573,9 @@ impl SP1Prover { let _span = span.enter(); loop { let received = { record_and_trace_rx.lock().unwrap().recv() }; - if let Ok((index, height, TracesOrInput::ProgramRecordTraces(boxed_prt))) = received { + if let Ok((index, height, TracesOrInput::ProgramRecordTraces(boxed_prt))) = + received + { let (program, record, traces) = *boxed_prt; tracing::debug_span!("batch").in_scope(|| { // Get the keys. @@ -855,30 +596,12 @@ impl SP1Prover { ); // Commit to the record and traces. - let local_data = tracing::debug_span!("commit") - .in_scope(|| self.compress_prover.commit(&record, traces)); - - // Observe the commitment. - tracing::debug_span!("observe public values").in_scope(|| { - challenger.observe_slice( - &local_data.public_values[0..self.compress_prover.num_pv_elts()], - ); - }); + let data = tracing::debug_span!("commit") + .in_scope(|| self.compress_prover.commit(&record, traces)); // Generate the proof. let proof = tracing::debug_span!("open").in_scope(|| { - self.compress_prover - .open( - &pk, - None, - local_data, - &mut challenger, - &[ - ::Challenge::zero(), - ::Challenge::zero(), - ], - ) - .unwrap() + self.compress_prover.open(&pk, data, &mut challenger).unwrap() }); // Verify the proof. @@ -903,22 +626,32 @@ impl SP1Prover { // Advance the turn. prover_sync.advance_turn(); }); - } else if let Ok((index, height, TracesOrInput::CircuitWitness(witness_box))) = received { + } else if let Ok(( + index, + height, + TracesOrInput::CircuitWitness(witness_box), + )) = received + { let witness = *witness_box; if let SP1CircuitWitness::Compress(inner_witness) = witness { - let SP1CompressWitnessValues { vks_and_proofs, is_complete: _ } = inner_witness; - assert!(vks_and_proofs.len()==1); + let SP1CompressWitnessValues { vks_and_proofs, is_complete: _ } = + inner_witness; + assert!(vks_and_proofs.len() == 1); let (vk, proof) = vks_and_proofs.last().unwrap(); - // Wait for our turn to update the state. - prover_sync.wait_for_turn(index); + // Wait for our turn to update the state. + prover_sync.wait_for_turn(index); - // Send the proof. - proofs_tx.lock().unwrap().send((index, height, vk.clone(), proof.clone())).unwrap(); + // Send the proof. + proofs_tx + .lock() + .unwrap() + .send((index, height, vk.clone(), proof.clone())) + .unwrap(); - // Advance the turn. - prover_sync.advance_turn(); - } - } else { + // Advance the turn. 
+ prover_sync.advance_turn(); + } + } else { break; } } @@ -941,6 +674,9 @@ impl SP1Prover { ShardProof, )> = Vec::new(); loop { + if expected_height == 0 { + break; + } let received = { proofs_rx.lock().unwrap().recv() }; if let Ok((index, height, vk, proof)) = received { batch.push((index, height, vk, proof)); @@ -1188,6 +924,262 @@ impl SP1Prover { proof } + pub fn recursion_program( + &self, + input: &SP1RecursionWitnessValues, + ) -> Arc> { + println!("getting recursion program: {:?}", input.shape()); + let mut cache = self.recursion_programs.lock().unwrap_or_else(|e| e.into_inner()); + println!("inserting to cache"); + cache + .get_or_insert(input.shape(), || { + let misses = self.recursion_cache_misses.fetch_add(1, Ordering::Relaxed); + tracing::debug!("core cache miss, misses: {}", misses); + // Get the operations. + let builder_span = tracing::debug_span!("build recursion program").entered(); + let mut builder = Builder::::default(); + + let input = input.read(&mut builder); + SP1RecursiveVerifier::verify(&mut builder, self.core_prover.machine(), input); + let operations = builder.into_operations(); + builder_span.exit(); + + // Compile the program. + let compiler_span = tracing::debug_span!("compile recursion program").entered(); + let mut compiler = AsmCompiler::::default(); + let mut program = compiler.compile(operations); + if let Some(recursion_shape_config) = &self.recursion_shape_config { + recursion_shape_config.fix_shape(&mut program); + } + let program = Arc::new(program); + compiler_span.exit(); + program + }) + .clone() + } + + pub fn compress_program( + &self, + shape_tuning: bool, + input: &SP1CompressWithVKeyWitnessValues, + ) -> Arc> { + if self.recursion_shape_config.is_some() && !shape_tuning { + self.compress_programs.get(&input.shape()).map(Clone::clone).unwrap() + } else { + // Get the operations. + Arc::new(compress_program_from_input::( + self.recursion_shape_config.as_ref(), + &self.compress_prover, + self.vk_verification, + input, + )) + } + } + + pub fn shrink_program( + &self, + shrink_shape: RecursionShape, + input: &SP1CompressWithVKeyWitnessValues, + ) -> Arc> { + // Get the operations. + let builder_span = tracing::debug_span!("build shrink program").entered(); + let mut builder = Builder::::default(); + let input = input.read(&mut builder); + // Verify the proof. + SP1CompressRootVerifierWithVKey::verify( + &mut builder, + self.compress_prover.machine(), + input, + self.vk_verification, + PublicValuesOutputDigest::Reduce, + ); + let operations = builder.into_operations(); + builder_span.exit(); + + // Compile the program. + let compiler_span = tracing::debug_span!("compile shrink program").entered(); + let mut compiler = AsmCompiler::::default(); + let mut program = compiler.compile(operations); + + program.shape = Some(shrink_shape); + let program = Arc::new(program); + compiler_span.exit(); + program + } + + pub fn wrap_program(&self) -> Arc> { + self.wrap_program + .get_or_init(|| { + // Get the operations. 
+ let builder_span = tracing::debug_span!("build compress program").entered(); + let mut builder = Builder::::default(); + + let shrink_shape: ProofShape = ShrinkAir::::shrink_shape().into(); + let input_shape = SP1CompressShape::from(vec![shrink_shape]); + let shape = SP1CompressWithVkeyShape { + compress_shape: input_shape, + merkle_tree_height: self.vk_merkle_tree.height, + }; + let dummy_input = + SP1CompressWithVKeyWitnessValues::dummy(self.shrink_prover.machine(), &shape); + + let input = dummy_input.read(&mut builder); + + // Attest that the merkle tree root is correct. + let root = input.merkle_var.root; + for (val, expected) in root.iter().zip(self.vk_root.iter()) { + builder.assert_felt_eq(*val, *expected); + } + // Verify the proof. + SP1CompressRootVerifierWithVKey::verify( + &mut builder, + self.shrink_prover.machine(), + input, + self.vk_verification, + PublicValuesOutputDigest::Root, + ); + + let operations = builder.into_operations(); + builder_span.exit(); + + // Compile the program. + let compiler_span = tracing::debug_span!("compile compress program").entered(); + let mut compiler = AsmCompiler::::default(); + let program = Arc::new(compiler.compile(operations)); + compiler_span.exit(); + program + }) + .clone() + } + + pub fn deferred_program( + &self, + input: &SP1DeferredWitnessValues, + ) -> Arc> { + // Compile the program. + + // Get the operations. + let operations_span = + tracing::debug_span!("get operations for the deferred program").entered(); + let mut builder = Builder::::default(); + let input_read_span = tracing::debug_span!("Read input values").entered(); + let input = input.read(&mut builder); + input_read_span.exit(); + let verify_span = tracing::debug_span!("Verify deferred program").entered(); + + // Verify the proof. + SP1DeferredVerifier::verify( + &mut builder, + self.compress_prover.machine(), + input, + self.vk_verification, + ); + verify_span.exit(); + let operations = builder.into_operations(); + operations_span.exit(); + + let compiler_span = tracing::debug_span!("compile deferred program").entered(); + let mut compiler = AsmCompiler::::default(); + let mut program = compiler.compile(operations); + if let Some(recursion_shape_config) = &self.recursion_shape_config { + recursion_shape_config.fix_shape(&mut program); + } + let program = Arc::new(program); + compiler_span.exit(); + program + } + + pub fn get_recursion_core_inputs( + &self, + vk: &StarkVerifyingKey, + shard_proofs: &[ShardProof], + batch_size: usize, + is_complete: bool, + deferred_digest: [Val; 8], + ) -> Vec> { + let mut core_inputs = Vec::new(); + + // Prepare the inputs for the recursion programs. + for (batch_idx, batch) in shard_proofs.chunks(batch_size).enumerate() { + let proofs = batch.to_vec(); + + core_inputs.push(SP1RecursionWitnessValues { + vk: vk.clone(), + shard_proofs: proofs.clone(), + is_complete, + is_first_shard: batch_idx == 0, + vk_root: self.vk_root, + reconstruct_deferred_digest: deferred_digest, + }); + } + core_inputs + } + + pub fn get_recursion_deferred_inputs<'a>( + &'a self, + vk: &'a StarkVerifyingKey, + deferred_proofs: &[SP1ReduceProof], + batch_size: usize, + ) -> (Vec>, [BabyBear; 8]) { + // Prepare the inputs for the deferred proofs recursive verification. 
+ let mut deferred_digest = [Val::::zero(); DIGEST_SIZE]; + let mut deferred_inputs = Vec::new(); + + for batch in deferred_proofs.chunks(batch_size) { + let vks_and_proofs = + batch.iter().cloned().map(|proof| (proof.vk, proof.proof)).collect::>(); + + let input = SP1CompressWitnessValues { vks_and_proofs, is_complete: true }; + let input = self.make_merkle_proofs(input); + let SP1CompressWithVKeyWitnessValues { compress_val, merkle_val } = input; + + deferred_inputs.push(SP1DeferredWitnessValues { + vks_and_proofs: compress_val.vks_and_proofs, + vk_merkle_data: merkle_val, + start_reconstruct_deferred_digest: deferred_digest, + is_complete: false, + sp1_vk_digest: vk.hash_babybear(), + end_pc: vk.pc_start, + end_shard: BabyBear::one(), + end_execution_shard: BabyBear::one(), + init_addr_bits: [BabyBear::zero(); 32], + finalize_addr_bits: [BabyBear::zero(); 32], + committed_value_digest: [Word::([BabyBear::zero(); 4]); 8], + deferred_proofs_digest: [BabyBear::zero(); 8], + }); + + deferred_digest = Self::hash_deferred_proofs(deferred_digest, batch); + } + (deferred_inputs, deferred_digest) + } + + /// Generate the inputs for the first layer of recursive proofs. + #[allow(clippy::type_complexity)] + pub fn get_first_layer_inputs<'a>( + &'a self, + vk: &'a SP1VerifyingKey, + shard_proofs: &[ShardProof], + deferred_proofs: &[SP1ReduceProof], + batch_size: usize, + ) -> Vec { + let (deferred_inputs, deferred_digest) = + self.get_recursion_deferred_inputs(&vk.vk, deferred_proofs, batch_size); + + let is_complete = shard_proofs.len() == 1 && deferred_proofs.is_empty(); + let core_inputs = self.get_recursion_core_inputs( + &vk.vk, + shard_proofs, + batch_size, + is_complete, + deferred_digest, + ); + + let mut inputs = Vec::new(); + inputs.extend(deferred_inputs.into_iter().map(SP1CircuitWitness::Deferred)); + inputs.extend(core_inputs.into_iter().map(SP1CircuitWitness::Core)); + inputs + } + /// Accumulate deferred proofs into a single digest. pub fn hash_deferred_proofs( prev_digest: [Val; DIGEST_SIZE], @@ -1360,10 +1352,10 @@ pub mod tests { let context = SP1Context::default(); tracing::info!("setup elf"); - let (pk, vk) = prover.setup(elf); + let (_, pk_d, program, vk) = prover.setup(elf); tracing::info!("prove core"); - let core_proof = prover.prove_core(&pk, &stdin, opts, context)?; + let core_proof = prover.prove_core(&pk_d, program, &stdin, opts, context)?; let public_values = core_proof.public_values.clone(); if env::var("COLLECT_SHAPES").is_ok() { @@ -1490,26 +1482,31 @@ pub mod tests { opts: SP1ProverOpts, ) -> Result<()> { // Test program which proves the Keccak-256 hash of various inputs. - let keccak_elf = include_bytes!("../../../tests/keccak256/elf/riscv32im-succinct-zkvm-elf"); + let keccak_elf = test_artifacts::KECCAK256_ELF; // Test program which verifies proofs of a vkey and a list of committed inputs. 
- let verify_elf = - include_bytes!("../../../tests/verify-proof/elf/riscv32im-succinct-zkvm-elf"); + let verify_elf = test_artifacts::VERIFY_PROOF_ELF; tracing::info!("initializing prover"); let prover = SP1Prover::::new(); tracing::info!("setup keccak elf"); - let (keccak_pk, keccak_vk) = prover.setup(keccak_elf); + let (_, keccak_pk_d, keccak_program, keccak_vk) = prover.setup(keccak_elf); tracing::info!("setup verify elf"); - let (verify_pk, verify_vk) = prover.setup(verify_elf); + let (_, verify_pk_d, verify_program, verify_vk) = prover.setup(verify_elf); tracing::info!("prove subproof 1"); let mut stdin = SP1Stdin::new(); stdin.write(&1usize); stdin.write(&vec![0u8, 0, 0]); - let deferred_proof_1 = prover.prove_core(&keccak_pk, &stdin, opts, Default::default())?; + let deferred_proof_1 = prover.prove_core( + &keccak_pk_d, + keccak_program.clone(), + &stdin, + opts, + Default::default(), + )?; let pv_1 = deferred_proof_1.public_values.as_slice().to_vec().clone(); // Generate a second proof of keccak of various inputs. @@ -1519,16 +1516,19 @@ pub mod tests { stdin.write(&vec![0u8, 1, 2]); stdin.write(&vec![2, 3, 4]); stdin.write(&vec![5, 6, 7]); - let deferred_proof_2 = prover.prove_core(&keccak_pk, &stdin, opts, Default::default())?; + let deferred_proof_2 = + prover.prove_core(&keccak_pk_d, keccak_program, &stdin, opts, Default::default())?; let pv_2 = deferred_proof_2.public_values.as_slice().to_vec().clone(); // Generate recursive proof of first subproof. tracing::info!("compress subproof 1"); let deferred_reduce_1 = prover.compress(&keccak_vk, deferred_proof_1, vec![], opts)?; + prover.verify_compressed(&deferred_reduce_1, &keccak_vk)?; // Generate recursive proof of second subproof. tracing::info!("compress subproof 2"); let deferred_reduce_2 = prover.compress(&keccak_vk, deferred_proof_2, vec![], opts)?; + prover.verify_compressed(&deferred_reduce_2, &keccak_vk)?; // Run verify program with keccak vkey, subproofs, and their committed values. let mut stdin = SP1Stdin::new(); @@ -1546,7 +1546,8 @@ pub mod tests { stdin.write_proof(deferred_reduce_2.clone(), keccak_vk.vk.clone()); tracing::info!("proving verify program (core)"); - let verify_proof = prover.prove_core(&verify_pk, &stdin, opts, Default::default())?; + let verify_proof = + prover.prove_core(&verify_pk_d, verify_program, &stdin, opts, Default::default())?; // let public_values = verify_proof.public_values.clone(); // Generate recursive proof of verify program @@ -1589,13 +1590,14 @@ pub mod tests { #[test] #[serial] fn test_e2e() -> Result<()> { - let elf = include_bytes!("../../../tests/fibonacci/elf/riscv32im-succinct-zkvm-elf"); + let elf = test_artifacts::FIBONACCI_ELF; setup_logger(); let opts = SP1ProverOpts::default(); // TODO(mattstam): We should Test::Plonk here, but this uses the existing // docker image which has a different API than the current. So we need to wait until the // next release (v1.2.0+), and then switch it back. 
let prover = SP1Prover::::new(); + test_e2e_prover::( &prover, elf, @@ -1613,4 +1615,14 @@ pub mod tests { setup_logger(); test_e2e_with_deferred_proofs_prover::(SP1ProverOpts::default()) } + + // #[test] + // fn test_deterministic_setup() { + // setup_logger(); + // let prover = SP1Prover::::new(); + // let program = test_artifacts::FIBONACCI_ELF; + // let (pk, vk) = prover.setup(&program); + // let pk2 = prover.setup(&program).0; + // assert_eq!(pk.pk.commit, pk2.pk.commit); + // } } diff --git a/crates/prover/src/types.rs b/crates/prover/src/types.rs index dfebfb6c83..b4e362e0fc 100644 --- a/crates/prover/src/types.rs +++ b/crates/prover/src/types.rs @@ -1,6 +1,7 @@ use std::{fs::File, path::Path}; use anyhow::Result; +use clap::ValueEnum; use p3_baby_bear::BabyBear; use p3_bn254_fr::Bn254Fr; use p3_commit::{Pcs, TwoAdicMultiplicativeCoset}; @@ -183,6 +184,16 @@ impl SP1Bn254ProofData { } } +#[derive(Debug, Default, Clone, ValueEnum, PartialEq, Eq)] +pub enum ProverMode { + #[default] + Cpu, + Cuda, + Network, + #[value(skip)] + Mock, +} + #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ProofSystem { Plonk, diff --git a/crates/prover/src/verify.rs b/crates/prover/src/verify.rs index 1a2ff25502..115b52a0e0 100644 --- a/crates/prover/src/verify.rs +++ b/crates/prover/src/verify.rs @@ -228,12 +228,14 @@ impl SP1Prover { // - `deferred_proofs_digest` should be zero. // // Transition: - // - If `committed_value_digest_prev` is not zero, then `committed_value_digest` should equal + // - If `committed_value_digest_prev` is not zero, then `committed_value_digest` should + // equal // `committed_value_digest_prev`. Otherwise, `committed_value_digest` should equal zero. // - If `deferred_proofs_digest_prev` is not zero, then `deferred_proofs_digest` should // equal // `deferred_proofs_digest_prev`. Otherwise, `deferred_proofs_digest` should equal zero. - // - If it's not a shard with "CPU", then `committed_value_digest` should not change from the + // - If it's not a shard with "CPU", then `committed_value_digest` should not change from + // the // previous shard. 
// - If it's not a shard with "CPU", then `deferred_proofs_digest` should not change from // the diff --git a/crates/recursion/circuit/Cargo.toml b/crates/recursion/circuit/Cargo.toml index 7299445d5a..708fa7368e 100644 --- a/crates/recursion/circuit/Cargo.toml +++ b/crates/recursion/circuit/Cargo.toml @@ -31,13 +31,14 @@ sp1-recursion-compiler = { workspace = true } sp1-primitives = { workspace = true } sp1-recursion-gnark-ffi = { workspace = true } -itertools = "0.13.0" -serde = { version = "1.0", features = ["derive"] } +itertools = { workspace = true } +serde = { workspace = true, features = ["derive"] } rand = "0.8.5" -tracing = "0.1.40" -hashbrown = { version = "0.14.5", features = ["serde", "inline-more"] } +tracing = { workspace = true } +hashbrown = { workspace = true, features = ["serde", "inline-more"] } num-traits = "0.2.19" rayon = "1.10.0" +test-artifacts = { workspace = true, optional = true } [dev-dependencies] sp1-core-executor = { workspace = true, features = ["programs"] } @@ -49,8 +50,9 @@ p3-merkle-tree = { workspace = true } p3-poseidon2 = { workspace = true } zkhash = "0.2.0" rand = "0.8.5" +test-artifacts = { workspace = true } [features] native-gnark = ["sp1-recursion-gnark-ffi/native"] -export-tests = [] +export-tests = ["dep:test-artifacts"] debug = ["sp1-core-machine/debug"] diff --git a/crates/recursion/circuit/src/constraints.rs b/crates/recursion/circuit/src/constraints.rs index fa3c506764..d18dc555af 100644 --- a/crates/recursion/circuit/src/constraints.rs +++ b/crates/recursion/circuit/src/constraints.rs @@ -34,10 +34,11 @@ where A: MachineAir + for<'a> Air>, { #[allow(clippy::too_many_arguments)] + #[allow(clippy::type_complexity)] pub fn verify_constraints( builder: &mut Builder, chip: &MachineChip, - opening: &ChipOpenedValues>, + opening: &ChipOpenedValues, Ext>, trace_domain: TwoAdicMultiplicativeCoset, qc_domains: Vec>, zeta: Ext, @@ -65,10 +66,11 @@ where builder.assert_ext_eq(folded_constraints * sels.inv_zeroifier, quotient); } + #[allow(clippy::type_complexity)] pub fn eval_constraints( builder: &mut Builder, chip: &MachineChip, - opening: &ChipOpenedValues>, + opening: &ChipOpenedValues, Ext>, selectors: &LagrangeSelectors>, alpha: Ext, permutation_challenges: &[Ext], @@ -101,7 +103,8 @@ where main: opening.main.view(), perm: perm_opening.view(), perm_challenges: permutation_challenges, - cumulative_sums: &[opening.global_cumulative_sum, opening.local_cumulative_sum], + local_cumulative_sum: &opening.local_cumulative_sum, + global_cumulative_sum: &opening.global_cumulative_sum, public_values, is_first_row: selectors.is_first_row, is_last_row: selectors.is_last_row, @@ -115,9 +118,10 @@ where builder.eval(folder.accumulator) } + #[allow(clippy::type_complexity)] pub fn recompute_quotient( builder: &mut Builder, - opening: &ChipOpenedValues>, + opening: &ChipOpenedValues, Ext>, qc_domains: &[TwoAdicMultiplicativeCoset], zeta: Ext, ) -> Ext { @@ -151,8 +155,9 @@ where - C::F::one(); ( { - // We use the precomputed powers of zeta to compute (inline) the value of - // `other_domain.zp_at_point_variable(builder, zeta)`. + // We use the precomputed powers of zeta to compute (inline) the + // value of `other_domain. + // zp_at_point_variable(builder, zeta)`. 
let z: Ext<_, _> = builder.eval( zetas[other_domain.log_n] * SymbolicFelt::from_f(shift_power) - SymbolicExt::from_f(C::EF::one()), @@ -189,9 +194,10 @@ where ) } + #[allow(clippy::type_complexity)] pub fn verify_opening_shape( chip: &MachineChip, - opening: &ChipOpenedValues>, + opening: &ChipOpenedValues, Ext>, ) -> Result<(), OpeningShapeError> { // Verify that the preprocessed width matches the expected value for the chip. if opening.preprocessed.local.len() != chip.preprocessed_width() { diff --git a/crates/recursion/circuit/src/fri.rs b/crates/recursion/circuit/src/fri.rs index ea343bea4e..68af777b22 100644 --- a/crates/recursion/circuit/src/fri.rs +++ b/crates/recursion/circuit/src/fri.rs @@ -404,8 +404,8 @@ pub fn dummy_query_proof( } } -/// Make a dummy PCS proof for a given proof shape. Used to generate vkey information for fixed proof -/// shapes. +/// Make a dummy PCS proof for a given proof shape. Used to generate vkey information for fixed +/// proof shapes. /// /// The parameter `batch_shapes` contains (width, height) data for each matrix in each batch. pub fn dummy_pcs_proof( diff --git a/crates/recursion/circuit/src/hash.rs b/crates/recursion/circuit/src/hash.rs index bb499172d1..f312b812ce 100644 --- a/crates/recursion/circuit/src/hash.rs +++ b/crates/recursion/circuit/src/hash.rs @@ -1,5 +1,7 @@ -use std::fmt::Debug; -use std::iter::{repeat, zip}; +use std::{ + fmt::Debug, + iter::{repeat, zip}, +}; use itertools::Itertools; use p3_baby_bear::BabyBear; @@ -11,11 +13,11 @@ use sp1_recursion_compiler::{ circuit::CircuitV2Builder, ir::{Builder, Config, DslIr, Felt, Var}, }; -use sp1_recursion_core::stark::{outer_perm, OUTER_MULTI_FIELD_CHALLENGER_WIDTH}; -use sp1_recursion_core::{stark::BabyBearPoseidon2Outer, DIGEST_SIZE}; -use sp1_recursion_core::{HASH_RATE, PERMUTATION_WIDTH}; -use sp1_stark::baby_bear_poseidon2::BabyBearPoseidon2; -use sp1_stark::inner_perm; +use sp1_recursion_core::{ + stark::{outer_perm, BabyBearPoseidon2Outer, OUTER_MULTI_FIELD_CHALLENGER_WIDTH}, + DIGEST_SIZE, HASH_RATE, PERMUTATION_WIDTH, +}; +use sp1_stark::{baby_bear_poseidon2::BabyBearPoseidon2, inner_perm}; use crate::{ challenger::{reduce_32, POSEIDON_2_BB_RATE}, diff --git a/crates/recursion/circuit/src/machine/complete.rs b/crates/recursion/circuit/src/machine/complete.rs index 58df24f42e..02aaf61217 100644 --- a/crates/recursion/circuit/src/machine/complete.rs +++ b/crates/recursion/circuit/src/machine/complete.rs @@ -1,6 +1,7 @@ use itertools::Itertools; +use p3_baby_bear::BabyBear; use p3_field::AbstractField; - +use sp1_recursion_compiler::circuit::CircuitV2Builder; use sp1_recursion_compiler::ir::{Builder, Config, Felt}; use sp1_recursion_core::air::RecursionPublicValues; @@ -8,7 +9,7 @@ use sp1_recursion_core::air::RecursionPublicValues; /// /// The assertions consist of checking all the expected boundary conditions from a compress proof /// that represents the end of the recursion tower. -pub(crate) fn assert_complete( +pub(crate) fn assert_complete>( builder: &mut Builder, public_values: &RecursionPublicValues>, is_complete: Felt, @@ -19,11 +20,9 @@ pub(crate) fn assert_complete( start_shard, next_shard, start_execution_shard, - cumulative_sum, start_reconstruct_deferred_digest, end_reconstruct_deferred_digest, - leaf_challenger, - end_reconstruct_challenger, + global_cumulative_sum, contains_execution_shard, .. } = public_values; @@ -48,13 +47,6 @@ pub(crate) fn assert_complete( // Assert that the start execution shard is equal to 1. 
builder.assert_felt_eq(is_complete * (*start_execution_shard - C::F::one()), C::F::zero()); - // Assert that the end reconstruct challenger is equal to the leaf challenger. - for (end_challenger_d, leaf_challenger_d) in - end_reconstruct_challenger.into_iter().zip(*leaf_challenger) - { - builder.assert_felt_eq(is_complete * (end_challenger_d - leaf_challenger_d), C::F::zero()); - } - // The start reconstruct deferred digest should be zero. for start_digest_word in start_reconstruct_deferred_digest { builder.assert_felt_eq(is_complete * *start_digest_word, C::F::zero()); @@ -68,8 +60,5 @@ pub(crate) fn assert_complete( .assert_felt_eq(is_complete * (*end_digest_word - *deferred_digest_word), C::F::zero()); } - // Assert that the cumulative sum is zero. - for b in cumulative_sum.iter() { - builder.assert_felt_eq(is_complete * *b, C::F::zero()); - } + builder.assert_digest_zero_v2(is_complete, *global_cumulative_sum); } diff --git a/crates/recursion/circuit/src/machine/compress.rs b/crates/recursion/circuit/src/machine/compress.rs index fe99eb43c2..4cbb6f6adf 100644 --- a/crates/recursion/circuit/src/machine/compress.rs +++ b/crates/recursion/circuit/src/machine/compress.rs @@ -15,12 +15,9 @@ use p3_field::AbstractField; use p3_matrix::dense::RowMajorMatrix; use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use sp1_recursion_compiler::ir::{Builder, Ext, Felt, SymbolicFelt}; +use sp1_recursion_compiler::ir::{Builder, Felt, SymbolicFelt}; -use sp1_recursion_core::{ - air::{ChallengerPublicValues, RecursionPublicValues, RECURSIVE_PROOF_NUM_PV_ELTS}, - D, -}; +use sp1_recursion_core::air::{RecursionPublicValues, RECURSIVE_PROOF_NUM_PV_ELTS}; use sp1_stark::{ air::{MachineAir, POSEIDON_NUM_WORDS, PV_DIGEST_NUM_WORDS}, @@ -37,10 +34,10 @@ use crate::{ root_public_values_digest, }, stark::{dummy_vk_and_shard_proof, ShardProofVariable, StarkVerifier}, - utils::uninit_challenger_pv, BabyBearFriConfig, BabyBearFriConfigVariable, CircuitConfig, VerifyingKeyVariable, }; +use sp1_recursion_compiler::circuit::CircuitV2Builder; /// A program to verify a batch of recursive proofs and aggregate their public values. #[derive(Debug, Clone, Copy)] pub struct SP1CompressVerifier { @@ -127,12 +124,6 @@ where let mut exit_code: Felt<_> = builder.uninit(); let mut execution_shard: Felt<_> = unsafe { MaybeUninit::zeroed().assume_init() }; - let mut initial_reconstruct_challenger_values: ChallengerPublicValues> = - unsafe { uninit_challenger_pv(builder) }; - let mut reconstruct_challenger_values: ChallengerPublicValues> = - unsafe { uninit_challenger_pv(builder) }; - let mut leaf_challenger_values: ChallengerPublicValues> = - unsafe { uninit_challenger_pv(builder) }; let mut committed_value_digest: [Word>; PV_DIGEST_NUM_WORDS] = array::from_fn(|_| { Word(array::from_fn(|_| unsafe { MaybeUninit::zeroed().assume_init() })) @@ -141,8 +132,7 @@ where array::from_fn(|_| unsafe { MaybeUninit::zeroed().assume_init() }); let mut reconstruct_deferred_digest: [Felt<_>; POSEIDON_NUM_WORDS] = core::array::from_fn(|_| unsafe { MaybeUninit::zeroed().assume_init() }); - let mut global_cumulative_sum: [Felt<_>; D] = - core::array::from_fn(|_| builder.eval(C::F::zero())); + let mut global_cumulative_sums = Vec::new(); let mut init_addr_bits: [Felt<_>; 32] = core::array::from_fn(|_| unsafe { MaybeUninit::zeroed().assume_init() }); let mut finalize_addr_bits: [Felt<_>; 32] = @@ -162,10 +152,11 @@ where // Observe the vk and start pc. 
challenger.observe(builder, vk.commitment); challenger.observe(builder, vk.pc_start); + challenger.observe_slice(builder, vk.initial_global_cumulative_sum.0.x.0); + challenger.observe_slice(builder, vk.initial_global_cumulative_sum.0.y.0); + // Observe the padding. let zero: Felt<_> = builder.eval(C::F::zero()); - for _ in 0..7 { - challenger.observe(builder, zero); - } + challenger.observe(builder, zero); // Observe the main commitment and public values. challenger.observe_slice( @@ -173,15 +164,7 @@ where shard_proof.public_values[0..machine.num_pv_elts()].iter().copied(), ); - let zero_ext: Ext = builder.eval(C::F::zero()); - StarkVerifier::verify_shard( - builder, - &vk, - machine, - &mut challenger, - &shard_proof, - &[zero_ext, zero_ext], - ); + StarkVerifier::verify_shard(builder, &vk, machine, &mut challenger, &shard_proof); // Get the current public values. let current_public_values: &RecursionPublicValues> = @@ -251,14 +234,6 @@ where *first_bit = *current_bit; } - // Initialize the leaf challenger public values. - leaf_challenger_values = current_public_values.leaf_challenger; - - // Initialize the initial reconstruct challenger public values. - initial_reconstruct_challenger_values = - current_public_values.start_reconstruct_challenger; - reconstruct_challenger_values = current_public_values.start_reconstruct_challenger; - // Assign the committed values and deferred proof digests. for (word, current_word) in committed_value_digest .iter_mut() @@ -310,7 +285,7 @@ where ); // A flag to indicate whether the first execution shard has been seen. We have: // - `is_first_execution_shard_seen` = current_contains_execution_shard && - // !execution_shard_seen_before. + // !execution_shard_seen_before. // Since `contains_execution_shard` is the boolean flag used to denote if we have // seen an execution shard, we can use it to denote if we have seen an execution // shard before. @@ -354,25 +329,10 @@ where builder.assert_felt_eq(*bit, *current_bit); } - // Assert that the leaf challenger is always the same. - for (current, expected) in - leaf_challenger_values.into_iter().zip(current_public_values.leaf_challenger) - { - builder.assert_felt_eq(current, expected); - } - - // Assert that the current challenger matches the start reconstruct challenger. - for (current, expected) in reconstruct_challenger_values - .into_iter() - .zip(current_public_values.start_reconstruct_challenger) - { - builder.assert_felt_eq(current, expected); - } - // Digest constraints. { - // If `committed_value_digest` is not zero, then `public_values.committed_value_digest - // should be the current. + // If `committed_value_digest` is not zero, then + // `public_values.committed_value_digest should be the current. // Set a flags to indicate whether `committed_value_digest` is non-zero. The flags // are given by the elements of the array, and they will be used as filters to @@ -442,8 +402,8 @@ where // If the current shard has an execution shard, then we update the flag in case it was // not already set. That is: - // - If the current shard has an execution shard and the flag is set to zero, it will - // be set to one. + // - If the current shard has an execution shard and the flag is set to zero, it will be + // set to one. // - If the current shard has an execution shard and the flag is set to one, it will // remain set to one. contains_execution_shard = builder.eval( @@ -489,17 +449,11 @@ where *bit = *next_bit; } - // Update the reconstruct challenger. 
- reconstruct_challenger_values = current_public_values.end_reconstruct_challenger; - - // Update the cumulative sum. - for (sum_element, current_sum_element) in - global_cumulative_sum.iter_mut().zip_eq(current_public_values.cumulative_sum.iter()) - { - *sum_element = builder.eval(*sum_element + *current_sum_element); - } + global_cumulative_sums.push(current_public_values.global_cumulative_sum); } + let global_cumulative_sum = builder.sum_digest_v2(global_cumulative_sums); + // Update the global values from the last accumulated values. // Set sp1_vk digest to the one from the proof values. compress_public_values.sp1_vk_digest = sp1_vk_digest; @@ -513,12 +467,6 @@ where compress_public_values.last_init_addr_bits = init_addr_bits; // Set the MemoryFinalize address bits to be the last MemoryFinalize address bits. compress_public_values.last_finalize_addr_bits = finalize_addr_bits; - // Set the leaf challenger to it's value. - compress_public_values.leaf_challenger = leaf_challenger_values; - // Set the start reconstruct challenger to be the initial reconstruct challenger. - compress_public_values.start_reconstruct_challenger = initial_reconstruct_challenger_values; - // Set the end reconstruct challenger to be the last reconstruct challenger. - compress_public_values.end_reconstruct_challenger = reconstruct_challenger_values; // Set the start reconstruct deferred digest to be the last reconstruct deferred digest. compress_public_values.end_reconstruct_deferred_digest = reconstruct_deferred_digest; // Assign the deferred proof digests. @@ -526,7 +474,7 @@ where // Assign the committed value digests. compress_public_values.committed_value_digest = committed_value_digest; // Assign the cumulative sum. - compress_public_values.cumulative_sum = global_cumulative_sum; + compress_public_values.global_cumulative_sum = global_cumulative_sum; // Assign the `is_complete` flag. compress_public_values.is_complete = is_complete; // Set the contains an execution shard flag. 
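
Note on the compress verifier change above: the per-shard leaf/reconstruct challenger bookkeeping and the D-element cumulative sum are removed; each child proof now contributes a septic global cumulative sum digest, and the verifier folds all of them with a single `builder.sum_digest_v2(...)` call after the loop. The snippet below is a minimal, self-contained Rust sketch of that fold shape only. The `Digest` type and its component-wise wrapping addition are hypothetical stand-ins (the real circuit adds points on a septic curve inside the builder), so this illustrates the accumulation pattern, not the sp1 API.

    // A stand-in 7-limb digest; the real type is a septic curve digest.
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    struct Digest([u64; 7]);

    impl Digest {
        // Stand-in for digest addition; the circuit adds septic curve points.
        fn add(self, other: Digest) -> Digest {
            let mut limbs = [0u64; 7];
            for (out, (a, b)) in limbs.iter_mut().zip(self.0.iter().zip(other.0.iter())) {
                *out = a.wrapping_add(*b);
            }
            Digest(limbs)
        }
    }

    // Fold the per-proof digests into one, mirroring the single sum after the loop.
    fn sum_digests(digests: impl IntoIterator<Item = Digest>) -> Digest {
        digests.into_iter().fold(Digest([0; 7]), Digest::add)
    }

    fn main() {
        let per_proof = vec![Digest([1; 7]), Digest([2; 7]), Digest([3; 7])];
        assert_eq!(sum_digests(per_proof), Digest([6; 7]));
    }
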
diff --git a/crates/recursion/circuit/src/machine/core.rs b/crates/recursion/circuit/src/machine/core.rs index 3ed9376c45..2efd713572 100644 --- a/crates/recursion/circuit/src/machine/core.rs +++ b/crates/recursion/circuit/src/machine/core.rs @@ -18,6 +18,8 @@ use sp1_core_machine::{ }; use sp1_recursion_core::air::PV_DIGEST_NUM_WORDS; +use sp1_stark::air::InteractionScope; +use sp1_stark::air::MachineAir; use sp1_stark::{ air::{PublicValues, POSEIDON_NUM_WORDS}, baby_bear_poseidon2::BabyBearPoseidon2, @@ -28,7 +30,7 @@ use sp1_stark::{ShardProof, StarkGenericConfig, StarkVerifyingKey}; use sp1_recursion_compiler::{ circuit::CircuitV2Builder, - ir::{Builder, Config, Ext, ExtConst, Felt, SymbolicFelt}, + ir::{Builder, Config, Felt, SymbolicFelt}, }; use sp1_recursion_core::{ @@ -37,9 +39,9 @@ use sp1_recursion_core::{ }; use crate::{ - challenger::{CanObserveVariable, DuplexChallengerVariable, FieldChallengerVariable}, - machine::recursion_public_values_digest, - stark::{dummy_challenger, dummy_vk_and_shard_proof, ShardProofVariable, StarkVerifier}, + challenger::{CanObserveVariable, DuplexChallengerVariable}, + machine::{assert_complete, recursion_public_values_digest}, + stark::{dummy_vk_and_shard_proof, ShardProofVariable, StarkVerifier}, BabyBearFriConfig, BabyBearFriConfigVariable, CircuitConfig, VerifyingKeyVariable, }; @@ -49,8 +51,7 @@ pub struct SP1RecursionWitnessVariable< > { pub vk: VerifyingKeyVariable, pub shard_proofs: Vec>, - pub leaf_challenger: SC::FriChallengerVariable, - pub initial_reconstruct_challenger: DuplexChallengerVariable, + pub reconstruct_deferred_digest: [Felt; DIGEST_SIZE], pub is_complete: Felt, pub is_first_shard: Felt, pub vk_root: [Felt; DIGEST_SIZE], @@ -62,11 +63,10 @@ pub struct SP1RecursionWitnessVariable< pub struct SP1RecursionWitnessValues { pub vk: StarkVerifyingKey, pub shard_proofs: Vec>, - pub leaf_challenger: SC::Challenger, - pub initial_reconstruct_challenger: SC::Challenger, pub is_complete: bool, pub is_first_shard: bool, pub vk_root: [SC::Val; DIGEST_SIZE], + pub reconstruct_deferred_digest: [SC::Val; 8], } #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] @@ -84,10 +84,10 @@ pub struct SP1RecursiveVerifier { impl SP1RecursiveVerifier where SC: BabyBearFriConfigVariable< - C, - FriChallengerVariable = DuplexChallengerVariable, - DigestVariable = [Felt; DIGEST_SIZE], - >, + C, + FriChallengerVariable = DuplexChallengerVariable, + DigestVariable = [Felt; DIGEST_SIZE], + >, C: CircuitConfig>, >::ProverData>: Clone, { @@ -126,11 +126,10 @@ where let SP1RecursionWitnessVariable { vk, shard_proofs, - leaf_challenger, - initial_reconstruct_challenger, is_complete, is_first_shard, vk_root, + reconstruct_deferred_digest, } = input; // Initialize shard variables. @@ -166,13 +165,8 @@ where let mut deferred_proofs_digest: [Felt<_>; POSEIDON_NUM_WORDS] = array::from_fn(|_| builder.uninit()); - // Initialize the challenger variables. - let leaf_challenger_public_values = leaf_challenger.public_values(builder); - let mut reconstruct_challenger: DuplexChallengerVariable<_> = - initial_reconstruct_challenger.copy(builder); - // Initialize the cumulative sum. - let mut global_cumulative_sum: Ext<_, _> = builder.eval(C::EF::zero().cons()); + let mut global_cumulative_sums = Vec::new(); // Assert that the number of proofs is not zero. 
assert!(!shard_proofs.is_empty()); @@ -259,22 +253,14 @@ where C::F::one(), ); - // If the initial shard is the first shard, we assert that the initial challenger - // is the same as a fresh challenger that absorbed the verifying key. - let mut first_shard_challenger = machine.config().challenger_variable(builder); - vk.observe_into(builder, &mut first_shard_challenger); - let first_challenger_public_values = first_shard_challenger.public_values(builder); - let initial_challenger_public_values = - initial_reconstruct_challenger.public_values(builder); - for (first, initial) in - first_challenger_public_values.into_iter().zip(initial_challenger_public_values) - { - builder.assert_felt_eq(is_first_shard * (first - initial), C::F::zero()); - } - // If it's the first shard (which is the first execution shard), then the `start_pc` // should be vk.pc_start. builder.assert_felt_eq(is_first_shard * (start_pc - vk.pc_start), C::F::zero()); + // If it's the first shard, we add the vk's `initial_global_cumulative_sum` to the digest. + global_cumulative_sums.push(builder.select_global_cumulative_sum( + is_first_shard, + vk.initial_global_cumulative_sum, + )); // Assert that `init_addr_bits` and `finalize_addr_bits` are zero for the first for bit in current_init_addr_bits.iter() { @@ -289,19 +275,26 @@ where // // Do not verify the cumulative sum here, since the permutation challenge is shared // between all shards. - let mut challenger = leaf_challenger.copy(builder); - let global_permutation_challenges = - (0..2).map(|_| challenger.sample_ext(builder)).collect::>(); + // Prepare a challenger. + let mut challenger = machine.config().challenger_variable(builder); - StarkVerifier::verify_shard( + // Observe the vk and start pc. + challenger.observe(builder, vk.commitment); + challenger.observe(builder, vk.pc_start); + challenger.observe_slice(builder, vk.initial_global_cumulative_sum.0.x.0); + challenger.observe_slice(builder, vk.initial_global_cumulative_sum.0.y.0); + // Observe the padding. + let zero: Felt<_> = builder.eval(C::F::zero()); + challenger.observe(builder, zero); + + challenger.observe_slice( builder, - &vk, - machine, - &mut challenger, - &shard_proof, - &global_permutation_challenges, + shard_proof.public_values[0..machine.num_pv_elts()].iter().copied(), ); + StarkVerifier::verify_shard(builder, &vk, machine, &mut challenger, &shard_proof); + + let chips = machine.shard_chips_ordered(&shard_proof.chip_ordering).collect::>(); // Assert that first shard has a "CPU". Equivalently, assert that if the shard does // not have a "CPU", then the current shard is not 1. @@ -426,8 +419,8 @@ where // Digest constraints. { - // // If `committed_value_digest` is not zero, then the current value should be equal - // to `public_values.committed_value_digest`. + // // If `committed_value_digest` is not zero, then the current value should be + // equal to `public_values.committed_value_digest`. // Set flags to indicate whether `committed_value_digest` is non-zero. The flags are // given by the elements of the array, and they will be used as filters to constrain @@ -522,19 +515,16 @@ where // have shard < 2^{MAX_LOG_NUMBER_OF_SHARDS}. C::range_check_felt(builder, public_values.shard, MAX_LOG_NUMBER_OF_SHARDS); - // Update the reconstruct challenger. 
- reconstruct_challenger.observe(builder, shard_proof.commitment.global_main_commit); - for element in shard_proof.public_values.iter().take(machine.num_pv_elts()) { - reconstruct_challenger.observe(builder, *element); - } - // Cumulative sum is updated by sums of all chips. - for values in shard_proof.opened_values.chips.iter() { - global_cumulative_sum = - builder.eval(global_cumulative_sum + values.global_cumulative_sum); + for (chip, values) in chips.iter().zip(shard_proof.opened_values.chips.iter()) { + if chip.commit_scope() == InteractionScope::Global { + global_cumulative_sums.push(values.global_cumulative_sum); + } } } + let global_cumulative_sum = builder.sum_digest_v2(global_cumulative_sums); + // Assert that the last exit code is zero. builder.assert_felt_eq(exit_code, C::F::zero()); @@ -543,20 +533,8 @@ where // Compute the vk digest. let vk_digest = vk.hash(builder); - // Collect the public values for challengers. - let initial_challenger_public_values = - initial_reconstruct_challenger.public_values(builder); - let final_challenger_public_values = reconstruct_challenger.public_values(builder); - - // Collect the cumulative sum. - let global_cumulative_sum_array = builder.ext2felt_v2(global_cumulative_sum); - - // Collect the deferred proof digests. - let zero: Felt<_> = builder.eval(C::F::zero()); - let start_deferred_digest = [zero; POSEIDON_NUM_WORDS]; - let end_deferred_digest = [zero; POSEIDON_NUM_WORDS]; - // Initialize the public values we will commit to. + let zero: Felt<_> = builder.eval(C::F::zero()); let mut recursion_public_values_stream = [zero; RECURSIVE_PROOF_NUM_PV_ELTS]; let recursion_public_values: &mut RecursionPublicValues<_> = recursion_public_values_stream.as_mut_slice().borrow_mut(); @@ -574,12 +552,9 @@ where initial_previous_finalize_addr_bits; recursion_public_values.last_finalize_addr_bits = current_finalize_addr_bits; recursion_public_values.sp1_vk_digest = vk_digest; - recursion_public_values.leaf_challenger = leaf_challenger_public_values; - recursion_public_values.start_reconstruct_challenger = initial_challenger_public_values; - recursion_public_values.end_reconstruct_challenger = final_challenger_public_values; - recursion_public_values.cumulative_sum = global_cumulative_sum_array; - recursion_public_values.start_reconstruct_deferred_digest = start_deferred_digest; - recursion_public_values.end_reconstruct_deferred_digest = end_deferred_digest; + recursion_public_values.global_cumulative_sum = global_cumulative_sum; + recursion_public_values.start_reconstruct_deferred_digest = reconstruct_deferred_digest; + recursion_public_values.end_reconstruct_deferred_digest = reconstruct_deferred_digest; recursion_public_values.exit_code = exit_code; recursion_public_values.is_complete = is_complete; // Set the contains an execution shard flag. 
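
Note on the core recursive verifier hunks above: the witness no longer carries a leaf challenger or an initial reconstruct challenger. A fresh challenger is built for every shard and seeded by observing the vk commitment, the start pc, both coordinates of the vk's initial global cumulative sum, a single padding zero, and the shard's public values; only chips whose commit scope is global contribute their cumulative sums to the folded digest. The sketch below illustrates just the per-shard seeding order, using a hypothetical hasher-backed transcript; it is not the sp1 duplex challenger, and the field values are made up.

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Hypothetical transcript backed by a plain hasher, standing in for the duplex challenger.
    struct Transcript(DefaultHasher);

    impl Transcript {
        fn new() -> Self {
            Transcript(DefaultHasher::new())
        }
        fn observe(&mut self, value: u64) {
            value.hash(&mut self.0);
        }
        fn observe_slice(&mut self, values: &[u64]) {
            for &value in values {
                self.observe(value);
            }
        }
        fn sample(&self) -> u64 {
            self.0.finish()
        }
    }

    fn main() {
        // Made-up values standing in for the vk commitment, start pc, the vk's
        // initial global cumulative sum coordinates, and the shard public values.
        let vk_commitment = [1u64, 2, 3, 4, 5, 6, 7, 8];
        let pc_start = 0x0020_0800u64;
        let initial_sum_x = [9u64; 7];
        let initial_sum_y = [11u64; 7];
        let public_values = [42u64, 43, 44];

        // Seed a fresh transcript for this shard in the same order as the verifier:
        // vk commitment, start pc, both digest coordinates, one padding zero, then
        // the shard's public values. Challenges are drawn only after seeding.
        let mut challenger = Transcript::new();
        challenger.observe_slice(&vk_commitment);
        challenger.observe(pc_start);
        challenger.observe_slice(&initial_sum_x);
        challenger.observe_slice(&initial_sum_y);
        challenger.observe(0); // single padding element replaces the old seven
        challenger.observe_slice(&public_values);
        let _alpha = challenger.sample();
    }
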
@@ -615,8 +590,7 @@ impl SP1RecursionWitnessValues { Self { vk, shard_proofs, - leaf_challenger: dummy_challenger(machine.config()), - initial_reconstruct_challenger: dummy_challenger(machine.config()), + reconstruct_deferred_digest: [BabyBear::zero(); DIGEST_SIZE], is_complete: shape.is_complete, is_first_shard: false, vk_root: [BabyBear::zero(); DIGEST_SIZE], diff --git a/crates/recursion/circuit/src/machine/deferred.rs b/crates/recursion/circuit/src/machine/deferred.rs index d5ab720973..19c93143f1 100644 --- a/crates/recursion/circuit/src/machine/deferred.rs +++ b/crates/recursion/circuit/src/machine/deferred.rs @@ -10,10 +10,10 @@ use p3_baby_bear::BabyBear; use p3_commit::Mmcs; use p3_field::AbstractField; use p3_matrix::dense::RowMajorMatrix; - use sp1_primitives::consts::WORD_SIZE; -use sp1_recursion_compiler::ir::{Builder, Ext, Felt}; - +use sp1_recursion_compiler::ir::{Builder, Felt}; +use sp1_stark::septic_curve::SepticCurve; +use sp1_stark::septic_digest::SepticDigest; use sp1_stark::{ air::{MachineAir, POSEIDON_NUM_WORDS}, baby_bear_poseidon2::BabyBearPoseidon2, @@ -30,7 +30,7 @@ use crate::{ constraints::RecursiveVerifierConstraintFolder, hash::{FieldHasher, FieldHasherVariable}, machine::assert_recursion_public_values_valid, - stark::{dummy_challenger, ShardProofVariable, StarkVerifier}, + stark::{ShardProofVariable, StarkVerifier}, BabyBearFriConfig, BabyBearFriConfigVariable, CircuitConfig, VerifyingKeyVariable, }; @@ -61,7 +61,6 @@ pub struct SP1DeferredWitnessValues, pub start_reconstruct_deferred_digest: [SC::Val; POSEIDON_NUM_WORDS], pub sp1_vk_digest: [SC::Val; DIGEST_SIZE], - pub leaf_challenger: SC::Challenger, pub committed_value_digest: [Word; PV_DIGEST_NUM_WORDS], pub deferred_proofs_digest: [SC::Val; POSEIDON_NUM_WORDS], pub end_pc: SC::Val, @@ -80,7 +79,6 @@ pub struct SP1DeferredWitnessVariable< pub vk_merkle_data: SP1MerkleProofWitnessVariable, pub start_reconstruct_deferred_digest: [Felt; POSEIDON_NUM_WORDS], pub sp1_vk_digest: [Felt; DIGEST_SIZE], - pub leaf_challenger: SC::FriChallengerVariable, pub committed_value_digest: [Word>; PV_DIGEST_NUM_WORDS], pub deferred_proofs_digest: [Felt; POSEIDON_NUM_WORDS], pub end_pc: Felt, @@ -94,10 +92,10 @@ pub struct SP1DeferredWitnessVariable< impl SP1DeferredVerifier where SC: BabyBearFriConfigVariable< - C, - FriChallengerVariable = DuplexChallengerVariable, - DigestVariable = [Felt; DIGEST_SIZE], - >, + C, + FriChallengerVariable = DuplexChallengerVariable, + DigestVariable = [Felt; DIGEST_SIZE], + >, C: CircuitConfig>, >::ProverData>: Clone, A: MachineAir + for<'a> Air>, @@ -122,7 +120,6 @@ where vk_merkle_data, start_reconstruct_deferred_digest, sp1_vk_digest, - leaf_challenger, committed_value_digest, deferred_proofs_digest, end_pc, @@ -157,10 +154,11 @@ where // Observe the vk and start pc. challenger.observe(builder, vk.commitment); challenger.observe(builder, vk.pc_start); + challenger.observe_slice(builder, vk.initial_global_cumulative_sum.0.x.0); + challenger.observe_slice(builder, vk.initial_global_cumulative_sum.0.y.0); + // Observe the padding. let zero: Felt<_> = builder.eval(C::F::zero()); - for _ in 0..7 { - challenger.observe(builder, zero); - } + challenger.observe(builder, zero); // Observe the and public values. 
challenger.observe_slice( @@ -168,15 +166,7 @@ where shard_proof.public_values[0..machine.num_pv_elts()].iter().copied(), ); - let zero_ext: Ext = builder.eval(C::F::zero()); - StarkVerifier::verify_shard( - builder, - &vk, - machine, - &mut challenger, - &shard_proof, - &[zero_ext, zero_ext], - ); + StarkVerifier::verify_shard(builder, &vk, machine, &mut challenger, &shard_proof); // Get the current public values. let current_public_values: &RecursionPublicValues> = @@ -232,11 +222,6 @@ where // Set the deferred proof digest to be the hitned value. deferred_public_values.deferred_proofs_digest = deferred_proofs_digest; - // Set the initial, end, and leaf challenger to be the hitned values. - let values = leaf_challenger.public_values(builder); - deferred_public_values.leaf_challenger = values; - deferred_public_values.start_reconstruct_challenger = values; - deferred_public_values.end_reconstruct_challenger = values; // Set the exit code to be zero for now. deferred_public_values.exit_code = builder.eval(C::F::zero()); // Assign the deferred proof digests. @@ -246,7 +231,10 @@ where // Set the `contains_execution_shard` flag. deferred_public_values.contains_execution_shard = builder.eval(C::F::zero()); // Set the cumulative sum to zero. - deferred_public_values.cumulative_sum = array::from_fn(|_| builder.eval(C::F::zero())); + deferred_public_values.global_cumulative_sum = + SepticDigest(SepticCurve::convert(SepticDigest::::zero().0, |value| { + builder.eval(value) + })); // Set the vk root from the witness. deferred_public_values.vk_root = vk_root; // Set the digest according to the previous values. @@ -271,7 +259,6 @@ impl SP1DeferredWitnessValues { Self { vks_and_proofs, vk_merkle_data, - leaf_challenger: dummy_challenger(machine.config()), is_complete: true, sp1_vk_digest: [BabyBear::zero(); DIGEST_SIZE], start_reconstruct_deferred_digest: [BabyBear::zero(); POSEIDON_NUM_WORDS], diff --git a/crates/recursion/circuit/src/machine/vkey_proof.rs b/crates/recursion/circuit/src/machine/vkey_proof.rs index eeefe314b4..e1cf167718 100644 --- a/crates/recursion/circuit/src/machine/vkey_proof.rs +++ b/crates/recursion/circuit/src/machine/vkey_proof.rs @@ -114,10 +114,10 @@ pub struct SP1CompressWithVKeyWitnessValues SP1CompressWithVKeyVerifier where SC: BabyBearFriConfigVariable< - C, - FriChallengerVariable = DuplexChallengerVariable, - DigestVariable = [Felt; DIGEST_SIZE], - >, + C, + FriChallengerVariable = DuplexChallengerVariable, + DigestVariable = [Felt; DIGEST_SIZE], + >, C: CircuitConfig>, >::ProverData>: Clone, A: MachineAir + for<'a> Air>, diff --git a/crates/recursion/circuit/src/machine/witness.rs b/crates/recursion/circuit/src/machine/witness.rs index 11b79831e1..debf82ea2d 100644 --- a/crates/recursion/circuit/src/machine/witness.rs +++ b/crates/recursion/circuit/src/machine/witness.rs @@ -89,14 +89,22 @@ where fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { let commitment = self.commit.read(builder); let pc_start = self.pc_start.read(builder); + let initial_global_cumulative_sum = self.initial_global_cumulative_sum.read(builder); let chip_information = self.chip_information.clone(); let chip_ordering = self.chip_ordering.clone(); - VerifyingKeyVariable { commitment, pc_start, chip_information, chip_ordering } + VerifyingKeyVariable { + commitment, + pc_start, + initial_global_cumulative_sum, + chip_information, + chip_ordering, + } } fn write(&self, witness: &mut impl WitnessWriter) { self.commit.write(witness); self.pc_start.write(witness); + 
self.initial_global_cumulative_sum.write(witness); } } @@ -109,18 +117,16 @@ where fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { let vk = self.vk.read(builder); let shard_proofs = self.shard_proofs.read(builder); - let leaf_challenger = self.leaf_challenger.read(builder); - let initial_reconstruct_challenger = self.initial_reconstruct_challenger.read(builder); + let reconstruct_deferred_digest = self.reconstruct_deferred_digest.read(builder); let is_complete = InnerVal::from_bool(self.is_complete).read(builder); let is_first_shard = InnerVal::from_bool(self.is_first_shard).read(builder); let vk_root = self.vk_root.read(builder); SP1RecursionWitnessVariable { vk, shard_proofs, - leaf_challenger, - initial_reconstruct_challenger, is_complete, is_first_shard, + reconstruct_deferred_digest, vk_root, } } @@ -128,8 +134,7 @@ where fn write(&self, witness: &mut impl WitnessWriter) { self.vk.write(witness); self.shard_proofs.write(witness); - self.leaf_challenger.write(witness); - self.initial_reconstruct_challenger.write(witness); + self.reconstruct_deferred_digest.write(witness); self.is_complete.write(witness); self.is_first_shard.write(witness); self.vk_root.write(witness); @@ -169,7 +174,6 @@ where let start_reconstruct_deferred_digest = self.start_reconstruct_deferred_digest.read(builder); let sp1_vk_digest = self.sp1_vk_digest.read(builder); - let leaf_challenger = self.leaf_challenger.read(builder); let committed_value_digest = self.committed_value_digest.read(builder); let deferred_proofs_digest = self.deferred_proofs_digest.read(builder); let end_pc = self.end_pc.read(builder); @@ -184,7 +188,6 @@ where vk_merkle_data, start_reconstruct_deferred_digest, sp1_vk_digest, - leaf_challenger, committed_value_digest, deferred_proofs_digest, end_pc, @@ -201,7 +204,6 @@ where self.vk_merkle_data.write(witness); self.start_reconstruct_deferred_digest.write(witness); self.sp1_vk_digest.write(witness); - self.leaf_challenger.write(witness); self.committed_value_digest.write(witness); self.deferred_proofs_digest.write(witness); self.end_pc.write(witness); diff --git a/crates/recursion/circuit/src/machine/wrap.rs b/crates/recursion/circuit/src/machine/wrap.rs index 0ec0d8db40..e3708e7e7f 100644 --- a/crates/recursion/circuit/src/machine/wrap.rs +++ b/crates/recursion/circuit/src/machine/wrap.rs @@ -5,7 +5,7 @@ use p3_baby_bear::BabyBear; use p3_commit::Mmcs; use p3_field::AbstractField; use p3_matrix::dense::RowMajorMatrix; -use sp1_recursion_compiler::ir::{Builder, Ext, Felt}; +use sp1_recursion_compiler::ir::{Builder, Felt}; use sp1_stark::{air::MachineAir, StarkMachine}; use crate::{ @@ -62,24 +62,17 @@ where // Observe the vk and start pc. challenger.observe(builder, vk.commitment); challenger.observe(builder, vk.pc_start); + challenger.observe_slice(builder, vk.initial_global_cumulative_sum.0.x.0); + challenger.observe_slice(builder, vk.initial_global_cumulative_sum.0.y.0); + // Observe the padding. let zero: Felt<_> = builder.eval(C::F::zero()); - for _ in 0..7 { - challenger.observe(builder, zero); - } + challenger.observe(builder, zero); // Observe the main commitment and public values. 
challenger .observe_slice(builder, proof.public_values[0..machine.num_pv_elts()].iter().copied()); - let zero_ext: Ext = builder.eval(C::F::zero()); - StarkVerifier::verify_shard( - builder, - &vk, - machine, - &mut challenger, - &proof, - &[zero_ext, zero_ext], - ); + StarkVerifier::verify_shard(builder, &vk, machine, &mut challenger, &proof); // Get the public values, and assert that they are valid. let public_values: &RootPublicValues> = proof.public_values.as_slice().borrow(); diff --git a/crates/recursion/circuit/src/merkle_tree.rs b/crates/recursion/circuit/src/merkle_tree.rs index a3524c0567..fe769b53d2 100644 --- a/crates/recursion/circuit/src/merkle_tree.rs +++ b/crates/recursion/circuit/src/merkle_tree.rs @@ -18,8 +18,8 @@ use crate::{ #[serde(bound(serialize = "HV::Digest: Serialize"))] #[serde(bound(deserialize = "HV::Digest: Deserialize<'de>"))] pub struct MerkleTree> { - /// The height of the tree, not counting the root layer. This is the same as the logarithm of the - /// number of leaves. + /// The height of the tree, not counting the root layer. This is the same as the logarithm of + /// the number of leaves. pub height: usize, /// All the layers but the root. If there are `n` leaves where `n` is a power of 2, there are diff --git a/crates/recursion/circuit/src/stark.rs b/crates/recursion/circuit/src/stark.rs index feb81e0ef4..4fc4f15930 100644 --- a/crates/recursion/circuit/src/stark.rs +++ b/crates/recursion/circuit/src/stark.rs @@ -8,18 +8,18 @@ use p3_baby_bear::BabyBear; use p3_commit::{Mmcs, Pcs, PolynomialSpace, TwoAdicMultiplicativeCoset}; use p3_field::{AbstractField, ExtensionField, Field, TwoAdicField}; use p3_matrix::{dense::RowMajorMatrix, Dimensions}; - use sp1_recursion_compiler::{ circuit::CircuitV2Builder, - ir::{Builder, Config, Ext, ExtConst}, + ir::{Builder, Config, DslIr, Ext, ExtConst}, prelude::Felt, }; use sp1_stark::{ - air::InteractionScope, baby_bear_poseidon2::BabyBearPoseidon2, AirOpenedValues, Challenger, - Chip, ChipOpenedValues, InnerChallenge, ProofShape, ShardCommitment, ShardOpenedValues, - ShardProof, Val, PROOF_MAX_NUM_PVS, + air::{InteractionScope, MachineAir}, + baby_bear_poseidon2::BabyBearPoseidon2, + AirOpenedValues, Challenger, Chip, ChipOpenedValues, InnerChallenge, ProofShape, + ShardCommitment, ShardOpenedValues, ShardProof, StarkGenericConfig, StarkMachine, + StarkVerifyingKey, Val, PROOF_MAX_NUM_PVS, }; -use sp1_stark::{air::MachineAir, StarkGenericConfig, StarkMachine, StarkVerifyingKey}; use crate::{ challenger::CanObserveVariable, @@ -33,12 +33,14 @@ use crate::{ domain::PolynomialSpaceVariable, fri::verify_two_adic_pcs, BabyBearFriConfigVariable, TwoAdicPcsRoundVariable, VerifyingKeyVariable, }; +use sp1_stark::septic_digest::SepticDigest; /// Reference: [sp1_core::stark::ShardProof] +#[allow(clippy::type_complexity)] #[derive(Clone)] pub struct ShardProofVariable, SC: BabyBearFriConfigVariable> { pub commitment: ShardCommitment, - pub opened_values: ShardOpenedValues>, + pub opened_values: ShardOpenedValues, Ext>, pub opening_proof: TwoAdicPcsProofVariable, pub chip_ordering: HashMap, pub public_values: Vec>, @@ -59,8 +61,7 @@ pub fn dummy_vk_and_shard_proof>( ) -> (StarkVerifyingKey, ShardProof) { // Make a dummy commitment. 
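    // With the global/local main-trace split gone, the dummy proof carries a single
    // `main_commit`, and the opened values below become generic over both the base and
    // extension field: `global_cumulative_sum` is now a septic digest of base-field elements
    // while `local_cumulative_sum` remains an extension-field element.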
let commitment = ShardCommitment { - global_main_commit: dummy_hash(), - local_main_commit: dummy_hash(), + main_commit: dummy_hash(), permutation_commit: dummy_hash(), quotient_commit: dummy_hash(), }; @@ -73,8 +74,6 @@ pub fn dummy_vk_and_shard_proof>( .map(|(i, (name, _))| (name.clone(), i)) .collect::>(); let shard_chips = machine.shard_chips_ordered(&chip_ordering).collect::>(); - let chip_scopes = shard_chips.iter().map(|chip| chip.commit_scope()).collect::>(); - let has_global_main_commit = chip_scopes.contains(&InteractionScope::Global); let opened_values = ShardOpenedValues { chips: shard_chips .iter() @@ -87,14 +86,11 @@ pub fn dummy_vk_and_shard_proof>( let mut preprocessed_names_and_dimensions = vec![]; let mut preprocessed_batch_shape = vec![]; - let mut global_main_batch_shape = vec![]; - let mut local_main_batch_shape = vec![]; + let mut main_batch_shape = vec![]; let mut permutation_batch_shape = vec![]; let mut quotient_batch_shape = vec![]; - for ((chip, chip_opening), scope) in - shard_chips.iter().zip_eq(opened_values.chips.iter()).zip_eq(chip_scopes.iter()) - { + for (chip, chip_opening) in shard_chips.iter().zip_eq(opened_values.chips.iter()) { if !chip_opening.preprocessed.local.is_empty() { let prep_shape = PolynomialShape { width: chip_opening.preprocessed.local.len(), @@ -111,10 +107,7 @@ pub fn dummy_vk_and_shard_proof>( width: chip_opening.main.local.len(), log_degree: chip_opening.log_degree, }; - match scope { - InteractionScope::Global => global_main_batch_shape.push(main_shape), - InteractionScope::Local => local_main_batch_shape.push(main_shape), - } + main_batch_shape.push(main_shape); let permutation_shape = PolynomialShape { width: chip_opening.permutation.local.len(), log_degree: chip_opening.log_degree, @@ -129,22 +122,12 @@ pub fn dummy_vk_and_shard_proof>( } } - let batch_shapes = if has_global_main_commit { - vec![ - PolynomialBatchShape { shapes: preprocessed_batch_shape }, - PolynomialBatchShape { shapes: global_main_batch_shape }, - PolynomialBatchShape { shapes: local_main_batch_shape }, - PolynomialBatchShape { shapes: permutation_batch_shape }, - PolynomialBatchShape { shapes: quotient_batch_shape }, - ] - } else { - vec![ - PolynomialBatchShape { shapes: preprocessed_batch_shape }, - PolynomialBatchShape { shapes: local_main_batch_shape }, - PolynomialBatchShape { shapes: permutation_batch_shape }, - PolynomialBatchShape { shapes: quotient_batch_shape }, - ] - }; + let batch_shapes = vec![ + PolynomialBatchShape { shapes: preprocessed_batch_shape }, + PolynomialBatchShape { shapes: main_batch_shape }, + PolynomialBatchShape { shapes: permutation_batch_shape }, + PolynomialBatchShape { shapes: quotient_batch_shape }, + ]; let fri_queries = machine.config().fri_config().num_queries; let log_blowup = machine.config().fri_config().log_blowup; @@ -175,6 +158,7 @@ pub fn dummy_vk_and_shard_proof>( let vk = StarkVerifyingKey { commit: dummy_hash(), pc_start: BabyBear::zero(), + initial_global_cumulative_sum: SepticDigest::::zero(), chip_information: preprocessed_chip_information, chip_ordering: preprocessed_chip_ordering, }; @@ -188,7 +172,7 @@ pub fn dummy_vk_and_shard_proof>( fn dummy_opened_values, A: MachineAir>( chip: &Chip, log_degree: usize, -) -> ChipOpenedValues { +) -> ChipOpenedValues { let preprocessed_width = chip.preprocessed_width(); let preprocessed = AirOpenedValues { local: vec![EF::zero(); preprocessed_width], @@ -211,7 +195,7 @@ fn dummy_opened_values, A: MachineAir>( main, permutation, quotient, - global_cumulative_sum: 
EF::zero(), + global_cumulative_sum: SepticDigest::::zero(), local_cumulative_sum: EF::zero(), log_degree, } @@ -265,14 +249,10 @@ where machine: &StarkMachine, challenger: &mut SC::FriChallengerVariable, proof: &ShardProofVariable, - global_permutation_challenges: &[Ext], ) where A: for<'a> Air>, { let chips = machine.shard_chips_ordered(&proof.chip_ordering).collect::>(); - let chip_scopes = chips.iter().map(|chip| chip.commit_scope()).collect::>(); - - let has_global_main_commit = chip_scopes.contains(&InteractionScope::Global); let ShardProofVariable { commitment, @@ -309,33 +289,28 @@ where .map(|log_degree| Self::natural_domain_for_degree(machine.config(), 1 << log_degree)) .collect::>(); - let ShardCommitment { - global_main_commit, - local_main_commit, - permutation_commit, - quotient_commit, - } = *commitment; + let ShardCommitment { main_commit, permutation_commit, quotient_commit } = *commitment; - challenger.observe(builder, local_main_commit); + challenger.observe(builder, main_commit); let local_permutation_challenges = (0..2).map(|_| challenger.sample_ext(builder)).collect::>(); challenger.observe(builder, permutation_commit); for (opening, chip) in opened_values.chips.iter().zip_eq(chips.iter()) { - let global_sum = C::ext2felt(builder, opening.global_cumulative_sum); let local_sum = C::ext2felt(builder, opening.local_cumulative_sum); - challenger.observe_slice(builder, global_sum); + let global_sum = opening.global_cumulative_sum; + challenger.observe_slice(builder, local_sum); + challenger.observe_slice(builder, global_sum.0.x.0); + challenger.observe_slice(builder, global_sum.0.y.0); - let has_global_interactions = chip - .sends() - .iter() - .chain(chip.receives()) - .any(|i| i.scope == InteractionScope::Global); - if !has_global_interactions { - builder.assert_ext_eq(opening.global_cumulative_sum, C::EF::zero().cons()); + if chip.commit_scope() == InteractionScope::Local { + let is_real: Felt = builder.uninit(); + builder.push_op(DslIr::ImmF(is_real, C::F::one())); + builder.assert_digest_zero_v2(is_real, global_sum); } + let has_local_interactions = chip .sends() .iter() @@ -433,33 +408,15 @@ where }) .collect::>(); - // Split the main_domains_points_and_opens to the global and local chips. - let mut global_trace_points_and_openings = Vec::new(); - let mut local_trace_points_and_openings = Vec::new(); - for (i, points_and_openings) in - main_domains_points_and_opens.clone().into_iter().enumerate() - { - let scope = chip_scopes[i]; - if scope == InteractionScope::Global { - global_trace_points_and_openings.push(points_and_openings); - } else { - local_trace_points_and_openings.push(points_and_openings); - } - } - // Create the pcs rounds. 
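        // With a single main commitment there is always exactly one main round, so the batch
        // below is the fixed sequence [preprocessed, main, permutation, quotient]; the order
        // must match the order in which the prover committed these batches, since the PCS
        // verifier checks the rounds positionally.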
let prep_commit = vk.commitment; let prep_round = TwoAdicPcsRoundVariable { batch_commit: prep_commit, domains_points_and_opens: preprocessed_domains_points_and_opens, }; - let global_main_round = TwoAdicPcsRoundVariable { - batch_commit: global_main_commit, - domains_points_and_opens: global_trace_points_and_openings, - }; - let local_main_round = TwoAdicPcsRoundVariable { - batch_commit: local_main_commit, - domains_points_and_opens: local_trace_points_and_openings, + let main_round = TwoAdicPcsRoundVariable { + batch_commit: main_commit, + domains_points_and_opens: main_domains_points_and_opens, }; let perm_round = TwoAdicPcsRoundVariable { batch_commit: permutation_commit, @@ -470,11 +427,7 @@ where domains_points_and_opens: quotient_domains_points_and_opens, }; - let rounds = if has_global_main_commit { - vec![prep_round, global_main_round, local_main_round, perm_round, quotient_round] - } else { - vec![prep_round, local_main_round, perm_round, quotient_round] - }; + let rounds = vec![prep_round, main_round, perm_round, quotient_round]; // Verify the pcs proof builder.cycle_tracker_v2_enter("stage-d-verify-pcs".to_string()); @@ -484,11 +437,7 @@ where // Verify the constrtaint evaluations. builder.cycle_tracker_v2_enter("stage-e-verify-constraints".to_string()); - let permutation_challenges = global_permutation_challenges - .iter() - .chain(local_permutation_challenges.iter()) - .copied() - .collect::>(); + let permutation_challenges = local_permutation_challenges; for (chip, trace_domain, qc_domains, values) in izip!(chips.iter(), trace_domains, quotient_chunk_domains, opened_values.chips.iter(),) @@ -544,8 +493,7 @@ impl, SC: BabyBearFriConfigVariable> ShardProof #[allow(unused_imports)] #[cfg(any(test, feature = "export-tests"))] pub mod tests { - use std::collections::VecDeque; - use std::fmt::Debug; + use std::{collections::VecDeque, fmt::Debug}; use crate::{ challenger::{CanCopyChallenger, CanObserveVariable, DuplexChallengerVariable}, @@ -553,22 +501,24 @@ pub mod tests { BabyBearFriConfig, }; - use sp1_core_executor::{programs::tests::FIBONACCI_ELF, Program}; + use sp1_core_executor::Program; use sp1_core_machine::{ io::SP1Stdin, riscv::RiscvAir, - utils::{prove, setup_logger}, + utils::{prove_core, prove_core_stream, setup_logger}, }; use sp1_recursion_compiler::{ config::{InnerConfig, OuterConfig}, ir::{Builder, DslIr, TracedVec}, }; + use sp1_core_executor::SP1Context; use sp1_recursion_core::{air::Block, machine::RecursionAir, stark::BabyBearPoseidon2Outer}; use sp1_stark::{ baby_bear_poseidon2::BabyBearPoseidon2, CpuProver, InnerVal, MachineProver, SP1CoreOpts, ShardProof, }; + use test_artifacts::FIBONACCI_ELF; use super::*; use crate::witness::*; @@ -589,20 +539,27 @@ pub mod tests { ) -> (TracedVec>, Vec>) { setup_logger(); + let program = Program::from(elf).unwrap(); let machine = RiscvAir::::machine(SC::default()); - let (_, vk) = machine.setup(&Program::from(elf).unwrap()); - let (proof, _, _) = prove::<_, CoreP>( - Program::from(elf).unwrap(), + let prover = CoreP::new(machine); + let (pk, vk) = prover.setup(&program); + + let (proof, _, _) = prove_core::<_, CoreP>( + &prover, + &pk, + &vk, + program, &SP1Stdin::new(), - SC::default(), opts, + SP1Context::default(), None, ) .unwrap(); + + let machine = RiscvAir::::machine(SC::default()); let mut challenger = machine.config().challenger(); machine.verify(&vk, &proof, &mut challenger).unwrap(); - // Observe all the commitments. 
let mut builder = Builder::::default(); let mut witness_stream = Vec::>::new(); @@ -624,29 +581,14 @@ pub mod tests { dummy_proof.read(&mut builder) }) .collect::>(); - // Observe all the commitments, and put the proofs into the witness stream. - for proof in proofs.iter() { - let ShardCommitment { global_main_commit, .. } = proof.commitment; - challenger.observe(&mut builder, global_main_commit); - let pv_slice = &proof.public_values[..machine.num_pv_elts()]; - challenger.observe_slice(&mut builder, pv_slice.iter().cloned()); - } - - let global_permutation_challenges = - (0..2).map(|_| challenger.sample_ext(&mut builder)).collect::>(); // Verify the first proof. let num_shards = num_shards_in_batch.unwrap_or(proofs.len()); for proof in proofs.into_iter().take(num_shards) { let mut challenger = challenger.copy(&mut builder); - StarkVerifier::verify_shard( - &mut builder, - &vk, - &machine, - &mut challenger, - &proof, - &global_permutation_challenges, - ); + let pv_slice = &proof.public_values[..machine.num_pv_elts()]; + challenger.observe_slice(&mut builder, pv_slice.iter().cloned()); + StarkVerifier::verify_shard(&mut builder, &vk, &machine, &mut challenger, &proof); } (builder.into_operations(), witness_stream) } diff --git a/crates/recursion/circuit/src/types.rs b/crates/recursion/circuit/src/types.rs index 16eab3bc92..fdb9afa5be 100644 --- a/crates/recursion/circuit/src/types.rs +++ b/crates/recursion/circuit/src/types.rs @@ -2,6 +2,7 @@ use hashbrown::HashMap; use p3_commit::TwoAdicMultiplicativeCoset; use p3_field::{AbstractField, TwoAdicField}; use p3_matrix::Dimensions; +use sp1_stark::septic_digest::SepticDigest; use sp1_recursion_compiler::ir::{Builder, Ext, Felt}; @@ -17,6 +18,7 @@ use crate::{ pub struct VerifyingKeyVariable, SC: BabyBearFriConfigVariable> { pub commitment: SC::DigestVariable, pub pc_start: Felt, + pub initial_global_cumulative_sum: SepticDigest>, pub chip_information: Vec<(String, TwoAdicMultiplicativeCoset, Dimensions)>, pub chip_ordering: HashMap, } @@ -83,11 +85,12 @@ impl, SC: BabyBearFriConfigVariable> VerifyingK challenger.observe(builder, self.commitment); // Observe the pc_start. challenger.observe(builder, self.pc_start); + // Observe the initial global cumulative sum. + challenger.observe_slice(builder, self.initial_global_cumulative_sum.0.x.0); + challenger.observe_slice(builder, self.initial_global_cumulative_sum.0.y.0); // Observe the padding. let zero: Felt<_> = builder.eval(C::F::zero()); - for _ in 0..7 { - challenger.observe(builder, zero); - } + challenger.observe(builder, zero); } /// Hash the verifying key + prep domains into a single digest. diff --git a/crates/recursion/circuit/src/utils.rs b/crates/recursion/circuit/src/utils.rs index 9c7eeaf8f0..871c21ee09 100644 --- a/crates/recursion/circuit/src/utils.rs +++ b/crates/recursion/circuit/src/utils.rs @@ -1,20 +1,12 @@ -use std::mem::MaybeUninit; - use p3_baby_bear::BabyBear; use p3_bn254_fr::Bn254Fr; use p3_field::{AbstractField, PrimeField32}; use sp1_recursion_compiler::ir::{Builder, Config, Felt, Var}; -use sp1_recursion_core::{air::ChallengerPublicValues, DIGEST_SIZE}; +use sp1_recursion_core::DIGEST_SIZE; use sp1_stark::Word; -pub(crate) unsafe fn uninit_challenger_pv( - _builder: &mut Builder, -) -> ChallengerPublicValues> { - unsafe { MaybeUninit::zeroed().assume_init() } -} - /// Convert 8 BabyBear words into a Bn254Fr field element by shifting by 31 bits each time. The last /// word becomes the least significant bits. 
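// The helper this comment describes operates on circuit variables through the `Builder` and
// is not shown in this hunk. As a plain-field sketch of the same packing (illustrative name
// and free function, not the crate's actual API; assumes the `AbstractField`/`PrimeField32`
// imports above), the idea is:
#[allow(dead_code)]
fn babybears_to_bn254_sketch(words: &[BabyBear; 8]) -> Bn254Fr {
    let mut acc = Bn254Fr::zero();
    for word in words {
        // Shift the accumulator left by 31 bits, then add the next word, so the last word
        // ends up in the least significant bits.
        acc *= Bn254Fr::from_canonical_u64(1 << 31);
        acc += Bn254Fr::from_canonical_u32(word.as_canonical_u32());
    }
    acc
}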
#[allow(dead_code)] @@ -101,7 +93,10 @@ pub(crate) mod tests { use std::sync::Arc; use sp1_core_machine::utils::{run_test_machine_with_prover, setup_logger}; - use sp1_recursion_compiler::{circuit::AsmCompiler, circuit::AsmConfig, ir::DslIr}; + use sp1_recursion_compiler::{ + circuit::{AsmCompiler, AsmConfig}, + ir::DslIr, + }; use sp1_recursion_compiler::ir::TracedVec; use sp1_recursion_core::{machine::RecursionAir, Runtime}; diff --git a/crates/recursion/circuit/src/witness/mod.rs b/crates/recursion/circuit/src/witness/mod.rs index 05b23de603..cdf87e4467 100644 --- a/crates/recursion/circuit/src/witness/mod.rs +++ b/crates/recursion/circuit/src/witness/mod.rs @@ -5,6 +5,7 @@ use sp1_recursion_compiler::ir::{Builder, Ext, Felt}; pub use outer::*; use sp1_stark::{ + septic_curve::SepticCurve, septic_digest::SepticDigest, septic_extension::SepticExtension, ChipOpenedValues, Com, InnerChallenge, InnerVal, OpeningProof, ShardCommitment, ShardOpenedValues, ShardProof, }; @@ -165,30 +166,23 @@ impl> Witnessable for ShardCommitment type WitnessVariable = ShardCommitment; fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { - let global_main_commit = self.global_main_commit.read(builder); - let local_main_commit = self.local_main_commit.read(builder); + let main_commit = self.main_commit.read(builder); let permutation_commit = self.permutation_commit.read(builder); let quotient_commit = self.quotient_commit.read(builder); - Self::WitnessVariable { - global_main_commit, - local_main_commit, - permutation_commit, - quotient_commit, - } + Self::WitnessVariable { main_commit, permutation_commit, quotient_commit } } fn write(&self, witness: &mut impl WitnessWriter) { - self.global_main_commit.write(witness); - self.local_main_commit.write(witness); + self.main_commit.write(witness); self.permutation_commit.write(witness); self.quotient_commit.write(witness); } } impl> Witnessable - for ShardOpenedValues + for ShardOpenedValues { - type WitnessVariable = ShardOpenedValues>; + type WitnessVariable = ShardOpenedValues, Ext>; fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { let chips = self.chips.read(builder); @@ -201,9 +195,26 @@ impl> Witnessable } impl> Witnessable - for ChipOpenedValues + for SepticDigest +{ + type WitnessVariable = SepticDigest>; + + fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { + let x = self.0.x.0.read(builder); + let y = self.0.y.0.read(builder); + SepticDigest(SepticCurve { x: SepticExtension(x), y: SepticExtension(y) }) + } + + fn write(&self, witness: &mut impl WitnessWriter) { + self.0.x.0.write(witness); + self.0.y.0.write(witness); + } +} + +impl> Witnessable + for ChipOpenedValues { - type WitnessVariable = ChipOpenedValues>; + type WitnessVariable = ChipOpenedValues, Ext>; fn read(&self, builder: &mut Builder) -> Self::WitnessVariable { let preprocessed = self.preprocessed.read(builder); diff --git a/crates/recursion/compiler/Cargo.toml b/crates/recursion/compiler/Cargo.toml index 3cd075f91a..61ce7d9229 100644 --- a/crates/recursion/compiler/Cargo.toml +++ b/crates/recursion/compiler/Cargo.toml @@ -21,10 +21,10 @@ sp1-recursion-core = { workspace = true } sp1-recursion-derive = { workspace = true } sp1-stark = { workspace = true } -itertools = "0.13.0" -serde = { version = "1.0.204", features = ["derive"] } +itertools = { workspace = true } +serde = { workspace = true, features = ["derive"] } backtrace = "0.3.71" -tracing = "0.1.40" +tracing = { workspace = true } vec_map = "0.8.2" [dev-dependencies] @@ -33,3 +33,7 @@ p3-dft 
= { workspace = true } p3-merkle-tree = { workspace = true } rand = "0.8.5" criterion = { version = "0.5.1", features = ["html_reports"] } + +[features] +default = ["debug"] +debug = [] \ No newline at end of file diff --git a/crates/recursion/compiler/src/circuit/builder.rs b/crates/recursion/compiler/src/circuit/builder.rs index f78145ad1d..0b0938d0f0 100644 --- a/crates/recursion/compiler/src/circuit/builder.rs +++ b/crates/recursion/compiler/src/circuit/builder.rs @@ -2,12 +2,15 @@ use std::iter::repeat; +use crate::prelude::*; +use itertools::Itertools; use p3_baby_bear::BabyBear; use p3_field::{AbstractExtensionField, AbstractField}; use sp1_recursion_core::air::RecursionPublicValues; - -use crate::prelude::*; use sp1_recursion_core::{chips::poseidon2_skinny::WIDTH, D, DIGEST_SIZE, HASH_RATE}; +use sp1_stark::septic_curve::SepticCurve; +use sp1_stark::septic_digest::SepticDigest; +use sp1_stark::septic_extension::SepticExtension; pub trait CircuitV2Builder { fn bits2num_v2_f( @@ -16,7 +19,7 @@ pub trait CircuitV2Builder { ) -> Felt; fn num2bits_v2_f(&mut self, num: Felt, num_bits: usize) -> Vec>; fn exp_reverse_bits_v2(&mut self, input: Felt, power_bits: Vec>) - -> Felt; + -> Felt; fn batch_fri_v2( &mut self, alphas: Vec>, @@ -31,6 +34,19 @@ pub trait CircuitV2Builder { ) -> [Felt; DIGEST_SIZE]; fn fri_fold_v2(&mut self, input: CircuitV2FriFoldInput) -> CircuitV2FriFoldOutput; fn ext2felt_v2(&mut self, ext: Ext) -> [Felt; D]; + fn add_curve_v2( + &mut self, + point1: SepticCurve>, + point2: SepticCurve>, + ) -> SepticCurve>; + fn assert_digest_zero_v2(&mut self, is_real: Felt, digest: SepticDigest>); + fn sum_digest_v2(&mut self, digests: Vec>>) + -> SepticDigest>; + fn select_global_cumulative_sum( + &mut self, + is_first_shard: Felt, + vk_digest: SepticDigest>, + ) -> SepticDigest>; fn commit_public_values_v2(&mut self, public_values: RecursionPublicValues>); fn cycle_tracker_v2_enter(&mut self, name: String); fn cycle_tracker_v2_exit(&mut self); @@ -188,6 +204,103 @@ impl> CircuitV2Builder for Builder { felts } + /// Adds two septic elliptic curve points. + fn add_curve_v2( + &mut self, + point1: SepticCurve>, + point2: SepticCurve>, + ) -> SepticCurve> { + let point_sum_x: [Felt; 7] = core::array::from_fn(|_| self.uninit()); + let point_sum_y: [Felt; 7] = core::array::from_fn(|_| self.uninit()); + let point = + SepticCurve { x: SepticExtension(point_sum_x), y: SepticExtension(point_sum_y) }; + self.push_op(DslIr::CircuitV2HintAddCurve(Box::new((point, point1, point2)))); + + let point1_symbolic = SepticCurve::convert(point1, |x| x.into()); + let point2_symbolic = SepticCurve::convert(point2, |x| x.into()); + let point_symbolic = SepticCurve::convert(point, |x| x.into()); + + let sum_checker_x = SepticCurve::>::sum_checker_x( + point1_symbolic, + point2_symbolic, + point_symbolic, + ); + + let sum_checker_y = SepticCurve::>::sum_checker_y( + point1_symbolic, + point2_symbolic, + point_symbolic, + ); + + for limb in sum_checker_x.0 { + self.assert_felt_eq(limb, C::F::zero()); + } + + for limb in sum_checker_y.0 { + self.assert_felt_eq(limb, C::F::zero()); + } + + point + } + + /// Asserts that the SepticDigest is zero. 
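    // The pattern in `add_curve_v2` above is hint-then-constrain: the sum is produced out of
    // circuit via `DslIr::CircuitV2HintAddCurve` into uninitialized felts, and is then bound
    // in circuit by asserting that the septic `sum_checker_x` / `sum_checker_y` expressions
    // vanish limb by limb, so an incorrect hint cannot satisfy the constraints.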
+ fn assert_digest_zero_v2(&mut self, is_real: Felt, digest: SepticDigest>) { + let zero = SepticDigest::>::zero(); + for (digest_limb_x, zero_limb_x) in digest.0.x.0.into_iter().zip_eq(zero.0.x.0.into_iter()) + { + self.assert_felt_eq(is_real * digest_limb_x, is_real * zero_limb_x); + } + for (digest_limb_y, zero_limb_y) in digest.0.y.0.into_iter().zip_eq(zero.0.y.0.into_iter()) + { + self.assert_felt_eq(is_real * digest_limb_y, is_real * zero_limb_y); + } + } + + /// Returns the zero digest when `is_first_shard` is zero, and returns the `digest` when `is_first_shard` is one. + fn select_global_cumulative_sum( + &mut self, + is_first_shard: Felt, + vk_digest: SepticDigest>, + ) -> SepticDigest> { + let zero = SepticDigest::>::zero(); + let one: Felt = self.constant(C::F::one()); + let x = SepticExtension(core::array::from_fn(|i| { + self.eval(is_first_shard * vk_digest.0.x.0[i] + (one - is_first_shard) * zero.0.x.0[i]) + })); + let y = SepticExtension(core::array::from_fn(|i| { + self.eval(is_first_shard * vk_digest.0.y.0[i] + (one - is_first_shard) * zero.0.y.0[i]) + })); + SepticDigest(SepticCurve { x, y }) + } + + // Sums the digests into one. + fn sum_digest_v2( + &mut self, + digests: Vec>>, + ) -> SepticDigest> { + let mut convert_to_felt = + |point: SepticCurve| SepticCurve::convert(point, |value| self.eval(value)); + + let start = convert_to_felt(SepticDigest::starting_digest().0); + let zero_digest = convert_to_felt(SepticDigest::zero().0); + + if digests.is_empty() { + return SepticDigest(zero_digest); + } + + let neg_start = convert_to_felt(SepticDigest::starting_digest().0.neg()); + let neg_zero_digest = convert_to_felt(SepticDigest::zero().0.neg()); + + let mut ret = start; + for (i, digest) in digests.clone().into_iter().enumerate() { + ret = self.add_curve_v2(ret, digest.0); + if i != digests.len() - 1 { + ret = self.add_curve_v2(ret, neg_zero_digest) + } + } + SepticDigest(self.add_curve_v2(ret, neg_start)) + } + // Commits public values. 
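    // Reading `sum_digest_v2` above: starting from the fixed starting digest, each input
    // digest is added and the zero digest is subtracted between consecutive terms, and the
    // starting offset is removed at the end, so the result is sum(d_i) - (n - 1) * zero and
    // an empty input returns the zero digest directly. Presumably the interleaved offsets
    // keep the incomplete curve-addition formulas away from degenerate inputs.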
fn commit_public_values_v2(&mut self, public_values: RecursionPublicValues>) { self.push_op(DslIr::CircuitV2CommitPublicValues(Box::new(public_values))); diff --git a/crates/recursion/compiler/src/circuit/compiler.rs b/crates/recursion/compiler/src/circuit/compiler.rs index 14aa320c98..941331cc3b 100644 --- a/crates/recursion/compiler/src/circuit/compiler.rs +++ b/crates/recursion/compiler/src/circuit/compiler.rs @@ -1,6 +1,8 @@ use chips::poseidon2_skinny::WIDTH; use core::fmt::Debug; -use instruction::{FieldEltType, HintBitsInstr, HintExt2FeltsInstr, HintInstr, PrintInstr}; +use instruction::{ + FieldEltType, HintAddCurveInstr, HintBitsInstr, HintExt2FeltsInstr, HintInstr, PrintInstr, +}; use itertools::Itertools; use p3_field::{ AbstractExtensionField, AbstractField, Field, PrimeField, PrimeField64, TwoAdicField, @@ -10,6 +12,7 @@ use sp1_recursion_core::{ air::{Block, RecursionPublicValues, RECURSIVE_PROOF_NUM_PV_ELTS}, BaseAluInstr, BaseAluOpcode, }; +use sp1_stark::septic_curve::SepticCurve; use std::{borrow::Borrow, collections::HashMap, iter::repeat, mem::transmute}; use vec_map::VecMap; @@ -245,6 +248,7 @@ where f(self.ext_alu(DivE, out, Imm::EF(C::EF::one()), diff)); } + #[inline(always)] fn poseidon2_permute( &mut self, dst: [impl Reg; WIDTH], @@ -259,6 +263,7 @@ where })) } + #[inline(always)] fn select( &mut self, bit: impl Reg, @@ -307,6 +312,32 @@ where }) } + fn add_curve( + &mut self, + output: SepticCurve>, + input1: SepticCurve>, + input2: SepticCurve>, + ) -> Instruction { + Instruction::HintAddCurve(Box::new(HintAddCurveInstr { + output_x_addrs_mults: output + .x + .0 + .into_iter() + .map(|r| (r.write(self), C::F::zero())) + .collect(), + output_y_addrs_mults: output + .y + .0 + .into_iter() + .map(|r| (r.write(self), C::F::zero())) + .collect(), + input1_x_addrs: input1.x.0.into_iter().map(|value| value.read_ghost(self)).collect(), + input1_y_addrs: input1.y.0.into_iter().map(|value| value.read_ghost(self)).collect(), + input2_x_addrs: input2.x.0.into_iter().map(|value| value.read_ghost(self)).collect(), + input2_y_addrs: input2.y.0.into_iter().map(|value| value.read_ghost(self)).collect(), + })) + } + fn fri_fold( &mut self, CircuitV2FriFoldOutput { alpha_pow_output, ro_output }: CircuitV2FriFoldOutput, @@ -411,6 +442,7 @@ where /// /// We do not simply return a `Vec` for performance reasons --- results would be immediately fed /// to `flat_map`, so we employ fusion/deforestation to eliminate intermediate data structures. + #[inline] pub fn compile_one( &mut self, ir_instr: DslIr, @@ -507,6 +539,7 @@ where DslIr::CircuitV2CommitPublicValues(public_values) => { f(self.commit_public_values(&public_values)) } + DslIr::CircuitV2HintAddCurve(data) => f(self.add_curve(data.0, data.1, data.2)), DslIr::PrintV(dst) => f(self.print_f(dst)), DslIr::PrintF(dst) => f(self.print_f(dst)), @@ -545,6 +578,7 @@ where Ok(instr) => { span_builder.item(instr_name(&instr)); instrs.push(instr); + #[cfg(feature = "debug")] traces.push(trace.clone()); } Err(CompileOneErr::CycleTrackerEnter(name)) => { @@ -655,6 +689,17 @@ where .iter_mut() .for_each(|(addr, mult)| backfill((mult, addr))); } + Instruction::HintAddCurve(instr) => { + let HintAddCurveInstr { + output_x_addrs_mults, output_y_addrs_mults, .. + } = instr.as_mut(); + output_x_addrs_mults + .iter_mut() + .for_each(|(addr, mult)| backfill((mult, addr))); + output_y_addrs_mults + .iter_mut() + .for_each(|(addr, mult)| backfill((mult, addr))); + } // Instructions that do not write to memory. 
Instruction::Mem(MemInstr { kind: MemAccessKind::Read, .. }) | Instruction::CommitPublicValues(_) @@ -666,7 +711,7 @@ where // Initialize constants. let total_consts = self.consts.len(); let instrs_consts = - self.consts.drain().sorted_by_key(|x| x.1 .0 .0).map(|(imm, (addr, mult))| { + self.consts.drain().sorted_by_key(|x| x.1.0.0).map(|(imm, (addr, mult))| { Instruction::Mem(MemInstr { addrs: MemIo { inner: addr }, vals: MemIo { inner: imm.as_block() }, @@ -707,6 +752,7 @@ const fn instr_name(instr: &Instruction) -> &'static str { Instruction::Print(_) => "Print", Instruction::HintExt2Felts(_) => "HintExt2Felts", Instruction::Hint(_) => "Hint", + Instruction::HintAddCurve(_) => "HintAddCurve", Instruction::CommitPublicValues(_) => "CommitPublicValues", } } diff --git a/crates/recursion/compiler/src/ir/builder.rs b/crates/recursion/compiler/src/ir/builder.rs index 09ba4789bb..c11d25b142 100644 --- a/crates/recursion/compiler/src/ir/builder.rs +++ b/crates/recursion/compiler/src/ir/builder.rs @@ -2,7 +2,6 @@ use std::{cell::UnsafeCell, iter::Zip, ptr, vec::IntoIter}; use backtrace::Backtrace; use p3_field::AbstractField; -use sp1_core_machine::utils::sp1_debug_mode; use sp1_primitives::types::RecursionProgramType; use super::{ @@ -33,22 +32,27 @@ impl From> for TracedVec { } impl TracedVec { - pub const fn new() -> Self { - Self { vec: Vec::new(), traces: Vec::new() } + pub fn new() -> Self { + Self { vec: Vec::with_capacity(10_000_000), traces: Vec::new() } } + #[inline(always)] pub fn push(&mut self, value: T) { self.vec.push(value); - self.traces.push(None); + + #[cfg(feature = "debug")] + { + self.traces.push(None); + } } /// Pushes a value to the vector and records a backtrace if SP1_DEBUG is enabled pub fn trace_push(&mut self, value: T) { self.vec.push(value); - if sp1_debug_mode() { + + #[cfg(feature = "debug")] + { self.traces.push(Some(Backtrace::new_unresolved())); - } else { - self.traces.push(None); } } @@ -73,7 +77,12 @@ impl IntoIterator for TracedVec { type IntoIter = Zip, IntoIter>>; fn into_iter(self) -> Self::IntoIter { - self.vec.into_iter().zip(self.traces) + let vec_len = self.vec.len(); + let mut traces = self.traces; + if traces.len() < vec_len { + traces.extend(std::iter::repeat(None).take(vec_len - traces.len())); + } + self.vec.into_iter().zip(traces) } } @@ -162,6 +171,7 @@ impl Builder { } /// Pushes an operation to the builder. 
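    // A note on `TracedVec::into_iter` above: with backtraces now gated behind the `debug`
    // feature, `traces` can be shorter than `vec`, so the iterator pads it with `None` to
    // keep `zip` from silently truncating the operations.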
+ #[inline(always)] pub fn push_op(&mut self, op: DslIr) { self.inner.get_mut().operations.push(op); } @@ -684,8 +694,8 @@ impl<'a, C: Config> IfBuilder<'a, C> { // let lhs: Var = self.builder.eval(lhs); // IfCondition::NeI(lhs, rhs) // } - // (SymbolicVar::Val(lhs, _), SymbolicVar::Val(rhs, _), true) => IfCondition::Eq(lhs, rhs), - // (SymbolicVar::Val(lhs, _), SymbolicVar::Val(rhs, _), false) => { + // (SymbolicVar::Val(lhs, _), SymbolicVar::Val(rhs, _), true) => IfCondition::Eq(lhs, + // rhs), (SymbolicVar::Val(lhs, _), SymbolicVar::Val(rhs, _), false) => { // IfCondition::Ne(lhs, rhs) // } // (SymbolicVar::Val(lhs, _), rhs, true) => { diff --git a/crates/recursion/compiler/src/ir/instructions.rs b/crates/recursion/compiler/src/ir/instructions.rs index 04ddb26713..06f811ccf4 100644 --- a/crates/recursion/compiler/src/ir/instructions.rs +++ b/crates/recursion/compiler/src/ir/instructions.rs @@ -1,4 +1,7 @@ +#![deny(clippy::large_enum_variant)] + use sp1_recursion_core::air::RecursionPublicValues; +use sp1_stark::septic_curve::SepticCurve; use super::{ Array, CircuitV2FriFoldInput, CircuitV2FriFoldOutput, Config, Ext, Felt, FriFoldInput, @@ -120,7 +123,8 @@ pub enum DslIr { /// Inverts an extension field element (ext = 1 / ext). InvE(Ext, Ext), - /// Selects order of felts based on a bit (should_swap, first result, second result, first input, second input) + /// Selects order of felts based on a bit (should_swap, first result, second result, first + /// input, second input) Select(Felt, Felt, Felt, Felt, Felt), // Control flow. @@ -274,6 +278,11 @@ pub enum DslIr { /// Should only be used when target is a gnark circuit. CircuitCommitCommittedValuesDigest(Var), + /// Adds two elliptic curve points. (sum, point_1, point_2). + CircuitV2HintAddCurve( + Box<(SepticCurve>, SepticCurve>, SepticCurve>)>, + ), + // FRI specific instructions. /// Executes a FRI fold operation. 1st field is the size of the fri fold input array. 2nd /// field is the fri fold input array. See [`FriFoldInput`] for more details. @@ -283,7 +292,8 @@ pub enum DslIr { /// more details. CircuitV2FriFold(Box<(CircuitV2FriFoldOutput, CircuitV2FriFoldInput)>), // FRI specific instructions. - /// Executes a Batch FRI loop. Input is the power of alphas, evaluations at z, and evaluations at x. + /// Executes a Batch FRI loop. Input is the power of alphas, evaluations at z, and evaluations + /// at x. 
CircuitV2BatchFRI( Box<(Ext, Vec>, Vec>, Vec>)>, ), diff --git a/crates/recursion/compiler/src/ir/symbolic.rs b/crates/recursion/compiler/src/ir/symbolic.rs index 40ed4c4bf6..5c114923f2 100644 --- a/crates/recursion/compiler/src/ir/symbolic.rs +++ b/crates/recursion/compiler/src/ir/symbolic.rs @@ -1262,7 +1262,7 @@ impl, E: Any> ExtensionOperand for E { let value_ref = unsafe { mem::transmute::<&E, &ExtOperand>(&self) }; value_ref.clone() } - _ => unimplemented!("unsupported type"), + _ => unimplemented!("unsupported type {:?}", self.type_id()), } } } diff --git a/crates/recursion/core/Cargo.toml b/crates/recursion/core/Cargo.toml index 1a93e7b098..7d3c6682e0 100644 --- a/crates/recursion/core/Cargo.toml +++ b/crates/recursion/core/Cargo.toml @@ -8,6 +8,7 @@ license = { workspace = true } repository = { workspace = true } keywords = { workspace = true } categories = { workspace = true } +links = "sp1-recursion-core-sys" [dependencies] p3-field = { workspace = true } @@ -20,11 +21,11 @@ p3-poseidon2 = { workspace = true } p3-symmetric = { workspace = true } sp1-derive = { workspace = true } sp1-primitives = { workspace = true } -tracing = "0.1.40" +tracing = { workspace = true } sp1-core-machine = { workspace = true } sp1-stark = { workspace = true } -hashbrown = { version = "0.14.5", features = ["serde"] } -itertools = "0.13.0" +hashbrown = { workspace = true, features = ["serde"] } +itertools = { workspace = true } p3-bn254-fr = { workspace = true } p3-merkle-tree = { workspace = true } p3-commit = { workspace = true } @@ -33,11 +34,24 @@ p3-challenger = { workspace = true } p3-fri = { workspace = true } zkhash = "0.2.0" ff = { version = "0.13", features = ["derive", "derive_bits"] } -serde = { version = "1.0", features = ["derive", "rc"] } +serde = { workspace = true, features = ["derive", "rc"] } backtrace = { version = "0.3.71", features = ["serde"] } static_assertions = "1.1.0" thiserror = "1.0.60" vec_map = "0.8.2" +num_cpus = "1.16.0" [dev-dependencies] rand = "0.8.5" + +[build-dependencies] +sp1-stark = { workspace = true } +sp1-primitives = { workspace = true } +p3-baby-bear = { workspace = true } +cbindgen = "0.27.0" +cc = "1.1" +pathdiff = "0.2.1" +glob = "0.3.1" + +[features] +sys = ["sp1-core-machine/sys"] \ No newline at end of file diff --git a/crates/recursion/core/build.rs b/crates/recursion/core/build.rs new file mode 100644 index 0000000000..7a819dd546 --- /dev/null +++ b/crates/recursion/core/build.rs @@ -0,0 +1,199 @@ +fn main() { + #[cfg(feature = "sys")] + sys::build_ffi(); +} + +#[cfg(feature = "sys")] +mod sys { + use std::{ + env, fs, os, + path::{Path, PathBuf}, + }; + + use pathdiff::diff_paths; + + /// The library name, used for the static library archive and the headers. + /// Should be chosen as to not conflict with other library/header names. + const LIB_NAME: &str = "sp1-recursion-core-sys"; + + /// The name of all include directories involved, used to find and output header files. + const INCLUDE_DIRNAME: &str = "include"; + + /// The name of the directory to recursively search for source files in. + const SOURCE_DIRNAME: &str = "cpp"; + + /// The warning placed in the cbindgen header. + const AUTOGEN_WARNING: &str = + "/* Automatically generated by `cbindgen`. Not intended for manual editing. */"; + + pub fn build_ffi() { + // The name of the header generated by `cbindgen`. + let cbindgen_hpp = &format!("{LIB_NAME}-cbindgen.hpp"); + + // The crate directory. 
+ let crate_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap()); + + // The output directory, where built artifacts should be placed. + let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); + + // The target directory that the cargo invocation is using. + // Headers are symlinked into `target/include` purely for IDE purposes. + let target_dir = { + let mut dir = out_dir.clone(); + loop { + if dir.ends_with("target") { + break dir; + } + if !dir.pop() { + panic!("OUT_DIR does not have parent called \"target\": {:?}", out_dir); + } + } + }; + + // The directory to read headers from. + let source_include_dir = crate_dir.join(INCLUDE_DIRNAME); + + // The directory to place headers into. + let target_include_dir = out_dir.join(INCLUDE_DIRNAME); + + // The directory to place symlinks to headers into. Has the fixed path "target/include". + let target_include_dir_fixed = target_dir.join(INCLUDE_DIRNAME); + + // The directory to read source files from. + let source_dir = crate_dir.join(SOURCE_DIRNAME); + + let headers = glob::glob(source_include_dir.join("**/*.hpp").to_str().unwrap()) + .unwrap() + .collect::, _>>() + .unwrap(); + + let compilation_units = glob::glob(source_dir.join("**/*.cpp").to_str().unwrap()) + .unwrap() + .collect::, _>>() + .unwrap(); + + // Tell Cargo that if the given file changes, to rerun this build script. + println!("cargo::rerun-if-changed={INCLUDE_DIRNAME}"); + println!("cargo::rerun-if-changed={SOURCE_DIRNAME}"); + println!("cargo::rerun-if-changed=src"); + println!("cargo::rerun-if-changed=Cargo.toml"); + + // Cargo build script metadata, used by dependents' build scripts. + // The root directory containing the library archive. + println!("cargo::metadata=root={}", out_dir.to_str().unwrap()); + + // The include path defining the library's API. + println!("cargo::metadata=include={}", target_include_dir.to_str().unwrap()); + + // Generate a header containing bindings to the crate. 
+ match cbindgen::Builder::new() + .with_pragma_once(true) + .with_autogen_warning(AUTOGEN_WARNING) + .with_no_includes() + .with_sys_include("cstdint") + .with_sys_include("cstddef") + .with_parse_deps(true) + .with_parse_include(&[ + "sp1-stark", + "sp1-primitives", + "sp1-core-machine", + "p3-baby-bear", + "sp1-core-executor", + ]) + .with_parse_extra_bindings(&["sp1-stark", "sp1-primitives", "p3-baby-bear"]) + .rename_item("BabyBear", "BabyBearP3") + .include_item("BaseAluEvent") + .include_item("BaseAluValueCols") + .include_item("BaseAluAccessCols") + .include_item("BaseAluInstr") + .include_item("ExtAluEvent") + .include_item("ExtAluValueCols") + .include_item("ExtAluInstr") + .include_item("ExtAluAccessCols") + .include_item("BatchFRIEvent") + .include_item("BatchFRICols") + .include_item("BatchFRIInstrFFI") + .include_item("BatchFRIPreprocessedCols") + .include_item("ExpReverseBitsEventFFI") + .include_item("ExpReverseBitsLenCols") + .include_item("ExpReverseBitsInstrFFI") + .include_item("ExpReverseBitsLenPreprocessedCols") + .include_item("FriFoldEvent") + .include_item("FriFoldCols") + .include_item("FriFoldInstrFFI") + .include_item("FriFoldPreprocessedCols") + .include_item("SelectEvent") + .include_item("SelectCols") + .include_item("CommitPublicValuesEvent") + .include_item("PublicValuesCols") + .include_item("CommitPublicValuesInstr") + .include_item("PublicValuesPreprocessedCols") + .include_item("SelectEvent") + .include_item("SelectCols") + .include_item("SelectInstr") + .include_item("SelectPreprocessedCols") + .include_item("Poseidon2Event") + .include_item("Poseidon2") + .include_item("Poseidon2Instr") + .include_item("Poseidon2PreprocessedColsSkinny") + .include_item("Poseidon2PreprocessedColsWide") + .with_namespace("sp1_recursion_core_sys") + .with_crate(crate_dir) + .generate() + { + Ok(bindings) => { + // Write the bindings to the target include directory. + let header_path = target_include_dir.join(cbindgen_hpp); + if bindings.write_to_file(&header_path) { + // Symlink the header to the fixed include directory. + rel_symlink_file(header_path, target_include_dir_fixed.join(cbindgen_hpp)); + } + } + Err(cbindgen::Error::ParseSyntaxError { .. }) => {} // Ignore parse errors so rust-analyzer can run. + Err(e) => panic!("{:?}", e), + } + + // Copy the headers to the include directory and symlink them to the fixed include directory. + for header in &headers { + // Get the path of the header relative to the source include directory. + let relpath = diff_paths(header, &source_include_dir).unwrap(); + + // Let the destination path be the same place relative to the target include directory. + let dst = target_include_dir.join(&relpath); + + // Create the parent directory if it does not exist. + if let Some(parent) = dst.parent() { + fs::create_dir_all(parent).unwrap(); + } + fs::copy(header, &dst).unwrap(); + rel_symlink_file(dst, target_include_dir_fixed.join(relpath)); + } + + println!("cargo::rustc-link-lib=static=sp1-core-machine-sys"); + let include_dir = env::var("DEP_SP1_CORE_MACHINE_SYS_INCLUDE").unwrap(); + + // Use the `cc` crate to build the library and statically link it to the crate. + let mut cc_builder = cc::Build::new(); + cc_builder.files(&compilation_units).include(target_include_dir).include(include_dir); + cc_builder.cpp(true).std("c++20"); + cc_builder.compile(LIB_NAME) + } + + /// Place a relative symlink pointing to `original` at `link`. 
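    // Note on the metadata plumbing: `DEP_SP1_CORE_MACHINE_SYS_INCLUDE` above is the
    // `cargo::metadata=include=...` value exported by sp1-core-machine's build script and
    // surfaced through its `links` key. This crate exports its own include path the same
    // way, so dependents should see it as `DEP_SP1_RECURSION_CORE_SYS_INCLUDE` (assuming
    // Cargo's usual `DEP_<links>_<key>` naming with dashes mapped to underscores).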
+ fn rel_symlink_file(original: P, link: Q) + where + P: AsRef, + Q: AsRef, + { + #[cfg(unix)] + use os::unix::fs::symlink; + #[cfg(windows)] + use os::windows::fs::symlink_file as symlink; + + let target_dir = link.as_ref().parent().unwrap(); + fs::create_dir_all(target_dir).unwrap(); + let _ = fs::remove_file(&link); + let relpath = diff_paths(original, target_dir).unwrap(); + symlink(relpath, link).unwrap(); + } +} diff --git a/crates/recursion/core/cpp/extern.cpp b/crates/recursion/core/cpp/extern.cpp new file mode 100644 index 0000000000..c1408944e4 --- /dev/null +++ b/crates/recursion/core/cpp/extern.cpp @@ -0,0 +1,141 @@ +#include "bb31_t.hpp" +#include "sys.hpp" + +namespace sp1_recursion_core_sys { +extern void alu_base_event_to_row_babybear(const BaseAluIo* io, + BaseAluValueCols* cols) { + alu_base::event_to_row( + *reinterpret_cast*>(io), + *reinterpret_cast*>(cols)); +} +extern void alu_base_instr_to_row_babybear( + const BaseAluInstr* instr, + BaseAluAccessCols* access) { + alu_base::instr_to_row( + *reinterpret_cast*>(instr), + *reinterpret_cast*>(access)); +} + +extern void alu_ext_event_to_row_babybear(const ExtAluIo>* io, + ExtAluValueCols* cols) { + alu_ext::event_to_row( + *reinterpret_cast>*>(io), + *reinterpret_cast*>(cols)); +} +extern void alu_ext_instr_to_row_babybear( + const ExtAluInstr* instr, + ExtAluAccessCols* access) { + alu_ext::instr_to_row( + *reinterpret_cast*>(instr), + *reinterpret_cast*>(access)); +} + +extern void batch_fri_event_to_row_babybear(const BatchFRIEvent* io, + BatchFRICols* cols) { + batch_fri::event_to_row( + *reinterpret_cast*>(io), + *reinterpret_cast*>(cols)); +} +extern void batch_fri_instr_to_row_babybear( + const BatchFRIInstrFFI* instr, + BatchFRIPreprocessedCols* cols) { + batch_fri::instr_to_row( + *reinterpret_cast*>(instr), + *reinterpret_cast*>(cols)); +} + +extern void exp_reverse_bits_event_to_row_babybear( + const ExpReverseBitsEventFFI* io, size_t i, + ExpReverseBitsLenCols* cols) { + exp_reverse_bits::event_to_row( + *reinterpret_cast*>(io), i, + *reinterpret_cast*>(cols)); +} +extern void exp_reverse_bits_instr_to_row_babybear( + const ExpReverseBitsInstrFFI* instr, size_t i, size_t len, + ExpReverseBitsLenPreprocessedCols* cols) { + exp_reverse_bits::instr_to_row( + *reinterpret_cast*>(instr), i, len, + *reinterpret_cast*>(cols)); +} + +extern void fri_fold_event_to_row_babybear(const FriFoldEvent* io, + FriFoldCols* cols) { + fri_fold::event_to_row( + *reinterpret_cast*>(io), + *reinterpret_cast*>(cols)); +} +extern void fri_fold_instr_to_row_babybear( + const FriFoldInstrFFI* instr, size_t i, + FriFoldPreprocessedCols* cols) { + fri_fold::instr_to_row( + *reinterpret_cast*>(instr), i, + *reinterpret_cast*>(cols)); +} + +extern void public_values_event_to_row_babybear( + const CommitPublicValuesEvent* io, size_t digest_idx, + PublicValuesCols* cols) { + public_values::event_to_row( + *reinterpret_cast*>(io), digest_idx, + *reinterpret_cast*>(cols)); +} +extern void public_values_instr_to_row_babybear( + const CommitPublicValuesInstr* instr, size_t digest_idx, + PublicValuesPreprocessedCols* cols) { + public_values::instr_to_row( + *reinterpret_cast*>(instr), + digest_idx, + *reinterpret_cast*>(cols)); +} + +extern void select_event_to_row_babybear(const SelectEvent* io, + SelectCols* cols) { + select::event_to_row( + *reinterpret_cast*>(io), + *reinterpret_cast*>(cols)); +} +extern void select_instr_to_row_babybear( + const SelectInstr* instr, + SelectPreprocessedCols* cols) { + select::instr_to_row( + 
*reinterpret_cast*>(instr), + *reinterpret_cast*>(cols)); +} + +extern void poseidon2_skinny_event_to_row_babybear( + const Poseidon2Event* event, Poseidon2* cols) { + poseidon2_skinny::event_to_row( + *reinterpret_cast*>(event), + reinterpret_cast*>(cols)); +} +extern void poseidon2_skinny_instr_to_row_babybear( + const Poseidon2Instr* instr, size_t i, + Poseidon2PreprocessedColsSkinny* cols) { + poseidon2_skinny::instr_to_row( + *reinterpret_cast*>(instr), i, + *reinterpret_cast*>(cols)); +} + +extern "C" void poseidon2_wide_event_to_row_babybear( + const BabyBearP3* input, BabyBearP3* external_rounds_state, + BabyBearP3* internal_rounds_state, BabyBearP3* internal_rounds_s0, + BabyBearP3* external_sbox, BabyBearP3* internal_sbox, + BabyBearP3* output_state) { + poseidon2_wide::event_to_row( + reinterpret_cast(input), + reinterpret_cast(external_rounds_state), + reinterpret_cast(internal_rounds_state), + reinterpret_cast(internal_rounds_s0), + reinterpret_cast(external_sbox), + reinterpret_cast(internal_sbox), + reinterpret_cast(output_state)); +} +extern void poseidon2_wide_instr_to_row_babybear( + const Poseidon2SkinnyInstr* instr, + Poseidon2PreprocessedColsWide* cols) { + poseidon2_wide::instr_to_row( + *reinterpret_cast*>(instr), + *reinterpret_cast*>(cols)); +} +} // namespace sp1_recursion_core_sys diff --git a/crates/recursion/core/include/alu_base.hpp b/crates/recursion/core/include/alu_base.hpp new file mode 100644 index 0000000000..4a85532e00 --- /dev/null +++ b/crates/recursion/core/include/alu_base.hpp @@ -0,0 +1,37 @@ +#pragma once + +#include "prelude.hpp" + +namespace sp1_recursion_core_sys::alu_base { +template +__SP1_HOSTDEV__ void event_to_row(const BaseAluEvent& event, + BaseAluValueCols& cols) { + cols.vals = event; +} + +template +__SP1_HOSTDEV__ void instr_to_row(const BaseAluInstr& instr, + BaseAluAccessCols& access) { + access.addrs = instr.addrs; + access.is_add = F(0); + access.is_sub = F(0); + access.is_mul = F(0); + access.is_div = F(0); + access.mult = instr.mult; + + switch (instr.opcode) { + case BaseAluOpcode::AddF: + access.is_add = F(1); + break; + case BaseAluOpcode::SubF: + access.is_sub = F(1); + break; + case BaseAluOpcode::MulF: + access.is_mul = F(1); + break; + case BaseAluOpcode::DivF: + access.is_div = F(1); + break; + } +} +} // namespace sp1_recursion_core_sys::alu_base diff --git a/crates/recursion/core/include/alu_ext.hpp b/crates/recursion/core/include/alu_ext.hpp new file mode 100644 index 0000000000..0448e6830c --- /dev/null +++ b/crates/recursion/core/include/alu_ext.hpp @@ -0,0 +1,37 @@ +#pragma once + +#include "prelude.hpp" + +namespace sp1_recursion_core_sys::alu_ext { +template +__SP1_HOSTDEV__ void event_to_row(const ExtAluEvent& event, + ExtAluValueCols& cols) { + cols.vals = event; +} + +template +__SP1_HOSTDEV__ void instr_to_row(const ExtAluInstr& instr, + ExtAluAccessCols& access) { + access.addrs = instr.addrs; + access.is_add = F(0); + access.is_sub = F(0); + access.is_mul = F(0); + access.is_div = F(0); + access.mult = instr.mult; + + switch (instr.opcode) { + case ExtAluOpcode::AddE: + access.is_add = F(1); + break; + case ExtAluOpcode::SubE: + access.is_sub = F(1); + break; + case ExtAluOpcode::MulE: + access.is_mul = F(1); + break; + case ExtAluOpcode::DivE: + access.is_div = F(1); + break; + } +} +} // namespace sp1_recursion_core_sys::alu_ext diff --git a/crates/recursion/core/include/batch_fri.hpp b/crates/recursion/core/include/batch_fri.hpp new file mode 100644 index 0000000000..b9ab20ea23 --- /dev/null +++ 
b/crates/recursion/core/include/batch_fri.hpp @@ -0,0 +1,27 @@ +#pragma once + +#include "prelude.hpp" + +namespace sp1_recursion_core_sys::batch_fri { +template +__SP1_HOSTDEV__ void event_to_row(const BatchFRIEvent& event, + BatchFRICols& cols) { + cols.acc = event.ext_single.acc; + cols.alpha_pow = event.ext_vec.alpha_pow; + cols.p_at_z = event.ext_vec.p_at_z; + cols.p_at_x = event.base_vec.p_at_x; +} + +template +__SP1_HOSTDEV__ void instr_to_row(const BatchFRIInstrFFI& instr, + BatchFRIPreprocessedCols& cols) { + cols.is_real = F(1); + cols.is_end = + F(instr.ext_vec_addrs_p_at_z_ptr == + instr.ext_vec_addrs_p_at_z_ptr + instr.ext_vec_addrs_p_at_z_len - 1); + cols.acc_addr = instr.ext_single_addrs->acc; + cols.alpha_pow_addr = instr.ext_vec_addrs_alpha_pow_ptr[0]; + cols.p_at_z_addr = instr.ext_vec_addrs_p_at_z_ptr[0]; + cols.p_at_x_addr = instr.base_vec_addrs_p_at_x_ptr[0]; +} +} // namespace sp1_recursion_core_sys::batch_fri diff --git a/crates/recursion/core/include/exp_reverse_bits.hpp b/crates/recursion/core/include/exp_reverse_bits.hpp new file mode 100644 index 0000000000..840258315f --- /dev/null +++ b/crates/recursion/core/include/exp_reverse_bits.hpp @@ -0,0 +1,32 @@ +#pragma once + +#include "prelude.hpp" + +namespace sp1_recursion_core_sys::exp_reverse_bits { +template +__SP1_HOSTDEV__ void event_to_row(const ExpReverseBitsEventFFI& event, + size_t i, ExpReverseBitsLenCols& cols) { + cols.x = *event.base; + cols.current_bit = event.exp_ptr[i]; + cols.multiplier = (event.exp_ptr[i] == F::one()) ? *event.base : F::one(); +} + +template +__SP1_HOSTDEV__ void instr_to_row(const ExpReverseBitsInstrFFI& instr, + size_t i, size_t len, + ExpReverseBitsLenPreprocessedCols& cols) { + cols.is_real = F::one(); + cols.iteration_num = F::from_canonical_u32(i); + cols.is_first = F::from_bool(i == 0); + cols.is_last = F::from_bool(i == len - 1); + + cols.x_mem.addr = *instr.base; + cols.x_mem.mult = F::zero() - F::from_bool(i == 0); + + cols.exponent_mem.addr = instr.exp_ptr[i]; + cols.exponent_mem.mult = F::zero() - F::one(); + + cols.result_mem.addr = *instr.result; + cols.result_mem.mult = *instr.mult * F::from_bool(i == len - 1); +} +} // namespace sp1_recursion_core_sys::exp_reverse_bits diff --git a/crates/recursion/core/include/fri_fold.hpp b/crates/recursion/core/include/fri_fold.hpp new file mode 100644 index 0000000000..3ea30aa351 --- /dev/null +++ b/crates/recursion/core/include/fri_fold.hpp @@ -0,0 +1,56 @@ +#pragma once + +#include "prelude.hpp" + +namespace sp1_recursion_core_sys::fri_fold { +template +__SP1_HOSTDEV__ void event_to_row(const FriFoldEvent& event, + FriFoldCols& cols) { + cols.x = event.base_single.x; + cols.z = event.ext_single.z; + cols.alpha = event.ext_single.alpha; + + cols.p_at_z = event.ext_vec.ps_at_z; + cols.p_at_x = event.ext_vec.mat_opening; + cols.alpha_pow_input = event.ext_vec.alpha_pow_input; + cols.ro_input = event.ext_vec.ro_input; + + cols.alpha_pow_output = event.ext_vec.alpha_pow_output; + cols.ro_output = event.ext_vec.ro_output; +} + +template +__SP1_HOSTDEV__ void instr_to_row(const FriFoldInstrFFI& instr, size_t i, + FriFoldPreprocessedCols& cols) { + + cols.is_real = F::one(); + cols.is_first = F::from_bool(i == 0); + + cols.z_mem.addr = instr.ext_single_addrs->z; + cols.z_mem.mult = F::zero() - F::from_bool(i == 0); + + cols.x_mem.addr = instr.base_single_addrs->x; + cols.x_mem.mult = F::zero() - F::from_bool(i == 0); + + cols.alpha_mem.addr = instr.ext_single_addrs->alpha; + cols.alpha_mem.mult = F::zero() - F::from_bool(i == 0); + + 
cols.alpha_pow_input_mem.addr = instr.ext_vec_addrs_alpha_pow_input_ptr[i]; + cols.alpha_pow_input_mem.mult = F::zero() - F::one(); + + cols.ro_input_mem.addr = instr.ext_vec_addrs_ro_input_ptr[i]; + cols.ro_input_mem.mult = F::zero() - F::one(); + + cols.p_at_z_mem.addr = instr.ext_vec_addrs_ps_at_z_ptr[i]; + cols.p_at_z_mem.mult = F::zero() - F::one(); + + cols.p_at_x_mem.addr = instr.ext_vec_addrs_mat_opening_ptr[i]; + cols.p_at_x_mem.mult = F::zero() - F::one(); + + cols.alpha_pow_output_mem.addr = instr.ext_vec_addrs_alpha_pow_output_ptr[i]; + cols.alpha_pow_output_mem.mult = instr.alpha_pow_mults_ptr[i]; + + cols.ro_output_mem.addr = instr.ext_vec_addrs_ro_output_ptr[i]; + cols.ro_output_mem.mult = instr.ro_mults_ptr[i]; +} +} // namespace sp1_recursion_core_sys::fri_fold diff --git a/crates/recursion/core/include/poseidon2.hpp b/crates/recursion/core/include/poseidon2.hpp new file mode 100644 index 0000000000..7bbb6e8088 --- /dev/null +++ b/crates/recursion/core/include/poseidon2.hpp @@ -0,0 +1,611 @@ +#pragma once + +#include "prelude.hpp" + +namespace sp1_recursion_core_sys::poseidon2 { +constexpr size_t OUTPUT_ROUND_IDX = NUM_EXTERNAL_ROUNDS + 2; +constexpr size_t INPUT_ROUND_IDX = 0; +constexpr size_t INTERNAL_ROUND_IDX = NUM_EXTERNAL_ROUNDS / 2 + 1; + +constexpr uint32_t RC_16_30_U32[30][16] = { + { + 2110014213U, + 3964964605U, + 2190662774U, + 2732996483U, + 640767983U, + 3403899136U, + 1716033721U, + 1606702601U, + 3759873288U, + 1466015491U, + 1498308946U, + 2844375094U, + 3042463841U, + 1969905919U, + 4109944726U, + 3925048366U, + }, + { + 3706859504U, + 759122502U, + 3167665446U, + 1131812921U, + 1080754908U, + 4080114493U, + 893583089U, + 2019677373U, + 3128604556U, + 580640471U, + 3277620260U, + 842931656U, + 548879852U, + 3608554714U, + 3575647916U, + 81826002U, + }, + { + 4289086263U, + 1563933798U, + 1440025885U, + 184445025U, + 2598651360U, + 1396647410U, + 1575877922U, + 3303853401U, + 137125468U, + 765010148U, + 633675867U, + 2037803363U, + 2573389828U, + 1895729703U, + 541515871U, + 1783382863U, + }, + { + 2641856484U, + 3035743342U, + 3672796326U, + 245668751U, + 2025460432U, + 201609705U, + 286217151U, + 4093475563U, + 2519572182U, + 3080699870U, + 2762001832U, + 1244250808U, + 606038199U, + 3182740831U, + 73007766U, + 2572204153U, + }, + { + 1196780786U, + 3447394443U, + 747167305U, + 2968073607U, + 1053214930U, + 1074411832U, + 4016794508U, + 1570312929U, + 113576933U, + 4042581186U, + 3634515733U, + 1032701597U, + 2364839308U, + 3840286918U, + 888378655U, + 2520191583U, + }, + { + 36046858U, + 2927525953U, + 3912129105U, + 4004832531U, + 193772436U, + 1590247392U, + 4125818172U, + 2516251696U, + 4050945750U, + 269498914U, + 1973292656U, + 891403491U, + 1845429189U, + 2611996363U, + 2310542653U, + 4071195740U, + }, + { + 3505307391U, + 786445290U, + 3815313971U, + 1111591756U, + 4233279834U, + 2775453034U, + 1991257625U, + 2940505809U, + 2751316206U, + 1028870679U, + 1282466273U, + 1059053371U, + 834521354U, + 138721483U, + 3100410803U, + 3843128331U, + }, + { + 3878220780U, + 4058162439U, + 1478942487U, + 799012923U, + 496734827U, + 3521261236U, + 755421082U, + 1361409515U, + 392099473U, + 3178453393U, + 4068463721U, + 7935614U, + 4140885645U, + 2150748066U, + 1685210312U, + 3852983224U, + }, + { + 2896943075U, + 3087590927U, + 992175959U, + 970216228U, + 3473630090U, + 3899670400U, + 3603388822U, + 2633488197U, + 2479406964U, + 2420952999U, + 1852516800U, + 4253075697U, + 979699862U, + 1163403191U, + 1608599874U, + 3056104448U, + }, + { + 3779109343U, + 
536205958U, + 4183458361U, + 1649720295U, + 1444912244U, + 3122230878U, + 384301396U, + 4228198516U, + 1662916865U, + 4082161114U, + 2121897314U, + 1706239958U, + 4166959388U, + 1626054781U, + 3005858978U, + 1431907253U, + }, + { + 1418914503U, + 1365856753U, + 3942715745U, + 1429155552U, + 3545642795U, + 3772474257U, + 1621094396U, + 2154399145U, + 826697382U, + 1700781391U, + 3539164324U, + 652815039U, + 442484755U, + 2055299391U, + 1064289978U, + 1152335780U, + }, + { + 3417648695U, + 186040114U, + 3475580573U, + 2113941250U, + 1779573826U, + 1573808590U, + 3235694804U, + 2922195281U, + 1119462702U, + 3688305521U, + 1849567013U, + 667446787U, + 753897224U, + 1896396780U, + 3143026334U, + 3829603876U, + }, + { + 859661334U, + 3898844357U, + 180258337U, + 2321867017U, + 3599002504U, + 2886782421U, + 3038299378U, + 1035366250U, + 2038912197U, + 2920174523U, + 1277696101U, + 2785700290U, + 3806504335U, + 3518858933U, + 654843672U, + 2127120275U, + }, + { + 1548195514U, + 2378056027U, + 390914568U, + 1472049779U, + 1552596765U, + 1905886441U, + 1611959354U, + 3653263304U, + 3423946386U, + 340857935U, + 2208879480U, + 139364268U, + 3447281773U, + 3777813707U, + 55640413U, + 4101901741U, + }, + {104929687U, 1459980974U, 1831234737U, 457139004U, 2581487628U, 2112044563U, + 3567013861U, 2792004347U, 576325418U, 41126132U, 2713562324U, 151213722U, + 2891185935U, 546846420U, 2939794919U, 2543469905U}, + { + 2191909784U, + 3315138460U, + 530414574U, + 1242280418U, + 1211740715U, + 3993672165U, + 2505083323U, + 3845798801U, + 538768466U, + 2063567560U, + 3366148274U, + 1449831887U, + 2408012466U, + 294726285U, + 3943435493U, + 924016661U, + }, + { + 3633138367U, + 3222789372U, + 809116305U, + 30100013U, + 2655172876U, + 2564247117U, + 2478649732U, + 4113689151U, + 4120146082U, + 2512308515U, + 650406041U, + 4240012393U, + 2683508708U, + 951073977U, + 3460081988U, + 339124269U, + }, + { + 130182653U, + 2755946749U, + 542600513U, + 2816103022U, + 1931786340U, + 2044470840U, + 1709908013U, + 2938369043U, + 3640399693U, + 1374470239U, + 2191149676U, + 2637495682U, + 4236394040U, + 2289358846U, + 3833368530U, + 974546524U, + }, + { + 3306659113U, + 2234814261U, + 1188782305U, + 223782844U, + 2248980567U, + 2309786141U, + 2023401627U, + 3278877413U, + 2022138149U, + 575851471U, + 1612560780U, + 3926656936U, + 3318548977U, + 2591863678U, + 188109355U, + 4217723909U, + }, + { + 1564209905U, + 2154197895U, + 2459687029U, + 2870634489U, + 1375012945U, + 1529454825U, + 306140690U, + 2855578299U, + 1246997295U, + 3024298763U, + 1915270363U, + 1218245412U, + 2479314020U, + 2989827755U, + 814378556U, + 4039775921U, + }, + { + 1165280628U, + 1203983801U, + 3814740033U, + 1919627044U, + 600240215U, + 773269071U, + 486685186U, + 4254048810U, + 1415023565U, + 502840102U, + 4225648358U, + 510217063U, + 166444818U, + 1430745893U, + 1376516190U, + 1775891321U, + }, + { + 1170945922U, + 1105391877U, + 261536467U, + 1401687994U, + 1022529847U, + 2476446456U, + 2603844878U, + 3706336043U, + 3463053714U, + 1509644517U, + 588552318U, + 65252581U, + 3696502656U, + 2183330763U, + 3664021233U, + 1643809916U, + }, + { + 2922875898U, + 3740690643U, + 3932461140U, + 161156271U, + 2619943483U, + 4077039509U, + 2921201703U, + 2085619718U, + 2065264646U, + 2615693812U, + 3116555433U, + 246100007U, + 4281387154U, + 4046141001U, + 4027749321U, + 111611860U, + }, + { + 2066954820U, + 2502099969U, + 2915053115U, + 2362518586U, + 366091708U, + 2083204932U, + 4138385632U, + 3195157567U, + 1318086382U, + 521723799U, + 702443405U, + 
2507670985U, + 1760347557U, + 2631999893U, + 1672737554U, + 1060867760U, + }, + { + 2359801781U, + 2800231467U, + 3010357035U, + 1035997899U, + 1210110952U, + 1018506770U, + 2799468177U, + 1479380761U, + 1536021911U, + 358993854U, + 579904113U, + 3432144800U, + 3625515809U, + 199241497U, + 4058304109U, + 2590164234U, + }, + { + 1688530738U, + 1580733335U, + 2443981517U, + 2206270565U, + 2780074229U, + 2628739677U, + 2940123659U, + 4145206827U, + 3572278009U, + 2779607509U, + 1098718697U, + 1424913749U, + 2224415875U, + 1108922178U, + 3646272562U, + 3935186184U, + }, + { + 820046587U, + 1393386250U, + 2665818575U, + 2231782019U, + 672377010U, + 1920315467U, + 1913164407U, + 2029526876U, + 2629271820U, + 384320012U, + 4112320585U, + 3131824773U, + 2347818197U, + 2220997386U, + 1772368609U, + 2579960095U, + }, + { + 3544930873U, + 225847443U, + 3070082278U, + 95643305U, + 3438572042U, + 3312856509U, + 615850007U, + 1863868773U, + 803582265U, + 3461976859U, + 2903025799U, + 1482092434U, + 3902972499U, + 3872341868U, + 1530411808U, + 2214923584U, + }, + { + 3118792481U, + 2241076515U, + 3983669831U, + 3180915147U, + 3838626501U, + 1921630011U, + 3415351771U, + 2249953859U, + 3755081630U, + 486327260U, + 1227575720U, + 3643869379U, + 2982026073U, + 2466043731U, + 1982634375U, + 3769609014U, + }, + { + 2195455495U, + 2596863283U, + 4244994973U, + 1983609348U, + 4019674395U, + 3469982031U, + 1458697570U, + 1593516217U, + 1963896497U, + 3115309118U, + 1659132465U, + 2536770756U, + 3059294171U, + 2618031334U, + 2040903247U, + 3799795076U, + }}; + +static const bb31_t POSEIDON2_INTERNAL_MATRIX_DIAG_16_BABYBEAR_MONTY[16] = { + bb31_t::from_canonical_u32(0x78000001u - 2), // BabyBear::ORDER_U32 - 2 + bb31_t::from_canonical_u32(1), // 1 + bb31_t::from_canonical_u32(1 << 1), // 1 << 1 + bb31_t::from_canonical_u32(1 << 2), // 1 << 2 + bb31_t::from_canonical_u32(1 << 3), // 1 << 3 + bb31_t::from_canonical_u32(1 << 4), // 1 << 4 + bb31_t::from_canonical_u32(1 << 5), // 1 << 5 + bb31_t::from_canonical_u32(1 << 6), // 1 << 6 + bb31_t::from_canonical_u32(1 << 7), // 1 << 7 + bb31_t::from_canonical_u32(1 << 8), // 1 << 8 + bb31_t::from_canonical_u32(1 << 9), // 1 << 9 + bb31_t::from_canonical_u32(1 << 10), // 1 << 10 + bb31_t::from_canonical_u32(1 << 11), // 1 << 11 + bb31_t::from_canonical_u32(1 << 12), // 1 << 12 + bb31_t::from_canonical_u32(1 << 13), // 1 << 13 + bb31_t::from_canonical_u32(1 << 15), // 1 << 15 +}; + +template +__SP1_HOSTDEV__ __SP1_INLINE__ void external_linear_layer(F* state_var) { + for (size_t j = 0; j < WIDTH; j += 4) { + F t01 = state_var[j + 0] + state_var[j + 1]; + F t23 = state_var[j + 2] + state_var[j + 3]; + F t0123 = t01 + t23; + F t01123 = t0123 + state_var[j + 1]; + F t01233 = t0123 + state_var[j + 3]; + + // The order here is important. Need to overwrite x[0] and x[2] after x[1] and x[3]. 
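+    // Net effect on each 4-element chunk: multiplication by the circulant
+    // matrix circ(2, 3, 1, 1), as spelled out in the per-line comments below.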
+ state_var[j + 3] = + t01233 + + (state_var[j + 0] + state_var[j + 0]); // 3*x[0] + x[1] + x[2] + 2*x[3] + state_var[j + 1] = + t01123 + + (state_var[j + 2] + state_var[j + 2]); // x[0] + 2*x[1] + 3*x[2] + x[3] + state_var[j + 0] = t01123 + t01; // 2*x[0] + 3*x[1] + x[2] + x[3] + state_var[j + 2] = t01233 + t23; // x[0] + x[1] + 2*x[2] + 3*x[3] + } + + F sums[4] = {F::zero(), F::zero(), F::zero(), F::zero()}; + for (size_t k = 0; k < 4; k++) { + for (size_t j = 0; j < WIDTH; j += 4) { + sums[k] = sums[k] + state_var[j + k]; + } + } + + for (size_t j = 0; j < WIDTH; j++) { + state_var[j] = state_var[j] + sums[j % 4]; + } +} + +template +__SP1_HOSTDEV__ __SP1_INLINE__ void internal_linear_layer(F* state) { + F matmul_constants[WIDTH]; + for (size_t i = 0; i < WIDTH; i++) { + matmul_constants[i] = F(F::to_monty(F::from_monty( + POSEIDON2_INTERNAL_MATRIX_DIAG_16_BABYBEAR_MONTY[i].val))); + } + + F sum = F::zero(); + for (size_t i = 0; i < WIDTH; i++) { + sum = sum + state[i]; + } + + for (size_t i = 0; i < WIDTH; i++) { + state[i] = state[i] * matmul_constants[i]; + state[i] = state[i] + sum; + } + + F monty_inverse = F(F::to_monty(F::from_monty(1))); + for (size_t i = 0; i < WIDTH; i++) { + state[i] = state[i] * monty_inverse; + } +} +} // namespace sp1_recursion_core_sys::poseidon2 \ No newline at end of file diff --git a/crates/recursion/core/include/poseidon2_skinny.hpp b/crates/recursion/core/include/poseidon2_skinny.hpp new file mode 100644 index 0000000000..9e75bcce9a --- /dev/null +++ b/crates/recursion/core/include/poseidon2_skinny.hpp @@ -0,0 +1,115 @@ +#pragma once + +#include "poseidon2.hpp" +#include "prelude.hpp" + +namespace sp1_recursion_core_sys::poseidon2_skinny { +using namespace poseidon2; + +template +__SP1_HOSTDEV__ __SP1_INLINE__ void populate_external_round(F* round_state, + size_t r, + F* next_state_var) { + size_t round = + (r < NUM_EXTERNAL_ROUNDS / 2) ? 
r : r + NUM_INTERNAL_ROUNDS - 1; + + for (size_t i = 0; i < WIDTH; i++) { + F add_rc = round_state[i] + F(F::to_monty(RC_16_30_U32[round][i])); + + F sbox_deg_3 = add_rc * add_rc * add_rc; + next_state_var[i] = sbox_deg_3 * sbox_deg_3 * add_rc; + } + + external_linear_layer(next_state_var); +} + +template +__SP1_HOSTDEV__ __SP1_INLINE__ void populate_internal_rounds( + F* state, F* internal_rounds_s0, F* next_state_var) { + for (size_t i = 0; i < WIDTH; i++) { + next_state_var[i] = state[i]; + } + + for (size_t r = 0; r < NUM_INTERNAL_ROUNDS; r++) { + size_t round = r + NUM_EXTERNAL_ROUNDS / 2; + F add_rc = next_state_var[0] + F(F::to_monty(RC_16_30_U32[round][0])); + + F sbox_deg_3 = add_rc * add_rc * add_rc; + F sbox_deg_7 = sbox_deg_3 * sbox_deg_3 * add_rc; + + next_state_var[0] = sbox_deg_7; + internal_linear_layer(next_state_var); + + if (r < NUM_INTERNAL_ROUNDS - 1) { + internal_rounds_s0[r] = next_state_var[0]; + } + } +} + +template +__SP1_HOSTDEV__ void event_to_row(const Poseidon2Event& event, + Poseidon2* cols) { + Poseidon2& first_row = cols[0]; + for (size_t i = 0; i < 16; i++) { + first_row.state_var[i] = event.input[i]; + } + + Poseidon2& second_row = cols[1]; + for (size_t i = 0; i < 16; i++) { + second_row.state_var[i] = event.input[i]; + } + + external_linear_layer(second_row.state_var); + + for (size_t i = 1; i < OUTPUT_ROUND_IDX; i++) { + Poseidon2& col = cols[i]; + Poseidon2& next_row_cols = cols[i + 1]; + + if (i != INTERNAL_ROUND_IDX) { + populate_external_round(col.state_var, i - 1, next_row_cols.state_var); + } else { + populate_internal_rounds(col.state_var, col.internal_rounds_s0, + next_row_cols.state_var); + } + } +} + +template +__SP1_HOSTDEV__ void instr_to_row(const Poseidon2Instr& instr, size_t i, + Poseidon2PreprocessedColsSkinny& cols) { + cols.round_counters_preprocessed.is_input_round = + F::from_bool(i == INPUT_ROUND_IDX); + bool is_external_round = + i != INPUT_ROUND_IDX && i != INTERNAL_ROUND_IDX && i != OUTPUT_ROUND_IDX; + cols.round_counters_preprocessed.is_external_round = + F::from_bool(is_external_round); + cols.round_counters_preprocessed.is_internal_round = + F::from_bool(i == INTERNAL_ROUND_IDX); + + for (size_t j = 0; j < WIDTH; j++) { + if (is_external_round) { + size_t r = i - 1; + size_t round = (i < INTERNAL_ROUND_IDX) ? 
r : r + NUM_INTERNAL_ROUNDS - 1; + cols.round_counters_preprocessed.round_constants[j] = + F(F::to_monty(RC_16_30_U32[round][j])); + } else if (i == INTERNAL_ROUND_IDX) { + cols.round_counters_preprocessed.round_constants[j] = + F(F::to_monty(RC_16_30_U32[NUM_EXTERNAL_ROUNDS / 2 + j][0])); + } else { + cols.round_counters_preprocessed.round_constants[j] = F::zero(); + } + } + + if (i == INPUT_ROUND_IDX) { + for (size_t j = 0; j < WIDTH; j++) { + cols.memory_preprocessed[j].addr = instr.addrs.input[j]; + cols.memory_preprocessed[j].mult = F::zero() - F::one(); + } + } else if (i == OUTPUT_ROUND_IDX) { + for (size_t j = 0; j < WIDTH; j++) { + cols.memory_preprocessed[j].addr = instr.addrs.output[j]; + cols.memory_preprocessed[j].mult = instr.mults[j]; + } + } +} +} // namespace sp1_recursion_core_sys::poseidon2_skinny diff --git a/crates/recursion/core/include/poseidon2_wide.hpp b/crates/recursion/core/include/poseidon2_wide.hpp new file mode 100644 index 0000000000..9d82401d6b --- /dev/null +++ b/crates/recursion/core/include/poseidon2_wide.hpp @@ -0,0 +1,149 @@ +#pragma once + +#include "poseidon2.hpp" +#include "prelude.hpp" + +namespace sp1_recursion_core_sys::poseidon2_wide { +using namespace poseidon2; + +template +__SP1_HOSTDEV__ __SP1_INLINE__ void populate_external_round( + const F* external_rounds_state, F* sbox, size_t r, F* next_state) { + F round_state[WIDTH]; + if (r == 0) { + // external_linear_layer_immut + F temp_round_state[WIDTH]; + for (size_t i = 0; i < WIDTH; i++) { + temp_round_state[i] = external_rounds_state[r * WIDTH + i]; + } + external_linear_layer(temp_round_state); + for (size_t i = 0; i < WIDTH; i++) { + round_state[i] = temp_round_state[i]; + } + } else { + for (size_t i = 0; i < WIDTH; i++) { + round_state[i] = external_rounds_state[r * WIDTH + i]; + } + } + + size_t round = r < NUM_EXTERNAL_ROUNDS / 2 ? 
r : r + NUM_INTERNAL_ROUNDS; + F add_rc[WIDTH]; + for (size_t i = 0; i < WIDTH; i++) { + add_rc[i] = round_state[i] + F(F::to_monty(RC_16_30_U32[round][i])); + } + + F sbox_deg_3[WIDTH]; + F sbox_deg_7[WIDTH]; + for (size_t i = 0; i < WIDTH; i++) { + sbox_deg_3[i] = add_rc[i] * add_rc[i] * add_rc[i]; + sbox_deg_7[i] = sbox_deg_3[i] * sbox_deg_3[i] * add_rc[i]; + } + + if (sbox != nullptr) { + for (size_t i = 0; i < WIDTH; i++) { + sbox[r * WIDTH + i] = sbox_deg_3[i]; + } + } + + for (size_t i = 0; i < WIDTH; i++) { + next_state[i] = sbox_deg_7[i]; + } + external_linear_layer(next_state); +} + +template +__SP1_HOSTDEV__ __SP1_INLINE__ void populate_internal_rounds( + const F* internal_rounds_state, F* internal_rounds_s0, F* sbox, + F* ret_state) { + F state[WIDTH]; + for (size_t i = 0; i < WIDTH; i++) { + state[i] = internal_rounds_state[i]; + } + + F sbox_deg_3[NUM_INTERNAL_ROUNDS]; + for (size_t r = 0; r < NUM_INTERNAL_ROUNDS; r++) { + size_t round = r + NUM_EXTERNAL_ROUNDS / 2; + F add_rc = state[0] + F(F::to_monty(RC_16_30_U32[round][0])); + + sbox_deg_3[r] = add_rc * add_rc * add_rc; + F sbox_deg_7 = sbox_deg_3[r] * sbox_deg_3[r] * add_rc; + + state[0] = sbox_deg_7; + internal_linear_layer(state); + + if (r < NUM_INTERNAL_ROUNDS - 1) { + internal_rounds_s0[r] = state[0]; + } + } + + for (size_t i = 0; i < WIDTH; i++) { + ret_state[i] = state[i]; + } + + // Store sbox values if pointer is not null + if (sbox != nullptr) { + for (size_t r = 0; r < NUM_INTERNAL_ROUNDS; r++) { + sbox[r] = sbox_deg_3[r]; + } + } +} + +template +__SP1_HOSTDEV__ void event_to_row(const F* input, F* external_rounds_state, + F* internal_rounds_state, + F* internal_rounds_s0, F* external_sbox, + F* internal_sbox, F* output_state) { + for (size_t i = 0; i < WIDTH; i++) { + external_rounds_state[i] = input[i]; + } + + for (size_t r = 0; r < NUM_EXTERNAL_ROUNDS / 2; r++) { + F next_state[WIDTH]; + populate_external_round(external_rounds_state, external_sbox, r, + next_state); + if (r == NUM_EXTERNAL_ROUNDS / 2 - 1) { + for (size_t i = 0; i < WIDTH; i++) { + internal_rounds_state[i] = next_state[i]; + } + } else { + for (size_t i = 0; i < WIDTH; i++) { + external_rounds_state[(r + 1) * WIDTH + i] = next_state[i]; + } + } + } + + F ret_state[WIDTH]; + populate_internal_rounds(internal_rounds_state, internal_rounds_s0, + internal_sbox, ret_state); + size_t row = NUM_EXTERNAL_ROUNDS / 2; + for (size_t i = 0; i < WIDTH; i++) { + external_rounds_state[row * WIDTH + i] = ret_state[i]; + } + + for (size_t r = NUM_EXTERNAL_ROUNDS / 2; r < NUM_EXTERNAL_ROUNDS; r++) { + F next_state[WIDTH]; + populate_external_round(external_rounds_state, external_sbox, r, + next_state); + if (r == NUM_EXTERNAL_ROUNDS - 1) { + for (size_t i = 0; i < WIDTH; i++) { + output_state[i] = next_state[i]; + } + } else { + for (size_t i = 0; i < WIDTH; i++) { + external_rounds_state[(r + 1) * WIDTH + i] = next_state[i]; + } + } + } +} + +template +__SP1_HOSTDEV__ void instr_to_row(const Poseidon2SkinnyInstr& instr, + Poseidon2PreprocessedColsWide& cols) { + for (size_t i = 0; i < WIDTH; i++) { + cols.input[i] = instr.addrs.input[i]; + cols.output[i] = MemoryAccessColsChips{.addr = instr.addrs.output[i], + .mult = instr.mults[i]}; + } + cols.is_real_neg = F::zero() - F::one(); +} +} // namespace sp1_recursion_core_sys::poseidon2_wide diff --git a/crates/recursion/core/include/prelude.hpp b/crates/recursion/core/include/prelude.hpp new file mode 100644 index 0000000000..38cd08f262 --- /dev/null +++ b/crates/recursion/core/include/prelude.hpp @@ -0,0 
+1,23 @@ +#pragma once + +#include "sp1-recursion-core-sys-cbindgen.hpp" + +#ifndef __CUDACC__ +#define __SP1_HOSTDEV__ +#define __SP1_INLINE__ inline +#include + +namespace sp1_recursion_core_sys { +template +using array_t = std::array; +} // namespace sp1_recursion_core_sys +#else +#define __SP1_HOSTDEV__ __host__ __device__ +#define __SP1_INLINE__ __forceinline__ +#include + +namespace sp1_recursion_core_sys { +template +using array_t = cuda::std::array; +} // namespace sp1_recursion_core_sys +#endif diff --git a/crates/recursion/core/include/public_values.hpp b/crates/recursion/core/include/public_values.hpp new file mode 100644 index 0000000000..a4a527086a --- /dev/null +++ b/crates/recursion/core/include/public_values.hpp @@ -0,0 +1,21 @@ +#pragma once + +#include "prelude.hpp" + +namespace sp1_recursion_core_sys::public_values { +template +__SP1_HOSTDEV__ void event_to_row(const CommitPublicValuesEvent& event, + size_t digest_idx, + PublicValuesCols& cols) { + cols.pv_element = event.public_values.digest[digest_idx]; +} + +template +__SP1_HOSTDEV__ void instr_to_row(const CommitPublicValuesInstr& instr, + size_t digest_idx, + PublicValuesPreprocessedCols& cols) { + cols.pv_idx[digest_idx] = F::one(); + cols.pv_mem.addr = instr.pv_addrs.digest[digest_idx]; + cols.pv_mem.mult = F::zero() - F::one(); +} +} // namespace sp1_recursion_core_sys::public_values diff --git a/crates/recursion/core/include/select.hpp b/crates/recursion/core/include/select.hpp new file mode 100644 index 0000000000..79285c0bb0 --- /dev/null +++ b/crates/recursion/core/include/select.hpp @@ -0,0 +1,20 @@ +#pragma once + +#include "prelude.hpp" + +namespace sp1_recursion_core_sys::select { +template +__SP1_HOSTDEV__ void event_to_row(const SelectEvent& event, + SelectCols& cols) { + cols.vals = event; +} + +template +__SP1_HOSTDEV__ void instr_to_row(const SelectInstr& instr, + SelectPreprocessedCols& cols) { + cols.is_real = F::one(); + cols.addrs = instr.addrs; + cols.mult1 = instr.mult1; + cols.mult2 = instr.mult2; +} +} // namespace sp1_recursion_core_sys::select diff --git a/crates/recursion/core/include/sys.hpp b/crates/recursion/core/include/sys.hpp new file mode 100644 index 0000000000..36510ab046 --- /dev/null +++ b/crates/recursion/core/include/sys.hpp @@ -0,0 +1,12 @@ +#pragma once + +#include "alu_base.hpp" +#include "alu_ext.hpp" +#include "batch_fri.hpp" +#include "exp_reverse_bits.hpp" +#include "fri_fold.hpp" +#include "public_values.hpp" +#include "select.hpp" +#include "poseidon2_skinny.hpp" +#include "poseidon2_wide.hpp" +#include "sp1-recursion-core-sys-cbindgen.hpp" diff --git a/crates/recursion/core/src/air/public_values.rs b/crates/recursion/core/src/air/public_values.rs index fd5baed415..f5c8b86ee9 100644 --- a/crates/recursion/core/src/air/public_values.rs +++ b/crates/recursion/core/src/air/public_values.rs @@ -7,7 +7,7 @@ use p3_symmetric::CryptographicPermutation; use serde::{Deserialize, Serialize}; use sp1_core_machine::utils::indices_arr; use sp1_derive::AlignedBorrow; -use sp1_stark::{air::POSEIDON_NUM_WORDS, Word, PROOF_MAX_NUM_PVS}; +use sp1_stark::{air::POSEIDON_NUM_WORDS, septic_digest::SepticDigest, Word, PROOF_MAX_NUM_PVS}; use static_assertions::const_assert_eq; use std::{ borrow::BorrowMut, @@ -113,12 +113,6 @@ pub struct RecursionPublicValues { /// Last MemoryFinalize address bits. pub last_finalize_addr_bits: [T; 32], - /// Start state of reconstruct_challenger. - pub start_reconstruct_challenger: ChallengerPublicValues, - - /// End state of reconstruct_challenger. 
- pub end_reconstruct_challenger: ChallengerPublicValues, - /// Start state of reconstruct_deferred_digest. pub start_reconstruct_deferred_digest: [T; POSEIDON_NUM_WORDS], @@ -131,12 +125,9 @@ pub struct RecursionPublicValues { /// The root of the vk merkle tree. pub vk_root: [T; DIGEST_SIZE], - /// The leaf challenger containing the entropy from the main trace commitment. - pub leaf_challenger: ChallengerPublicValues, - - /// Current cumulative sum of lookup bus. Note that for recursive proofs for core proofs, this - /// contains the global cumulative sum. For all other proofs, it's the local cumulative sum. - pub cumulative_sum: [T; 4], + /// Current cumulative sum of lookup bus. Note that for recursive proofs for core proofs, this + /// contains the global cumulative sum. + pub global_cumulative_sum: SepticDigest, /// Whether the proof completely proves the program execution. pub is_complete: T, diff --git a/crates/recursion/core/src/chips/alu_base.rs b/crates/recursion/core/src/chips/alu_base.rs index f587c73ce8..b4fe17c467 100644 --- a/crates/recursion/core/src/chips/alu_base.rs +++ b/crates/recursion/core/src/chips/alu_base.rs @@ -253,4 +253,146 @@ mod tests { run_recursion_test_machines(program); } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_trace_ffi_eq_rust() { + type F = BabyBear; + + let shard = ExecutionRecord { + base_alu_events: vec![BaseAluIo { out: F::one(), in1: F::one(), in2: F::one() }], + ..Default::default() + }; + + let chip = BaseAluChip; + let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); + let trace_ffi = generate_trace_ffi(&shard); + + assert_eq!(trace_ffi, trace); + } + + #[cfg(feature = "sys")] + fn generate_trace_ffi(input: &ExecutionRecord) -> RowMajorMatrix { + type F = BabyBear; + + let events = &input.base_alu_events; + let nb_rows = events.len().div_ceil(NUM_BASE_ALU_ENTRIES_PER_ROW); + let fixed_log2_rows = input.fixed_log2_rows(&BaseAluChip); + let padded_nb_rows = match fixed_log2_rows { + Some(log2_rows) => 1 << log2_rows, + None => next_power_of_two(nb_rows, None), + }; + let mut values = vec![F::zero(); padded_nb_rows * NUM_BASE_ALU_COLS]; + + let chunk_size = std::cmp::max(events.len() / num_cpus::get(), 1); + let populate_len = events.len() * NUM_BASE_ALU_VALUE_COLS; + + values[..populate_len] + .par_chunks_mut(chunk_size * NUM_BASE_ALU_VALUE_COLS) + .enumerate() + .for_each(|(i, rows)| { + rows.chunks_mut(NUM_BASE_ALU_VALUE_COLS).enumerate().for_each(|(j, row)| { + let idx = i * chunk_size + j; + if idx < events.len() { + let cols: &mut BaseAluValueCols<_> = row.borrow_mut(); + unsafe { + crate::sys::alu_base_event_to_row_babybear(&events[idx], cols); + } + } + }); + }); + + RowMajorMatrix::new(values, NUM_BASE_ALU_COLS) + } + + #[test] + fn generate_preprocessed_trace() { + type F = BabyBear; + + let program = RecursionProgram { + instructions: vec![Instruction::BaseAlu(BaseAluInstr { + opcode: BaseAluOpcode::AddF, + mult: F::one(), + addrs: BaseAluIo { + out: Address(F::zero()), + in1: Address(F::one()), + in2: Address(F::two()), + }, + })], + ..Default::default() + }; + + let chip = BaseAluChip; + let trace = chip.generate_preprocessed_trace(&program).unwrap(); + println!("{:?}", trace.values); + } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_preprocessed_trace_ffi_eq_rust() { + type F = BabyBear; + + let program = RecursionProgram { + instructions: vec![Instruction::BaseAlu(BaseAluInstr { + opcode: BaseAluOpcode::AddF, + mult: F::one(), + addrs: BaseAluIo { + out: 
Address(F::zero()), + in1: Address(F::one()), + in2: Address(F::two()), + }, + })], + ..Default::default() + }; + + let chip = BaseAluChip; + let trace = chip.generate_preprocessed_trace(&program).unwrap(); + let trace_ffi = generate_preprocessed_trace_ffi(&program); + + assert_eq!(trace_ffi, trace); + } + + #[cfg(feature = "sys")] + fn generate_preprocessed_trace_ffi( + program: &RecursionProgram, + ) -> RowMajorMatrix { + type F = BabyBear; + + let instrs = program + .instructions + .iter() + .filter_map(|instruction| match instruction { + Instruction::BaseAlu(x) => Some(x), + _ => None, + }) + .collect::>(); + + let nb_rows = instrs.len().div_ceil(NUM_BASE_ALU_ENTRIES_PER_ROW); + let fixed_log2_rows = program.fixed_log2_rows(&BaseAluChip); + let padded_nb_rows = match fixed_log2_rows { + Some(log2_rows) => 1 << log2_rows, + None => next_power_of_two(nb_rows, None), + }; + let mut values = vec![F::zero(); padded_nb_rows * NUM_BASE_ALU_PREPROCESSED_COLS]; + + let chunk_size = std::cmp::max(instrs.len() / num_cpus::get(), 1); + let populate_len = instrs.len() * NUM_BASE_ALU_ACCESS_COLS; + + values[..populate_len] + .par_chunks_mut(chunk_size * NUM_BASE_ALU_ACCESS_COLS) + .enumerate() + .for_each(|(i, rows)| { + rows.chunks_mut(NUM_BASE_ALU_ACCESS_COLS).enumerate().for_each(|(j, row)| { + let idx = i * chunk_size + j; + if idx < instrs.len() { + let access: &mut BaseAluAccessCols<_> = row.borrow_mut(); + unsafe { + crate::sys::alu_base_instr_to_row_babybear(instrs[idx], access); + } + } + }); + }); + + RowMajorMatrix::new(values, NUM_BASE_ALU_PREPROCESSED_COLS) + } } diff --git a/crates/recursion/core/src/chips/alu_ext.rs b/crates/recursion/core/src/chips/alu_ext.rs index b698a5d209..4dbe0ca605 100644 --- a/crates/recursion/core/src/chips/alu_ext.rs +++ b/crates/recursion/core/src/chips/alu_ext.rs @@ -265,4 +265,149 @@ mod tests { run_recursion_test_machines(program); } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_trace_ffi_eq_rust() { + type F = BabyBear; + + let shard = ExecutionRecord { + ext_alu_events: vec![ExtAluIo { + out: F::one().into(), + in1: F::one().into(), + in2: F::one().into(), + }], + ..Default::default() + }; + + let chip = ExtAluChip; + let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); + let trace_ffi = generate_trace_ffi(&shard); + + assert_eq!(trace_ffi, trace); + } + + #[cfg(feature = "sys")] + fn generate_trace_ffi(input: &ExecutionRecord) -> RowMajorMatrix { + type F = BabyBear; + + let events = &input.ext_alu_events; + let nb_rows = events.len().div_ceil(NUM_EXT_ALU_ENTRIES_PER_ROW); + let fixed_log2_rows = input.fixed_log2_rows(&ExtAluChip); + let padded_nb_rows = match fixed_log2_rows { + Some(log2_rows) => 1 << log2_rows, + None => next_power_of_two(nb_rows, None), + }; + let mut values = vec![F::zero(); padded_nb_rows * NUM_EXT_ALU_COLS]; + + let chunk_size = std::cmp::max(events.len() / num_cpus::get(), 1); + let populate_len = events.len() * NUM_EXT_ALU_VALUE_COLS; + + values[..populate_len] + .par_chunks_mut(chunk_size * NUM_EXT_ALU_VALUE_COLS) + .enumerate() + .for_each(|(i, rows)| { + rows.chunks_mut(NUM_EXT_ALU_VALUE_COLS).enumerate().for_each(|(j, row)| { + let idx = i * chunk_size + j; + if idx < events.len() { + let cols: &mut ExtAluValueCols<_> = row.borrow_mut(); + unsafe { + crate::sys::alu_ext_event_to_row_babybear(&events[idx], cols); + } + } + }); + }); + + RowMajorMatrix::new(values, NUM_EXT_ALU_COLS) + } + + #[test] + fn generate_preprocessed_trace() { + type F = BabyBear; + + let program = 
RecursionProgram { + instructions: vec![Instruction::ExtAlu(ExtAluInstr { + opcode: ExtAluOpcode::AddE, + mult: F::one(), + addrs: ExtAluIo { + out: Address(F::zero()), + in1: Address(F::one()), + in2: Address(F::two()), + }, + })], + ..Default::default() + }; + let chip = ExtAluChip; + let trace = chip.generate_preprocessed_trace(&program).unwrap(); + println!("{:?}", trace.values); + } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_preprocessed_trace_ffi_eq_rust() { + type F = BabyBear; + + let program = RecursionProgram { + instructions: vec![Instruction::ExtAlu(ExtAluInstr { + opcode: ExtAluOpcode::AddE, + mult: F::one(), + addrs: ExtAluIo { + out: Address(F::zero()), + in1: Address(F::one()), + in2: Address(F::two()), + }, + })], + ..Default::default() + }; + + let chip = ExtAluChip; + let trace = chip.generate_preprocessed_trace(&program).unwrap(); + let trace_ffi = generate_preprocessed_trace_ffi(&program); + + assert_eq!(trace_ffi, trace); + } + + #[cfg(feature = "sys")] + fn generate_preprocessed_trace_ffi( + program: &RecursionProgram, + ) -> RowMajorMatrix { + type F = BabyBear; + + let instrs = program + .instructions + .iter() + .filter_map(|instruction| match instruction { + Instruction::ExtAlu(x) => Some(x), + _ => None, + }) + .collect::>(); + + let nb_rows = instrs.len().div_ceil(NUM_EXT_ALU_ENTRIES_PER_ROW); + let fixed_log2_rows = program.fixed_log2_rows(&ExtAluChip); + let padded_nb_rows = match fixed_log2_rows { + Some(log2_rows) => 1 << log2_rows, + None => next_power_of_two(nb_rows, None), + }; + let mut values = vec![F::zero(); padded_nb_rows * NUM_EXT_ALU_PREPROCESSED_COLS]; + + let chunk_size = std::cmp::max(instrs.len() / num_cpus::get(), 1); + let populate_len = instrs.len() * NUM_EXT_ALU_ACCESS_COLS; + + values[..populate_len] + .par_chunks_mut(chunk_size * NUM_EXT_ALU_ACCESS_COLS) + .enumerate() + .for_each(|(i, rows)| { + rows.chunks_mut(NUM_EXT_ALU_ACCESS_COLS).enumerate().for_each(|(j, row)| { + let idx = i * chunk_size + j; + if idx < instrs.len() { + let access: &mut ExtAluAccessCols<_> = row.borrow_mut(); + unsafe { + crate::sys::alu_ext_instr_to_row_babybear(instrs[idx], access); + } + } + }); + }); + + RowMajorMatrix::new(values, NUM_EXT_ALU_PREPROCESSED_COLS) + } } diff --git a/crates/recursion/core/src/chips/batch_fri.rs b/crates/recursion/core/src/chips/batch_fri.rs index 6522a9881d..db073f6479 100644 --- a/crates/recursion/core/src/chips/batch_fri.rs +++ b/crates/recursion/core/src/chips/batch_fri.rs @@ -72,6 +72,7 @@ impl MachineAir for BatchFRIChip usize { NUM_BATCH_FRI_PREPROCESSED_COLS } + fn generate_preprocessed_trace(&self, program: &Self::Program) -> Option> { let mut rows: Vec<[F; NUM_BATCH_FRI_PREPROCESSED_COLS]> = Vec::new(); program @@ -91,14 +92,14 @@ impl MachineAir for BatchFRIChip = row.as_mut_slice().borrow_mut(); row.is_real = F::one(); - row.is_end = F::from_bool(i == len - 1); + row.is_end = F::from_bool(_i == len - 1); row.acc_addr = ext_single_addrs.acc; - row.alpha_pow_addr = ext_vec_addrs.alpha_pow[i]; - row.p_at_z_addr = ext_vec_addrs.p_at_z[i]; - row.p_at_x_addr = base_vec_addrs.p_at_x[i]; + row.alpha_pow_addr = ext_vec_addrs.alpha_pow[_i]; + row.p_at_z_addr = ext_vec_addrs.p_at_z[_i]; + row.p_at_x_addr = base_vec_addrs.p_at_x[_i]; }); rows.extend(row_add); }); @@ -229,3 +230,168 @@ where self.eval_batch_fri::(builder, local, next, prepr_local, prepr_next); } } + +#[cfg(test)] +mod tests { + use crate::{BatchFRIBaseVecIo, BatchFRIEvent, BatchFRIExtSingleIo, BatchFRIExtVecIo}; + use p3_baby_bear::BabyBear; + use 
p3_field::AbstractField; + use p3_matrix::dense::RowMajorMatrix; + + use super::*; + + #[test] + fn generate_trace() { + type F = BabyBear; + + let shard = ExecutionRecord { + batch_fri_events: vec![BatchFRIEvent { + ext_single: BatchFRIExtSingleIo { acc: Block::default() }, + ext_vec: BatchFRIExtVecIo { alpha_pow: Block::default(), p_at_z: Block::default() }, + base_vec: BatchFRIBaseVecIo { p_at_x: F::one() }, + }], + ..Default::default() + }; + let chip = BatchFRIChip::<2>; + let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); + println!("{:?}", trace.values) + } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_trace_ffi_eq_rust() { + type F = BabyBear; + + let shard = ExecutionRecord { + batch_fri_events: vec![BatchFRIEvent { + ext_single: BatchFRIExtSingleIo { acc: Block::default() }, + ext_vec: BatchFRIExtVecIo { alpha_pow: Block::default(), p_at_z: Block::default() }, + base_vec: BatchFRIBaseVecIo { p_at_x: F::one() }, + }], + ..Default::default() + }; + + let chip = BatchFRIChip::<2>; + let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); + let trace_ffi = generate_trace_ffi(&shard); + + assert_eq!(trace_ffi, trace); + } + + #[cfg(feature = "sys")] + fn generate_trace_ffi(input: &ExecutionRecord) -> RowMajorMatrix { + type F = BabyBear; + + let events = &input.batch_fri_events; + let mut rows = vec![[F::zero(); NUM_BATCH_FRI_COLS]; events.len()]; + + let chunk_size = std::cmp::max(events.len() / num_cpus::get(), 1); + rows.chunks_mut(chunk_size).enumerate().for_each(|(i, chunk)| { + chunk.iter_mut().enumerate().for_each(|(j, row)| { + let idx = i * chunk_size + j; + if idx < events.len() { + let cols: &mut BatchFRICols = row.as_mut_slice().borrow_mut(); + unsafe { + crate::sys::batch_fri_event_to_row_babybear(&events[idx], cols); + } + } + }); + }); + + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_BATCH_FRI_COLS], + input.fixed_log2_rows(&BatchFRIChip::<2>), + ); + + RowMajorMatrix::new(rows.into_iter().flatten().collect(), NUM_BATCH_FRI_COLS) + } + + #[test] + fn generate_preprocessed_trace() { + type F = BabyBear; + + let program = RecursionProgram:: { + instructions: vec![Instruction::BatchFRI(Box::new(BatchFRIInstr { + base_vec_addrs: BatchFRIBaseVecIo { p_at_x: vec![Address(F::zero())] }, + ext_single_addrs: BatchFRIExtSingleIo { acc: Address(F::zero()) }, + ext_vec_addrs: BatchFRIExtVecIo { + alpha_pow: vec![Address(F::zero())], + p_at_z: vec![Address(F::zero())], + }, + acc_mult: F::one(), + }))], + ..Default::default() + }; + + let chip = BatchFRIChip::<2>; + let trace = chip.generate_preprocessed_trace(&program).unwrap(); + println!("{:?}", trace.values); + } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_preprocessed_trace_ffi_eq_rust() { + type F = BabyBear; + + let program = RecursionProgram:: { + instructions: vec![Instruction::BatchFRI(Box::new(BatchFRIInstr { + base_vec_addrs: BatchFRIBaseVecIo { p_at_x: vec![Address(F::zero())] }, + ext_single_addrs: BatchFRIExtSingleIo { acc: Address(F::zero()) }, + ext_vec_addrs: BatchFRIExtVecIo { + alpha_pow: vec![Address(F::zero())], + p_at_z: vec![Address(F::zero())], + }, + acc_mult: F::one(), + }))], + ..Default::default() + }; + + let chip = BatchFRIChip::<2>; + let trace = chip.generate_preprocessed_trace(&program).unwrap(); + let trace_ffi = generate_preprocessed_trace_ffi(&program); + + assert_eq!(trace_ffi, trace); + } + + #[cfg(feature = "sys")] + fn generate_preprocessed_trace_ffi( + program: &RecursionProgram, + ) -> 
RowMajorMatrix { + type F = BabyBear; + + let instrs = program + .instructions + .iter() + .filter_map(|instruction| match instruction { + Instruction::BatchFRI(x) => Some(x), + _ => None, + }) + .collect::>(); + + let mut rows = Vec::new(); + instrs.iter().for_each(|instruction| { + let BatchFRIInstr { base_vec_addrs: _, ext_single_addrs: _, ext_vec_addrs, acc_mult } = + instruction.as_ref(); + let len = ext_vec_addrs.p_at_z.len(); + let mut row_add = vec![[F::zero(); NUM_BATCH_FRI_PREPROCESSED_COLS]; len]; + debug_assert_eq!(*acc_mult, F::one()); + + row_add.iter_mut().for_each(|row| { + let cols: &mut BatchFRIPreprocessedCols = row.as_mut_slice().borrow_mut(); + unsafe { + crate::sys::batch_fri_instr_to_row_babybear(&instruction.into(), cols); + } + }); + rows.extend(row_add); + }); + + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_BATCH_FRI_PREPROCESSED_COLS], + program.fixed_log2_rows(&BatchFRIChip::<2>), + ); + + RowMajorMatrix::new(rows.into_iter().flatten().collect(), NUM_BATCH_FRI_PREPROCESSED_COLS) + } +} diff --git a/crates/recursion/core/src/chips/exp_reverse_bits.rs b/crates/recursion/core/src/chips/exp_reverse_bits.rs index b5a8655236..e5c772a019 100644 --- a/crates/recursion/core/src/chips/exp_reverse_bits.rs +++ b/crates/recursion/core/src/chips/exp_reverse_bits.rs @@ -16,7 +16,7 @@ use crate::{ ExpReverseBitsInstr, Instruction, }; -use super::mem::MemoryAccessCols; +use super::mem::{MemoryAccessCols, MemoryAccessColsChips}; pub const NUM_EXP_REVERSE_BITS_LEN_COLS: usize = core::mem::size_of::>(); pub const NUM_EXP_REVERSE_BITS_LEN_PREPROCESSED_COLS: usize = @@ -28,9 +28,9 @@ pub struct ExpReverseBitsLenChip; #[derive(AlignedBorrow, Clone, Copy, Debug)] #[repr(C)] pub struct ExpReverseBitsLenPreprocessedCols { - pub x_mem: MemoryAccessCols, - pub exponent_mem: MemoryAccessCols, - pub result_mem: MemoryAccessCols, + pub x_mem: MemoryAccessColsChips, + pub exponent_mem: MemoryAccessColsChips, + pub result_mem: MemoryAccessColsChips, pub iteration_num: T, pub is_first: T, pub is_last: T, @@ -312,9 +312,12 @@ mod tests { machine::tests::run_recursion_test_machines, runtime::{instruction as instr, ExecutionRecord}, stark::BabyBearPoseidon2Outer, - ExpReverseBitsEvent, Instruction, MemAccessKind, RecursionProgram, + Address, ExpReverseBitsEvent, ExpReverseBitsIo, Instruction, MemAccessKind, + RecursionProgram, }; + use super::*; + #[test] fn prove_babybear_circuit_erbl() { setup_logger(); @@ -387,4 +390,185 @@ mod tests { let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); println!("{:?}", trace.values) } + + #[test] + fn generate_erbl_preprocessed_trace() { + type F = BabyBear; + + let program = RecursionProgram { + instructions: vec![Instruction::ExpReverseBitsLen(ExpReverseBitsInstr { + addrs: ExpReverseBitsIo { + base: Address(F::zero()), + exp: vec![Address(F::one()), Address(F::zero()), Address(F::one())], + result: Address(F::from_canonical_u32(4)), + }, + mult: F::one(), + })], + ..Default::default() + }; + + let chip = ExpReverseBitsLenChip::<3>; + let trace = chip.generate_preprocessed_trace(&program).unwrap(); + println!("{:?}", trace.values); + } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_trace_ffi_eq_rust() { + type F = BabyBear; + + let shard = ExecutionRecord { + exp_reverse_bits_len_events: vec![ExpReverseBitsEvent { + base: F::two(), + exp: vec![F::zero(), F::one(), F::one()], + result: F::two().exp_u64(0b110), + }], + ..Default::default() + }; + + let chip = ExpReverseBitsLenChip::<3>; + let trace: 
RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); + let trace_ffi = generate_trace_ffi(&shard); + + assert_eq!(trace_ffi, trace); + } + + #[cfg(feature = "sys")] + fn generate_trace_ffi(input: &ExecutionRecord) -> RowMajorMatrix { + type F = BabyBear; + + let events = &input.exp_reverse_bits_len_events; + let mut overall_rows = Vec::new(); + + let chunk_size = std::cmp::max(events.len() / num_cpus::get(), 1); + events.chunks(chunk_size).for_each(|chunk| { + chunk.iter().for_each(|event| { + let mut rows = + vec![vec![F::zero(); NUM_EXP_REVERSE_BITS_LEN_COLS]; event.exp.len()]; + let mut accum = F::one(); + + rows.iter_mut().enumerate().for_each(|(i, row)| { + let cols: &mut ExpReverseBitsLenCols = row.as_mut_slice().borrow_mut(); + unsafe { + crate::sys::exp_reverse_bits_event_to_row_babybear(&event.into(), i, cols); + } + + // Accumulate after the event is converted to a row + let prev_accum = accum; + accum = prev_accum * prev_accum * cols.multiplier; + + cols.accum = accum; + cols.accum_squared = accum * accum; + cols.prev_accum_squared = prev_accum * prev_accum; + cols.prev_accum_squared_times_multiplier = + cols.prev_accum_squared * cols.multiplier; + }); + + overall_rows.extend(rows); + }); + }); + + pad_rows_fixed( + &mut overall_rows, + || [F::zero(); NUM_EXP_REVERSE_BITS_LEN_COLS].to_vec(), + input.fixed_log2_rows(&ExpReverseBitsLenChip::<3>), + ); + + RowMajorMatrix::new( + overall_rows.into_iter().flatten().collect(), + NUM_EXP_REVERSE_BITS_LEN_COLS, + ) + } + + #[test] + fn generate_preprocessed_trace() { + type F = BabyBear; + + let program = RecursionProgram:: { + instructions: vec![Instruction::ExpReverseBitsLen(ExpReverseBitsInstr { + addrs: ExpReverseBitsIo { + base: Address(F::zero()), + exp: vec![Address(F::zero()), Address(F::one())], + result: Address(F::zero()), + }, + mult: F::one(), + })], + ..Default::default() + }; + + let chip = ExpReverseBitsLenChip::<3>; + let trace = chip.generate_preprocessed_trace(&program).unwrap(); + println!("{:?}", trace.values); + } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_preprocessed_trace_ffi_eq_rust() { + type F = BabyBear; + + let program = RecursionProgram:: { + instructions: vec![Instruction::ExpReverseBitsLen(ExpReverseBitsInstr { + addrs: ExpReverseBitsIo { + base: Address(F::zero()), + exp: vec![Address(F::zero()), Address(F::one())], + result: Address(F::zero()), + }, + mult: F::one(), + })], + ..Default::default() + }; + + let chip = ExpReverseBitsLenChip::<3>; + let trace = chip.generate_preprocessed_trace(&program).unwrap(); + let trace_ffi = generate_preprocessed_trace_ffi(&program); + + assert_eq!(trace_ffi, trace); + } + + #[cfg(feature = "sys")] + fn generate_preprocessed_trace_ffi( + program: &RecursionProgram, + ) -> RowMajorMatrix { + type F = BabyBear; + + let instrs = program + .instructions + .iter() + .filter_map(|instruction| match instruction { + Instruction::ExpReverseBitsLen(x) => Some(x), + _ => None, + }) + .collect::>(); + + let mut rows = Vec::new(); + instrs.iter().for_each(|instruction| { + let len = instruction.addrs.exp.len(); + let mut row_add = vec![[F::zero(); NUM_EXP_REVERSE_BITS_LEN_PREPROCESSED_COLS]; len]; + + row_add.iter_mut().enumerate().for_each(|(i, row)| { + let cols: &mut ExpReverseBitsLenPreprocessedCols = + row.as_mut_slice().borrow_mut(); + unsafe { + crate::sys::exp_reverse_bits_instr_to_row_babybear( + &(*instruction).into(), + i, + len, + cols, + ); + } + }); + rows.extend(row_add); + }); + + pad_rows_fixed( + &mut rows, + || 
[F::zero(); NUM_EXP_REVERSE_BITS_LEN_PREPROCESSED_COLS], + program.fixed_log2_rows(&ExpReverseBitsLenChip::<3>), + ); + + RowMajorMatrix::new( + rows.into_iter().flatten().collect(), + NUM_EXP_REVERSE_BITS_LEN_PREPROCESSED_COLS, + ) + } } diff --git a/crates/recursion/core/src/chips/fri_fold.rs b/crates/recursion/core/src/chips/fri_fold.rs index 063037032c..41167c4a40 100644 --- a/crates/recursion/core/src/chips/fri_fold.rs +++ b/crates/recursion/core/src/chips/fri_fold.rs @@ -21,7 +21,7 @@ use crate::{ ExecutionRecord, FriFoldInstr, }; -use super::mem::MemoryAccessCols; +use super::mem::{MemoryAccessCols, MemoryAccessColsChips}; pub const NUM_FRI_FOLD_COLS: usize = core::mem::size_of::>(); pub const NUM_FRI_FOLD_PREPROCESSED_COLS: usize = @@ -45,19 +45,19 @@ pub struct FriFoldPreprocessedCols { pub is_first: T, // Memory accesses for the single fields. - pub z_mem: MemoryAccessCols, - pub alpha_mem: MemoryAccessCols, - pub x_mem: MemoryAccessCols, + pub z_mem: MemoryAccessColsChips, + pub alpha_mem: MemoryAccessColsChips, + pub x_mem: MemoryAccessColsChips, // Memory accesses for the vector field inputs. - pub alpha_pow_input_mem: MemoryAccessCols, - pub ro_input_mem: MemoryAccessCols, - pub p_at_x_mem: MemoryAccessCols, - pub p_at_z_mem: MemoryAccessCols, + pub alpha_pow_input_mem: MemoryAccessColsChips, + pub ro_input_mem: MemoryAccessColsChips, + pub p_at_x_mem: MemoryAccessColsChips, + pub p_at_z_mem: MemoryAccessColsChips, // Memory accesses for the vector field outputs. - pub ro_output_mem: MemoryAccessCols, - pub alpha_pow_output_mem: MemoryAccessCols, + pub ro_output_mem: MemoryAccessColsChips, + pub alpha_pow_output_mem: MemoryAccessColsChips, pub is_real: T, } @@ -100,6 +100,7 @@ impl MachineAir for FriFoldChip fn preprocessed_width(&self) -> usize { NUM_FRI_FOLD_PREPROCESSED_COLS } + fn generate_preprocessed_trace(&self, program: &Self::Program) -> Option> { let mut rows: Vec<[F; NUM_FRI_FOLD_PREPROCESSED_COLS]> = Vec::new(); program @@ -362,13 +363,15 @@ mod tests { use p3_field::AbstractField; use p3_matrix::dense::RowMajorMatrix; + use super::*; + use crate::{ air::Block, chips::fri_fold::FriFoldChip, machine::tests::run_recursion_test_machines, runtime::{instruction as instr, ExecutionRecord}, stark::BabyBearPoseidon2Outer, - FriFoldBaseIo, FriFoldEvent, FriFoldExtSingleIo, FriFoldExtVecIo, Instruction, + Address, FriFoldBaseIo, FriFoldEvent, FriFoldExtSingleIo, FriFoldExtVecIo, Instruction, MemAccessKind, RecursionProgram, }; @@ -545,4 +548,191 @@ mod tests { let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); println!("{:?}", trace.values) } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_trace_ffi_eq_rust() { + type F = BabyBear; + + let mut rng = StdRng::seed_from_u64(0xDEADBEEF); + let mut rng2 = StdRng::seed_from_u64(0xDEADBEEF); + let mut random_felt = move || -> F { F::from_canonical_u32(rng.gen_range(0..1 << 16)) }; + let mut random_block = move || Block::from([random_felt(); 4]); + + let shard = ExecutionRecord { + fri_fold_events: (0..17) + .map(|_| FriFoldEvent { + base_single: FriFoldBaseIo { + x: F::from_canonical_u32(rng2.gen_range(0..1 << 16)), + }, + ext_single: FriFoldExtSingleIo { z: random_block(), alpha: random_block() }, + ext_vec: crate::FriFoldExtVecIo { + mat_opening: random_block(), + ps_at_z: random_block(), + alpha_pow_input: random_block(), + ro_input: random_block(), + alpha_pow_output: random_block(), + ro_output: random_block(), + }, + }) + .collect(), + ..Default::default() + }; + + let chip = 
FriFoldChip::<3>::default(); + let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); + let trace_ffi = generate_trace_ffi(&shard); + + assert_eq!(trace_ffi, trace); + } + + #[cfg(feature = "sys")] + fn generate_trace_ffi(input: &ExecutionRecord) -> RowMajorMatrix { + type F = BabyBear; + + let events = &input.fri_fold_events; + let mut rows = events.iter().map(|_| [F::zero(); NUM_FRI_FOLD_COLS]).collect_vec(); + + let chunk_size = std::cmp::max(events.len() / num_cpus::get(), 1); + rows.chunks_mut(chunk_size).enumerate().for_each(|(i, chunk)| { + chunk.iter_mut().enumerate().for_each(|(j, row)| { + let idx = i * chunk_size + j; + if idx < events.len() { + let cols: &mut FriFoldCols = row.as_mut_slice().borrow_mut(); + unsafe { + crate::sys::fri_fold_event_to_row_babybear(&events[idx], cols); + } + } + }); + }); + + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_FRI_FOLD_COLS], + input.fixed_log2_rows(&FriFoldChip::<3>::default()), + ); + + RowMajorMatrix::new(rows.into_iter().flatten().collect(), NUM_FRI_FOLD_COLS) + } + + #[test] + fn generate_preprocessed_trace() { + type F = BabyBear; + + let mut rng = StdRng::seed_from_u64(0xDEADBEEF); + let mut random_addr = move || -> F { F::from_canonical_u32(rng.gen_range(0..1 << 16)) }; + + // Create a program with a few FriFold instructions + let program = RecursionProgram:: { + instructions: (0..17) + .map(|_| { + Instruction::FriFold(Box::new(FriFoldInstr:: { + base_single_addrs: FriFoldBaseIo { x: Address(random_addr()) }, + ext_single_addrs: FriFoldExtSingleIo { + z: Address(random_addr()), + alpha: Address(random_addr()), + }, + ext_vec_addrs: FriFoldExtVecIo { + mat_opening: vec![Address(random_addr())], + ps_at_z: vec![Address(random_addr())], + alpha_pow_input: vec![Address(random_addr())], + ro_input: vec![Address(random_addr())], + alpha_pow_output: vec![Address(random_addr())], + ro_output: vec![Address(random_addr())], + }, + alpha_pow_mults: vec![F::one()], + ro_mults: vec![F::one()], + })) + }) + .collect(), + ..Default::default() + }; + + let chip = FriFoldChip::<3>::default(); + let trace = chip.generate_preprocessed_trace(&program).unwrap(); + println!("{:?}", trace.values); + } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_preprocessed_trace_ffi_eq_rust() { + type F = BabyBear; + + let mut rng = StdRng::seed_from_u64(0xDEADBEEF); + let mut random_addr = move || -> F { F::from_canonical_u32(rng.gen_range(0..1 << 16)) }; + + // Create a program with a few FriFold instructions + let program = RecursionProgram:: { + instructions: (0..17) + .map(|_| { + Instruction::FriFold(Box::new(FriFoldInstr:: { + base_single_addrs: FriFoldBaseIo { x: Address(random_addr()) }, + ext_single_addrs: FriFoldExtSingleIo { + z: Address(random_addr()), + alpha: Address(random_addr()), + }, + ext_vec_addrs: FriFoldExtVecIo { + mat_opening: vec![Address(random_addr())], + ps_at_z: vec![Address(random_addr())], + alpha_pow_input: vec![Address(random_addr())], + ro_input: vec![Address(random_addr())], + alpha_pow_output: vec![Address(random_addr())], + ro_output: vec![Address(random_addr())], + }, + alpha_pow_mults: vec![F::one()], + ro_mults: vec![F::one()], + })) + }) + .collect(), + ..Default::default() + }; + + let chip = FriFoldChip::<3>::default(); + let trace_rust = chip.generate_preprocessed_trace(&program).unwrap(); + let trace_ffi = generate_preprocessed_trace_ffi(&program); + + assert_eq!(trace_ffi, trace_rust); + } + + #[cfg(feature = "sys")] + fn generate_preprocessed_trace_ffi( + program: 
&RecursionProgram, + ) -> RowMajorMatrix { + type F = BabyBear; + + let mut rows = Vec::new(); + program + .instructions + .iter() + .filter_map(|instruction| { + if let Instruction::FriFold(instr) = instruction { + Some(instr) + } else { + None + } + }) + .for_each(|instruction| { + let mut row_add = vec![ + [F::zero(); NUM_FRI_FOLD_PREPROCESSED_COLS]; + instruction.ext_vec_addrs.ps_at_z.len() + ]; + + row_add.iter_mut().enumerate().for_each(|(row_idx, row)| { + let cols: &mut FriFoldPreprocessedCols = row.as_mut_slice().borrow_mut(); + unsafe { + crate::sys::fri_fold_instr_to_row_babybear( + &instruction.into(), + row_idx, + cols, + ); + } + }); + + rows.extend(row_add); + }); + + pad_rows_fixed(&mut rows, || [F::zero(); NUM_FRI_FOLD_PREPROCESSED_COLS], None); + + RowMajorMatrix::new(rows.into_iter().flatten().collect(), NUM_FRI_FOLD_PREPROCESSED_COLS) + } } diff --git a/crates/recursion/core/src/chips/mem/mod.rs b/crates/recursion/core/src/chips/mem/mod.rs index f318db027a..cace2a1026 100644 --- a/crates/recursion/core/src/chips/mem/mod.rs +++ b/crates/recursion/core/src/chips/mem/mod.rs @@ -13,10 +13,13 @@ pub const NUM_MEM_ACCESS_COLS: usize = core::mem::size_of:: /// Data describing in what manner to access a particular memory block. #[derive(AlignedBorrow, Debug, Clone, Copy)] #[repr(C)] -pub struct MemoryAccessCols { +pub struct MemoryAccessColsChips { /// The address to access. pub addr: Address, /// The multiplicity which to read/write. /// "Positive" values indicate a write, and "negative" values indicate a read. pub mult: F, } + +/// Avoids cbindgen naming collisions. +pub type MemoryAccessCols = MemoryAccessColsChips; diff --git a/crates/recursion/core/src/chips/mem/variable.rs b/crates/recursion/core/src/chips/mem/variable.rs index 98ae999ba4..853984bbbf 100644 --- a/crates/recursion/core/src/chips/mem/variable.rs +++ b/crates/recursion/core/src/chips/mem/variable.rs @@ -1,5 +1,5 @@ use core::borrow::Borrow; -use instruction::{HintBitsInstr, HintExt2FeltsInstr, HintInstr}; +use instruction::{HintAddCurveInstr, HintBitsInstr, HintExt2FeltsInstr, HintInstr}; use p3_air::{Air, BaseAir, PairBuilder}; use p3_field::PrimeField32; use p3_matrix::{dense::RowMajorMatrix, Matrix}; @@ -70,6 +70,13 @@ impl MachineAir for MemoryChip { output_addrs_mults, input_addr: _, // No receive interaction for the hint operation }) => output_addrs_mults.iter().collect(), + Instruction::HintAddCurve(instr) => { + let HintAddCurveInstr { + output_x_addrs_mults, + output_y_addrs_mults, .. // No receive interaction for the hint operation + } = instr.as_ref(); + output_x_addrs_mults.iter().chain(output_y_addrs_mults.iter()).collect() + } _ => vec![], }) .collect::>(); diff --git a/crates/recursion/core/src/chips/poseidon2_skinny/columns/mod.rs b/crates/recursion/core/src/chips/poseidon2_skinny/columns/mod.rs index 7338082179..a8080c7a35 100644 --- a/crates/recursion/core/src/chips/poseidon2_skinny/columns/mod.rs +++ b/crates/recursion/core/src/chips/poseidon2_skinny/columns/mod.rs @@ -14,10 +14,12 @@ const fn make_col_map_degree9() -> Poseidon2 { } pub const POSEIDON2_DEGREE9_COL_MAP: Poseidon2 = make_col_map_degree9(); +pub const NUM_INTERNAL_ROUNDS_S0: usize = NUM_INTERNAL_ROUNDS - 1; + /// Struct for the poseidon2 skinny non preprocessed column. 
#[derive(AlignedBorrow, Clone, Copy)] #[repr(C)] pub struct Poseidon2 { pub state_var: [T; WIDTH], - pub internal_rounds_s0: [T; NUM_INTERNAL_ROUNDS - 1], + pub internal_rounds_s0: [T; NUM_INTERNAL_ROUNDS_S0], } diff --git a/crates/recursion/core/src/chips/poseidon2_skinny/columns/preprocessed.rs b/crates/recursion/core/src/chips/poseidon2_skinny/columns/preprocessed.rs index 02f5e041a8..90a857255b 100644 --- a/crates/recursion/core/src/chips/poseidon2_skinny/columns/preprocessed.rs +++ b/crates/recursion/core/src/chips/poseidon2_skinny/columns/preprocessed.rs @@ -1,6 +1,6 @@ use sp1_derive::AlignedBorrow; -use crate::chips::{mem::MemoryAccessCols, poseidon2_skinny::WIDTH}; +use crate::chips::{mem::MemoryAccessColsChips, poseidon2_skinny::WIDTH}; #[derive(AlignedBorrow, Clone, Copy, Debug)] #[repr(C)] @@ -13,7 +13,9 @@ pub struct RoundCountersPreprocessedCols { #[derive(AlignedBorrow, Clone, Copy, Debug)] #[repr(C)] -pub struct Poseidon2PreprocessedCols { - pub memory_preprocessed: [MemoryAccessCols; WIDTH], +pub struct Poseidon2PreprocessedColsSkinny { + pub memory_preprocessed: [MemoryAccessColsChips; WIDTH], pub round_counters_preprocessed: RoundCountersPreprocessedCols, } + +pub type Poseidon2PreprocessedCols = Poseidon2PreprocessedColsSkinny; diff --git a/crates/recursion/core/src/chips/poseidon2_skinny/trace.rs b/crates/recursion/core/src/chips/poseidon2_skinny/trace.rs index 7e67f54362..51619fa232 100644 --- a/crates/recursion/core/src/chips/poseidon2_skinny/trace.rs +++ b/crates/recursion/core/src/chips/poseidon2_skinny/trace.rs @@ -276,9 +276,11 @@ mod tests { use crate::{ chips::poseidon2_skinny::{Poseidon2SkinnyChip, WIDTH}, - ExecutionRecord, Poseidon2Event, + Address, ExecutionRecord, Poseidon2Event, Poseidon2Instr, Poseidon2Io, }; + use super::*; + #[test] fn generate_trace() { type F = BabyBear; @@ -299,4 +301,146 @@ mod tests { let chip_9 = Poseidon2SkinnyChip::<9>::default(); let _: RowMajorMatrix = chip_9.generate_trace(&shard, &mut ExecutionRecord::default()); } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_trace_ffi_eq_rust() { + type F = BabyBear; + let input_0 = [F::one(); WIDTH]; + let permuter = inner_perm(); + let output_0 = permuter.permute(input_0); + let mut rng = rand::thread_rng(); + + let input_1 = [F::rand(&mut rng); WIDTH]; + let output_1 = permuter.permute(input_1); + let shard = ExecutionRecord { + poseidon2_events: vec![ + Poseidon2Event { input: input_0, output: output_0 }, + Poseidon2Event { input: input_1, output: output_1 }, + ], + ..Default::default() + }; + + let chip = Poseidon2SkinnyChip::<9>::default(); + let trace_rust = chip.generate_trace(&shard, &mut ExecutionRecord::default()); + let trace_ffi = generate_trace_ffi(&shard); + + assert_eq!(trace_ffi, trace_rust); + } + + #[cfg(feature = "sys")] + fn generate_trace_ffi(input: &ExecutionRecord) -> RowMajorMatrix { + type F = BabyBear; + let mut rows = Vec::new(); + + for event in &input.poseidon2_events { + // We have one row for input, one row for output, NUM_EXTERNAL_ROUNDS rows for the + // external rounds, and one row for all internal rounds. + let mut row_add = [[F::zero(); NUM_POSEIDON2_COLS]; NUM_EXTERNAL_ROUNDS + 3]; + let cols_ptr = row_add.as_mut_ptr() as *mut Poseidon2Cols; + unsafe { + crate::sys::poseidon2_skinny_event_to_row_babybear(event, cols_ptr); + } + + rows.extend(row_add.into_iter()); + } + + // Pad the trace to a power of two. 
+ pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_POSEIDON2_COLS], + input.fixed_log2_rows(&Poseidon2SkinnyChip::<9>::default()), + ); + + // Convert the trace to a row major matrix. + RowMajorMatrix::new(rows.into_iter().flatten().collect(), NUM_POSEIDON2_COLS) + } + + #[test] + fn generate_preprocessed_trace() { + type F = BabyBear; + + let program = RecursionProgram:: { + instructions: vec![Poseidon2(Box::new(Poseidon2Instr { + addrs: Poseidon2Io { + input: [Address(F::one()); WIDTH], + output: [Address(F::two()); WIDTH], + }, + mults: [F::one(); WIDTH], + }))], + ..Default::default() + }; + + let chip_9 = Poseidon2SkinnyChip::<9>::default(); + let preprocessed: Option> = chip_9.generate_preprocessed_trace(&program); + assert!(preprocessed.is_some()); + } + + // ... existing code ... + + #[cfg(feature = "sys")] + #[test] + fn test_generate_preprocessed_trace_ffi_eq_rust() { + type F = BabyBear; + + let program = RecursionProgram:: { + instructions: vec![Poseidon2(Box::new(Poseidon2Instr { + addrs: Poseidon2Io { + input: [Address(F::one()); WIDTH], + output: [Address(F::two()); WIDTH], + }, + mults: [F::one(); WIDTH], + }))], + ..Default::default() + }; + + let chip = Poseidon2SkinnyChip::<9>::default(); + let trace_rust = chip.generate_preprocessed_trace(&program).unwrap(); + let trace_ffi = generate_preprocessed_trace_ffi(&program); + + assert_eq!(trace_ffi, trace_rust); + } + + #[cfg(feature = "sys")] + fn generate_preprocessed_trace_ffi( + program: &RecursionProgram, + ) -> RowMajorMatrix { + type F = BabyBear; + + let instructions = + program.instructions.iter().filter_map(|instruction| match instruction { + Poseidon2(instr) => Some(instr), + _ => None, + }); + + let num_instructions = instructions.clone().count(); + + let mut rows = vec![ + [F::zero(); PREPROCESSED_POSEIDON2_WIDTH]; + num_instructions * (NUM_EXTERNAL_ROUNDS + 3) + ]; + + instructions.zip_eq(&rows.iter_mut().chunks(NUM_EXTERNAL_ROUNDS + 3)).for_each( + |(instruction, row_add)| { + row_add.into_iter().enumerate().for_each(|(i, row)| { + let cols: &mut Poseidon2PreprocessedCols<_> = + (*row).as_mut_slice().borrow_mut(); + unsafe { + crate::sys::poseidon2_skinny_instr_to_row_babybear(instruction, i, cols); + } + }); + }, + ); + + pad_rows_fixed( + &mut rows, + || [F::zero(); PREPROCESSED_POSEIDON2_WIDTH], + program.fixed_log2_rows(&Poseidon2SkinnyChip::<9>::default()), + ); + + RowMajorMatrix::new( + rows.into_iter().flatten().collect::>(), + PREPROCESSED_POSEIDON2_WIDTH, + ) + } } diff --git a/crates/recursion/core/src/chips/poseidon2_wide/columns/preprocessed.rs b/crates/recursion/core/src/chips/poseidon2_wide/columns/preprocessed.rs index 41ec59cd76..47480ba4ef 100644 --- a/crates/recursion/core/src/chips/poseidon2_wide/columns/preprocessed.rs +++ b/crates/recursion/core/src/chips/poseidon2_wide/columns/preprocessed.rs @@ -1,14 +1,16 @@ use sp1_derive::AlignedBorrow; use crate::{ - chips::{mem::MemoryAccessCols, poseidon2_wide::WIDTH}, + chips::{mem::MemoryAccessColsChips, poseidon2_wide::WIDTH}, Address, }; #[derive(AlignedBorrow, Clone, Copy, Debug)] #[repr(C)] -pub struct Poseidon2PreprocessedCols { +pub struct Poseidon2PreprocessedColsWide { pub input: [Address; WIDTH], - pub output: [MemoryAccessCols; WIDTH], + pub output: [MemoryAccessColsChips; WIDTH], pub is_real_neg: T, } + +pub type Poseidon2PreprocessedCols = Poseidon2PreprocessedColsWide; diff --git a/crates/recursion/core/src/chips/poseidon2_wide/trace.rs b/crates/recursion/core/src/chips/poseidon2_wide/trace.rs index e13717bfae..90f8fb7d56 100644 --- 
a/crates/recursion/core/src/chips/poseidon2_wide/trace.rs +++ b/crates/recursion/core/src/chips/poseidon2_wide/trace.rs @@ -288,9 +288,11 @@ mod tests { use crate::{ chips::poseidon2_wide::{Poseidon2WideChip, WIDTH}, - ExecutionRecord, Poseidon2Event, + Address, ExecutionRecord, Poseidon2Event, Poseidon2Instr, Poseidon2Io, }; + use super::*; + #[test] fn generate_trace_deg_3() { type F = BabyBear; @@ -334,4 +336,211 @@ mod tests { let chip_9 = Poseidon2WideChip::<9>; let _: RowMajorMatrix = chip_9.generate_trace(&shard, &mut ExecutionRecord::default()); } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_trace_ffi_eq_rust() { + type F = BabyBear; + let input_0 = [F::one(); WIDTH]; + let permuter = inner_perm(); + let output_0 = permuter.permute(input_0); + let mut rng = rand::thread_rng(); + + let input_1 = [F::rand(&mut rng); WIDTH]; + let output_1 = permuter.permute(input_1); + + let shard = ExecutionRecord { + poseidon2_events: vec![ + Poseidon2Event { input: input_0, output: output_0 }, + Poseidon2Event { input: input_1, output: output_1 }, + ], + ..Default::default() + }; + + let chip = Poseidon2WideChip::<9>; + let trace_rust = chip.generate_trace(&shard, &mut ExecutionRecord::default()); + let trace_ffi = generate_trace_ffi(&shard); + + assert_eq!(trace_ffi, trace_rust); + } + + #[cfg(feature = "sys")] + fn generate_trace_ffi(input: &ExecutionRecord) -> RowMajorMatrix { + type F = BabyBear; + let padded_nb_rows = match input.fixed_log2_rows(&Poseidon2WideChip::<9>) { + Some(log2_rows) => 1 << log2_rows, + None => next_power_of_two(input.poseidon2_events.len(), None), + }; + let num_columns = as BaseAir>::width(&Poseidon2WideChip::<9>); + let mut values = vec![F::zero(); padded_nb_rows * num_columns]; + + let populate_len = input.poseidon2_events.len() * num_columns; + let (values_pop, values_dummy) = values.split_at_mut(populate_len); + + join( + || { + values_pop + .par_chunks_mut(num_columns) + .zip_eq(&input.poseidon2_events) + .for_each(|(row, event)| populate_perm_ffi::<9>(&event.input, row)) + }, + || { + let mut dummy_row = vec![F::zero(); num_columns]; + populate_perm_ffi::<9>(&[F::zero(); WIDTH], &mut dummy_row); + values_dummy + .par_chunks_mut(num_columns) + .for_each(|row| row.copy_from_slice(&dummy_row)) + }, + ); + + RowMajorMatrix::new(values, num_columns) + } + + #[cfg(feature = "sys")] + fn populate_perm_ffi( + input: &[BabyBear; WIDTH], + input_row: &mut [BabyBear], + ) { + let permutation = permutation_mut::(input_row); + + let ( + external_rounds_state, + internal_rounds_state, + internal_rounds_s0, + mut external_sbox, + mut internal_sbox, + output_state, + ) = permutation.get_cols_mut(); + + // Create temporary arrays with the correct types + let mut ext_rounds = [[BabyBear::zero(); WIDTH]; NUM_EXTERNAL_ROUNDS]; + for (dst, src) in ext_rounds.iter_mut().zip(external_rounds_state.iter()) { + *dst = *src; + } + + // Handle external_sbox - create temporary array only if Some + let mut ext_sbox = [[BabyBear::zero(); NUM_EXTERNAL_ROUNDS]; WIDTH]; + if let Some(sbox) = external_sbox.as_mut() { + for i in 0..WIDTH { + for j in 0..NUM_EXTERNAL_ROUNDS { + ext_sbox[i][j] = sbox[j][i]; + } + } + } + + // Create temporary array for internal_sbox only if Some + let mut int_sbox = [BabyBear::zero(); NUM_INTERNAL_ROUNDS]; + if let Some(sbox) = internal_sbox.as_mut() { + int_sbox.copy_from_slice(sbox.as_slice()); + } + + unsafe { + crate::sys::poseidon2_wide_event_to_row_babybear( + input, + ext_rounds.as_mut_ptr() as *mut _, + internal_rounds_state, + 
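+ // The remaining arguments are output buffers written by the FFI call; the two S-box
+ // pointers below are passed as null when those columns are absent, and the results are
+ // copied back into the column buffers after the call returns.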
internal_rounds_s0, + if external_sbox.is_some() { &mut ext_sbox } else { std::ptr::null_mut() }, + if internal_sbox.is_some() { &mut int_sbox } else { std::ptr::null_mut() }, + output_state, + ); + + // Copy back the results if needed + for (dst, src) in external_rounds_state.iter_mut().zip(ext_rounds.iter()) { + *dst = *src; + } + + // Copy back external_sbox results if needed + if let Some(sbox) = external_sbox.as_mut() { + for i in 0..WIDTH { + for j in 0..NUM_EXTERNAL_ROUNDS { + sbox[j][i] = ext_sbox[i][j]; + } + } + } + + // Copy back internal_sbox results if needed + if let Some(sbox) = internal_sbox.as_mut() { + sbox.copy_from_slice(&int_sbox); + } + } + } + + #[test] + fn generate_preprocessed_trace() { + type F = BabyBear; + + let program = RecursionProgram:: { + instructions: vec![Poseidon2(Box::new(Poseidon2Instr { + addrs: Poseidon2Io { + input: [Address(F::one()); WIDTH], + output: [Address(F::two()); WIDTH], + }, + mults: [F::one(); WIDTH], + }))], + ..Default::default() + }; + + let chip_9 = Poseidon2WideChip::<9>; + let preprocessed: Option> = chip_9.generate_preprocessed_trace(&program); + assert!(preprocessed.is_some()); + } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_preprocessed_trace_ffi_eq_rust() { + type F = BabyBear; + + let program = RecursionProgram:: { + instructions: vec![Poseidon2(Box::new(Poseidon2Instr { + addrs: Poseidon2Io { + input: [Address(F::one()); WIDTH], + output: [Address(F::two()); WIDTH], + }, + mults: [F::one(); WIDTH], + }))], + ..Default::default() + }; + + let chip = Poseidon2WideChip::<9>; + let trace_rust = chip.generate_preprocessed_trace(&program).unwrap(); + let trace_ffi = generate_preprocessed_trace_ffi(&program); + + assert_eq!(trace_ffi, trace_rust); + } + + #[cfg(feature = "sys")] + fn generate_preprocessed_trace_ffi( + program: &RecursionProgram, + ) -> RowMajorMatrix { + type F = BabyBear; + + let instrs = program + .instructions + .iter() + .filter_map(|instruction| match instruction { + Poseidon2(instr) => Some(instr.as_ref()), + _ => None, + }) + .collect::>(); + + let padded_nb_rows = match program.fixed_log2_rows(&Poseidon2WideChip::<9>) { + Some(log2_rows) => 1 << log2_rows, + None => next_power_of_two(instrs.len(), None), + }; + let mut values = vec![F::zero(); padded_nb_rows * PREPROCESSED_POSEIDON2_WIDTH]; + + let populate_len = instrs.len() * PREPROCESSED_POSEIDON2_WIDTH; + values[..populate_len] + .par_chunks_mut(PREPROCESSED_POSEIDON2_WIDTH) + .zip_eq(instrs) + .for_each(|(row, instr)| { + let cols: &mut Poseidon2PreprocessedCols<_> = row.borrow_mut(); + unsafe { + crate::sys::poseidon2_wide_instr_to_row_babybear(instr, cols); + } + }); + + RowMajorMatrix::new(values, PREPROCESSED_POSEIDON2_WIDTH) + } } diff --git a/crates/recursion/core/src/chips/public_values.rs b/crates/recursion/core/src/chips/public_values.rs index e81ed89758..c2c6a9450f 100644 --- a/crates/recursion/core/src/chips/public_values.rs +++ b/crates/recursion/core/src/chips/public_values.rs @@ -16,7 +16,7 @@ use crate::{ use crate::DIGEST_SIZE; -use super::mem::MemoryAccessCols; +use super::mem::{MemoryAccessCols, MemoryAccessColsChips}; pub const NUM_PUBLIC_VALUES_COLS: usize = core::mem::size_of::>(); pub const NUM_PUBLIC_VALUES_PREPROCESSED_COLS: usize = @@ -32,7 +32,7 @@ pub struct PublicValuesChip; #[repr(C)] pub struct PublicValuesPreprocessedCols { pub pv_idx: [T; DIGEST_SIZE], - pub pv_mem: MemoryAccessCols, + pub pv_mem: MemoryAccessColsChips, } /// The cols for a CommitPVHash invocation. 
@@ -188,6 +188,8 @@ mod tests { use p3_field::AbstractField; use p3_matrix::dense::RowMajorMatrix; + use super::*; + use crate::{ air::{RecursionPublicValues, NUM_PV_ELMS_TO_HASH, RECURSIVE_PROOF_NUM_PV_ELTS}, chips::public_values::PublicValuesChip, @@ -249,4 +251,148 @@ mod tests { let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); println!("{:?}", trace.values) } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_trace_ffi_eq_rust() { + type F = BabyBear; + + let mut rng = StdRng::seed_from_u64(0xDEADBEEF); + let random_felts: [F; RECURSIVE_PROOF_NUM_PV_ELTS] = + array::from_fn(|_| F::from_canonical_u32(rng.gen_range(0..1 << 16))); + let random_public_values: &RecursionPublicValues = random_felts.as_slice().borrow(); + + let shard = ExecutionRecord { + commit_pv_hash_events: vec![CommitPublicValuesEvent { + public_values: *random_public_values, + }], + ..Default::default() + }; + + let chip = PublicValuesChip; + let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); + let trace_ffi = generate_trace_ffi(&shard); + + assert_eq!(trace_ffi, trace); + } + + #[cfg(feature = "sys")] + fn generate_trace_ffi(input: &ExecutionRecord) -> RowMajorMatrix { + type F = BabyBear; + + if input.commit_pv_hash_events.len() != 1 { + tracing::warn!("Expected exactly one CommitPVHash event."); + } + + let mut rows: Vec<[F; NUM_PUBLIC_VALUES_COLS]> = Vec::new(); + + // We only take 1 commit pv hash instruction, since our air only checks for one public + // values hash. + for event in input.commit_pv_hash_events.iter().take(1) { + for i in 0..DIGEST_SIZE { + let mut row = [F::zero(); NUM_PUBLIC_VALUES_COLS]; + let cols: &mut PublicValuesCols = row.as_mut_slice().borrow_mut(); + unsafe { + crate::sys::public_values_event_to_row_babybear(event, i, cols); + } + rows.push(row); + } + } + + // Pad the trace to 8 rows. 
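+ // (The padded height comes from the fixed PUB_VALUES_LOG_HEIGHT constant rather than
+ // from the number of events.)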
+ pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_PUBLIC_VALUES_COLS], + Some(PUB_VALUES_LOG_HEIGHT), + ); + + RowMajorMatrix::new(rows.into_iter().flatten().collect(), NUM_PUBLIC_VALUES_COLS) + } + + #[test] + fn generate_public_values_preprocessed_trace() { + type F = BabyBear; + + let addr = 0u32; + let public_values_a: [u32; RECURSIVE_PROOF_NUM_PV_ELTS] = + array::from_fn(|i| i as u32 + addr); + let public_values: &RecursionPublicValues = public_values_a.as_slice().borrow(); + + let program = RecursionProgram:: { + instructions: vec![instr::commit_public_values(public_values)], + ..Default::default() + }; + + let chip = PublicValuesChip; + let trace = chip.generate_preprocessed_trace(&program).unwrap(); + println!("{:?}", trace.values); + } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_preprocessed_trace_ffi_eq_rust() { + let addr = 0u32; + let public_values_a: [u32; RECURSIVE_PROOF_NUM_PV_ELTS] = + array::from_fn(|i| i as u32 + addr); + let public_values: &RecursionPublicValues = public_values_a.as_slice().borrow(); + + let program = RecursionProgram { + instructions: vec![instr::commit_public_values(public_values)], + ..Default::default() + }; + + let chip = PublicValuesChip; + let trace = chip.generate_preprocessed_trace(&program).unwrap(); + let trace_ffi = generate_preprocessed_trace_ffi(&program); + + assert_eq!(trace_ffi, trace); + } + + #[cfg(feature = "sys")] + fn generate_preprocessed_trace_ffi( + program: &RecursionProgram, + ) -> RowMajorMatrix { + type F = BabyBear; + + let mut rows: Vec<[F; NUM_PUBLIC_VALUES_PREPROCESSED_COLS]> = Vec::new(); + let commit_pv_hash_instrs = program + .instructions + .iter() + .filter_map(|instruction| { + if let Instruction::CommitPublicValues(instr) = instruction { + Some(instr) + } else { + None + } + }) + .collect::>(); + + if commit_pv_hash_instrs.len() != 1 { + tracing::warn!("Expected exactly one CommitPVHash instruction."); + } + + // We only take 1 commit pv hash instruction + for instr in commit_pv_hash_instrs.iter().take(1) { + for i in 0..DIGEST_SIZE { + let mut row = [F::zero(); NUM_PUBLIC_VALUES_PREPROCESSED_COLS]; + let cols: &mut PublicValuesPreprocessedCols = row.as_mut_slice().borrow_mut(); + unsafe { + crate::sys::public_values_instr_to_row_babybear(instr, i, cols); + } + rows.push(row); + } + } + + // Pad the preprocessed rows to 8 rows + pad_rows_fixed( + &mut rows, + || [F::zero(); NUM_PUBLIC_VALUES_PREPROCESSED_COLS], + Some(PUB_VALUES_LOG_HEIGHT), + ); + + RowMajorMatrix::new( + rows.into_iter().flatten().collect(), + NUM_PUBLIC_VALUES_PREPROCESSED_COLS, + ) + } } diff --git a/crates/recursion/core/src/chips/select.rs b/crates/recursion/core/src/chips/select.rs index d1c44d9b94..82231306ed 100644 --- a/crates/recursion/core/src/chips/select.rs +++ b/crates/recursion/core/src/chips/select.rs @@ -1,7 +1,6 @@ use core::borrow::Borrow; use p3_air::{Air, BaseAir, PairBuilder}; -use p3_field::AbstractField; -use p3_field::{Field, PrimeField32}; +use p3_field::{AbstractField, Field, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use p3_maybe_rayon::prelude::*; use sp1_core_machine::utils::next_power_of_two; @@ -229,4 +228,168 @@ mod tests { run_recursion_test_machines(program); } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_trace_ffi_eq_rust() { + type F = BabyBear; + + let shard = ExecutionRecord { + select_events: vec![SelectIo { + bit: F::one(), + out1: F::from_canonical_u32(5), + out2: F::from_canonical_u32(3), + in1: F::from_canonical_u32(3), + in2: F::from_canonical_u32(5), + }], 
+ ..Default::default() + }; + + let chip = SelectChip; + let trace: RowMajorMatrix = chip.generate_trace(&shard, &mut ExecutionRecord::default()); + let trace_ffi = generate_trace_ffi(&shard); + + assert_eq!(trace_ffi, trace); + } + + #[cfg(feature = "sys")] + fn generate_trace_ffi(input: &ExecutionRecord) -> RowMajorMatrix { + type F = BabyBear; + + let events = &input.select_events; + let nb_rows = events.len(); + let fixed_log2_rows = input.fixed_log2_rows(&SelectChip); + let padded_nb_rows = match fixed_log2_rows { + Some(log2_rows) => 1 << log2_rows, + None => next_power_of_two(nb_rows, None), + }; + let mut values = vec![F::zero(); padded_nb_rows * SELECT_COLS]; + + let chunk_size = std::cmp::max(events.len() / num_cpus::get(), 1); + let populate_len = events.len() * SELECT_COLS; + + values[..populate_len].par_chunks_mut(chunk_size * SELECT_COLS).enumerate().for_each( + |(i, rows)| { + rows.chunks_mut(SELECT_COLS).enumerate().for_each(|(j, row)| { + let idx = i * chunk_size + j; + if idx < events.len() { + let cols: &mut SelectCols<_> = row.borrow_mut(); + unsafe { + crate::sys::select_event_to_row_babybear(&events[idx], cols); + } + } + }); + }, + ); + + RowMajorMatrix::new(values, SELECT_COLS) + } + + #[test] + fn generate_preprocessed_trace() { + type F = BabyBear; + + let program = RecursionProgram { + instructions: vec![ + Instruction::Select(SelectInstr { + addrs: SelectIo { + bit: Address(F::zero()), + out1: Address(F::one()), + out2: Address(F::from_canonical_u32(2)), + in1: Address(F::from_canonical_u32(3)), + in2: Address(F::from_canonical_u32(4)), + }, + mult1: F::one(), + mult2: F::one(), + }), + Instruction::Select(SelectInstr { + addrs: SelectIo { + bit: Address(F::from_canonical_u32(5)), + out1: Address(F::from_canonical_u32(6)), + out2: Address(F::from_canonical_u32(7)), + in1: Address(F::from_canonical_u32(8)), + in2: Address(F::from_canonical_u32(9)), + }, + mult1: F::one(), + mult2: F::one(), + }), + ], + ..Default::default() + }; + + let chip = SelectChip; + let trace = chip.generate_preprocessed_trace(&program).unwrap(); + println!("{:?}", trace.values); + } + + #[cfg(feature = "sys")] + #[test] + fn test_generate_preprocessed_trace_ffi_eq_rust() { + type F = BabyBear; + + let program = RecursionProgram { + instructions: vec![Instruction::Select(SelectInstr { + addrs: SelectIo { + bit: Address(F::zero()), + out1: Address(F::one()), + out2: Address(F::from_canonical_u32(2)), + in1: Address(F::from_canonical_u32(3)), + in2: Address(F::from_canonical_u32(4)), + }, + mult1: F::one(), + mult2: F::one(), + })], + ..Default::default() + }; + + let chip = SelectChip; + let trace = chip.generate_preprocessed_trace(&program).unwrap(); + let trace_ffi = generate_preprocessed_trace_ffi(&program); + + assert_eq!(trace_ffi, trace); + } + + #[cfg(feature = "sys")] + fn generate_preprocessed_trace_ffi( + program: &RecursionProgram, + ) -> RowMajorMatrix { + type F = BabyBear; + + let instrs = program + .instructions + .iter() + .filter_map(|instruction| match instruction { + Instruction::Select(x) => Some(x), + _ => None, + }) + .collect::>(); + + let nb_rows = instrs.len(); + let fixed_log2_rows = program.fixed_log2_rows(&SelectChip); + let padded_nb_rows = match fixed_log2_rows { + Some(log2_rows) => 1 << log2_rows, + None => next_power_of_two(nb_rows, None), + }; + let mut values = vec![F::zero(); padded_nb_rows * SELECT_PREPROCESSED_COLS]; + + let chunk_size = std::cmp::max(instrs.len() / num_cpus::get(), 1); + let populate_len = instrs.len() * SELECT_PREPROCESSED_COLS; + + 
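+ // Populate only the first `populate_len` entries in parallel, one chunk of instructions
+ // per worker; rows beyond that keep their zero initialization and act as padding.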
values[..populate_len] + .par_chunks_mut(chunk_size * SELECT_PREPROCESSED_COLS) + .enumerate() + .for_each(|(i, rows)| { + rows.chunks_mut(SELECT_PREPROCESSED_COLS).enumerate().for_each(|(j, row)| { + let idx = i * chunk_size + j; + if idx < instrs.len() { + let cols: &mut SelectPreprocessedCols<_> = row.borrow_mut(); + unsafe { + crate::sys::select_instr_to_row_babybear(instrs[idx], cols); + } + } + }); + }); + + RowMajorMatrix::new(values, SELECT_PREPROCESSED_COLS) + } } diff --git a/crates/recursion/core/src/lib.rs b/crates/recursion/core/src/lib.rs index bec9e0b0ab..2a7281f27c 100644 --- a/crates/recursion/core/src/lib.rs +++ b/crates/recursion/core/src/lib.rs @@ -11,6 +11,8 @@ pub mod machine; pub mod runtime; pub mod shape; pub mod stark; +#[cfg(feature = "sys")] +pub mod sys; pub use runtime::*; @@ -47,6 +49,7 @@ pub type BaseAluEvent = BaseAluIo; /// An instruction invoking the extension field ALU. #[derive(Clone, Debug, Serialize, Deserialize)] +#[repr(C)] pub struct BaseAluInstr { pub opcode: BaseAluOpcode, pub mult: F, @@ -68,6 +71,7 @@ pub type ExtAluEvent = ExtAluIo>; /// An instruction invoking the extension field ALU. #[derive(Clone, Debug, Serialize, Deserialize)] +#[repr(C)] pub struct ExtAluInstr { pub opcode: ExtAluOpcode, pub mult: F, @@ -102,6 +106,7 @@ pub enum MemAccessKind { /// The inputs and outputs to a Poseidon2 permutation. #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[repr(C)] pub struct Poseidon2Io { pub input: [V; WIDTH], pub output: [V; WIDTH], @@ -109,6 +114,7 @@ pub struct Poseidon2Io { /// An instruction invoking the Poseidon2 permutation. #[derive(Clone, Debug, Serialize, Deserialize)] +#[repr(C)] pub struct Poseidon2SkinnyInstr { pub addrs: Poseidon2Io>, pub mults: [F; WIDTH], @@ -118,6 +124,7 @@ pub type Poseidon2Event = Poseidon2Io; /// The inputs and outputs to a select operation. #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[repr(C)] pub struct SelectIo { pub bit: V, pub out1: V, @@ -128,6 +135,7 @@ pub struct SelectIo { /// An instruction invoking the select operation. #[derive(Clone, Copy, Debug, Serialize, Deserialize)] +#[repr(C)] pub struct SelectInstr { pub addrs: SelectIo>, pub mult1: F, @@ -156,6 +164,30 @@ pub struct ExpReverseBitsInstr { pub mult: F, } +#[derive(Clone, Debug, PartialEq, Eq)] +#[repr(C)] +pub struct ExpReverseBitsInstrFFI<'a, F> { + pub base: &'a Address, + pub exp_ptr: *const Address, + pub exp_len: usize, + pub result: &'a Address, + + pub mult: &'a F, +} + +impl<'a, F> From<&'a ExpReverseBitsInstr> for ExpReverseBitsInstrFFI<'a, F> { + fn from(instr: &'a ExpReverseBitsInstr) -> Self { + Self { + base: &instr.addrs.base, + exp_ptr: instr.addrs.exp.as_ptr(), + exp_len: instr.addrs.exp.len(), + result: &instr.addrs.result, + + mult: &instr.mult, + } + } +} + /// The event encoding the inputs and outputs of an exp-reverse-bits operation. The `len` operand is /// now stored as the length of the `exp` field. 
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -165,6 +197,26 @@ pub struct ExpReverseBitsEvent { pub result: F, } +#[derive(Clone, Debug, PartialEq, Eq)] +#[repr(C)] +pub struct ExpReverseBitsEventFFI<'a, F> { + pub base: &'a F, + pub exp_ptr: *const F, + pub exp_len: usize, + pub result: &'a F, +} + +impl<'a, F> From<&'a ExpReverseBitsEvent> for ExpReverseBitsEventFFI<'a, F> { + fn from(event: &'a ExpReverseBitsEvent) -> Self { + Self { + base: &event.base, + exp_ptr: event.exp.as_ptr(), + exp_len: event.exp.len(), + result: &event.result, + } + } +} + #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct FriFoldIo { pub ext_single: FriFoldExtSingleIo>, @@ -174,6 +226,7 @@ pub struct FriFoldIo { /// The extension-field-valued single inputs to the FRI fold operation. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[repr(C)] pub struct FriFoldExtSingleIo { pub z: V, pub alpha: V, @@ -181,6 +234,7 @@ pub struct FriFoldExtSingleIo { /// The extension-field-valued vector inputs to the FRI fold operation. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[repr(C)] pub struct FriFoldExtVecIo { pub mat_opening: V, pub ps_at_z: V, @@ -192,6 +246,7 @@ pub struct FriFoldExtVecIo { /// The base-field-valued inputs to the FRI fold operation. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[repr(C)] pub struct FriFoldBaseIo { pub x: V, } @@ -207,10 +262,71 @@ pub struct FriFoldInstr { pub ro_mults: Vec, } +#[derive(Clone, Debug, PartialEq, Eq)] +#[repr(C)] +pub struct FriFoldInstrFFI<'a, F> { + pub base_single_addrs: &'a FriFoldBaseIo>, + pub ext_single_addrs: &'a FriFoldExtSingleIo>, + + pub ext_vec_addrs_mat_opening_ptr: *const Address, + pub ext_vec_addrs_mat_opening_len: usize, + pub ext_vec_addrs_ps_at_z_ptr: *const Address, + pub ext_vec_addrs_ps_at_z_len: usize, + pub ext_vec_addrs_alpha_pow_input_ptr: *const Address, + pub ext_vec_addrs_alpha_pow_input_len: usize, + pub ext_vec_addrs_ro_input_ptr: *const Address, + pub ext_vec_addrs_ro_input_len: usize, + pub ext_vec_addrs_alpha_pow_output_ptr: *const Address, + pub ext_vec_addrs_alpha_pow_output_len: usize, + pub ext_vec_addrs_ro_output_ptr: *const Address, + pub ext_vec_addrs_ro_output_len: usize, + + pub alpha_pow_mults_ptr: *const F, + pub alpha_pow_mults_len: usize, + + pub ro_mults_ptr: *const F, + pub ro_mults_len: usize, +} + +impl<'a, F> From<&'a FriFoldInstr> for FriFoldInstrFFI<'a, F> { + fn from(instr: &'a FriFoldInstr) -> Self { + Self { + base_single_addrs: &instr.base_single_addrs, + ext_single_addrs: &instr.ext_single_addrs, + + ext_vec_addrs_mat_opening_ptr: instr.ext_vec_addrs.mat_opening.as_ptr(), + ext_vec_addrs_mat_opening_len: instr.ext_vec_addrs.mat_opening.len(), + ext_vec_addrs_ps_at_z_ptr: instr.ext_vec_addrs.ps_at_z.as_ptr(), + ext_vec_addrs_ps_at_z_len: instr.ext_vec_addrs.ps_at_z.len(), + ext_vec_addrs_alpha_pow_input_ptr: instr.ext_vec_addrs.alpha_pow_input.as_ptr(), + ext_vec_addrs_alpha_pow_input_len: instr.ext_vec_addrs.alpha_pow_input.len(), + ext_vec_addrs_ro_input_ptr: instr.ext_vec_addrs.ro_input.as_ptr(), + ext_vec_addrs_ro_input_len: instr.ext_vec_addrs.ro_input.len(), + ext_vec_addrs_alpha_pow_output_ptr: instr.ext_vec_addrs.alpha_pow_output.as_ptr(), + ext_vec_addrs_alpha_pow_output_len: instr.ext_vec_addrs.alpha_pow_output.len(), + ext_vec_addrs_ro_output_ptr: instr.ext_vec_addrs.ro_output.as_ptr(), + ext_vec_addrs_ro_output_len: instr.ext_vec_addrs.ro_output.len(), + + alpha_pow_mults_ptr: 
instr.alpha_pow_mults.as_ptr(), + alpha_pow_mults_len: instr.alpha_pow_mults.len(), + + ro_mults_ptr: instr.ro_mults.as_ptr(), + ro_mults_len: instr.ro_mults.len(), + } + } +} + +impl<'a, F> From<&'a Box>> for FriFoldInstrFFI<'a, F> { + fn from(instr: &'a Box>) -> Self { + Self::from(instr.as_ref()) + } +} + /// The event encoding the data of a single iteration within the FRI fold operation. /// For any given event, we are accessing a single element of the `Vec` inputs, so that the event /// is not a type alias for `FriFoldIo` like many of the other events. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[repr(C)] pub struct FriFoldEvent { pub base_single: FriFoldBaseIo, pub ext_single: FriFoldExtSingleIo>, @@ -226,12 +342,14 @@ pub struct BatchFRIIo { /// The extension-field-valued single inputs to the batch FRI operation. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[repr(C)] pub struct BatchFRIExtSingleIo { pub acc: V, } /// The extension-field-valued vector inputs to the batch FRI operation. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[repr(C)] pub struct BatchFRIExtVecIo { pub p_at_z: V, pub alpha_pow: V, @@ -239,6 +357,7 @@ pub struct BatchFRIExtVecIo { /// The base-field-valued vector inputs to the batch FRI operation. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[repr(C)] pub struct BatchFRIBaseVecIo { pub p_at_x: V, } @@ -253,10 +372,51 @@ pub struct BatchFRIInstr { pub acc_mult: F, } +#[derive(Clone, Debug, PartialEq, Eq)] +#[repr(C)] +pub struct BatchFRIInstrFFI<'a, F> { + pub base_vec_addrs_p_at_x_ptr: *const Address, + pub base_vec_addrs_p_at_x_len: usize, + + pub ext_single_addrs: &'a BatchFRIExtSingleIo>, + + pub ext_vec_addrs_p_at_z_ptr: *const Address, + pub ext_vec_addrs_p_at_z_len: usize, + pub ext_vec_addrs_alpha_pow_ptr: *const Address, + pub ext_vec_addrs_alpha_pow_len: usize, + + pub acc_mult: &'a F, +} + +impl<'a, F> From<&'a BatchFRIInstr> for BatchFRIInstrFFI<'a, F> { + fn from(instr: &'a BatchFRIInstr) -> Self { + Self { + base_vec_addrs_p_at_x_ptr: instr.base_vec_addrs.p_at_x.as_ptr(), + base_vec_addrs_p_at_x_len: instr.base_vec_addrs.p_at_x.len(), + + ext_single_addrs: &instr.ext_single_addrs, + + ext_vec_addrs_p_at_z_ptr: instr.ext_vec_addrs.p_at_z.as_ptr(), + ext_vec_addrs_p_at_z_len: instr.ext_vec_addrs.p_at_z.len(), + ext_vec_addrs_alpha_pow_ptr: instr.ext_vec_addrs.alpha_pow.as_ptr(), + ext_vec_addrs_alpha_pow_len: instr.ext_vec_addrs.alpha_pow.len(), + + acc_mult: &instr.acc_mult, + } + } +} + +impl<'a, 'b: 'a, F> From<&'b &'b Box>> for BatchFRIInstrFFI<'a, F> { + fn from(instr: &'b &'b Box>) -> Self { + Self::from(instr.as_ref()) + } +} + /// The event encoding the data of a single iteration within the batch FRI operation. /// For any given event, we are accessing a single element of the `Vec` inputs, so that the event /// is not a type alias for `BatchFRIIo` like many of the other events. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[repr(C)] pub struct BatchFRIEvent { pub base_vec: BatchFRIBaseVecIo, pub ext_single: BatchFRIExtSingleIo>, @@ -266,12 +426,14 @@ pub struct BatchFRIEvent { /// An instruction that will save the public values to the execution record and will commit to /// it's digest. #[derive(Clone, Debug, Serialize, Deserialize)] +#[repr(C)] pub struct CommitPublicValuesInstr { pub pv_addrs: RecursionPublicValues>, } /// The event for committing to the public values. 
#[derive(Clone, Debug, Serialize, Deserialize)] +#[repr(C)] pub struct CommitPublicValuesEvent { pub public_values: RecursionPublicValues, } diff --git a/crates/recursion/core/src/machine.rs b/crates/recursion/core/src/machine.rs index 7c32cc34d8..d754367ef6 100644 --- a/crates/recursion/core/src/machine.rs +++ b/crates/recursion/core/src/machine.rs @@ -231,6 +231,10 @@ impl AddAssign<&Instruction> for RecursionAirEventCount { Instruction::BatchFRI(instr) => { self.batch_fri_events += instr.base_vec_addrs.p_at_x.len() } + Instruction::HintAddCurve(instr) => { + self.mem_var_events += instr.output_x_addrs_mults.len(); + self.mem_var_events += instr.output_y_addrs_mults.len(); + } Instruction::CommitPublicValues(_) => {} Instruction::Print(_) => {} } diff --git a/crates/recursion/core/src/runtime/instruction.rs b/crates/recursion/core/src/runtime/instruction.rs index 7a74097246..97e2c203fc 100644 --- a/crates/recursion/core/src/runtime/instruction.rs +++ b/crates/recursion/core/src/runtime/instruction.rs @@ -14,6 +14,7 @@ pub enum Instruction { Select(SelectInstr), ExpReverseBitsLen(ExpReverseBitsInstr), HintBits(HintBitsInstr), + HintAddCurve(Box>), FriFold(Box>), BatchFRI(Box>), Print(PrintInstr), @@ -36,6 +37,15 @@ pub struct PrintInstr { pub addr: Address, } +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct HintAddCurveInstr { + pub output_x_addrs_mults: Vec<(Address, F)>, + pub output_y_addrs_mults: Vec<(Address, F)>, + pub input1_x_addrs: Vec>, + pub input1_y_addrs: Vec>, + pub input2_x_addrs: Vec>, + pub input2_y_addrs: Vec>, +} #[derive(Clone, Debug, Serialize, Deserialize)] pub struct HintInstr { /// Addresses and mults of the output felts. diff --git a/crates/recursion/core/src/runtime/mod.rs b/crates/recursion/core/src/runtime/mod.rs index 90bdd1fb55..4c31c592f8 100644 --- a/crates/recursion/core/src/runtime/mod.rs +++ b/crates/recursion/core/src/runtime/mod.rs @@ -6,14 +6,23 @@ mod record; // Avoid triggering annoying branch of thiserror derive macro. use backtrace::Backtrace as Trace; +use hashbrown::HashMap; +use instruction::HintAddCurveInstr; pub use instruction::Instruction; use instruction::{FieldEltType, HintBitsInstr, HintExt2FeltsInstr, HintInstr, PrintInstr}; +use itertools::Itertools; use machine::RecursionAirEventCount; use memory::*; pub use opcode::*; +use p3_field::AbstractExtensionField; +use p3_field::{AbstractField, ExtensionField, PrimeField32}; +use p3_poseidon2::{Poseidon2, Poseidon2ExternalMatrixGeneral}; +use p3_symmetric::{CryptographicPermutation, Permutation}; +use p3_util::reverse_bits_len; pub use program::*; pub use record::*; - +use sp1_stark::septic_curve::SepticCurve; +use sp1_stark::septic_extension::SepticExtension; use std::{ array, borrow::Borrow, @@ -24,13 +33,6 @@ use std::{ marker::PhantomData, sync::Arc, }; - -use hashbrown::HashMap; -use itertools::Itertools; -use p3_field::{AbstractField, ExtensionField, PrimeField32}; -use p3_poseidon2::{Poseidon2, Poseidon2ExternalMatrixGeneral}; -use p3_symmetric::{CryptographicPermutation, Permutation}; -use p3_util::reverse_bits_len; use thiserror::Error; use crate::air::{Block, RECURSIVE_PROOF_NUM_PV_ELTS}; @@ -40,9 +42,8 @@ use crate::*; /// The heap pointer address. pub const HEAP_PTR: i32 = -4; -pub const HEAP_START_ADDRESS: usize = STACK_SIZE + 4; - pub const STACK_SIZE: usize = 1 << 24; +pub const HEAP_START_ADDRESS: usize = STACK_SIZE + 4; pub const MEMORY_SIZE: usize = 1 << 28; /// The width of the Poseidon2 permutation. 
@@ -415,6 +416,44 @@ where self.record.mem_var_events.push(MemEvent { inner: bit }); } } + Instruction::HintAddCurve(instr) => { + let HintAddCurveInstr { + output_x_addrs_mults, + output_y_addrs_mults, + input1_x_addrs, + input1_y_addrs, + input2_x_addrs, + input2_y_addrs, + } = *instr; + let input1_x = SepticExtension::::from_base_fn(|i| { + self.memory.mr_mult(input1_x_addrs[i], F::zero()).val[0] + }); + let input1_y = SepticExtension::::from_base_fn(|i| { + self.memory.mr_mult(input1_y_addrs[i], F::zero()).val[0] + }); + let input2_x = SepticExtension::::from_base_fn(|i| { + self.memory.mr_mult(input2_x_addrs[i], F::zero()).val[0] + }); + let input2_y = SepticExtension::::from_base_fn(|i| { + self.memory.mr_mult(input2_y_addrs[i], F::zero()).val[0] + }); + let point1 = SepticCurve { x: input1_x, y: input1_y }; + let point2 = SepticCurve { x: input2_x, y: input2_y }; + let output = point1.add_incomplete(point2); + + for (val, (addr, mult)) in + output.x.0.into_iter().zip(output_x_addrs_mults.into_iter()) + { + self.memory.mw(addr, Block::from(val), mult); + self.record.mem_var_events.push(MemEvent { inner: Block::from(val) }); + } + for (val, (addr, mult)) in + output.y.0.into_iter().zip(output_y_addrs_mults.into_iter()) + { + self.memory.mw(addr, Block::from(val), mult); + self.record.mem_var_events.push(MemEvent { inner: Block::from(val) }); + } + } Instruction::FriFold(instr) => { let FriFoldInstr { diff --git a/crates/recursion/core/src/runtime/opcode.rs b/crates/recursion/core/src/runtime/opcode.rs index 96a748d065..16e9ef575d 100644 --- a/crates/recursion/core/src/runtime/opcode.rs +++ b/crates/recursion/core/src/runtime/opcode.rs @@ -1,6 +1,7 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[repr(C)] pub enum BaseAluOpcode { AddF, SubF, @@ -9,6 +10,7 @@ pub enum BaseAluOpcode { } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[repr(C)] pub enum ExtAluOpcode { AddE, SubE, diff --git a/crates/recursion/core/src/runtime/program.rs b/crates/recursion/core/src/runtime/program.rs index 38fb29ca8a..f77aeeed07 100644 --- a/crates/recursion/core/src/runtime/program.rs +++ b/crates/recursion/core/src/runtime/program.rs @@ -1,10 +1,10 @@ +use crate::*; use backtrace::Backtrace; use p3_field::Field; use serde::{Deserialize, Serialize}; use shape::RecursionShape; use sp1_stark::air::{MachineAir, MachineProgram}; - -use crate::*; +use sp1_stark::septic_digest::SepticDigest; #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct RecursionProgram { @@ -19,6 +19,10 @@ impl MachineProgram for RecursionProgram { fn pc_start(&self) -> F { F::zero() } + + fn initial_global_cumulative_sum(&self) -> SepticDigest { + SepticDigest::::zero() + } } impl RecursionProgram { diff --git a/crates/recursion/core/src/sys.rs b/crates/recursion/core/src/sys.rs new file mode 100644 index 0000000000..1ddb6ce0b9 --- /dev/null +++ b/crates/recursion/core/src/sys.rs @@ -0,0 +1,117 @@ +use crate::{ + air::Block, + chips::{ + alu_base::{BaseAluAccessCols, BaseAluValueCols}, + alu_ext::{ExtAluAccessCols, ExtAluValueCols}, + batch_fri::{BatchFRICols, BatchFRIPreprocessedCols}, + exp_reverse_bits::{ExpReverseBitsLenCols, ExpReverseBitsLenPreprocessedCols}, + fri_fold::{FriFoldCols, FriFoldPreprocessedCols}, + poseidon2_skinny::{ + columns::{preprocessed::Poseidon2PreprocessedColsSkinny, Poseidon2}, + NUM_EXTERNAL_ROUNDS, NUM_INTERNAL_ROUNDS, + }, + 
poseidon2_wide::columns::preprocessed::Poseidon2PreprocessedColsWide, + public_values::{PublicValuesCols, PublicValuesPreprocessedCols}, + select::{SelectCols, SelectPreprocessedCols}, + }, + BaseAluInstr, BaseAluIo, BatchFRIEvent, BatchFRIInstrFFI, CommitPublicValuesEvent, + CommitPublicValuesInstr, ExpReverseBitsEventFFI, ExpReverseBitsInstrFFI, ExtAluInstr, ExtAluIo, + FriFoldEvent, FriFoldInstrFFI, Poseidon2Event, Poseidon2Instr, SelectEvent, SelectInstr, WIDTH, +}; +use p3_baby_bear::BabyBear; + +#[link(name = "sp1-recursion-core-sys", kind = "static")] +extern "C-unwind" { + pub fn alu_base_event_to_row_babybear( + io: &BaseAluIo, + cols: &mut BaseAluValueCols, + ); + pub fn alu_base_instr_to_row_babybear( + instr: &BaseAluInstr, + cols: &mut BaseAluAccessCols, + ); + + pub fn alu_ext_event_to_row_babybear( + io: &ExtAluIo>, + cols: &mut ExtAluValueCols, + ); + pub fn alu_ext_instr_to_row_babybear( + instr: &ExtAluInstr, + cols: &mut ExtAluAccessCols, + ); + + pub fn batch_fri_event_to_row_babybear( + io: &BatchFRIEvent, + cols: &mut BatchFRICols, + ); + pub fn batch_fri_instr_to_row_babybear( + instr: &BatchFRIInstrFFI, + cols: &mut BatchFRIPreprocessedCols, + ); + + pub fn exp_reverse_bits_event_to_row_babybear( + io: &ExpReverseBitsEventFFI, + i: usize, + cols: &mut ExpReverseBitsLenCols, + ); + pub fn exp_reverse_bits_instr_to_row_babybear( + instr: &ExpReverseBitsInstrFFI, + i: usize, + len: usize, + cols: &mut ExpReverseBitsLenPreprocessedCols, + ); + + pub fn fri_fold_event_to_row_babybear( + io: &FriFoldEvent, + cols: &mut FriFoldCols, + ); + pub fn fri_fold_instr_to_row_babybear( + instr: &FriFoldInstrFFI, + i: usize, + cols: &mut FriFoldPreprocessedCols, + ); + + pub fn public_values_event_to_row_babybear( + io: &CommitPublicValuesEvent, + digest_idx: usize, + cols: &mut PublicValuesCols, + ); + pub fn public_values_instr_to_row_babybear( + instr: &CommitPublicValuesInstr, + digest_idx: usize, + cols: &mut PublicValuesPreprocessedCols, + ); + + pub fn select_event_to_row_babybear( + io: &SelectEvent, + cols: &mut SelectCols, + ); + pub fn select_instr_to_row_babybear( + instr: &SelectInstr, + cols: &mut SelectPreprocessedCols, + ); + + pub fn poseidon2_skinny_event_to_row_babybear( + io: &Poseidon2Event, + cols: *mut Poseidon2, + ); + pub fn poseidon2_skinny_instr_to_row_babybear( + instr: &Poseidon2Instr, + i: usize, + cols: &mut Poseidon2PreprocessedColsSkinny, + ); + + pub fn poseidon2_wide_event_to_row_babybear( + input: &[BabyBear; WIDTH], + external_rounds_state: *mut BabyBear, + internal_rounds_state: &mut [BabyBear; WIDTH], + internal_rounds_s0: &mut [BabyBear; NUM_INTERNAL_ROUNDS - 1], + external_sbox: *mut [[BabyBear; NUM_EXTERNAL_ROUNDS]; WIDTH], + internal_sbox: *mut [BabyBear; NUM_INTERNAL_ROUNDS], + output_state: &mut [BabyBear; WIDTH], + ); + pub fn poseidon2_wide_instr_to_row_babybear( + instr: &Poseidon2Instr, + cols: &mut Poseidon2PreprocessedColsWide, + ); +} diff --git a/crates/recursion/gnark-ffi/Cargo.toml b/crates/recursion/gnark-ffi/Cargo.toml index 3276efb80d..14a7001596 100644 --- a/crates/recursion/gnark-ffi/Cargo.toml +++ b/crates/recursion/gnark-ffi/Cargo.toml @@ -16,8 +16,8 @@ p3-baby-bear = { workspace = true } sp1-recursion-compiler = { workspace = true } sp1-core-machine = { workspace = true } sp1-stark = { workspace = true } -serde = "1.0.204" -serde_json = "1.0.121" +serde = { workspace = true } +serde_json = { workspace = true } tempfile = "3.10.1" log = "0.4.22" num-bigint = "0.4.6" diff --git 
a/crates/recursion/gnark-ffi/src/ffi/docker.rs b/crates/recursion/gnark-ffi/src/ffi/docker.rs index 119253d492..56d416be8d 100644 --- a/crates/recursion/gnark-ffi/src/ffi/docker.rs +++ b/crates/recursion/gnark-ffi/src/ffi/docker.rs @@ -1,5 +1,4 @@ -use crate::ProofBn254; -use crate::{Groth16Bn254Proof, PlonkBn254Proof}; +use crate::{Groth16Bn254Proof, PlonkBn254Proof, ProofBn254}; use anyhow::{anyhow, Result}; use sp1_core_machine::SP1_CIRCUIT_VERSION; use std::{io::Write, process::Command}; @@ -47,8 +46,10 @@ fn call_docker(args: &[&str], mounts: &[(&str, &str)]) -> Result<()> { } cmd.arg(get_docker_image()); cmd.args(args); - if !cmd.status()?.success() { + let result = cmd.status()?; + if !result.success() { log::error!("Failed to run `docker run`: {:?}", cmd); + log::error!("Execution result: {:?}", result); return Err(anyhow!("docker command failed")); } Ok(()) diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml index 9d3b66d6d8..a55b6caf39 100644 --- a/crates/sdk/Cargo.toml +++ b/crates/sdk/Cargo.toml @@ -11,7 +11,7 @@ categories = { workspace = true } [dependencies] prost = { version = "0.13", optional = true } -serde = { version = "1.0.204", features = ["derive"] } +serde = { workspace = true, features = ["derive"] } twirp = { package = "twirp-rs", version = "0.13.0-succinct", optional = true } async-trait = "0.1.81" reqwest-middleware = { version = "0.3.2", optional = true } @@ -24,6 +24,7 @@ anyhow = "1.0.83" sp1-prover = { workspace = true } sp1-core-machine = { workspace = true } sp1-cuda = { workspace = true, optional = true } +sp1-build = { workspace = true } futures = "0.3.30" bincode = "1.3.3" tokio = { version = "1.39.2", features = ["full"], optional = true } @@ -31,7 +32,7 @@ p3-field = { workspace = true } p3-baby-bear = { workspace = true } p3-fri = { workspace = true } indicatif = "0.17.8" -tracing = "0.1.40" +tracing = { workspace = true } hex = "0.4.3" log = "0.4.22" dirs = "5.0.1" @@ -40,11 +41,11 @@ cfg-if = "1.0" strum = "0.26.3" strum_macros = "0.26.4" thiserror = "1.0.63" -hashbrown = "0.14.5" +hashbrown = { workspace = true } sp1-core-executor = { workspace = true } sp1-stark = { workspace = true } sp1-primitives = { workspace = true } -itertools = "0.13.0" +itertools = { workspace = true } tonic = { version = "0.12", features = ["tls", "tls-roots"], optional = true } alloy-sol-types = { version = "0.8", optional = true } alloy-signer = { version = "0.5", optional = true } @@ -52,6 +53,9 @@ alloy-signer-local = { version = "0.5", optional = true } alloy-primitives = { version = "0.8", optional = true } backoff = { version = "0.4", features = ["tokio"], optional = true } +[dev-dependencies] +test-artifacts = { workspace = true } + [features] default = ["network"] native-gnark = ["sp1-prover/native-gnark"] diff --git a/crates/sdk/src/action.rs b/crates/sdk/src/action.rs index ae50058f52..58b38da010 100644 --- a/crates/sdk/src/action.rs +++ b/crates/sdk/src/action.rs @@ -69,6 +69,12 @@ impl<'a> Execute<'a> { self.context_builder.max_cycles(max_cycles); self } + + /// Skip deferred proof verification. + pub fn set_skip_deferred_proof_verification(mut self, value: bool) -> Self { + self.context_builder.set_skip_deferred_proof_verification(value); + self + } } /// Builder to prepare and configure proving execution of a program on an input. @@ -217,4 +223,10 @@ impl<'a> Prove<'a> { self.timeout = Some(timeout); self } + + /// Set the skip deferred proof verification flag. 
+ pub fn set_skip_deferred_proof_verification(mut self, value: bool) -> Self { + self.context_builder.set_skip_deferred_proof_verification(value); + self + } } diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs index ac97f2e47c..7b7bb96f2b 100644 --- a/crates/sdk/src/lib.rs +++ b/crates/sdk/src/lib.rs @@ -13,6 +13,9 @@ pub mod network; #[cfg(feature = "network-v2")] #[path = "network-v2/mod.rs"] pub mod network_v2; + +use std::env; + #[cfg(feature = "network")] pub use crate::network::prover::NetworkProver as NetworkProverV1; #[cfg(feature = "network-v2")] @@ -31,18 +34,17 @@ pub use proof::*; pub use provers::SP1VerificationError; use sp1_prover::components::DefaultProverComponents; -use std::env; - #[cfg(any(feature = "network", feature = "network-v2"))] use {std::future::Future, tokio::task::block_in_place}; pub use provers::{CpuProver, MockProver, Prover}; +pub use sp1_build::include_elf; pub use sp1_core_executor::{ExecutionReport, HookEnv, SP1Context, SP1ContextBuilder}; pub use sp1_core_machine::{io::SP1Stdin, riscv::cost::CostEstimator, SP1_CIRCUIT_VERSION}; pub use sp1_primitives::io::SP1PublicValues; pub use sp1_prover::{ - CoreSC, HashableKey, InnerSC, OuterSC, PlonkBn254Proof, SP1Prover, SP1ProvingKey, + CoreSC, HashableKey, InnerSC, OuterSC, PlonkBn254Proof, ProverMode, SP1Prover, SP1ProvingKey, SP1VerifyingKey, }; @@ -75,7 +77,7 @@ impl ProverClient { "mock" => Self { prover: Box::new(MockProver::new()) }, "local" => { #[cfg(debug_assertions)] - println!("Warning: Local prover in dev mode is not recommended. Proof generation may be slow."); + eprintln!("Warning: Local prover in dev mode is not recommended. Proof generation may be slow."); Self { #[cfg(not(feature = "cuda"))] prover: Box::new(CpuProver::new()), @@ -84,14 +86,20 @@ impl ProverClient { } } "network" => { + let private_key = env::var("SP1_PRIVATE_KEY") + .expect("SP1_PRIVATE_KEY must be set for remote proving"); + let rpc_url = env::var("PROVER_NETWORK_RPC").ok(); + let skip_simulation = + env::var("SKIP_SIMULATION").map(|val| val == "true").unwrap_or_default(); + cfg_if! { if #[cfg(feature = "network-v2")] { Self { - prover: Box::new(NetworkProverV2::new()), + prover: Box::new(NetworkProverV2::new(&private_key, rpc_url, skip_simulation)), } } else if #[cfg(feature = "network")] { Self { - prover: Box::new(NetworkProverV1::new()), + prover: Box::new(NetworkProverV1::new(&private_key, rpc_url, skip_simulation)), } } else { panic!("network feature is not enabled") @@ -104,11 +112,13 @@ impl ProverClient { } } + /// Returns a [ProverClientBuilder] to easily create a [ProverClient]. + pub fn builder() -> ProverClientBuilder { + ProverClientBuilder::default() + } + /// Creates a new [ProverClient] with the mock prover. /// - /// Recommended for testing and development. You can also use [ProverClient::new] to set the - /// prover to `mock` with the `SP1_PROVER` environment variable. - /// /// ### Examples /// /// ```no_run @@ -120,10 +130,7 @@ impl ProverClient { Self { prover: Box::new(MockProver::new()) } } - /// Creates a new [ProverClient] with the local prover. - /// - /// Recommended for proving end-to-end locally. You can also use [ProverClient::new] to set the - /// prover to `local` with the `SP1_PROVER` environment variable. + /// Creates a new [ProverClient] with the local prover, using the CPU. 
/// /// ### Examples /// @@ -132,31 +139,62 @@ impl ProverClient { /// /// let client = ProverClient::local(); /// ``` + #[deprecated(note = "Please use `cpu` instead")] pub fn local() -> Self { Self { prover: Box::new(CpuProver::new()) } } - /// Creates a new [ProverClient] with the network prover. + /// Creates a new [ProverClient] with the local prover, using the CPU. /// - /// Recommended for outsourcing proof generation to an RPC. You can also use [ProverClient::new] - /// to set the prover to `network` with the `SP1_PROVER` environment variable. + /// ### Examples + /// + /// ```no_run + /// use sp1_sdk::ProverClient; + /// + /// let client = ProverClient::cpu(); + /// ``` + pub fn cpu() -> Self { + Self { prover: Box::new(CpuProver::new()) } + } + + /// Creates a new [ProverClient] with the local prover, using the GPU. /// /// ### Examples /// /// ```no_run /// use sp1_sdk::ProverClient; /// - /// let client = ProverClient::network(); + /// let client = ProverClient::cuda(); /// ``` - pub fn network() -> Self { + #[cfg(feature = "cuda")] + pub fn cuda() -> Self { + Self { prover: Box::new(CudaProver::new(SP1Prover::new())) } + } + + /// Creates a new [ProverClient] with the network prover. + /// + /// ### Examples + /// + /// ```no_run + /// use sp1_sdk::ProverClient; + /// + /// let private_key = std::env::var("SP1_PRIVATE_KEY").unwrap(); + /// let rpc_url = std::env::var("PROVER_NETWORK_RPC").ok(); + /// let skip_simulation = + /// std::env::var("SKIP_SIMULATION").map(|val| val == "true").unwrap_or_default(); + /// + /// let client = ProverClient::network(private_key, rpc_url, skip_simulation); + /// ``` + #[cfg(any(feature = "network", feature = "network-v2"))] + pub fn network(private_key: String, rpc_url: Option, skip_simulation: bool) -> Self { cfg_if! { if #[cfg(feature = "network-v2")] { Self { - prover: Box::new(NetworkProverV2::new()), + prover: Box::new(NetworkProverV2::new(&private_key, rpc_url, skip_simulation)), } } else if #[cfg(feature = "network")] { Self { - prover: Box::new(NetworkProverV1::new()), + prover: Box::new(NetworkProverV1::new(&private_key, rpc_url, skip_simulation)), } } else { panic!("network feature is not enabled") @@ -176,7 +214,7 @@ impl ProverClient { /// use sp1_sdk::{ProverClient, SP1Context, SP1Stdin}; /// /// // Load the program. - /// let elf = include_bytes!("../../../examples/fibonacci/program/elf/riscv32im-succinct-zkvm-elf"); + /// let elf = test_artifacts::FIBONACCI_ELF; /// /// // Initialize the prover client. /// let client = ProverClient::new(); @@ -206,7 +244,7 @@ impl ProverClient { /// use sp1_sdk::{ProverClient, SP1Context, SP1Stdin}; /// /// // Load the program. - /// let elf = include_bytes!("../../../examples/fibonacci/program/elf/riscv32im-succinct-zkvm-elf"); + /// let elf = test_artifacts::FIBONACCI_ELF; /// /// // Initialize the prover client. 
/// let client = ProverClient::new(); @@ -232,7 +270,7 @@ impl ProverClient { /// ```no_run /// use sp1_sdk::{ProverClient, SP1Stdin}; /// - /// let elf = include_bytes!("../../../examples/fibonacci/program/elf/riscv32im-succinct-zkvm-elf"); + /// let elf = test_artifacts::FIBONACCI_ELF; /// let client = ProverClient::new(); /// let (pk, vk) = client.setup(elf); /// let mut stdin = SP1Stdin::new(); @@ -265,7 +303,7 @@ impl ProverClient { /// ```no_run /// use sp1_sdk::{ProverClient, SP1Stdin}; /// - /// let elf = include_bytes!("../../../examples/fibonacci/program/elf/riscv32im-succinct-zkvm-elf"); + /// let elf = test_artifacts::FIBONACCI_ELF; /// let client = ProverClient::new(); /// let mut stdin = SP1Stdin::new(); /// stdin.write(&10usize); @@ -282,6 +320,121 @@ impl Default for ProverClient { } } +/// Builder type for [`ProverClient`]. +#[derive(Debug, Default)] +pub struct ProverClientBuilder { + mode: Option, + private_key: Option, + rpc_url: Option, + skip_simulation: bool, +} + +impl ProverClientBuilder { + /// Sets the mode of the prover client being created. + pub fn mode(mut self, mode: ProverMode) -> Self { + self.mode = Some(mode); + self + } + + /// Sets the private key. + pub fn private_key(mut self, private_key: String) -> Self { + self.private_key = Some(private_key); + self + } + + /// Sets the RPC URL. + pub fn rpc_url(mut self, rpc_url: String) -> Self { + self.rpc_url = Some(rpc_url); + self + } + + /// Skips simulation. + pub fn skip_simulation(mut self) -> Self { + self.skip_simulation = true; + self + } + + /// Builds a [ProverClient], using the provided private key. + pub fn build(self) -> ProverClient { + match self.mode.expect("The prover mode is required") { + ProverMode::Cpu => ProverClient::cpu(), + ProverMode::Cuda => { + cfg_if! { + if #[cfg(feature = "cuda")] { + ProverClient::cuda() + } else { + panic!("cuda feature is not enabled") + } + } + } + ProverMode::Network => { + let private_key = self.private_key.expect("The private key is required"); + + cfg_if! { + if #[cfg(feature = "network-v2")] { + ProverClient { + prover: Box::new(NetworkProverV2::new(&private_key, self.rpc_url, self.skip_simulation)), + } + } else if #[cfg(feature = "network")] { + ProverClient { + prover: Box::new(NetworkProverV1::new(&private_key, self.rpc_url, self.skip_simulation)), + } + } else { + panic!("network feature is not enabled") + } + } + } + ProverMode::Mock => ProverClient::mock(), + } + } +} + +/// Builder type for network prover. +#[cfg(any(feature = "network", feature = "network-v2"))] +#[derive(Debug, Default)] +pub struct NetworkProverBuilder { + private_key: Option, + rpc_url: Option, + skip_simulation: bool, +} + +#[cfg(any(feature = "network", feature = "network-v2"))] +impl NetworkProverBuilder { + /// Sets the private key. + pub fn private_key(mut self, private_key: String) -> Self { + self.private_key = Some(private_key); + self + } + + /// Sets the RPC URL. + pub fn rpc_url(mut self, rpc_url: String) -> Self { + self.rpc_url = Some(rpc_url); + self + } + + /// Skips simulation. + pub fn skip_simulation(mut self) -> Self { + self.skip_simulation = true; + self + } + + /// Creates a new [NetworkProverV1]. + #[cfg(feature = "network")] + pub fn build(self) -> NetworkProverV1 { + let private_key = self.private_key.expect("The private key is required"); + + NetworkProverV1::new(&private_key, self.rpc_url, self.skip_simulation) + } + + /// Creates a new [NetworkProverV2]. 
+ #[cfg(feature = "network-v2")] + pub fn build_v2(self) -> NetworkProverV2 { + let private_key = self.private_key.expect("The private key is required"); + + NetworkProverV2::new(&private_key, self.rpc_url, self.skip_simulation) + } +} + /// Utility method for blocking on an async function. /// /// If we're already in a tokio runtime, we'll block in place. Otherwise, we'll create a new @@ -298,22 +451,6 @@ pub fn block_on(fut: impl Future) -> T { } } -/// Returns the raw ELF bytes by the zkVM program target name. -/// -/// Note that this only works when using `sp1_build::build_program` or -/// `sp1_build::build_program_with_args` in a build script. -/// -/// By default, the program target name is the same as the program crate name. However, this might -/// not be the case for non-standard project structures. For example, placing the entrypoint source -/// file at `src/bin/my_entry.rs` would result in the program target being named `my_entry`, in -/// which case the invocation should be `include_elf!("my_entry")` instead. -#[macro_export] -macro_rules! include_elf { - ($arg:tt) => {{ - include_bytes!(env!(concat!("SP1_ELF_", $arg))) - }}; -} - #[cfg(test)] mod tests { @@ -324,9 +461,8 @@ mod tests { #[test] fn test_execute() { utils::setup_logger(); - let client = ProverClient::local(); - let elf = - include_bytes!("../../../examples/fibonacci/program/elf/riscv32im-succinct-zkvm-elf"); + let client = ProverClient::cpu(); + let elf = test_artifacts::FIBONACCI_ELF; let mut stdin = SP1Stdin::new(); stdin.write(&10usize); let (_, report) = client.execute(elf, stdin).run().unwrap(); @@ -337,8 +473,8 @@ mod tests { #[should_panic] fn test_execute_panic() { utils::setup_logger(); - let client = ProverClient::local(); - let elf = include_bytes!("../../../tests/panic/elf/riscv32im-succinct-zkvm-elf"); + let client = ProverClient::cpu(); + let elf = test_artifacts::PANIC_ELF; let mut stdin = SP1Stdin::new(); stdin.write(&10usize); client.execute(elf, stdin).run().unwrap(); @@ -348,8 +484,8 @@ mod tests { #[test] fn test_cycle_limit_fail() { utils::setup_logger(); - let client = ProverClient::local(); - let elf = include_bytes!("../../../tests/panic/elf/riscv32im-succinct-zkvm-elf"); + let client = ProverClient::cpu(); + let elf = test_artifacts::PANIC_ELF; let mut stdin = SP1Stdin::new(); stdin.write(&10usize); client.execute(elf, stdin).max_cycles(1).run().unwrap(); @@ -358,8 +494,8 @@ mod tests { #[test] fn test_e2e_core() { utils::setup_logger(); - let client = ProverClient::local(); - let elf = include_bytes!("../../../tests/fibonacci/elf/riscv32im-succinct-zkvm-elf"); + let client = ProverClient::cpu(); + let elf = test_artifacts::FIBONACCI_ELF; let (pk, vk) = client.setup(elf); let mut stdin = SP1Stdin::new(); stdin.write(&10usize); @@ -378,8 +514,8 @@ mod tests { #[test] fn test_e2e_compressed() { utils::setup_logger(); - let client = ProverClient::local(); - let elf = include_bytes!("../../../tests/fibonacci/elf/riscv32im-succinct-zkvm-elf"); + let client = ProverClient::cpu(); + let elf = test_artifacts::FIBONACCI_ELF; let (pk, vk) = client.setup(elf); let mut stdin = SP1Stdin::new(); stdin.write(&10usize); @@ -398,8 +534,8 @@ mod tests { #[test] fn test_e2e_prove_plonk() { utils::setup_logger(); - let client = ProverClient::local(); - let elf = include_bytes!("../../../tests/fibonacci/elf/riscv32im-succinct-zkvm-elf"); + let client = ProverClient::cpu(); + let elf = test_artifacts::FIBONACCI_ELF; let (pk, vk) = client.setup(elf); let mut stdin = SP1Stdin::new(); stdin.write(&10usize); @@ 
-419,7 +555,7 @@ mod tests { fn test_e2e_prove_plonk_mock() { utils::setup_logger(); let client = ProverClient::mock(); - let elf = include_bytes!("../../../tests/fibonacci/elf/riscv32im-succinct-zkvm-elf"); + let elf = test_artifacts::FIBONACCI_ELF; let (pk, vk) = client.setup(elf); let mut stdin = SP1Stdin::new(); stdin.write(&10usize); diff --git a/crates/sdk/src/network-v2/client.rs b/crates/sdk/src/network-v2/client.rs index a428dee332..91c5eea001 100644 --- a/crates/sdk/src/network-v2/client.rs +++ b/crates/sdk/src/network-v2/client.rs @@ -1,29 +1,31 @@ -use std::{env, time::Duration}; +use std::time::Duration; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; use anyhow::{Context, Ok, Result}; use reqwest_middleware::ClientWithMiddleware as HttpClientWithMiddleware; -use serde::de::DeserializeOwned; -use serde::Serialize; +use serde::{de::DeserializeOwned, Serialize}; use sp1_core_machine::io::SP1Stdin; use sp1_prover::SP1VerifyingKey; -use std::str::FromStr; -use std::time::{SystemTime, UNIX_EPOCH}; -use tokio::try_join; -use tonic::transport::channel::ClientTlsConfig; -use tonic::transport::Channel; - -use crate::network_v2::proto::artifact::{ - artifact_store_client::ArtifactStoreClient, CreateArtifactRequest, +use std::{ + str::FromStr, + time::{SystemTime, UNIX_EPOCH}, }; -use crate::network_v2::proto::network::{ - prover_network_client::ProverNetworkClient, GetFilteredProofRequestsRequest, - GetFilteredProofRequestsResponse, GetNonceRequest, GetProofRequestStatusRequest, - GetProofRequestStatusResponse, ProofMode, ProofStatus, ProofStrategy, RequestProofRequest, - RequestProofRequestBody, RequestProofResponse, +use tokio::try_join; +use tonic::transport::{channel::ClientTlsConfig, Channel}; + +use crate::network_v2::{ + proto::{ + artifact::{artifact_store_client::ArtifactStoreClient, CreateArtifactRequest}, + network::{ + prover_network_client::ProverNetworkClient, GetFilteredProofRequestsRequest, + GetFilteredProofRequestsResponse, GetNonceRequest, GetProofRequestStatusRequest, + GetProofRequestStatusResponse, ProofMode, ProofStatus, ProofStrategy, + RequestProofRequest, RequestProofRequestBody, RequestProofResponse, + }, + }, + Signable, }; -use crate::network_v2::Signable; /// The default RPC endpoint for the Succinct prover network. pub const DEFAULT_PROVER_NETWORK_RPC: &str = "https://rpc.production.succinct.tools/"; @@ -31,11 +33,12 @@ pub const DEFAULT_PROVER_NETWORK_RPC: &str = "https://rpc.production.succinct.to pub struct NetworkClient { signer: PrivateKeySigner, http: HttpClientWithMiddleware, + rpc_url: String, } impl NetworkClient { /// Create a new network client with the given private key. - pub fn new(private_key: &str) -> Self { + pub fn new(private_key: &str, rpc_url: Option) -> Self { let signer = PrivateKeySigner::from_str(private_key).unwrap(); let http_client = reqwest::Client::builder() @@ -44,17 +47,21 @@ impl NetworkClient { .build() .unwrap(); - Self { signer, http: http_client.into() } + Self { + signer, + http: http_client.into(), + rpc_url: rpc_url.unwrap_or_else(|| DEFAULT_PROVER_NETWORK_RPC.to_string()), + } } /// Returns the currently configured RPC endpoint for the Succinct prover network. - pub fn rpc_url() -> String { - env::var("PROVER_NETWORK_RPC").unwrap_or_else(|_| DEFAULT_PROVER_NETWORK_RPC.to_string()) + pub fn rpc_url(&self) -> String { + self.rpc_url.clone() } /// Get a connected RPC client. 
async fn get_rpc(&self) -> Result> { - let rpc_url = Self::rpc_url(); + let rpc_url = self.rpc_url(); let mut endpoint = Channel::from_shared(rpc_url.clone())?; // Check if the URL scheme is HTTPS and configure TLS. @@ -69,7 +76,7 @@ impl NetworkClient { /// Get a connected artifact store client. async fn get_store(&self) -> Result> { - let rpc_url = Self::rpc_url(); + let rpc_url = self.rpc_url(); let mut endpoint = Channel::from_shared(rpc_url.clone())?; // Check if the URL scheme is HTTPS and configure TLS. diff --git a/crates/sdk/src/network-v2/prover.rs b/crates/sdk/src/network-v2/prover.rs index d67ed34386..11d1872455 100644 --- a/crates/sdk/src/network-v2/prover.rs +++ b/crates/sdk/src/network-v2/prover.rs @@ -1,12 +1,10 @@ -use std::{ - env, - time::{Duration, Instant}, -}; +use std::time::{Duration, Instant}; use crate::{ network_v2::client::NetworkClient, network_v2::proto::network::{ProofMode, ProofStatus, ProofStrategy}, - Prover, SP1Context, SP1ProofKind, SP1ProofWithPublicValues, SP1ProvingKey, SP1VerifyingKey, + NetworkProverBuilder, Prover, SP1Context, SP1ProofKind, SP1ProofWithPublicValues, + SP1ProvingKey, SP1VerifyingKey, }; use anyhow::Result; use backoff::{future::retry, ExponentialBackoff}; @@ -16,7 +14,8 @@ use sp1_prover::{components::DefaultProverComponents, SP1Prover, SP1_CIRCUIT_VER use sp1_stark::SP1ProverOpts; use tonic::Code; -use {crate::block_on, tokio::time::sleep}; +use crate::block_on; +use tokio::time::sleep; use crate::provers::{CpuProver, ProofOpts, ProverType}; @@ -30,23 +29,22 @@ const DEFAULT_CYCLE_LIMIT: u64 = 100_000_000; pub struct NetworkProver { client: NetworkClient, local_prover: CpuProver, + skip_simulation: bool, } impl NetworkProver { - /// Creates a new [NetworkProver] with the private key set in `SP1_PRIVATE_KEY`. - pub fn new() -> Self { - let private_key = env::var("SP1_PRIVATE_KEY") - .unwrap_or_else(|_| panic!("SP1_PRIVATE_KEY must be set for remote proving")); - Self::new_from_key(&private_key) - } - /// Creates a new [NetworkProver] with the given private key. - pub fn new_from_key(private_key: &str) -> Self { + pub fn new(private_key: &str, rpc_url: Option, skip_simulation: bool) -> Self { let version = SP1_CIRCUIT_VERSION; log::info!("Client circuit version: {}", version); let local_prover = CpuProver::new(); - let client = NetworkClient::new(private_key); - Self { client, local_prover } + let client = NetworkClient::new(private_key, rpc_url); + Self { client, local_prover, skip_simulation } + } + + /// Creates a new network prover builder. See [`NetworkProverBuilder`] for more details. + pub fn builder() -> NetworkProverBuilder { + NetworkProverBuilder::default() } /// Requests a proof from the prover network, returning the request ID. @@ -58,8 +56,7 @@ impl NetworkProver { timeout: Option, ) -> Result> { // Simulate and get the cycle limit. - let skip_simulation = env::var("SKIP_SIMULATION").map(|val| val == "true").unwrap_or(false); - let cycle_limit = if !skip_simulation { + let cycle_limit = if !self.skip_simulation { let (_, report) = self.local_prover.sp1_prover().execute(elf, &stdin, Default::default())?; let cycles = report.total_instruction_count(); @@ -231,12 +228,6 @@ impl Prover for NetworkProver { } } -impl Default for NetworkProver { - fn default() -> Self { - Self::new() - } -} - /// Warns if `opts` or `context` are not default values, since they are currently unsupported. 
fn warn_if_not_default(opts: &SP1ProverOpts, context: &SP1Context) { let _guard = tracing::warn_span!("network_prover").entered(); diff --git a/crates/sdk/src/network/client.rs b/crates/sdk/src/network/client.rs index 9717c38395..2e39f9b08b 100644 --- a/crates/sdk/src/network/client.rs +++ b/crates/sdk/src/network/client.rs @@ -1,4 +1,4 @@ -use std::{env, time::Duration}; +use std::time::Duration; use crate::{ network::{ @@ -40,16 +40,12 @@ pub struct NetworkClient { pub rpc: TwirpClient, pub http: HttpClientWithMiddleware, pub auth: NetworkAuth, + pub is_using_prover_network: bool, } impl NetworkClient { - /// Returns the currently configured RPC endpoint for the Succinct prover network. - pub fn rpc_url() -> String { - env::var("PROVER_NETWORK_RPC").unwrap_or_else(|_| DEFAULT_PROVER_NETWORK_RPC.to_string()) - } - /// Create a new NetworkClient with the given private key for authentication. - pub fn new(private_key: &str) -> Self { + pub fn new(private_key: &str, rpc_url: Option) -> Self { let auth = NetworkAuth::new(private_key); let twirp_http_client = HttpClient::builder() @@ -59,7 +55,7 @@ impl NetworkClient { .build() .unwrap(); - let rpc_url = Self::rpc_url(); + let rpc_url = rpc_url.unwrap_or_else(|| DEFAULT_PROVER_NETWORK_RPC.to_string()); let rpc = TwirpClient::new(Url::parse(&rpc_url).unwrap(), twirp_http_client, vec![]).unwrap(); @@ -70,7 +66,12 @@ impl NetworkClient { .build() .unwrap(); - Self { auth, rpc, http: http_client.into() } + Self { + auth, + rpc, + http: http_client.into(), + is_using_prover_network: rpc_url == DEFAULT_PROVER_NETWORK_RPC, + } } /// Gets the latest nonce for this auth's account. diff --git a/crates/sdk/src/network/prover.rs b/crates/sdk/src/network/prover.rs index f5939fd2cd..7b9a480f70 100644 --- a/crates/sdk/src/network/prover.rs +++ b/crates/sdk/src/network/prover.rs @@ -1,12 +1,12 @@ -use std::{ - env, - time::{Duration, Instant}, -}; +use std::time::{Duration, Instant}; use crate::{ - network::client::{NetworkClient, DEFAULT_PROVER_NETWORK_RPC}, - network::proto::network::{ProofMode, ProofStatus}, - Prover, SP1Context, SP1ProofKind, SP1ProofWithPublicValues, SP1ProvingKey, SP1VerifyingKey, + network::{ + client::NetworkClient, + proto::network::{ProofMode, ProofStatus}, + }, + NetworkProverBuilder, Prover, SP1Context, SP1ProofKind, SP1ProofWithPublicValues, + SP1ProvingKey, SP1VerifyingKey, }; use anyhow::Result; use sp1_core_machine::io::SP1Stdin; @@ -15,7 +15,8 @@ use sp1_stark::SP1ProverOpts; use super::proto::network::GetProofStatusResponse; -use {crate::block_on, tokio::time::sleep}; +use crate::block_on; +use tokio::time::sleep; use crate::provers::{CpuProver, ProofOpts, ProverType}; @@ -26,23 +27,22 @@ const MAX_CONSECUTIVE_ERRORS: usize = 10; pub struct NetworkProver { client: NetworkClient, local_prover: CpuProver, + skip_simulation: bool, } impl NetworkProver { - /// Creates a new [NetworkProver] with the private key set in `SP1_PRIVATE_KEY`. - pub fn new() -> Self { - let private_key = env::var("SP1_PRIVATE_KEY") - .unwrap_or_else(|_| panic!("SP1_PRIVATE_KEY must be set for remote proving")); - Self::new_from_key(&private_key) - } - /// Creates a new [NetworkProver] with the given private key. 
- pub fn new_from_key(private_key: &str) -> Self { + pub fn new(private_key: &str, rpc_url: Option, skip_simulation: bool) -> Self { let version = SP1_CIRCUIT_VERSION; log::info!("Client circuit version: {}", version); let local_prover = CpuProver::new(); - Self { client: NetworkClient::new(private_key), local_prover } + Self { client: NetworkClient::new(private_key, rpc_url), local_prover, skip_simulation } + } + + /// Creates a new network prover builder. See [`NetworkProverBuilder`] for more details. + pub fn builder() -> NetworkProverBuilder { + NetworkProverBuilder::default() } /// Requests a proof from the prover network, returning the proof ID. @@ -54,9 +54,7 @@ impl NetworkProver { ) -> Result { let client = &self.client; - let skip_simulation = env::var("SKIP_SIMULATION").map(|val| val == "true").unwrap_or(false); - - if !skip_simulation { + if !self.skip_simulation { let (_, report) = self.local_prover.sp1_prover().execute(elf, &stdin, Default::default())?; log::info!("Simulation complete, cycles: {}", report.total_instruction_count()); @@ -67,7 +65,7 @@ impl NetworkProver { let proof_id = client.create_proof(elf, &stdin, mode, SP1_CIRCUIT_VERSION).await?; log::info!("Created {}", proof_id); - if NetworkClient::rpc_url() == DEFAULT_PROVER_NETWORK_RPC { + if self.client.is_using_prover_network { log::info!("View in explorer: https://explorer.succinct.xyz/{}", proof_id); } Ok(proof_id) @@ -183,12 +181,6 @@ impl Prover for NetworkProver { } } -impl Default for NetworkProver { - fn default() -> Self { - Self::new() - } -} - /// Warns if `opts` or `context` are not default values, since they are currently unsupported. fn warn_if_not_default(opts: &SP1ProverOpts, context: &SP1Context) { if opts != &SP1ProverOpts::default() { diff --git a/crates/sdk/src/proof.rs b/crates/sdk/src/proof.rs index 222e69f135..42d5c92865 100644 --- a/crates/sdk/src/proof.rs +++ b/crates/sdk/src/proof.rs @@ -70,6 +70,12 @@ impl SP1ProofWithPublicValues { [plonk_proof.plonk_vkey_hash[..4].to_vec(), proof_bytes].concat() } SP1Proof::Groth16(groth16_proof) => { + if groth16_proof.encoded_proof.is_empty() { + // If the proof is empty, then this is a mock proof. The mock SP1 verifier + // expects an empty byte array for verification, so return an empty byte array. 
+ return Vec::new(); + } + let proof_bytes = hex::decode(&groth16_proof.encoded_proof).expect("Invalid Groth16 proof"); [groth16_proof.groth16_vkey_hash[..4].to_vec(), proof_bytes].concat() @@ -82,3 +88,86 @@ impl SP1ProofWithPublicValues { pub type SP1CoreProofVerificationError = MachineVerificationError; pub type SP1CompressedProofVerificationError = MachineVerificationError; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_plonk_proof_bytes() { + let plonk_proof = SP1ProofWithPublicValues { + proof: SP1Proof::Plonk(PlonkBn254Proof { + encoded_proof: "ab".to_string(), + plonk_vkey_hash: [0; 32], + public_inputs: ["".to_string(), "".to_string()], + raw_proof: "".to_string(), + }), + stdin: SP1Stdin::new(), + public_values: SP1PublicValues::new(), + sp1_version: "".to_string(), + }; + let expected_bytes = [vec![0, 0, 0, 0], hex::decode("ab").unwrap()].concat(); + assert_eq!(plonk_proof.bytes(), expected_bytes); + } + + #[test] + fn test_groth16_proof_bytes() { + let groth16_proof = SP1ProofWithPublicValues { + proof: SP1Proof::Groth16(Groth16Bn254Proof { + encoded_proof: "ab".to_string(), + groth16_vkey_hash: [0; 32], + public_inputs: ["".to_string(), "".to_string()], + raw_proof: "".to_string(), + }), + stdin: SP1Stdin::new(), + public_values: SP1PublicValues::new(), + sp1_version: "".to_string(), + }; + let expected_bytes = [vec![0, 0, 0, 0], hex::decode("ab").unwrap()].concat(); + assert_eq!(groth16_proof.bytes(), expected_bytes); + } + + #[test] + fn test_mock_plonk_proof_bytes() { + let mock_plonk_proof = SP1ProofWithPublicValues { + proof: SP1Proof::Plonk(PlonkBn254Proof { + encoded_proof: "".to_string(), + plonk_vkey_hash: [0; 32], + public_inputs: ["".to_string(), "".to_string()], + raw_proof: "".to_string(), + }), + stdin: SP1Stdin::new(), + public_values: SP1PublicValues::new(), + sp1_version: "".to_string(), + }; + assert_eq!(mock_plonk_proof.bytes(), Vec::::new()); + } + + #[test] + fn test_mock_groth16_proof_bytes() { + let mock_groth16_proof = SP1ProofWithPublicValues { + proof: SP1Proof::Groth16(Groth16Bn254Proof { + encoded_proof: "".to_string(), + groth16_vkey_hash: [0; 32], + public_inputs: ["".to_string(), "".to_string()], + raw_proof: "".to_string(), + }), + stdin: SP1Stdin::new(), + public_values: SP1PublicValues::new(), + sp1_version: "".to_string(), + }; + assert_eq!(mock_groth16_proof.bytes(), Vec::::new()); + } + + #[test] + #[should_panic(expected = "only Plonk and Groth16 proofs are verifiable onchain")] + fn test_core_proof_bytes_unimplemented() { + let core_proof = SP1ProofWithPublicValues { + proof: SP1Proof::Core(vec![]), + stdin: SP1Stdin::new(), + public_values: SP1PublicValues::new(), + sp1_version: "".to_string(), + }; + core_proof.bytes(); + } +} diff --git a/crates/sdk/src/provers/cpu.rs b/crates/sdk/src/provers/cpu.rs index 234e663909..cb3a98289f 100644 --- a/crates/sdk/src/provers/cpu.rs +++ b/crates/sdk/src/provers/cpu.rs @@ -2,11 +2,11 @@ use anyhow::Result; use sp1_core_executor::SP1Context; use sp1_core_machine::io::SP1Stdin; use sp1_prover::{components::DefaultProverComponents, SP1Prover}; +use sp1_stark::MachineProver; -use crate::install::try_install_circuit_artifacts; use crate::{ - provers::ProofOpts, Prover, SP1Proof, SP1ProofKind, SP1ProofWithPublicValues, SP1ProvingKey, - SP1VerifyingKey, + install::try_install_circuit_artifacts, provers::ProofOpts, Prover, SP1Proof, SP1ProofKind, + SP1ProofWithPublicValues, SP1ProvingKey, SP1VerifyingKey, }; use super::ProverType; @@ -35,7 +35,8 @@ impl Prover for CpuProver { } fn 
setup(&self, elf: &[u8]) -> (SP1ProvingKey, SP1VerifyingKey) { - self.prover.setup(elf) + let (pkey, _, _, vk) = self.prover.setup(elf); + (pkey, vk) } fn sp1_prover(&self) -> &SP1Prover { @@ -51,8 +52,11 @@ impl Prover for CpuProver { kind: SP1ProofKind, ) -> Result { // Generate the core proof. + let program = self.prover.get_program(&pk.elf).unwrap(); + let pk_d = self.prover.core_prover.pk_to_device(&pk.pk); + let proof: sp1_prover::SP1ProofWithMetadata = - self.prover.prove_core(pk, &stdin, opts.sp1_prover_opts, context)?; + self.prover.prove_core(&pk_d, program, &stdin, opts.sp1_prover_opts, context)?; if kind == SP1ProofKind::Core { return Ok(SP1ProofWithPublicValues { proof: SP1Proof::Core(proof.proof.0), diff --git a/crates/sdk/src/provers/cuda.rs b/crates/sdk/src/provers/cuda.rs index 5f8ab983aa..251eb00fb3 100644 --- a/crates/sdk/src/provers/cuda.rs +++ b/crates/sdk/src/provers/cuda.rs @@ -4,10 +4,9 @@ use sp1_cuda::SP1CudaProver; use sp1_prover::{components::DefaultProverComponents, SP1Prover}; use super::ProverType; -use crate::install::try_install_circuit_artifacts; use crate::{ - provers::ProofOpts, Prover, SP1Context, SP1Proof, SP1ProofKind, SP1ProofWithPublicValues, - SP1ProvingKey, SP1VerifyingKey, + install::try_install_circuit_artifacts, provers::ProofOpts, Prover, SP1Context, SP1Proof, + SP1ProofKind, SP1ProofWithPublicValues, SP1ProvingKey, SP1VerifyingKey, }; /// An implementation of [crate::ProverClient] that can generate proofs locally using CUDA. @@ -30,7 +29,8 @@ impl Prover for CudaProver { } fn setup(&self, elf: &[u8]) -> (SP1ProvingKey, SP1VerifyingKey) { - self.prover.setup(elf) + let (pk, _, _, vk) = self.prover.setup(elf); + (pk, vk) } fn sp1_prover(&self) -> &SP1Prover { @@ -48,7 +48,8 @@ impl Prover for CudaProver { tracing::warn!("opts and context are ignored for the cuda prover"); // Generate the core proof. 
- let proof = self.cuda_prover.prove_core(pk, &stdin)?; + let (_, _) = self.cuda_prover.setup(&pk.elf).unwrap(); + let proof = self.cuda_prover.prove_core(&stdin)?; if kind == SP1ProofKind::Core { return Ok(SP1ProofWithPublicValues { proof: SP1Proof::Core(proof.proof.0), diff --git a/crates/sdk/src/provers/mock.rs b/crates/sdk/src/provers/mock.rs index ca317972ac..efecdee539 100644 --- a/crates/sdk/src/provers/mock.rs +++ b/crates/sdk/src/provers/mock.rs @@ -2,7 +2,9 @@ use hashbrown::HashMap; use sp1_core_executor::{SP1Context, SP1ReduceProof}; use sp1_core_machine::io::SP1Stdin; -use sp1_stark::{ShardCommitment, ShardOpenedValues, ShardProof, StarkVerifyingKey}; +use sp1_stark::{ + septic_digest::SepticDigest, ShardCommitment, ShardOpenedValues, ShardProof, StarkVerifyingKey, +}; use crate::{ Prover, SP1Proof, SP1ProofKind, SP1ProofWithPublicValues, SP1ProvingKey, SP1VerificationError, @@ -39,7 +41,8 @@ impl Prover for MockProver { } fn setup(&self, elf: &[u8]) -> (SP1ProvingKey, SP1VerifyingKey) { - self.prover.setup(elf) + let (pk, _, _, vk) = self.prover.setup(elf); + (pk, vk) } fn sp1_prover(&self) -> &SP1Prover { @@ -69,8 +72,7 @@ impl Prover for MockProver { let shard_proof = ShardProof { commitment: ShardCommitment { - global_main_commit: [BabyBear::zero(); 8].into(), - local_main_commit: [BabyBear::zero(); 8].into(), + main_commit: [BabyBear::zero(); 8].into(), permutation_commit: [BabyBear::zero(); 8].into(), quotient_commit: [BabyBear::zero(); 8].into(), }, @@ -91,6 +93,7 @@ impl Prover for MockProver { let reduce_vk = StarkVerifyingKey { commit: [BabyBear::zero(); 8].into(), pc_start: BabyBear::zero(), + initial_global_cumulative_sum: SepticDigest::::zero(), chip_information: vec![], chip_ordering: HashMap::new(), }; diff --git a/crates/sdk/src/provers/mod.rs b/crates/sdk/src/provers/mod.rs index 1db5309fac..626c5cdb30 100644 --- a/crates/sdk/src/provers/mod.rs +++ b/crates/sdk/src/provers/mod.rs @@ -10,8 +10,7 @@ pub use mock::MockProver; use itertools::Itertools; use p3_field::PrimeField32; -use std::borrow::Borrow; -use std::time::Duration; +use std::{borrow::Borrow, time::Duration}; use anyhow::Result; use sp1_core_executor::SP1Context; @@ -24,8 +23,9 @@ use sp1_stark::{air::PublicValues, MachineVerificationError, SP1ProverOpts, Word use strum_macros::EnumString; use thiserror::Error; -use crate::install::try_install_circuit_artifacts; -use crate::{SP1Proof, SP1ProofKind, SP1ProofWithPublicValues}; +use crate::{ + install::try_install_circuit_artifacts, SP1Proof, SP1ProofKind, SP1ProofWithPublicValues, +}; /// The type of prover. 
#[derive(Debug, PartialEq, EnumString)] diff --git a/crates/stark/Cargo.toml b/crates/stark/Cargo.toml index 548dc57bbe..b8f47c0b22 100644 --- a/crates/stark/Cargo.toml +++ b/crates/stark/Cargo.toml @@ -31,12 +31,14 @@ p3-symmetric = { workspace = true } p3-poseidon2 = { workspace = true } # misc -serde = { version = "1.0.207", features = ["derive"] } +serde = { workspace = true, features = ["derive"] } hashbrown = { version = "0.14.5", features = ["serde", "inline-more"] } -itertools = "0.13.0" -tracing = "0.1.40" +itertools = { workspace = true } +tracing = { workspace = true } rayon-scan = "0.1.1" arrayref = "0.3.8" +num-bigint = { version = "0.4.3", default-features = false } + strum = "0.26.3" strum_macros = "0.26.4" sysinfo = "0.30.13" diff --git a/crates/stark/src/air/builder.rs b/crates/stark/src/air/builder.rs index dc89f80d2e..8d1c6fb7b4 100644 --- a/crates/stark/src/air/builder.rs +++ b/crates/stark/src/air/builder.rs @@ -10,7 +10,9 @@ use serde::{Deserialize, Serialize}; use strum_macros::{Display, EnumIter}; use super::{interaction::AirInteraction, BinomialExtension}; -use crate::{lookup::InteractionKind, Word}; +use crate::{ + lookup::InteractionKind, septic_digest::SepticDigest, septic_extension::SepticExtension, Word, +}; /// The scope of an interaction. #[derive( @@ -186,7 +188,6 @@ pub trait AluAirBuilder: BaseAirBuilder { b: Word>, c: Word>, shard: impl Into, - nonce: impl Into, multiplicity: impl Into, ) { let values = once(opcode.into()) @@ -194,7 +195,6 @@ pub trait AluAirBuilder: BaseAirBuilder { .chain(b.0.into_iter().map(Into::into)) .chain(c.0.into_iter().map(Into::into)) .chain(once(shard.into())) - .chain(once(nonce.into())) .collect(); self.send( @@ -212,7 +212,6 @@ pub trait AluAirBuilder: BaseAirBuilder { b: Word>, c: Word>, shard: impl Into, - nonce: impl Into, multiplicity: impl Into, ) { let values = once(opcode.into()) @@ -220,7 +219,6 @@ pub trait AluAirBuilder: BaseAirBuilder { .chain(b.0.into_iter().map(Into::into)) .chain(c.0.into_iter().map(Into::into)) .chain(once(shard.into())) - .chain(once(nonce.into())) .collect(); self.receive( @@ -235,7 +233,6 @@ pub trait AluAirBuilder: BaseAirBuilder { &mut self, shard: impl Into + Clone, clk: impl Into + Clone, - nonce: impl Into + Clone, syscall_id: impl Into + Clone, arg1: impl Into + Clone, arg2: impl Into + Clone, @@ -247,7 +244,6 @@ pub trait AluAirBuilder: BaseAirBuilder { vec![ shard.clone().into(), clk.clone().into(), - nonce.clone().into(), syscall_id.clone().into(), arg1.clone().into(), arg2.clone().into(), @@ -265,7 +261,6 @@ pub trait AluAirBuilder: BaseAirBuilder { &mut self, shard: impl Into + Clone, clk: impl Into + Clone, - nonce: impl Into + Clone, syscall_id: impl Into + Clone, arg1: impl Into + Clone, arg2: impl Into + Clone, @@ -277,7 +272,6 @@ pub trait AluAirBuilder: BaseAirBuilder { vec![ shard.clone().into(), clk.clone().into(), - nonce.clone().into(), syscall_id.clone().into(), arg1.clone().into(), arg2.clone().into(), @@ -328,19 +322,39 @@ pub trait ExtensionAirBuilder: BaseAirBuilder { } } +/// A builder that can operation on septic extension elements. +pub trait SepticExtensionAirBuilder: BaseAirBuilder { + /// Asserts that the two field extensions are equal. + fn assert_septic_ext_eq>( + &mut self, + left: SepticExtension, + right: SepticExtension, + ) { + for (left, right) in left.0.into_iter().zip(right.0) { + self.assert_eq(left, right); + } + } +} + /// A builder that implements a permutation argument. 
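// The SepticExtensionAirBuilder introduced above reduces equality of two degree-7
// extension elements to seven coordinate-wise assertions. A simplified,
// self-contained sketch, with plain u64 coordinates standing in for AIR expressions:
#[derive(Clone, Copy)]
struct Septic([u64; 7]);

fn assert_septic_eq(left: Septic, right: Septic) {
    // Mirrors assert_septic_ext_eq: compare the two elements coordinate by coordinate.
    for (l, r) in left.0.iter().zip(right.0.iter()) {
        assert_eq!(l, r);
    }
}

fn main() {
    let a = Septic([1, 2, 3, 4, 5, 6, 7]);
    assert_septic_eq(a, a);
}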
pub trait MultiTableAirBuilder<'a>: PermutationAirBuilder { - /// The type of the cumulative sum. - type Sum: Into + Copy; + /// The type of the local cumulative sum. + type LocalSum: Into + Copy; + + /// The type of the global cumulative sum; + type GlobalSum: Into + Copy; + + /// Returns the local cumulative sum of the permutation. + fn local_cumulative_sum(&self) -> &'a Self::LocalSum; - /// Returns the cumulative sum of the permutation. - fn cumulative_sums(&self) -> &'a [Self::Sum]; + /// Returns the global cumulative sum of the permutation. + fn global_cumulative_sum(&self) -> &'a SepticDigest; } /// A trait that contains the common helper methods for building `SP1 recursion` and SP1 machine /// AIRs. pub trait MachineAirBuilder: - BaseAirBuilder + ExtensionAirBuilder + AirBuilderWithPublicValues + BaseAirBuilder + ExtensionAirBuilder + SepticExtensionAirBuilder + AirBuilderWithPublicValues { } @@ -362,6 +376,7 @@ impl ByteAirBuilder for AB {} impl AluAirBuilder for AB {} impl ExtensionAirBuilder for AB {} +impl SepticExtensionAirBuilder for AB {} impl MachineAirBuilder for AB {} impl SP1AirBuilder for AB {} diff --git a/crates/stark/src/air/machine.rs b/crates/stark/src/air/machine.rs index 0a9b0af4a8..2398e19858 100644 --- a/crates/stark/src/air/machine.rs +++ b/crates/stark/src/air/machine.rs @@ -2,7 +2,7 @@ use p3_air::BaseAir; use p3_field::Field; use p3_matrix::dense::RowMajorMatrix; -use crate::MachineRecord; +use crate::{septic_digest::SepticDigest, MachineRecord}; pub use sp1_derive::MachineAir; @@ -59,4 +59,6 @@ pub trait MachineAir: BaseAir + 'static + Send + Sync { pub trait MachineProgram: Send + Sync { /// Gets the starting program counter. fn pc_start(&self) -> F; + /// Gets the initial global cumulative sum. + fn initial_global_cumulative_sum(&self) -> SepticDigest; } diff --git a/crates/stark/src/chip.rs b/crates/stark/src/chip.rs index 2d627d2ccd..eced8524f9 100644 --- a/crates/stark/src/chip.rs +++ b/crates/stark/src/chip.rs @@ -7,24 +7,26 @@ use p3_uni_stark::{get_max_constraint_degree, SymbolicAirBuilder}; use p3_util::log2_ceil_usize; use crate::{ - air::{MachineAir, MultiTableAirBuilder, SP1AirBuilder}, + air::{InteractionScope, MachineAir, MultiTableAirBuilder, SP1AirBuilder}, + local_permutation_trace_width, lookup::{Interaction, InteractionBuilder, InteractionKind}, }; use super::{ - eval_permutation_constraints, generate_permutation_trace, get_grouped_maps, PROOF_MAX_NUM_PVS, + eval_permutation_constraints, generate_permutation_trace, scoped_interactions, + PROOF_MAX_NUM_PVS, }; /// An Air that encodes lookups based on interactions. pub struct Chip { /// The underlying AIR of the chip for constraint evaluation. - air: A, + pub air: A, /// The interactions that the chip sends. - sends: Vec>, + pub sends: Vec>, /// The interactions that the chip receives. - receives: Vec>, + pub receives: Vec>, /// The relative log degree of the quotient polynomial, i.e. `log2(max_constraint_degree - 1)`. - log_quotient_degree: usize, + pub log_quotient_degree: usize, } impl Chip { @@ -119,13 +121,13 @@ where preprocessed: Option<&RowMajorMatrix>, main: &RowMajorMatrix, random_elements: &[EF], - ) -> (RowMajorMatrix, EF, EF) + ) -> (RowMajorMatrix, EF) where F: PrimeField, A: MachineAir, { let batch_size = self.logup_batch_size(); - generate_permutation_trace( + generate_permutation_trace::( &self.sends, &self.receives, preprocessed, @@ -138,10 +140,15 @@ where /// Returns the width of the permutation trace. 
#[inline] pub fn permutation_width(&self) -> usize { - let (_, _, grouped_widths) = - get_grouped_maps(self.sends(), self.receives(), self.logup_batch_size()); - - grouped_widths.values().sum() + let (scoped_sends, scoped_receives) = scoped_interactions(self.sends(), self.receives()); + let empty = Vec::new(); + let local_sends = scoped_sends.get(&InteractionScope::Local).unwrap_or(&empty); + let local_receives = scoped_receives.get(&InteractionScope::Local).unwrap_or(&empty); + + local_permutation_trace_width( + local_sends.len() + local_receives.len(), + self.logup_batch_size(), + ) } /// Returns the cost of a row in the chip. @@ -223,7 +230,7 @@ where impl<'a, F, A, AB> Air for Chip where F: Field, - A: Air, + A: Air + MachineAir, AB: SP1AirBuilder + MultiTableAirBuilder<'a> + PairBuilder + 'a, { fn eval(&self, builder: &mut AB) { @@ -231,7 +238,13 @@ where self.air.eval(builder); // Evaluate permutation constraints. let batch_size = self.logup_batch_size(); - eval_permutation_constraints(&self.sends, &self.receives, batch_size, builder); + eval_permutation_constraints( + &self.sends, + &self.receives, + batch_size, + self.air.commit_scope(), + builder, + ); } } diff --git a/crates/stark/src/debug.rs b/crates/stark/src/debug.rs index 78a7d0c95c..8808458165 100644 --- a/crates/stark/src/debug.rs +++ b/crates/stark/src/debug.rs @@ -14,11 +14,13 @@ use p3_matrix::{ stack::VerticalPair, Matrix, }; -use p3_maybe_rayon::prelude::ParallelBridge; -use p3_maybe_rayon::prelude::ParallelIterator; +use p3_maybe_rayon::prelude::{ParallelBridge, ParallelIterator}; use super::{MachineChip, StarkGenericConfig, Val}; -use crate::air::{EmptyMessageBuilder, MachineAir, MultiTableAirBuilder}; +use crate::{ + air::{EmptyMessageBuilder, MachineAir, MultiTableAirBuilder}, + septic_digest::SepticDigest, +}; /// Checks that the constraints of the given AIR are satisfied, including the permutation trace. 
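// The local permutation width used above is ceil(n / batch_size) + 1 extension
// columns: one column per batch of local interactions plus one running-sum column.
// A small worked example of that formula (the numbers are illustrative):
const fn local_width(nb_interactions: usize, batch_size: usize) -> usize {
    if nb_interactions == 0 {
        return 0;
    }
    // div_ceil(nb_interactions, batch_size) + 1, written out explicitly.
    (nb_interactions + batch_size - 1) / batch_size + 1
}

fn main() {
    // 10 interactions batched 4 at a time need ceil(10/4) = 3 batch columns
    // plus the running-sum column, i.e. 4 extension-field columns.
    assert_eq!(local_width(10, 4), 4);
    // A chip with no local interactions contributes no permutation columns.
    assert_eq!(local_width(0, 4), 0);
}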
/// @@ -31,7 +33,8 @@ pub fn debug_constraints( perm: &RowMajorMatrix, perm_challenges: &[SC::Challenge], public_values: &[Val], - cumulative_sums: &[SC::Challenge], + local_cumulative_sum: &SC::Challenge, + global_cumulative_sum: &SepticDigest>, ) where SC: StarkGenericConfig, Val: PrimeField32, @@ -84,7 +87,8 @@ pub fn debug_constraints( RowMajorMatrixView::new_row(perm_next), ), perm_challenges, - cumulative_sums, + local_cumulative_sum, + global_cumulative_sum, is_first_row: Val::::zero(), is_last_row: Val::::zero(), is_transition: Val::::one(), @@ -130,7 +134,8 @@ pub struct DebugConstraintBuilder<'a, F: Field, EF: ExtensionField> { pub(crate) preprocessed: VerticalPair, RowMajorMatrixView<'a, F>>, pub(crate) main: VerticalPair, RowMajorMatrixView<'a, F>>, pub(crate) perm: VerticalPair, RowMajorMatrixView<'a, EF>>, - pub(crate) cumulative_sums: &'a [EF], + pub(crate) local_cumulative_sum: &'a EF, + pub(crate) global_cumulative_sum: &'a SepticDigest, pub(crate) perm_challenges: &'a [EF], pub(crate) is_first_row: F, pub(crate) is_last_row: F, @@ -257,10 +262,15 @@ where F: Field, EF: ExtensionField, { - type Sum = EF; + type LocalSum = EF; + type GlobalSum = F; + + fn local_cumulative_sum(&self) -> &'a Self::LocalSum { + self.local_cumulative_sum + } - fn cumulative_sums(&self) -> &'a [Self::Sum] { - self.cumulative_sums + fn global_cumulative_sum(&self) -> &'a SepticDigest { + self.global_cumulative_sum } } diff --git a/crates/stark/src/folder.rs b/crates/stark/src/folder.rs index 4666e2e94c..e6688e26c4 100644 --- a/crates/stark/src/folder.rs +++ b/crates/stark/src/folder.rs @@ -7,7 +7,10 @@ use p3_field::{AbstractField, ExtensionField, Field}; use p3_matrix::{dense::RowMajorMatrixView, stack::VerticalPair}; use super::{Challenge, PackedChallenge, PackedVal, StarkGenericConfig, Val}; -use crate::air::{EmptyMessageBuilder, MultiTableAirBuilder}; +use crate::{ + air::{EmptyMessageBuilder, MultiTableAirBuilder}, + septic_digest::SepticDigest, +}; use p3_air::{ AirBuilder, AirBuilderWithPublicValues, ExtensionBuilder, PairBuilder, PermutationAirBuilder, }; @@ -27,8 +30,10 @@ pub struct ProverConstraintFolder<'a, SC: StarkGenericConfig> { >, /// The challenges for the permutation. pub perm_challenges: &'a [PackedChallenge], - /// The cumulative sums for the permutation. - pub cumulative_sums: &'a [PackedChallenge], + /// The local cumulative sum for the permutation. + pub local_cumulative_sum: &'a PackedChallenge, + /// The global cumulative sum for the permutation. + pub global_cumulative_sum: &'a SepticDigest>, /// The selector for the first row. pub is_first_row: PackedVal, /// The selector for the last row. @@ -112,10 +117,15 @@ impl<'a, SC: StarkGenericConfig> PermutationAirBuilder for ProverConstraintFolde } impl<'a, SC: StarkGenericConfig> MultiTableAirBuilder<'a> for ProverConstraintFolder<'a, SC> { - type Sum = PackedChallenge; + type LocalSum = PackedChallenge; + type GlobalSum = Val; + + fn local_cumulative_sum(&self) -> &'a Self::LocalSum { + self.local_cumulative_sum + } - fn cumulative_sums(&self) -> &'a [Self::Sum] { - self.cumulative_sums + fn global_cumulative_sum(&self) -> &'a SepticDigest { + self.global_cumulative_sum } } @@ -155,8 +165,10 @@ pub struct GenericVerifierConstraintFolder<'a, F, EF, PubVar, Var, Expr> { pub perm: VerticalPair, RowMajorMatrixView<'a, Var>>, /// The challenges for the permutation. pub perm_challenges: &'a [Var], - /// The cumulative sums of the permutation. - pub cumulative_sums: &'a [Var], + /// The local cumulative sum of the permutation. 
+ pub local_cumulative_sum: &'a Var, + /// The global cumulative sum of the permutation. + pub global_cumulative_sum: &'a SepticDigest, /// The selector for the first row. pub is_first_row: Var, /// The selector for the last row. @@ -345,10 +357,15 @@ where + Sync, PubVar: Into + Copy, { - type Sum = Var; + type LocalSum = Var; + type GlobalSum = PubVar; + + fn local_cumulative_sum(&self) -> &'a Self::LocalSum { + self.local_cumulative_sum + } - fn cumulative_sums(&self) -> &'a [Self::Sum] { - self.cumulative_sums + fn global_cumulative_sum(&self) -> &'a SepticDigest { + self.global_cumulative_sum } } diff --git a/crates/stark/src/lib.rs b/crates/stark/src/lib.rs index 924dcb3356..539d2dc886 100644 --- a/crates/stark/src/lib.rs +++ b/crates/stark/src/lib.rs @@ -33,6 +33,9 @@ mod permutation; mod prover; mod quotient; mod record; +pub mod septic_curve; +pub mod septic_digest; +pub mod septic_extension; mod types; mod util; mod verifier; diff --git a/crates/stark/src/machine.rs b/crates/stark/src/machine.rs index 5a5f5ea8db..28724396a3 100644 --- a/crates/stark/src/machine.rs +++ b/crates/stark/src/machine.rs @@ -1,3 +1,6 @@ +use crate::{ + septic_curve::SepticCurve, septic_digest::SepticDigest, septic_extension::SepticExtension, +}; use hashbrown::HashMap; use itertools::Itertools; use p3_air::Air; @@ -7,7 +10,7 @@ use p3_field::{AbstractExtensionField, AbstractField, Field, PrimeField32}; use p3_matrix::{dense::RowMajorMatrix, Dimensions, Matrix}; use p3_maybe_rayon::prelude::*; use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use std::{array, cmp::Reverse, env, fmt::Debug, time::Instant}; +use std::{cmp::Reverse, env, fmt::Debug, iter::once, time::Instant}; use tracing::instrument; use super::{debug_constraints, Dom}; @@ -60,6 +63,8 @@ pub struct StarkProvingKey { pub commit: Com, /// The start pc of the program. pub pc_start: Val, + /// The starting global digest of the program, after incorporating the initial memory. + pub initial_global_cumulative_sum: SepticDigest>, /// The preprocessed traces. pub traces: Vec>>, /// The pcs data for the preprocessed traces. @@ -75,9 +80,10 @@ impl StarkProvingKey { pub fn observe_into(&self, challenger: &mut SC::Challenger) { challenger.observe(self.commit.clone()); challenger.observe(self.pc_start); - for _ in 0..7 { - challenger.observe(Val::::zero()); - } + challenger.observe_slice(&self.initial_global_cumulative_sum.0.x.0); + challenger.observe_slice(&self.initial_global_cumulative_sum.0.y.0); + // Observe the padding. + challenger.observe(Val::::zero()); } } @@ -90,6 +96,8 @@ pub struct StarkVerifyingKey { pub commit: Com, /// The start pc of the program. pub pc_start: Val, + /// The starting global digest of the program, after incorporating the initial memory. + pub initial_global_cumulative_sum: SepticDigest>, /// The chip information. pub chip_information: Vec<(String, Dom, Dimensions)>, /// The chip ordering. @@ -101,9 +109,10 @@ impl StarkVerifyingKey { pub fn observe_into(&self, challenger: &mut SC::Challenger) { challenger.observe(self.commit.clone()); challenger.observe(self.pc_start); - for _ in 0..7 { - challenger.observe(Val::::zero()); - } + challenger.observe_slice(&self.initial_global_cumulative_sum.0.x.0); + challenger.observe_slice(&self.initial_global_cumulative_sum.0.y.0); + // Observe the padding. 
+ challenger.observe(Val::::zero()); } } @@ -237,17 +246,25 @@ impl>> StarkMachine { named_preprocessed_traces.into_iter().map(|(_, _, trace)| trace).collect::>(); let pc_start = program.pc_start(); + let initial_global_cumulative_sum = program.initial_global_cumulative_sum(); ( StarkProvingKey { commit: commit.clone(), pc_start, + initial_global_cumulative_sum, traces, data, chip_ordering: chip_ordering.clone(), local_only, }, - StarkVerifyingKey { commit, pc_start, chip_information, chip_ordering }, + StarkVerifyingKey { + commit, + pc_start, + initial_global_cumulative_sum, + chip_information, + chip_ordering, + }, ) } @@ -301,45 +318,28 @@ impl>> StarkMachine { SC::Challenger: Clone, A: for<'a> Air>, { - let contains_global_bus = self.contains_global_bus(); - // Observe the preprocessed commitment. vk.observe_into(challenger); - tracing::debug_span!("observe challenges for all shards").in_scope(|| { - proof.shard_proofs.iter().for_each(|shard_proof| { - if contains_global_bus { - challenger.observe(shard_proof.commitment.global_main_commit.clone()); - } - challenger.observe_slice(&shard_proof.public_values[0..self.num_pv_elts()]); - }); - }); // Verify the shard proofs. if proof.shard_proofs.is_empty() { return Err(MachineVerificationError::EmptyProof); } - // Obtain the challenges used for the global permutation argument. - let global_permutation_challenges: [SC::Challenge; 2] = array::from_fn(|_| { - if contains_global_bus { - challenger.sample_ext_element() - } else { - SC::Challenge::zero() - } - }); - tracing::debug_span!("verify shard proofs").in_scope(|| { for (i, shard_proof) in proof.shard_proofs.iter().enumerate() { tracing::debug_span!("verifying shard", shard = i).in_scope(|| { let chips = self.shard_chips_ordered(&shard_proof.chip_ordering).collect::>(); + let mut shard_challenger = challenger.clone(); + shard_challenger + .observe_slice(&shard_proof.public_values[0..self.num_pv_elts()]); Verifier::verify_shard( &self.config, vk, &chips, - &mut challenger.clone(), + &mut shard_challenger, shard_proof, - &global_permutation_challenges, ) .map_err(MachineVerificationError::InvalidShardProof) })?; @@ -353,8 +353,9 @@ impl>> StarkMachine { let sum = proof .shard_proofs .iter() - .map(|proof| proof.cumulative_sum(InteractionScope::Global)) - .sum::(); + .map(ShardProof::global_cumulative_sum) + .chain(once(vk.initial_global_cumulative_sum)) + .sum::>>(); if !sum.is_zero() { return Err(MachineVerificationError::NonZeroCumulativeSum( @@ -386,12 +387,9 @@ impl>> StarkMachine { permutation_challenges.push(challenger.sample_ext_element()); } - // Obtain the challenges used for the local permutation argument. - for _ in 0..2 { - permutation_challenges.push(challenger.sample_ext_element()); - } + let mut global_cumulative_sums = Vec::new(); + global_cumulative_sums.push(pk.initial_global_cumulative_sum); - let mut global_cumulative_sum = SC::Challenge::zero(); for shard in records.iter() { // Filter the chips based on what is used. let chips = self.shard_chips(shard).collect::>(); @@ -409,27 +407,40 @@ impl>> StarkMachine { // Generate the permutation traces. 
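// The verifier's global check above folds every shard proof's global cumulative
// sum together with the verifying key's initial digest and requires the total to
// be zero. A sketch of that bookkeeping, with plain i64 values standing in for the
// septic-curve digests that the crate actually sums:
fn global_sum_is_zero(initial_digest: i64, shard_digests: &[i64]) -> bool {
    let total: i64 =
        shard_digests.iter().copied().chain(std::iter::once(initial_digest)).sum();
    total == 0
}

fn main() {
    // Two shards whose contributions cancel the initial digest: accepted.
    assert!(global_sum_is_zero(-5, &[2, 3]));
    // Any leftover contribution is rejected as a non-zero cumulative sum.
    assert!(!global_sum_is_zero(-5, &[2, 4]));
}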
let mut permutation_traces = Vec::with_capacity(chips.len()); - let mut cumulative_sums = Vec::with_capacity(chips.len()); + let mut chip_cumulative_sums = Vec::with_capacity(chips.len()); tracing::debug_span!("generate permutation traces").in_scope(|| { chips .par_iter() .zip(traces.par_iter_mut()) .map(|(chip, (main_trace, pre_trace))| { - let (trace, global_sum, local_sum) = chip.generate_permutation_trace( + let (trace, local_sum) = chip.generate_permutation_trace( *pre_trace, main_trace, &permutation_challenges, ); - (trace, [global_sum, local_sum]) + let global_sum = if chip.commit_scope() == InteractionScope::Local { + SepticDigest::>::zero() + } else { + let main_trace_size = main_trace.height() * main_trace.width(); + let last_row = + &main_trace.values[main_trace_size - 14..main_trace_size]; + SepticDigest(SepticCurve { + x: SepticExtension::>::from_base_fn(|i| last_row[i]), + y: SepticExtension::>::from_base_fn(|i| last_row[i + 7]), + }) + }; + (trace, (global_sum, local_sum)) }) - .unzip_into_vecs(&mut permutation_traces, &mut cumulative_sums); + .unzip_into_vecs(&mut permutation_traces, &mut chip_cumulative_sums); }); - global_cumulative_sum += - cumulative_sums.iter().map(|sum| sum[0]).sum::(); + let global_cumulative_sum = + chip_cumulative_sums.iter().map(|sums| sums.0).sum::>>(); + global_cumulative_sums.push(global_cumulative_sum); let local_cumulative_sum = - cumulative_sums.iter().map(|sum| sum[1]).sum::(); + chip_cumulative_sums.iter().map(|sums| sums.1).sum::(); + if !local_cumulative_sum.is_zero() { tracing::warn!("Local cumulative sum is not zero"); tracing::debug_span!("debug local interactions").in_scope(|| { @@ -474,7 +485,8 @@ impl>> StarkMachine { &permutation_traces[i], &permutation_challenges, &shard.public_values(), - &cumulative_sums[i], + &chip_cumulative_sums[i].1, + &chip_cumulative_sums[i].0, ); } }); @@ -483,6 +495,9 @@ impl>> StarkMachine { tracing::info!("Constraints verified successfully"); + let global_cumulative_sum: SepticDigest> = + global_cumulative_sums.iter().copied().sum(); + // If the global cumulative sum is not zero, debug the interactions. 
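// For a chip that commits in the global scope, the global digest is read straight
// out of the last 14 columns of the final main-trace row, as above (7 x-coordinates
// followed by 7 y-coordinates). A simplified sketch over a row-major Vec<u64>,
// with u64 standing in for the base field:
fn digest_from_trace(values: &[u64], width: usize, height: usize) -> ([u64; 7], [u64; 7]) {
    assert!(width >= 14, "the committing chip must expose x and y in its last 14 columns");
    let size = width * height;
    // The final 14 entries of the row-major buffer are the last 14 columns of the last row.
    let last = &values[size - 14..size];
    let mut x = [0u64; 7];
    let mut y = [0u64; 7];
    x.copy_from_slice(&last[..7]);
    y.copy_from_slice(&last[7..]);
    (x, y)
}

fn main() {
    // A 1-row, 16-column trace whose last 14 columns hold the digest coordinates.
    let mut row = vec![0u64; 16];
    for i in 0..14 {
        row[2 + i] = i as u64 + 1;
    }
    let (x, y) = digest_from_trace(&row, 16, 1);
    assert_eq!(x, [1, 2, 3, 4, 5, 6, 7]);
    assert_eq!(y, [8, 9, 10, 11, 12, 13, 14]);
}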
if !global_cumulative_sum.is_zero() { tracing::warn!("Global cumulative sum is not zero"); diff --git a/crates/stark/src/opts.rs b/crates/stark/src/opts.rs index 34ea0d81b7..a006d5f17e 100644 --- a/crates/stark/src/opts.rs +++ b/crates/stark/src/opts.rs @@ -155,7 +155,7 @@ impl SplitOpts { keccak: deferred_shift_threshold / 24, sha_extend: deferred_shift_threshold / 48, sha_compress: deferred_shift_threshold / 80, - memory: deferred_shift_threshold * 4, + memory: deferred_shift_threshold * 16, } } } diff --git a/crates/stark/src/permutation.rs b/crates/stark/src/permutation.rs index 1d0fa11574..58725f7123 100644 --- a/crates/stark/src/permutation.rs +++ b/crates/stark/src/permutation.rs @@ -1,35 +1,32 @@ -use std::borrow::Borrow; - +use crate::{ + air::{InteractionScope, MultiTableAirBuilder}, + lookup::Interaction, +}; use hashbrown::HashMap; use itertools::Itertools; -use p3_air::{ExtensionBuilder, PairBuilder}; -use p3_field::{AbstractExtensionField, AbstractField, ExtensionField, Field, PrimeField}; +use p3_air::{AirBuilder, ExtensionBuilder, PairBuilder}; +use p3_field::AbstractExtensionField; +use p3_field::AbstractField; +use p3_field::{ExtensionField, Field, PrimeField}; use p3_matrix::{dense::RowMajorMatrix, Matrix}; use p3_maybe_rayon::prelude::*; use rayon_scan::ScanParallelIterator; -use strum::IntoEnumIterator; - -use crate::{ - air::{InteractionScope, MultiTableAirBuilder}, - lookup::Interaction, -}; +use std::borrow::Borrow; -/// Computes the width of the permutation trace. -#[inline] +/// Computes the width of the local permutation trace in terms of extension field elements. #[must_use] -pub const fn permutation_trace_width(num_interactions: usize, batch_size: usize) -> usize { - if num_interactions == 0 { - 0 - } else { - num_interactions.div_ceil(batch_size) + 1 +pub const fn local_permutation_trace_width(nb_interactions: usize, batch_size: usize) -> usize { + if nb_interactions == 0 { + return 0; } + nb_interactions.div_ceil(batch_size) + 1 } -/// Populates a permutation row. +/// Populates a local permutation row. #[inline] #[allow(clippy::too_many_arguments)] #[allow(clippy::needless_pass_by_value)] -pub fn populate_permutation_row>( +pub fn populate_local_permutation_row>( row: &mut [EF], preprocessed_row: &[F], main_row: &[F], @@ -39,9 +36,7 @@ pub fn populate_permutation_row>( batch_size: usize, ) { let alpha = random_elements[0]; - - // Generate the RLC elements to uniquely identify each item in the looked up tuple. - let betas = random_elements[1].powers(); + let betas = random_elements[1].powers(); // TODO: optimize let interaction_chunks = &sends .iter() @@ -75,15 +70,11 @@ pub fn populate_permutation_row>( /// Returns the sends, receives, and permutation trace width grouped by scope. #[allow(clippy::type_complexity)] -pub fn get_grouped_maps( +pub fn scoped_interactions( sends: &[Interaction], receives: &[Interaction], - batch_size: usize, -) -> ( - HashMap>>, - HashMap>>, - HashMap, -) { +) -> (HashMap>>, HashMap>>) +{ // Create a hashmap of scope -> vec. let mut sends = sends.to_vec(); sends.sort_by_key(|k| k.scope); @@ -104,23 +95,11 @@ pub fn get_grouped_maps( .map(|(k, values)| (k, values.cloned().collect_vec())) .collect(); - // Create a hashmap of scope -> permutation trace width. 
- let grouped_widths = InteractionScope::iter() - .map(|scope| { - let empty_vec = vec![]; - let sends = grouped_sends.get(&scope).unwrap_or(&empty_vec); - let receives = grouped_receives.get(&scope).unwrap_or(&empty_vec); - (scope, permutation_trace_width(sends.len() + receives.len(), batch_size)) - }) - .collect(); - - (grouped_sends, grouped_receives, grouped_widths) + (grouped_sends, grouped_receives) } /// Generates the permutation trace for the given chip and main trace based on a variant of `LogUp`. -/// -/// The permutation trace has `(N+1)*EF::NUM_COLS` columns, where N is the number of interactions in -/// the chip. +#[allow(clippy::too_many_lines)] pub fn generate_permutation_trace>( sends: &[Interaction], receives: &[Interaction], @@ -128,109 +107,81 @@ pub fn generate_permutation_trace>( main: &RowMajorMatrix, random_elements: &[EF], batch_size: usize, -) -> (RowMajorMatrix, EF, EF) { - let (grouped_sends, grouped_receives, grouped_widths) = - get_grouped_maps(sends, receives, batch_size); +) -> (RowMajorMatrix, EF) { + let empty = vec![]; + let (scoped_sends, scoped_receives) = scoped_interactions(sends, receives); + let local_sends = scoped_sends.get(&InteractionScope::Local).unwrap_or(&empty); + let local_receives = scoped_receives.get(&InteractionScope::Local).unwrap_or(&empty); + + let local_permutation_width = + local_permutation_trace_width(local_sends.len() + local_receives.len(), batch_size); let height = main.height(); - let permutation_trace_width = grouped_widths.values().sum::(); + let permutation_trace_width = local_permutation_width; let mut permutation_trace = RowMajorMatrix::new( vec![EF::zero(); permutation_trace_width * height], permutation_trace_width, ); - let mut global_cumulative_sum = EF::zero(); let mut local_cumulative_sum = EF::zero(); - for scope in InteractionScope::iter() { - let empty_vec = vec![]; - let sends = grouped_sends.get(&scope).unwrap_or(&empty_vec); - let receives = grouped_receives.get(&scope).unwrap_or(&empty_vec); - - if sends.is_empty() && receives.is_empty() { - continue; - } - - let random_elements = match scope { - InteractionScope::Global => &random_elements[0..2], - InteractionScope::Local => &random_elements[2..4], - }; - - let row_range = match scope { - InteractionScope::Global => { - 0..*grouped_widths.get(&InteractionScope::Global).expect("Expected global scope") - } - InteractionScope::Local => { - let global_perm_width = - *grouped_widths.get(&InteractionScope::Global).expect("Expected global scope"); - let local_perm_width = - *grouped_widths.get(&InteractionScope::Local).expect("Expected local scope"); - global_perm_width..global_perm_width + local_perm_width - } - }; - - // Compute the permutation trace values in parallel. 
- match preprocessed { - Some(prep) => { - permutation_trace - .par_rows_mut() - .zip_eq(prep.par_row_slices()) - .zip_eq(main.par_row_slices()) - .for_each(|((row, prep_row), main_row)| { - populate_permutation_row( - &mut row[row_range.start..row_range.end], - prep_row, - main_row, - sends, - receives, - random_elements, - batch_size, - ); - }); - } - None => { - permutation_trace.par_rows_mut().zip_eq(main.par_row_slices()).for_each( - |(row, main_row)| { - populate_permutation_row( - &mut row[row_range.start..row_range.end], - &[], - main_row, - sends, - receives, - random_elements, - batch_size, - ); - }, - ); - } + let random_elements = &random_elements[0..2]; + let local_row_range = 0..local_permutation_width; + + if !local_sends.is_empty() || !local_receives.is_empty() { + if let Some(prep) = preprocessed { + permutation_trace + .par_rows_mut() + .zip_eq(prep.par_row_slices()) + .zip_eq(main.par_row_slices()) + .for_each(|((row, prep_row), main_row)| { + populate_local_permutation_row::( + &mut row[0..local_permutation_width], + prep_row, + main_row, + local_sends, + local_receives, + random_elements, + batch_size, + ); + }); + } else { + permutation_trace.par_rows_mut().zip_eq(main.par_row_slices()).for_each( + |(row, main_row)| { + populate_local_permutation_row::( + &mut row[0..local_permutation_width], + &[], + main_row, + local_sends, + local_receives, + random_elements, + batch_size, + ); + }, + ); } let zero = EF::zero(); - let cumulative_sums = permutation_trace + let local_cumulative_sums = permutation_trace .par_rows_mut() - .map(|row| row[row_range.start..row_range.end - 1].iter().copied().sum::()) + .map(|row| { + row[local_row_range.start..local_row_range.end - 1].iter().copied().sum::() + }) .collect::>(); - let cumulative_sums = - cumulative_sums.into_par_iter().scan(|a, b| *a + *b, zero).collect::>(); + let local_cumulative_sums = + local_cumulative_sums.into_par_iter().scan(|a, b| *a + *b, zero).collect::>(); - match scope { - InteractionScope::Global => { - global_cumulative_sum = *cumulative_sums.last().unwrap(); - } - InteractionScope::Local => { - local_cumulative_sum = *cumulative_sums.last().unwrap(); - } - } + local_cumulative_sum = *local_cumulative_sums.last().unwrap(); - permutation_trace.par_rows_mut().zip_eq(cumulative_sums.clone().into_par_iter()).for_each( - |(row, cumulative_sum)| { - row[row_range.end - 1] = cumulative_sum; + permutation_trace.par_rows_mut().zip_eq(local_cumulative_sums.into_par_iter()).for_each( + |(row, local_cumulative_sum)| { + row[local_row_range.end - 1] = local_cumulative_sum; }, ); } - (permutation_trace, global_cumulative_sum, local_cumulative_sum) + (permutation_trace, local_cumulative_sum) } /// Evaluates the permutation constraints for the given chip. @@ -238,12 +189,13 @@ pub fn generate_permutation_trace>( /// In particular, the constraints checked here are: /// - The running sum column starts at zero. /// - That the RLC per interaction is computed correctly. -/// - The running sum column ends at the (currently) given cumalitive sum. +/// - The running sum column ends at the (currently) given cumulative sum. 
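// The running-sum column that these constraints check is built as above: each
// row's batch columns are summed, a prefix scan accumulates those row sums (the
// crate does this with a parallel scan; it is sequential here), and the bottom
// entry of the column is the chip's local cumulative sum. A sketch with f64 rows
// standing in for rows of extension-field elements:
fn fill_running_sum(rows: &mut [Vec<f64>]) -> f64 {
    // Row layout: batch columns first, running-sum column last.
    let mut running = 0.0;
    for row in rows.iter_mut() {
        let width = row.len();
        // Sum of every batch column in this row (everything but the last column).
        let row_sum: f64 = row[..width - 1].iter().sum();
        running += row_sum;
        // The last column stores the inclusive prefix sum up to this row.
        row[width - 1] = running;
    }
    // The final value is the local cumulative sum of the chip.
    running
}

fn main() {
    let mut rows =
        vec![vec![0.5, -0.25, 0.0], vec![-0.25, 0.125, 0.0], vec![0.0, -0.125, 0.0]];
    let cumulative = fill_running_sum(&mut rows);
    // 0.25 + (-0.125) + (-0.125) = 0.0: this chip's local interactions cancel.
    assert_eq!(cumulative, 0.0);
    assert_eq!(rows[2][2], 0.0);
}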
#[allow(clippy::too_many_lines)] pub fn eval_permutation_constraints<'a, F, AB>( sends: &[Interaction], receives: &[Interaction], batch_size: usize, + commit_scope: InteractionScope, builder: &mut AB, ) where F: Field, @@ -251,15 +203,16 @@ pub fn eval_permutation_constraints<'a, F, AB>( AB: MultiTableAirBuilder<'a, F = F> + PairBuilder, AB: 'a, { - let (grouped_sends, grouped_receives, grouped_widths) = - get_grouped_maps(sends, receives, batch_size); + let empty = vec![]; + let (scoped_sends, scoped_receives) = scoped_interactions(sends, receives); + let local_sends = scoped_sends.get(&InteractionScope::Local).unwrap_or(&empty); + let local_receives = scoped_receives.get(&InteractionScope::Local).unwrap_or(&empty); + + let local_permutation_width = + local_permutation_trace_width(local_sends.len() + local_receives.len(), batch_size); + + let permutation_trace_width = local_permutation_width; - // Get the permutation challenges. - let permutation_challenges = builder.permutation_randomness(); - let random_elements: Vec = - permutation_challenges.iter().map(|x| (*x).into()).collect(); - let cumulative_sums: Vec = - builder.cumulative_sums().iter().map(|x| (*x).into()).collect(); let preprocessed = builder.preprocessed(); let main = builder.main(); let perm = builder.permutation().to_row_major_matrix(); @@ -268,67 +221,41 @@ pub fn eval_permutation_constraints<'a, F, AB>( let main_local = main.to_row_major_matrix(); let main_local = main_local.row_slice(0); let main_local: &[AB::Var] = (*main_local).borrow(); - let perm_width = perm.width(); let perm_local = perm.row_slice(0); let perm_local: &[AB::VarEF] = (*perm_local).borrow(); let perm_next = perm.row_slice(1); let perm_next: &[AB::VarEF] = (*perm_next).borrow(); + let perm_width = perm.width(); // Assert that the permutation trace width is correct. - let expected_perm_width = grouped_widths.values().sum::(); - if perm_width != expected_perm_width { + if perm_width != permutation_trace_width { panic!( - "permutation trace width is incorrect: expected {expected_perm_width}, got {perm_width}", + "permutation trace width is incorrect: expected {permutation_trace_width}, got {perm_width}", ); } - for scope in InteractionScope::iter() { - let random_elements = match scope { - InteractionScope::Global => &random_elements[0..2], - InteractionScope::Local => &random_elements[2..4], - }; - - let (alpha, beta) = (&random_elements[0], &random_elements[1]); - - let perm_local = match scope { - InteractionScope::Global => &perm_local[0..*grouped_widths.get(&scope).unwrap()], - InteractionScope::Local => { - let global_perm_width = *grouped_widths.get(&InteractionScope::Global).unwrap(); - &perm_local - [global_perm_width..global_perm_width + *grouped_widths.get(&scope).unwrap()] - } - }; - - let perm_next = match scope { - InteractionScope::Global => &perm_next[0..*grouped_widths.get(&scope).unwrap()], - InteractionScope::Local => { - let global_perm_width = *grouped_widths.get(&InteractionScope::Global).unwrap(); - &perm_next - [global_perm_width..global_perm_width + *grouped_widths.get(&scope).unwrap()] - } - }; - - let empty_vec = vec![]; - let sends = grouped_sends.get(&scope).unwrap_or(&empty_vec); - let receives = grouped_receives.get(&scope).unwrap_or(&empty_vec); - - if sends.is_empty() && receives.is_empty() { - continue; - } + // Get the permutation challenges. 
+ let permutation_challenges = builder.permutation_randomness(); + let random_elements: Vec = + permutation_challenges.iter().map(|x| (*x).into()).collect(); + let local_cumulative_sum = builder.local_cumulative_sum(); + let random_elements = &random_elements[0..2]; + let (alpha, beta) = (&random_elements[0], &random_elements[1]); + if !local_sends.is_empty() || !local_receives.is_empty() { // Ensure that each batch sum m_i/f_i is computed correctly. - let interaction_chunks = &sends + let interaction_chunks = &local_sends .iter() .map(|int| (int, true)) - .chain(receives.iter().map(|int| (int, false))) + .chain(local_receives.iter().map(|int| (int, false))) .chunks(batch_size); // Assert that the i-eth entry is equal to the sum_i m_i/rlc_i by constraints: - // entry * \prod_i rlc_i = \sum_i m_i * \prod_{j!=i} rlc_j over all columns of the permutation - // trace except the last column. + // entry * \prod_i rlc_i = \sum_i m_i * \prod_{j!=i} rlc_j over all columns of the + // permutation trace except the last column. for (entry, chunk) in perm_local[0..perm_local.len() - 1].iter().zip(interaction_chunks) { - // First, we calculate the random linear combinations and multiplicities with the correct - // sign depending on wetther the interaction is a send or a receive. + // First, we calculate the random linear combinations and multiplicities with the + // correct sign depending on wetther the interaction is a send or a receive. let mut rlcs: Vec = Vec::with_capacity(batch_size); let mut multiplicities: Vec = Vec::with_capacity(batch_size); for (interaction, is_send) in chunk { @@ -376,10 +303,14 @@ pub fn eval_permutation_constraints<'a, F, AB>( } // Compute the running local and next permutation sums. - let perm_width = grouped_widths.get(&scope).unwrap(); - let sum_local = - perm_local[..perm_width - 1].iter().map(|x| (*x).into()).sum::(); - let sum_next = perm_next[..perm_width - 1].iter().map(|x| (*x).into()).sum::(); + let sum_local = perm_local[..local_permutation_width - 1] + .iter() + .map(|x| (*x).into()) + .sum::(); + let sum_next = perm_next[..local_permutation_width - 1] + .iter() + .map(|x| (*x).into()) + .sum::(); let phi_local: AB::ExprEF = (*perm_local.last().unwrap()).into(); let phi_next: AB::ExprEF = (*perm_next.last().unwrap()).into(); @@ -389,13 +320,19 @@ pub fn eval_permutation_constraints<'a, F, AB>( // Assert that the cumulative sum is constrained to `phi_next - phi_local` on the transition // rows. builder.when_transition().assert_eq_ext(phi_next - phi_local.clone(), sum_next); + builder.when_last_row().assert_eq_ext(*perm_local.last().unwrap(), *local_cumulative_sum); + } - // Assert that the cumulative sum is constrained to `phi_local` on the last row. - let cumulative_sum = match scope { - InteractionScope::Global => &cumulative_sums[0], - InteractionScope::Local => &cumulative_sums[1], - }; - - builder.when_last_row().assert_eq_ext(*perm_local.last().unwrap(), cumulative_sum.clone()); + // Handle global permutations. 
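// The batch constraint above avoids field division: rather than checking
// entry = sum_i m_i / rlc_i directly, it checks the cleared form
// entry * prod_i rlc_i = sum_i m_i * prod_{j != i} rlc_j.
// A self-contained check of that identity over a toy prime field; p = 2^31 - 1 is
// only an illustrative modulus, and the signed multiplicities (sends positive,
// receives negative) are arbitrary example values.
const P: u64 = 2_147_483_647;

fn mul(a: u64, b: u64) -> u64 { (a as u128 * b as u128 % P as u128) as u64 }
fn add(a: u64, b: u64) -> u64 { (a + b) % P }
fn pow(mut b: u64, mut e: u64) -> u64 {
    let mut acc = 1;
    while e > 0 {
        if e & 1 == 1 { acc = mul(acc, b); }
        b = mul(b, b);
        e >>= 1;
    }
    acc
}
// Modular inverse via Fermat's little theorem.
fn inv(a: u64) -> u64 { pow(a, P - 2) }

fn main() {
    // Random-linear-combination values and signed multiplicities for one batch.
    let rlcs = [17u64, 29, 43];
    let mults = [1u64, P - 1, 2]; // 1, -1, 2 modulo p.

    // entry = sum_i m_i / rlc_i, computed with explicit inverses.
    let entry = rlcs
        .iter()
        .zip(mults.iter())
        .fold(0u64, |acc, (&rlc, &m)| add(acc, mul(m, inv(rlc))));

    // Division-free form: entry * prod_i rlc_i == sum_i m_i * prod_{j != i} rlc_j.
    let prod: u64 = rlcs.iter().fold(1, |acc, &r| mul(acc, r));
    let mut rhs = 0u64;
    for i in 0..rlcs.len() {
        let mut partial = mults[i];
        for j in 0..rlcs.len() {
            if j != i { partial = mul(partial, rlcs[j]); }
        }
        rhs = add(rhs, partial);
    }
    assert_eq!(mul(entry, prod), rhs);
}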
+ let global_cumulative_sum = builder.global_cumulative_sum(); + if commit_scope == InteractionScope::Global { + for i in 0..7 { + builder + .when_last_row() + .assert_eq(main_local[main_local.len() - 14 + i], global_cumulative_sum.0.x.0[i]); + builder + .when_last_row() + .assert_eq(main_local[main_local.len() - 7 + i], global_cumulative_sum.0.y.0[i]); + } } } diff --git a/crates/stark/src/prover.rs b/crates/stark/src/prover.rs index 95d3da46c5..ab4022fdc2 100644 --- a/crates/stark/src/prover.rs +++ b/crates/stark/src/prover.rs @@ -1,8 +1,10 @@ +use crate::septic_curve::SepticCurve; +use crate::septic_digest::SepticDigest; +use crate::septic_extension::SepticExtension; use core::fmt::Display; -use hashbrown::HashMap; use itertools::Itertools; use serde::{de::DeserializeOwned, Serialize}; -use std::{array, cmp::Reverse, error::Error, time::Instant}; +use std::{cmp::Reverse, error::Error, time::Instant}; use crate::{air::InteractionScope, AirOpenedValues, ChipOpenedValues, ShardOpenedValues}; use p3_air::Air; @@ -18,20 +20,11 @@ use super::{ VerifierConstraintFolder, }; use crate::{ - air::MachineAir, config::ZeroCommitment, lookup::InteractionBuilder, opts::SP1CoreOpts, - record::MachineRecord, Challenger, DebugConstraintBuilder, MachineChip, MachineProof, - PackedChallenge, PcsProverData, ProverConstraintFolder, ShardCommitment, ShardMainData, - ShardProof, StarkVerifyingKey, + air::MachineAir, lookup::InteractionBuilder, opts::SP1CoreOpts, record::MachineRecord, + Challenger, DebugConstraintBuilder, MachineChip, MachineProof, PackedChallenge, PcsProverData, + ProverConstraintFolder, ShardCommitment, ShardMainData, ShardProof, StarkVerifyingKey, }; -/// A merged prover data item from the global and local prover data. -pub struct MergedProverDataItem<'a, M> { - /// The trace. - pub trace: &'a M, - /// The main data index. - pub main_data_idx: usize, -} - /// An algorithmic & hardware independent prover implementation for any [`MachineAir`]. pub trait MachineProver>: 'static + Send + Sync @@ -64,22 +57,13 @@ pub trait MachineProver>: fn pk_to_host(&self, pk: &Self::DeviceProvingKey) -> StarkProvingKey; /// Generate the main traces. - fn generate_traces( - &self, - record: &A::Record, - interaction_scope: InteractionScope, - ) -> Vec<(String, RowMajorMatrix>)> { + fn generate_traces(&self, record: &A::Record) -> Vec<(String, RowMajorMatrix>)> { let shard_chips = self.shard_chips(record).collect::>(); - let chips = shard_chips - .iter() - .filter(|chip| chip.commit_scope() == interaction_scope) - .collect::>(); - assert!(!chips.is_empty()); // For each chip, generate the trace. let parent_span = tracing::debug_span!("generate traces for shard"); parent_span.in_scope(|| { - chips + shard_chips .par_iter() .map(|chip| { let chip_name = chip.name(); @@ -122,10 +106,8 @@ pub trait MachineProver>: fn open( &self, pk: &Self::DeviceProvingKey, - global_data: Option>, - local_data: ShardMainData, + data: ShardMainData, challenger: &mut SC::Challenger, - global_permutation_challenges: &[SC::Challenge], ) -> Result, Self::Error>; /// Generate a proof for the given records. @@ -173,102 +155,6 @@ pub trait MachineProver>: { self.machine().debug_constraints(pk, records, challenger); } - - /// Merge the global and local chips' sorted traces. 
- #[allow(clippy::type_complexity)] - fn merge_shard_traces<'a, 'b>( - &'a self, - global_traces: &'b [Self::DeviceMatrix], - global_chip_ordering: &'b HashMap, - local_traces: &'b [Self::DeviceMatrix], - local_chip_ordering: &'b HashMap, - ) -> ( - HashMap, - Vec, - Vec>, - ) - where - 'a: 'b, - { - // Get the sort order of the chips. - let global_chips = global_chip_ordering - .iter() - .sorted_by_key(|(_, &i)| i) - .map(|chip| chip.0.clone()) - .collect::>(); - let local_chips = local_chip_ordering - .iter() - .sorted_by_key(|(_, &i)| i) - .map(|chip| chip.0.clone()) - .collect::>(); - - let mut merged_chips = Vec::with_capacity(global_traces.len() + local_traces.len()); - let mut merged_prover_data = Vec::with_capacity(global_chips.len() + local_chips.len()); - - assert!(global_traces.len() == global_chips.len()); - let mut global_iter = global_traces.iter().zip(global_chips.iter()).enumerate(); - assert!(local_traces.len() == local_chips.len()); - let mut local_iter = local_traces.iter().zip(local_chips.iter()).enumerate(); - - let mut global_next = global_iter.next(); - let mut local_next = local_iter.next(); - - let mut chip_scopes = Vec::new(); - - while global_next.is_some() || local_next.is_some() { - match (global_next, local_next) { - (Some(global), Some(local)) => { - let (global_prover_data_idx, (global_trace, global_chip)) = global; - let (local_prover_data_idx, (local_trace, local_chip)) = local; - if (Reverse(global_trace.height()), global_chip) - < (Reverse(local_trace.height()), local_chip) - { - merged_chips.push(global_chip.clone()); - chip_scopes.push(InteractionScope::Global); - merged_prover_data.push(MergedProverDataItem { - trace: global_trace, - main_data_idx: global_prover_data_idx, - }); - global_next = global_iter.next(); - } else { - merged_chips.push(local_chip.clone()); - chip_scopes.push(InteractionScope::Local); - merged_prover_data.push(MergedProverDataItem { - trace: local_trace, - main_data_idx: local_prover_data_idx, - }); - local_next = local_iter.next(); - } - } - (Some(global), None) => { - let (global_prover_data_idx, (global_trace, global_chip)) = global; - merged_chips.push(global_chip.clone()); - chip_scopes.push(InteractionScope::Global); - merged_prover_data.push(MergedProverDataItem { - trace: global_trace, - main_data_idx: global_prover_data_idx, - }); - global_next = global_iter.next(); - } - (None, Some(local)) => { - let (local_prover_data_idx, (local_trace, local_chip)) = local; - merged_chips.push(local_chip.clone()); - chip_scopes.push(InteractionScope::Local); - merged_prover_data.push(MergedProverDataItem { - trace: local_trace, - main_data_idx: local_prover_data_idx, - }); - local_next = local_iter.next(); - } - (None, None) => break, - } - } - - let chip_ordering = - merged_chips.iter().enumerate().map(|(i, name)| (name.clone(), i)).collect(); - - (chip_ordering, chip_scopes, merged_prover_data) - } } /// A proving key for any [`MachineAir`] that is agnostic to hardware. @@ -279,6 +165,9 @@ pub trait MachineProvingKey: Send + Sync { /// The start pc. fn pc_start(&self) -> Val; + /// The initial global cumulative sum. + fn initial_global_cumulative_sum(&self) -> SepticDigest>; + /// Observe itself in the challenger. 
fn observe_into(&self, challenger: &mut Challenger); } @@ -374,49 +263,15 @@ where fn open( &self, pk: &StarkProvingKey, - global_data: Option>, - local_data: ShardMainData, + data: ShardMainData, challenger: &mut ::Challenger, - global_permutation_challenges: &[SC::Challenge], ) -> Result, Self::Error> { - let (global_traces, global_main_commit, global_main_data, global_chip_ordering) = - if let Some(global_data) = global_data { - let ShardMainData { - traces: global_traces, - main_commit: global_main_commit, - main_data: global_main_data, - chip_ordering: global_chip_ordering, - public_values: _, - } = global_data; - (global_traces, global_main_commit, Some(global_main_data), global_chip_ordering) - } else { - (vec![], self.config().pcs().zero_commitment(), None, HashMap::new()) - }; - - let ShardMainData { - traces: local_traces, - main_commit: local_main_commit, - main_data: local_main_data, - chip_ordering: local_chip_ordering, - public_values: local_public_values, - } = local_data; - - // Merge the chip ordering and traces from the global and local data. - let (all_chips_ordering, all_chip_scopes, all_shard_data) = self.merge_shard_traces( - &global_traces, - &global_chip_ordering, - &local_traces, - &local_chip_ordering, - ); - - let chips = self.machine().shard_chips_ordered(&all_chips_ordering).collect::>(); - - assert!(chips.len() == all_shard_data.len()); + let chips = self.machine().shard_chips_ordered(&data.chip_ordering).collect::>(); + let traces = data.traces; let config = self.machine().config(); - let degrees = - all_shard_data.iter().map(|shard_data| shard_data.trace.height()).collect::>(); + let degrees = traces.iter().map(|trace| trace.height()).collect::>(); let log_degrees = degrees.iter().map(|degree| log2_strict_usize(*degree)).collect::>(); @@ -428,8 +283,9 @@ where let trace_domains = degrees.iter().map(|degree| pcs.natural_domain_for_degree(*degree)).collect::>(); - // Observe the main commitment. - challenger.observe(local_main_commit.clone()); + // Observe the public values and the main commitment. + challenger.observe_slice(&data.public_values[0..self.num_pv_elts()]); + challenger.observe(data.main_commit.clone()); // Obtain the challenges used for the local permutation argument. let mut local_permutation_challenges: Vec = Vec::new(); @@ -437,41 +293,46 @@ where local_permutation_challenges.push(challenger.sample_ext_element()); } - let permutation_challenges = global_permutation_challenges + let packed_perm_challenges = local_permutation_challenges .iter() - .chain(local_permutation_challenges.iter()) - .copied() - .collect::>(); - - let packed_perm_challenges = permutation_challenges - .iter() - .chain(local_permutation_challenges.iter()) .map(|c| PackedChallenge::::from_f(*c)) .collect::>(); // Generate the permutation traces. 
- let ((permutation_traces, prep_traces), cumulative_sums): ((Vec<_>, Vec<_>), Vec<_>) = - tracing::debug_span!("generate permutation traces").in_scope(|| { - chips - .par_iter() - .zip(all_shard_data.par_iter()) - .map(|(chip, shard_data)| { - let preprocessed_trace = - pk.chip_ordering.get(&chip.name()).map(|&index| &pk.traces[index]); - let (perm_trace, global_sum, local_sum) = chip.generate_permutation_trace( - preprocessed_trace, - shard_data.trace, - &permutation_challenges, - ); - ((perm_trace, preprocessed_trace), [global_sum, local_sum]) - }) - .unzip() - }); + let ((permutation_traces, prep_traces), (global_cumulative_sums, local_cumulative_sums)): ( + (Vec<_>, Vec<_>), + (Vec<_>, Vec<_>), + ) = tracing::debug_span!("generate permutation traces").in_scope(|| { + chips + .par_iter() + .zip(traces.par_iter()) + .map(|(chip, main_trace)| { + let preprocessed_trace = + pk.chip_ordering.get(&chip.name()).map(|&index| &pk.traces[index]); + let (perm_trace, local_sum) = chip.generate_permutation_trace( + preprocessed_trace, + main_trace, + &local_permutation_challenges, + ); + let global_sum = if chip.commit_scope() == InteractionScope::Local { + SepticDigest::>::zero() + } else { + let main_trace_size = main_trace.height() * main_trace.width(); + let last_row = &main_trace.values[main_trace_size - 14..main_trace_size]; + SepticDigest(SepticCurve { + x: SepticExtension::>::from_base_fn(|i| last_row[i]), + y: SepticExtension::>::from_base_fn(|i| last_row[i + 7]), + }) + }; + ((perm_trace, preprocessed_trace), (global_sum, local_sum)) + }) + .unzip() + }); // Compute some statistics. for i in 0..chips.len() { - let trace_width = all_shard_data[i].trace.width(); - let trace_height = all_shard_data[i].trace.height(); + let trace_width = traces[i].width(); + let trace_height = traces[i].height(); let prep_width = prep_traces[i].map_or(0, |x| x.width()); let permutation_width = permutation_traces[i].width(); let total_width = trace_width @@ -508,13 +369,15 @@ where // Observe the permutation commitment and cumulative sums. challenger.observe(permutation_commit.clone()); - for [global_sum, local_sum] in cumulative_sums.iter() { - challenger.observe_slice(global_sum.as_base_slice()); + for (local_sum, global_sum) in + local_cumulative_sums.iter().zip(global_cumulative_sums.iter()) + { challenger.observe_slice(local_sum.as_base_slice()); + challenger.observe_slice(&global_sum.0.x.0); + challenger.observe_slice(&global_sum.0.y.0); } // Compute the quotient polynomial for all chips. 
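+        // The quotient evaluation below receives both sums per chip: the local cumulative
+        // sum as a challenge-field element and the global cumulative sum as a base-field
+        // `SepticDigest`, matching the new `quotient_values` signature in
+        // `crates/stark/src/quotient.rs`.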
- let quotient_domains = trace_domains .iter() .zip_eq(log_degrees.iter()) @@ -537,25 +400,18 @@ where let preprocessed_trace_on_quotient_domains = pk.chip_ordering.get(&chips[i].name()).map(|&index| { pcs.get_evaluations_on_domain(&pk.data, index, *quotient_domain) + .to_row_major_matrix() }); - let scope = all_chip_scopes[i]; - let main_data = if scope == InteractionScope::Global { - global_main_data - .as_ref() - .expect("Expected global_main_data to be Some") - } else { - &local_main_data - }; - let main_trace_on_quotient_domains = pcs.get_evaluations_on_domain( - main_data, - all_shard_data[i].main_data_idx, - *quotient_domain, - ); + let main_trace_on_quotient_domains = pcs + .get_evaluations_on_domain(&data.main_data, i, *quotient_domain) + .to_row_major_matrix(); let permutation_trace_on_quotient_domains = pcs - .get_evaluations_on_domain(&permutation_data, i, *quotient_domain); + .get_evaluations_on_domain(&permutation_data, i, *quotient_domain) + .to_row_major_matrix(); quotient_values( chips[i], - &cumulative_sums[i], + &local_cumulative_sums[i], + &global_cumulative_sums[i], trace_domains[i], *quotient_domain, preprocessed_trace_on_quotient_domains, @@ -563,7 +419,7 @@ where permutation_trace_on_quotient_domains, &packed_perm_challenges, alpha, - &local_public_values, + &data.public_values, ) }) }) @@ -640,61 +496,22 @@ where let quotient_opening_points = (0..num_quotient_chunks).map(|_| vec![zeta]).collect::>(); - // Split the trace_opening_points to the global and local chips. - let mut global_trace_opening_points = Vec::with_capacity(global_chip_ordering.len()); - let mut local_trace_opening_points = Vec::with_capacity(local_chip_ordering.len()); - for (i, trace_opening_point) in main_trace_opening_points.clone().into_iter().enumerate() { - let scope = all_chip_scopes[i]; - if scope == InteractionScope::Global { - global_trace_opening_points.push(trace_opening_point); - } else { - local_trace_opening_points.push(trace_opening_point); - } - } - - let rounds = if let Some(global_main_data) = global_main_data.as_ref() { - vec![ - (&pk.data, preprocessed_opening_points), - (global_main_data, global_trace_opening_points), - (&local_main_data, local_trace_opening_points), - (&permutation_data, permutation_trace_opening_points), - ("ient_data, quotient_opening_points), - ] - } else { - vec![ - (&pk.data, preprocessed_opening_points), - (&local_main_data, local_trace_opening_points), - (&permutation_data, permutation_trace_opening_points), - ("ient_data, quotient_opening_points), - ] - }; - - let (openings, opening_proof) = - tracing::debug_span!("open multi batches").in_scope(|| pcs.open(rounds, challenger)); - - // Collect the opened values for each chip. 
- let ( - preprocessed_values, - global_main_values, - local_main_values, - permutation_values, - mut quotient_values, - ) = if global_main_data.is_some() { - let [preprocessed_values, global_main_values, local_main_values, permutation_values, quotient_values] = - openings.try_into().unwrap(); - ( - preprocessed_values, - Some(global_main_values), - local_main_values, - permutation_values, - quotient_values, + let (openings, opening_proof) = tracing::debug_span!("open multi batches").in_scope(|| { + pcs.open( + vec![ + (&pk.data, preprocessed_opening_points), + (&data.main_data, main_trace_opening_points.clone()), + (&permutation_data, permutation_trace_opening_points.clone()), + ("ient_data, quotient_opening_points), + ], + challenger, ) - } else { - let [preprocessed_values, local_main_values, permutation_values, quotient_values] = - openings.try_into().unwrap(); - (preprocessed_values, None, local_main_values, permutation_values, quotient_values) - }; + }); + // Collect the opened values for each chip. + let [preprocessed_values, main_values, permutation_values, mut quotient_values] = + openings.try_into().unwrap(); + assert!(main_values.len() == chips.len()); let preprocessed_opened_values = preprocessed_values .into_iter() .zip(pk.local_only.iter()) @@ -710,30 +527,11 @@ where }) .collect::>(); - // Merge the global and local main values. - let mut main_values = - Vec::with_capacity(global_chip_ordering.len() + local_chip_ordering.len()); - for chip in chips.iter() { - let global_order = global_chip_ordering.get(&chip.name()); - let local_order = local_chip_ordering.get(&chip.name()); - match (global_order, local_order) { - (Some(&global_order), None) => { - let global_main_values = - global_main_values.as_ref().expect("Global main values should be Some"); - main_values.push((global_main_values[global_order].clone(), chip.local_only())); - } - (None, Some(&local_order)) => { - main_values.push((local_main_values[local_order].clone(), chip.local_only())); - } - _ => unreachable!(), - } - } - assert!(main_values.len() == chips.len()); - let main_opened_values = main_values .into_iter() - .map(|(op, local_only)| { - if !local_only { + .zip(chips.iter()) + .map(|(op, chip)| { + if !chip.local_only() { let [local, next] = op.try_into().unwrap(); AirOpenedValues { local, next } } else { @@ -750,7 +548,6 @@ where AirOpenedValues { local, next } }) .collect::>(); - let mut quotient_opened_values = Vec::with_capacity(log_quotient_degrees.len()); for log_quotient_degree in log_quotient_degrees.iter() { let degree = 1 << *log_quotient_degree; @@ -762,38 +559,49 @@ where .into_iter() .zip_eq(permutation_opened_values) .zip_eq(quotient_opened_values) - .zip_eq(cumulative_sums) + .zip_eq(local_cumulative_sums) + .zip_eq(global_cumulative_sums) .zip_eq(log_degrees.iter()) .enumerate() - .map(|(i, ((((main, permutation), quotient), cumulative_sums), log_degree))| { - let preprocessed = pk - .chip_ordering - .get(&chips[i].name()) - .map(|&index| preprocessed_opened_values[index].clone()) - .unwrap_or(AirOpenedValues { local: vec![], next: vec![] }); - ChipOpenedValues { - preprocessed, - main, - permutation, - quotient, - global_cumulative_sum: cumulative_sums[0], - local_cumulative_sum: cumulative_sums[1], - log_degree: *log_degree, - } - }) + .map( + |( + i, + ( + ( + (((main, permutation), quotient), local_cumulative_sum), + global_cumulative_sum, + ), + log_degree, + ), + )| { + let preprocessed = pk + .chip_ordering + .get(&chips[i].name()) + .map(|&index| 
preprocessed_opened_values[index].clone()) + .unwrap_or(AirOpenedValues { local: vec![], next: vec![] }); + ChipOpenedValues { + preprocessed, + main, + permutation, + quotient, + global_cumulative_sum, + local_cumulative_sum, + log_degree: *log_degree, + } + }, + ) .collect::>(); Ok(ShardProof:: { commitment: ShardCommitment { - global_main_commit, - local_main_commit, + main_commit: data.main_commit.clone(), permutation_commit, quotient_commit, }, opened_values: ShardOpenedValues { chips: opened_values }, opening_proof, - chip_ordering: all_chips_ordering, - public_values: local_public_values, + chip_ordering: data.chip_ordering, + public_values: data.public_values, }) } @@ -812,69 +620,19 @@ where where A: for<'a> Air, SC::Challenge>>, { + // Generate dependencies. + self.machine().generate_dependencies(&mut records, &opts, None); + // Observe the preprocessed commitment. pk.observe_into(challenger); - let contains_global_bus = self.machine().contains_global_bus(); - - if contains_global_bus { - // Generate dependencies. - self.machine().generate_dependencies(&mut records, &opts, None); - } - - // Generate and commit the global traces for each shard. - let global_data = records - .par_iter() - .map(|record| { - if contains_global_bus { - let global_named_traces = - self.generate_traces(record, InteractionScope::Global); - Some(self.commit(record, global_named_traces)) - } else { - None - } - }) - .collect::>(); - - // Observe the challenges for each segment. - tracing::debug_span!("observing all challenges").in_scope(|| { - global_data.iter().zip_eq(records.iter()).for_each(|(global_data, record)| { - if contains_global_bus { - challenger.observe( - global_data - .as_ref() - .expect("must have a global commitment") - .main_commit - .clone(), - ); - } - challenger.observe_slice(&record.public_values::()[0..self.num_pv_elts()]); - }); - }); - - // Obtain the challenges used for the global permutation argument. 
- let global_permutation_challenges: [SC::Challenge; 2] = array::from_fn(|_| { - if contains_global_bus { - challenger.sample_ext_element() - } else { - SC::Challenge::zero() - } - }); - let shard_proofs = tracing::info_span!("prove_shards").in_scope(|| { - global_data + records .into_par_iter() - .zip_eq(records.par_iter()) - .map(|(global_shard_data, record)| { - let local_named_traces = self.generate_traces(record, InteractionScope::Local); - let local_shard_data = self.commit(record, local_named_traces); - self.open( - pk, - global_shard_data, - local_shard_data, - &mut challenger.clone(), - &global_permutation_challenges, - ) + .map(|record| { + let named_traces = self.generate_traces(&record); + let shard_data = self.commit(&record, named_traces); + self.open(pk, shard_data, &mut challenger.clone()) }) .collect::, _>>() })?; @@ -897,13 +655,17 @@ where self.pc_start } + fn initial_global_cumulative_sum(&self) -> SepticDigest> { + self.initial_global_cumulative_sum + } + fn observe_into(&self, challenger: &mut Challenger) { challenger.observe(self.commit.clone()); challenger.observe(self.pc_start); + challenger.observe_slice(&self.initial_global_cumulative_sum.0.x.0); + challenger.observe_slice(&self.initial_global_cumulative_sum.0.y.0); let zero = Val::::zero(); - for _ in 0..7 { - challenger.observe(zero); - } + challenger.observe(zero); } } diff --git a/crates/stark/src/quotient.rs b/crates/stark/src/quotient.rs index ee98d5e7c2..8d014b77e7 100644 --- a/crates/stark/src/quotient.rs +++ b/crates/stark/src/quotient.rs @@ -5,7 +5,7 @@ use p3_matrix::{dense::RowMajorMatrixView, stack::VerticalPair, Matrix}; use p3_maybe_rayon::prelude::*; use p3_util::log2_strict_usize; -use crate::air::MachineAir; +use crate::{air::MachineAir, septic_digest::SepticDigest}; use super::{ folder::ProverConstraintFolder, Chip, Domain, PackedChallenge, PackedVal, StarkGenericConfig, @@ -18,7 +18,8 @@ use super::{ #[allow(clippy::too_many_lines)] pub fn quotient_values( chip: &Chip, A>, - cumulative_sums: &[SC::Challenge], + local_cumulative_sum: &SC::Challenge, + global_cumulative_sum: &SepticDigest>, trace_domain: Domain, quotient_domain: Domain, preprocessed_trace_on_quotient_domain: Option, @@ -127,10 +128,7 @@ where let accumulator = PackedChallenge::::zero(); - let packed_cumulative_sums = cumulative_sums - .iter() - .map(|c| PackedChallenge::::from_f(*c)) - .collect::>(); + let packed_local_cumulative_sum = PackedChallenge::::from_f(*local_cumulative_sum); let mut folder = ProverConstraintFolder { preprocessed: VerticalPair::new( @@ -146,7 +144,8 @@ where RowMajorMatrixView::new_row(&perm_next), ), perm_challenges, - cumulative_sums: &packed_cumulative_sums, + local_cumulative_sum: &packed_local_cumulative_sum, + global_cumulative_sum, is_first_row, is_last_row, is_transition, diff --git a/crates/stark/src/septic_curve.rs b/crates/stark/src/septic_curve.rs new file mode 100644 index 0000000000..b1350dff81 --- /dev/null +++ b/crates/stark/src/septic_curve.rs @@ -0,0 +1,346 @@ +//! Elliptic Curve `y^2 = x^3 + 2x + 26z^5` over the `F_{p^7} = F_p[z]/(z^7 - 2z - 5)` extension field. +use crate::septic_extension::SepticExtension; +use p3_field::{AbstractExtensionField, AbstractField, Field, PrimeField}; +use serde::{Deserialize, Serialize}; +use std::ops::Add; + +/// A septic elliptic curve point on y^2 = x^3 + 2x + 26z^5 over field `F_{p^7} = F_p[z]/(z^7 - 2z - 5)`. 
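+///
+/// Points are stored in affine form as a pair of `SepticExtension` coordinates. The
+/// seventh limb of the y-coordinate classifies a digest point as a send or a receive
+/// interaction (see `is_send` / `is_receive` on `SepticExtension`).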
+#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[repr(C)] +pub struct SepticCurve { + /// The x-coordinate of an elliptic curve point. + pub x: SepticExtension, + /// The y-coordinate of an elliptic curve point. + pub y: SepticExtension, +} + +/// Linear coefficient for pairwise independent hash, derived from digits of pi. +pub const A_EC_LOGUP: [u32; 7] = + [0x31415926, 0x53589793, 0x23846264, 0x33832795, 0x02884197, 0x16939937, 0x51058209]; + +/// Constant coefficient for pairwise independent hash, derived from digits of pi. +pub const B_EC_LOGUP: [u32; 7] = + [0x74944592, 0x30781640, 0x62862089, 0x9862803, 0x48253421, 0x17067982, 0x14808651]; + +/// The x-coordinate for a curve point used as a witness for padding interactions. +pub const CURVE_WITNESS_DUMMY_POINT_X: [u32; 7] = + [0x2738281, 0x8284590, 0x4523536, 0x0287471, 0x3526624, 0x9775724, 0x7093699]; + +/// The y-coordinate for a curve point used as a witness for padding interactions. +pub const CURVE_WITNESS_DUMMY_POINT_Y: [u32; 7] = + [48041908, 550064556, 415267377, 1726976249, 1253299140, 209439863, 1302309485]; + +impl SepticCurve { + /// Returns the dummy point. + #[must_use] + pub fn dummy() -> Self { + Self { + x: SepticExtension::from_base_fn(|i| { + F::from_canonical_u32(CURVE_WITNESS_DUMMY_POINT_X[i]) + }), + y: SepticExtension::from_base_fn(|i| { + F::from_canonical_u32(CURVE_WITNESS_DUMMY_POINT_Y[i]) + }), + } + } + + /// Check if a `SepticCurve` struct is on the elliptic curve. + pub fn check_on_point(&self) -> bool { + self.y.square() == Self::curve_formula(self.x) + } + + /// Negates a `SepticCurve` point. + #[must_use] + pub fn neg(&self) -> Self { + SepticCurve { x: self.x, y: -self.y } + } + + #[must_use] + /// Adds two elliptic curve points, assuming that the addition doesn't lead to the exception cases of weierstrass addition. + pub fn add_incomplete(&self, other: SepticCurve) -> Self { + let slope = (other.y - self.y) / (other.x - self.x); + let result_x = slope.square() - self.x - other.x; + let result_y = slope * (self.x - result_x) - self.y; + Self { x: result_x, y: result_y } + } + + /// Add assigns an elliptic curve point, assuming that the addition doesn't lead to the exception cases of weierstrass addition. + pub fn add_assign(&mut self, other: SepticCurve) { + let result = self.add_incomplete(other); + self.x = result.x; + self.y = result.y; + } + + #[must_use] + /// Double the elliptic curve point. + pub fn double(&self) -> Self { + let slope = (self.x * self.x * F::from_canonical_u8(3u8) + F::two()) / (self.y * F::two()); + let result_x = slope.square() - self.x * F::two(); + let result_y = slope * (self.x - result_x) - self.y; + Self { x: result_x, y: result_y } + } + + /// Subtracts two elliptic curve points, assuming that the subtraction doesn't lead to the exception cases of weierstrass addition. + #[must_use] + pub fn sub_incomplete(&self, other: SepticCurve) -> Self { + self.add_incomplete(other.neg()) + } + + /// Subtract assigns an elliptic curve point, assuming that the subtraction doesn't lead to the exception cases of weierstrass addition. + pub fn sub_assign(&mut self, other: SepticCurve) { + let result = self.add_incomplete(other.neg()); + self.x = result.x; + self.y = result.y; + } +} + +impl SepticCurve { + /// Convert a message into an x-coordinate by a pairwise independent hash `am + b`. 
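+    ///
+    /// This is an affine map over `F_{p^7}` with the fixed coefficients `A_EC_LOGUP`
+    /// and `B_EC_LOGUP`. A minimal usage sketch (`ignore`d doc example; the message
+    /// value is arbitrary):
+    ///
+    /// ```ignore
+    /// let m: SepticExtension<BabyBear> = SepticExtension::from_canonical_u32(42);
+    /// let x = SepticCurve::<BabyBear>::universal_hash(m);
+    /// // `x` is a candidate x-coordinate; `lift_x` below perturbs `m` until the curve
+    /// // equation y^2 = x^3 + 2x + 26z^5 has a square root for y.
+    /// ```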
+ pub fn universal_hash(m: SepticExtension) -> SepticExtension { + let a_ec_logup = + SepticExtension::::from_base_fn(|i| F::from_canonical_u32(A_EC_LOGUP[i])); + let b_ec_logup = + SepticExtension::::from_base_fn(|i| F::from_canonical_u32(B_EC_LOGUP[i])); + a_ec_logup * m + b_ec_logup + } + + /// Evaluates the curve formula x^3 + 2x + 26z^5 + pub fn curve_formula(x: SepticExtension) -> SepticExtension { + x.cube() + + x * F::two() + + SepticExtension::from_base_slice(&[ + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::from_canonical_u32(26), + F::zero(), + ]) + } +} + +impl SepticCurve { + /// Lift an x coordinate into an elliptic curve. + /// As an x-coordinate may not be a valid one, we allow additions of [0, 256) * 2^16 to the first entry of the x-coordinate. + /// Also, we always return the curve point with y-coordinate within [0, (p-1)/2), where p is the characteristic. + /// The returned values are the curve point and the offset used. + pub fn lift_x(m: SepticExtension) -> (Self, u8) { + for offset in 0..=255 { + let m_trial = + m + SepticExtension::from_base(F::from_canonical_u32((offset as u32) << 16)); + let x_trial = Self::universal_hash(m_trial); + let y_sq = Self::curve_formula(x_trial); + if let Some(y) = y_sq.sqrt() { + if y.is_exception() { + continue; + } + if y.is_send() { + return (Self { x: x_trial, y: -y }, offset); + } + return (Self { x: x_trial, y }, offset); + } + } + panic!("curve point couldn't be found after 256 attempts"); + } +} + +impl SepticCurve { + /// Given three points p1, p2, p3, the function is zero if and only if p3.x == (p1 + p2).x assuming that p1 != p2. + pub fn sum_checker_x( + p1: SepticCurve, + p2: SepticCurve, + p3: SepticCurve, + ) -> SepticExtension { + (p1.x.clone() + p2.x.clone() + p3.x) * (p2.x.clone() - p1.x.clone()).square() + - (p2.y - p1.y).square() + } + + /// Given three points p1, p2, p3, the function is zero if and only if p3.y == (p1 + p2).y assuming that p1 != p2. + pub fn sum_checker_y( + p1: SepticCurve, + p2: SepticCurve, + p3: SepticCurve, + ) -> SepticExtension { + (p1.y.clone() + p3.y.clone()) * (p2.x.clone() - p1.x.clone()) + - (p2.y - p1.y.clone()) * (p1.x - p3.x) + } +} + +impl SepticCurve { + /// Convert a `SepticCurve` into `SepticCurve`, with a map that implements `FnMut(S) -> T`. + pub fn convert T>(point: SepticCurve, mut f: G) -> Self { + SepticCurve { + x: SepticExtension(point.x.0.map(&mut f)), + y: SepticExtension(point.y.0.map(&mut f)), + } + } +} + +/// A septic elliptic curve point on y^2 = x^3 + 2x + 26z^5 over field `F_{p^7} = F_p[z]/(z^7 - 2z - 5)`, including the point at infinity. +#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub enum SepticCurveComplete { + /// The point at infinity. + Infinity, + /// The affine point which can be represented with a `SepticCurve` structure. + Affine(SepticCurve), +} + +impl Add for SepticCurveComplete { + type Output = Self; + fn add(self, rhs: Self) -> Self::Output { + if self.is_infinity() { + return rhs; + } + if rhs.is_infinity() { + return self; + } + let point1 = self.point(); + let point2 = rhs.point(); + if point1.x != point2.x { + return Self::Affine(point1.add_incomplete(point2)); + } + if point1.y == point2.y { + return Self::Affine(point1.double()); + } + Self::Infinity + } +} + +impl SepticCurveComplete { + /// Returns whether or not the point is a point at infinity. 
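+    ///
+    /// A small sketch of the completed group law implemented above (`ignore`d doc
+    /// example, using the padding witness point defined in this file):
+    ///
+    /// ```ignore
+    /// let p = SepticCurve::<BabyBear>::dummy();
+    /// let a = SepticCurveComplete::Affine(p);
+    /// assert_eq!(SepticCurveComplete::Infinity + a, a);                  // identity
+    /// assert!((a + SepticCurveComplete::Affine(p.neg())).is_infinity()); // inverse
+    /// ```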
+ pub fn is_infinity(&self) -> bool { + match self { + Self::Infinity => true, + Self::Affine(_) => false, + } + } + + /// Asserts that the point is not a point at infinity, and returns the `SepticCurve` value. + pub fn point(&self) -> SepticCurve { + match self { + Self::Infinity => panic!("point() called for point at infinity"), + Self::Affine(point) => *point, + } + } +} + +#[cfg(test)] +mod tests { + use p3_baby_bear::BabyBear; + use p3_maybe_rayon::prelude::ParallelIterator; + use p3_maybe_rayon::prelude::{IndexedParallelIterator, IntoParallelIterator}; + use rayon_scan::ScanParallelIterator; + use std::time::Instant; + + use super::*; + + #[test] + fn test_lift_x() { + let x: SepticExtension = SepticExtension::from_base_slice(&[ + BabyBear::from_canonical_u32(0x2013), + BabyBear::from_canonical_u32(0x2015), + BabyBear::from_canonical_u32(0x2016), + BabyBear::from_canonical_u32(0x2023), + BabyBear::from_canonical_u32(0x2024), + BabyBear::from_canonical_u32(0x2016), + BabyBear::from_canonical_u32(0x2017), + ]); + let (curve_point, _) = SepticCurve::::lift_x(x); + assert!(curve_point.check_on_point()); + assert!(curve_point.x.is_receive()); + } + + #[test] + fn test_double() { + let x: SepticExtension = SepticExtension::from_base_slice(&[ + BabyBear::from_canonical_u32(0x2013), + BabyBear::from_canonical_u32(0x2015), + BabyBear::from_canonical_u32(0x2016), + BabyBear::from_canonical_u32(0x2023), + BabyBear::from_canonical_u32(0x2024), + BabyBear::from_canonical_u32(0x2016), + BabyBear::from_canonical_u32(0x2017), + ]); + let (curve_point, _) = SepticCurve::::lift_x(x); + let double_point = curve_point.double(); + assert!(double_point.check_on_point()); + } + + #[test] + #[ignore] + fn test_simple_bench() { + const D: u32 = 1 << 16; + let mut vec = Vec::with_capacity(D as usize); + let mut sum = Vec::with_capacity(D as usize); + let start = Instant::now(); + for i in 0..D { + let x: SepticExtension = SepticExtension::from_base_slice(&[ + BabyBear::from_canonical_u32(i + 25), + BabyBear::from_canonical_u32(2 * i + 376), + BabyBear::from_canonical_u32(4 * i + 23), + BabyBear::from_canonical_u32(8 * i + 531), + BabyBear::from_canonical_u32(16 * i + 542), + BabyBear::from_canonical_u32(32 * i + 196), + BabyBear::from_canonical_u32(64 * i + 667), + ]); + let (curve_point, _) = SepticCurve::::lift_x(x); + vec.push(curve_point); + } + println!("Time elapsed: {:?}", start.elapsed()); + let start = Instant::now(); + for i in 0..D { + sum.push(vec[i as usize].add_incomplete(vec[((i + 1) % D) as usize])); + } + println!("Time elapsed: {:?}", start.elapsed()); + let start = Instant::now(); + for i in 0..(D as usize) { + assert!( + SepticCurve::::sum_checker_x(vec[i], vec[(i + 1) % D as usize], sum[i]) + == SepticExtension::::zero() + ); + assert!( + SepticCurve::::sum_checker_y(vec[i], vec[(i + 1) % D as usize], sum[i]) + == SepticExtension::::zero() + ); + } + println!("Time elapsed: {:?}", start.elapsed()); + } + + #[test] + #[ignore] + fn test_parallel_bench() { + const D: u32 = 1 << 20; + let mut vec = Vec::with_capacity(D as usize); + let start = Instant::now(); + for i in 0..D { + let x: SepticExtension = SepticExtension::from_base_slice(&[ + BabyBear::from_canonical_u32(i + 25), + BabyBear::from_canonical_u32(2 * i + 376), + BabyBear::from_canonical_u32(4 * i + 23), + BabyBear::from_canonical_u32(8 * i + 531), + BabyBear::from_canonical_u32(16 * i + 542), + BabyBear::from_canonical_u32(32 * i + 196), + BabyBear::from_canonical_u32(64 * i + 667), + ]); + let (curve_point, _) = 
SepticCurve::::lift_x(x); + vec.push(SepticCurveComplete::Affine(curve_point)); + } + println!("Time elapsed: {:?}", start.elapsed()); + + let mut cum_sum = SepticCurveComplete::Infinity; + let start = Instant::now(); + for point in &vec { + cum_sum = cum_sum + *point; + } + println!("Time elapsed: {:?}", start.elapsed()); + let start = Instant::now(); + let par_sum = vec + .into_par_iter() + .with_min_len(1 << 16) + .scan(|a, b| *a + *b, SepticCurveComplete::Infinity) + .collect::>>(); + println!("Time elapsed: {:?}", start.elapsed()); + assert_eq!(cum_sum, *par_sum.last().unwrap()); + } +} diff --git a/crates/stark/src/septic_digest.rs b/crates/stark/src/septic_digest.rs new file mode 100644 index 0000000000..579c5cb9bb --- /dev/null +++ b/crates/stark/src/septic_digest.rs @@ -0,0 +1,98 @@ +//! Elliptic Curve digests with a starting point to avoid weierstrass addition exceptions. +use crate::septic_curve::SepticCurve; +use crate::septic_extension::SepticExtension; +use p3_field::{AbstractExtensionField, AbstractField, Field}; +use serde::{Deserialize, Serialize}; +use std::iter::Sum; + +/// The x-coordinate for a curve point used as a starting cumulative sum for global permutation trace generation. +pub const CURVE_CUMULATIVE_SUM_START_X: [u32; 7] = + [0x1434213, 0x5623730, 0x9504880, 0x1688724, 0x2096980, 0x7856967, 0x1875376]; + +/// The y-coordinate for a curve point used as a starting cumulative sum for global permutation trace generation. +pub const CURVE_CUMULATIVE_SUM_START_Y: [u32; 7] = + [885797405, 1130275556, 567836311, 52700240, 239639200, 442612155, 1839439733]; + +/// The x-coordinate for a curve point used as a starting random point for digest accumulation. +pub const DIGEST_SUM_START_X: [u32; 7] = + [0x1742050, 0x8075688, 0x7729352, 0x7446341, 0x5058723, 0x6694280, 0x5253810]; + +/// The y-coordinate for a curve point used as a starting random point for digest accumulation. +pub const DIGEST_SUM_START_Y: [u32; 7] = + [462194069, 1842131493, 281651264, 1684885851, 483907222, 1097389352, 1648978901]; + +/// A global cumulative sum digest, a point on the elliptic curve that `SepticCurve` represents. +/// As these digests start with the `CURVE_CUMULATIVE_SUM_START` point, they require special summing logic. +#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[repr(C)] +pub struct SepticDigest(pub SepticCurve); + +impl SepticDigest { + #[must_use] + /// The zero digest, the starting point of the accumulation of curve points derived from the scheme. + pub fn zero() -> Self { + SepticDigest(SepticCurve { + x: SepticExtension::::from_base_fn(|i| { + F::from_canonical_u32(CURVE_CUMULATIVE_SUM_START_X[i]) + }), + y: SepticExtension::::from_base_fn(|i| { + F::from_canonical_u32(CURVE_CUMULATIVE_SUM_START_Y[i]) + }), + }) + } + + #[must_use] + /// The digest used for starting the accumulation of digests. + pub fn starting_digest() -> Self { + SepticDigest(SepticCurve { + x: SepticExtension::::from_base_fn(|i| F::from_canonical_u32(DIGEST_SUM_START_X[i])), + y: SepticExtension::::from_base_fn(|i| F::from_canonical_u32(DIGEST_SUM_START_Y[i])), + }) + } +} + +impl SepticDigest { + /// Checks that the digest is zero, the starting point of the accumulation. + pub fn is_zero(&self) -> bool { + *self == SepticDigest::::zero() + } +} + +impl Sum for SepticDigest { + fn sum>(iter: I) -> Self { + let start = SepticDigest::::starting_digest().0; + + // Computation order is start + (digest1 - offset) + (digest2 - offset) + ... 
+ (digestN - offset) + offset - start. + let mut ret = iter.fold(start, |acc, x| { + let sum_offset = acc.add_incomplete(x.0); + sum_offset.sub_incomplete(SepticDigest::::zero().0) + }); + + ret.add_assign(SepticDigest::::zero().0); + ret.sub_assign(start); + SepticDigest(ret) + } +} + +#[cfg(test)] +mod test { + use super::*; + use p3_baby_bear::BabyBear; + #[test] + fn test_const_points() { + let x: SepticExtension = SepticExtension::from_base_fn(|i| { + BabyBear::from_canonical_u32(CURVE_CUMULATIVE_SUM_START_X[i]) + }); + let y: SepticExtension = SepticExtension::from_base_fn(|i| { + BabyBear::from_canonical_u32(CURVE_CUMULATIVE_SUM_START_Y[i]) + }); + let point = SepticCurve { x, y }; + assert!(point.check_on_point()); + let x: SepticExtension = + SepticExtension::from_base_fn(|i| BabyBear::from_canonical_u32(DIGEST_SUM_START_X[i])); + let y: SepticExtension = + SepticExtension::from_base_fn(|i| BabyBear::from_canonical_u32(DIGEST_SUM_START_Y[i])); + let point = SepticCurve { x, y }; + assert!(point.check_on_point()); + } +} diff --git a/crates/stark/src/septic_extension.rs b/crates/stark/src/septic_extension.rs new file mode 100644 index 0000000000..651e3c7fa0 --- /dev/null +++ b/crates/stark/src/septic_extension.rs @@ -0,0 +1,883 @@ +//! A septic extension with an irreducible polynomial `z^7 - 2z - 5`. +use num_bigint::BigUint; +use num_traits::One; +use p3_field::PrimeField; +use p3_field::{AbstractExtensionField, AbstractField, ExtensionField, Field, Packable}; +use serde::{Deserialize, Serialize}; +use std::array; +use std::fmt::Display; +use std::iter::{Product, Sum}; +use std::ops::{Add, AddAssign, Div, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign}; + +use crate::air::{SP1AirBuilder, SepticExtensionAirBuilder}; + +/// A septic extension with an irreducible polynomial `z^7 - 2z - 5`. +/// +/// The field can be constructed as `F_{p^7} = F_p[z]/(z^7 - 2z - 5)`. 
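+///
+/// Elements are stored as seven base-field coefficients `[a_0, ..., a_6]` of
+/// `a_0 + a_1 z + ... + a_6 z^6`, and multiplication reduces using `z^7 = 2z + 5`.
+/// A small sanity-check sketch (`ignore`d doc example over BabyBear):
+///
+/// ```ignore
+/// let z = SepticExtension::<BabyBear>::from_base_fn(|i| {
+///     if i == 1 { BabyBear::one() } else { BabyBear::zero() }
+/// });
+/// let z6 = SepticExtension::<BabyBear>::from_base_fn(|i| {
+///     if i == 6 { BabyBear::one() } else { BabyBear::zero() }
+/// });
+/// let expected = SepticExtension::<BabyBear>::from_base_fn(|i| match i {
+///     0 => BabyBear::from_canonical_u32(5),
+///     1 => BabyBear::two(),
+///     _ => BabyBear::zero(),
+/// });
+/// // z * z^6 = z^7 = 2z + 5
+/// assert_eq!(z * z6, expected);
+/// ```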
+#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[repr(C)] +pub struct SepticExtension(pub [F; 7]); + +impl AbstractField for SepticExtension { + type F = SepticExtension; + + fn zero() -> Self { + SepticExtension([ + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + ]) + } + + fn one() -> Self { + SepticExtension([ + F::one(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + ]) + } + + fn two() -> Self { + SepticExtension([ + F::two(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + ]) + } + + fn neg_one() -> Self { + SepticExtension([ + F::neg_one(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + ]) + } + + fn from_f(f: Self::F) -> Self { + SepticExtension([ + F::from_f(f.0[0]), + F::from_f(f.0[1]), + F::from_f(f.0[2]), + F::from_f(f.0[3]), + F::from_f(f.0[4]), + F::from_f(f.0[5]), + F::from_f(f.0[6]), + ]) + } + + fn from_bool(b: bool) -> Self { + SepticExtension([ + F::from_bool(b), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + ]) + } + + fn from_canonical_u8(n: u8) -> Self { + SepticExtension([ + F::from_canonical_u8(n), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + ]) + } + + fn from_canonical_u16(n: u16) -> Self { + SepticExtension([ + F::from_canonical_u16(n), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + ]) + } + + fn from_canonical_u32(n: u32) -> Self { + SepticExtension([ + F::from_canonical_u32(n), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + ]) + } + + fn from_canonical_u64(n: u64) -> Self { + SepticExtension([ + F::from_canonical_u64(n), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + ]) + } + + fn from_canonical_usize(n: usize) -> Self { + SepticExtension([ + F::from_canonical_usize(n), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + ]) + } + + fn from_wrapped_u32(n: u32) -> Self { + SepticExtension([ + F::from_wrapped_u32(n), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + ]) + } + + fn from_wrapped_u64(n: u64) -> Self { + SepticExtension([ + F::from_wrapped_u64(n), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + F::zero(), + ]) + } + + fn generator() -> Self { + SepticExtension([F::two(), F::one(), F::zero(), F::zero(), F::zero(), F::zero(), F::zero()]) + } +} + +impl Field for SepticExtension { + type Packing = Self; + + fn try_inverse(&self) -> Option { + if self.is_zero() { + return None; + } + Some(self.inv()) + } + + fn order() -> BigUint { + F::order().pow(7) + } +} + +impl AbstractExtensionField for SepticExtension { + const D: usize = 7; + + fn from_base(b: F) -> Self { + SepticExtension([b, F::zero(), F::zero(), F::zero(), F::zero(), F::zero(), F::zero()]) + } + + fn from_base_slice(bs: &[F]) -> Self { + SepticExtension([ + bs[0].clone(), + bs[1].clone(), + bs[2].clone(), + bs[3].clone(), + bs[4].clone(), + bs[5].clone(), + bs[6].clone(), + ]) + } + + fn from_base_fn F>(f: G) -> Self { + Self(array::from_fn(f)) + } + + fn as_base_slice(&self) -> &[F] { + self.0.as_slice() + } +} + +impl ExtensionField for SepticExtension { + type ExtensionPacking = SepticExtension; +} + +impl Packable for SepticExtension {} + +impl Add for SepticExtension { + type Output = Self; + + fn add(self, rhs: Self) -> Self::Output { + let mut res = self.0; + for (r, 
rhs_val) in res.iter_mut().zip(rhs.0) { + *r = (*r).clone() + rhs_val; + } + Self(res) + } +} + +impl AddAssign for SepticExtension { + fn add_assign(&mut self, rhs: Self) { + self.0[0] += rhs.0[0].clone(); + self.0[1] += rhs.0[1].clone(); + self.0[2] += rhs.0[2].clone(); + self.0[3] += rhs.0[3].clone(); + self.0[4] += rhs.0[4].clone(); + self.0[5] += rhs.0[5].clone(); + self.0[6] += rhs.0[6].clone(); + } +} + +impl Sub for SepticExtension { + type Output = Self; + + fn sub(self, rhs: Self) -> Self::Output { + let mut res = self.0; + for (r, rhs_val) in res.iter_mut().zip(rhs.0) { + *r = (*r).clone() - rhs_val; + } + Self(res) + } +} + +impl SubAssign for SepticExtension { + fn sub_assign(&mut self, rhs: Self) { + self.0[0] -= rhs.0[0].clone(); + } +} + +impl Neg for SepticExtension { + type Output = Self; + + fn neg(self) -> Self::Output { + let mut res = self.0; + for r in res.iter_mut() { + *r = -r.clone(); + } + Self(res) + } +} + +impl Mul for SepticExtension { + type Output = Self; + + fn mul(self, rhs: Self) -> Self::Output { + let mut res: [F; 13] = core::array::from_fn(|_| F::zero()); + for i in 0..7 { + for j in 0..7 { + res[i + j] = res[i + j].clone() + self.0[i].clone() * rhs.0[j].clone(); + } + } + let mut ret: [F; 7] = core::array::from_fn(|i| res[i].clone()); + for i in 7..13 { + ret[i - 7] = ret[i - 7].clone() + res[i].clone() * F::from_canonical_u32(5); + ret[i - 6] = ret[i - 6].clone() + res[i].clone() * F::from_canonical_u32(2); + } + Self(ret) + } +} + +impl MulAssign for SepticExtension { + fn mul_assign(&mut self, rhs: Self) { + let res = self.clone() * rhs; + *self = res; + } +} + +impl Product for SepticExtension { + fn product>(iter: I) -> Self { + let one = Self::one(); + iter.fold(one, |acc, x| acc * x) + } +} + +impl Sum for SepticExtension { + fn sum>(iter: I) -> Self { + let zero = Self::zero(); + iter.fold(zero, |acc, x| acc + x) + } +} + +impl From for SepticExtension { + fn from(f: F) -> Self { + SepticExtension([f, F::zero(), F::zero(), F::zero(), F::zero(), F::zero(), F::zero()]) + } +} + +impl Add for SepticExtension { + type Output = Self; + + fn add(self, rhs: F) -> Self::Output { + SepticExtension([ + self.0[0].clone() + rhs, + self.0[1].clone(), + self.0[2].clone(), + self.0[3].clone(), + self.0[4].clone(), + self.0[5].clone(), + self.0[6].clone(), + ]) + } +} + +impl AddAssign for SepticExtension { + fn add_assign(&mut self, rhs: F) { + self.0[0] += rhs; + } +} + +impl Sub for SepticExtension { + type Output = Self; + + fn sub(self, rhs: F) -> Self::Output { + self + (-rhs) + } +} + +impl SubAssign for SepticExtension { + fn sub_assign(&mut self, rhs: F) { + self.0[0] -= rhs; + } +} + +impl Mul for SepticExtension { + type Output = Self; + + fn mul(self, rhs: F) -> Self::Output { + SepticExtension([ + self.0[0].clone() * rhs.clone(), + self.0[1].clone() * rhs.clone(), + self.0[2].clone() * rhs.clone(), + self.0[3].clone() * rhs.clone(), + self.0[4].clone() * rhs.clone(), + self.0[5].clone() * rhs.clone(), + self.0[6].clone() * rhs.clone(), + ]) + } +} + +impl MulAssign for SepticExtension { + fn mul_assign(&mut self, rhs: F) { + for i in 0..7 { + self.0[i] *= rhs.clone(); + } + } +} + +impl Div for SepticExtension { + type Output = Self; + + #[allow(clippy::suspicious_arithmetic_impl)] + fn div(self, rhs: Self) -> Self::Output { + self * rhs.inverse() + } +} + +impl Display for SepticExtension { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl SepticExtension { + fn z_pow_p(index: u32) 
-> Self { + // The constants written below are specifically for the BabyBear field. + debug_assert_eq!(F::order(), BigUint::from(2013265921u32)); + if index == 0 { + return Self::one(); + } + if index == 1 { + return SepticExtension([ + F::from_canonical_u32(954599710), + F::from_canonical_u32(1359279693), + F::from_canonical_u32(566669999), + F::from_canonical_u32(1982781815), + F::from_canonical_u32(1735718361), + F::from_canonical_u32(1174868538), + F::from_canonical_u32(1120871770), + ]); + } + if index == 2 { + return SepticExtension([ + F::from_canonical_u32(862825265), + F::from_canonical_u32(597046311), + F::from_canonical_u32(978840770), + F::from_canonical_u32(1790138282), + F::from_canonical_u32(1044777201), + F::from_canonical_u32(835869808), + F::from_canonical_u32(1342179023), + ]); + } + if index == 3 { + return SepticExtension([ + F::from_canonical_u32(596273169), + F::from_canonical_u32(658837454), + F::from_canonical_u32(1515468261), + F::from_canonical_u32(367059247), + F::from_canonical_u32(781278880), + F::from_canonical_u32(1544222616), + F::from_canonical_u32(155490465), + ]); + } + if index == 4 { + return SepticExtension([ + F::from_canonical_u32(557608863), + F::from_canonical_u32(1173670028), + F::from_canonical_u32(1749546888), + F::from_canonical_u32(1086464137), + F::from_canonical_u32(803900099), + F::from_canonical_u32(1288818584), + F::from_canonical_u32(1184677604), + ]); + } + if index == 5 { + return SepticExtension([ + F::from_canonical_u32(763416381), + F::from_canonical_u32(1252567168), + F::from_canonical_u32(628856225), + F::from_canonical_u32(1771903394), + F::from_canonical_u32(650712211), + F::from_canonical_u32(19417363), + F::from_canonical_u32(57990258), + ]); + } + if index == 6 { + return SepticExtension([ + F::from_canonical_u32(1734711039), + F::from_canonical_u32(1749813853), + F::from_canonical_u32(1227235221), + F::from_canonical_u32(1707730636), + F::from_canonical_u32(424560395), + F::from_canonical_u32(1007029514), + F::from_canonical_u32(498034669), + ]); + } + unreachable!(); + } + + fn z_pow_p2(index: u32) -> Self { + // The constants written below are specifically for the BabyBear field. 
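+        // Each entry below is the precomputed reduction of z^(index * p^2) modulo
+        // z^7 - 2z - 5, with p = 2013265921; together with `z_pow_p` (which stores
+        // z^(index * p)) these back the Frobenius maps further down.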
+ debug_assert_eq!(F::order(), BigUint::from(2013265921u32)); + if index == 0 { + return Self::one(); + } + if index == 1 { + return SepticExtension([ + F::from_canonical_u32(1013489358), + F::from_canonical_u32(1619071628), + F::from_canonical_u32(304593143), + F::from_canonical_u32(1949397349), + F::from_canonical_u32(1564307636), + F::from_canonical_u32(327761151), + F::from_canonical_u32(415430835), + ]); + } + if index == 2 { + return SepticExtension([ + F::from_canonical_u32(209824426), + F::from_canonical_u32(1313900768), + F::from_canonical_u32(38410482), + F::from_canonical_u32(256593180), + F::from_canonical_u32(1708830551), + F::from_canonical_u32(1244995038), + F::from_canonical_u32(1555324019), + ]); + } + if index == 3 { + return SepticExtension([ + F::from_canonical_u32(1475628651), + F::from_canonical_u32(777565847), + F::from_canonical_u32(704492386), + F::from_canonical_u32(1218528120), + F::from_canonical_u32(1245363405), + F::from_canonical_u32(475884575), + F::from_canonical_u32(649166061), + ]); + } + if index == 4 { + return SepticExtension([ + F::from_canonical_u32(550038364), + F::from_canonical_u32(948935655), + F::from_canonical_u32(68722023), + F::from_canonical_u32(1251345762), + F::from_canonical_u32(1692456177), + F::from_canonical_u32(1177958698), + F::from_canonical_u32(350232928), + ]); + } + if index == 5 { + return SepticExtension([ + F::from_canonical_u32(882720258), + F::from_canonical_u32(821925756), + F::from_canonical_u32(199955840), + F::from_canonical_u32(812002876), + F::from_canonical_u32(1484951277), + F::from_canonical_u32(1063138035), + F::from_canonical_u32(491712810), + ]); + } + if index == 6 { + return SepticExtension([ + F::from_canonical_u32(738287111), + F::from_canonical_u32(1955364991), + F::from_canonical_u32(552724293), + F::from_canonical_u32(1175775744), + F::from_canonical_u32(341623997), + F::from_canonical_u32(1454022463), + F::from_canonical_u32(408193320), + ]); + } + unreachable!(); + } + + #[must_use] + fn frobenius(&self) -> Self { + let mut result = Self::zero(); + result += self.0[0]; + result += Self::z_pow_p(1) * self.0[1]; + result += Self::z_pow_p(2) * self.0[2]; + result += Self::z_pow_p(3) * self.0[3]; + result += Self::z_pow_p(4) * self.0[4]; + result += Self::z_pow_p(5) * self.0[5]; + result += Self::z_pow_p(6) * self.0[6]; + result + } + + #[must_use] + fn double_frobenius(&self) -> Self { + let mut result = Self::zero(); + result += self.0[0]; + result += Self::z_pow_p2(1) * self.0[1]; + result += Self::z_pow_p2(2) * self.0[2]; + result += Self::z_pow_p2(3) * self.0[3]; + result += Self::z_pow_p2(4) * self.0[4]; + result += Self::z_pow_p2(5) * self.0[5]; + result += Self::z_pow_p2(6) * self.0[6]; + result + } + + #[must_use] + fn pow_r_1(&self) -> Self { + let base = self.frobenius() * self.double_frobenius(); + let base_p2 = base.double_frobenius(); + let base_p4 = base_p2.double_frobenius(); + base * base_p2 * base_p4 + } + + #[must_use] + fn inv(&self) -> Self { + let pow_r_1 = self.pow_r_1(); + let pow_r = pow_r_1 * *self; + pow_r_1 * pow_r.0[0].inverse() + } + + fn is_square(&self) -> (F, bool) { + let pow_r_1 = self.pow_r_1(); + let pow_r = pow_r_1 * *self; + let exp = (F::order() - BigUint::one()) / BigUint::from(2u8); + let exp = exp.to_u64_digits()[0]; + + (pow_r.0[0], pow_r.0[0].exp_u64(exp) == F::one()) + } + + /// Computes the square root of the septic field extension element. + /// Returns None if the element is not a square, and Some(result) if it is a square. 
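+    ///
+    /// A round-trip sketch mirroring the unit tests below (`ignore`d doc example):
+    ///
+    /// ```ignore
+    /// let a: SepticExtension<BabyBear> = SepticExtension::from_canonical_u32(7);
+    /// let b = a * a;
+    /// let r = b.sqrt().unwrap();
+    /// assert_eq!(r * r, b);
+    /// ```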
+ pub fn sqrt(&self) -> Option { + let n = *self; + + if n == Self::zero() || n == Self::one() { + return Some(n); + } + + let (numerator, is_square) = n.is_square(); + + if !is_square { + return None; + } + + let mut n_iter = n; + let mut n_power = n; + for i in 1..30 { + n_iter *= n_iter; + if i >= 26 { + n_power *= n_iter; + } + } + + let mut n_frobenius = n_power.frobenius(); + let mut denominator = n_frobenius; + + n_frobenius = n_frobenius.double_frobenius(); + denominator *= n_frobenius; + n_frobenius = n_frobenius.double_frobenius(); + denominator *= n_frobenius; + denominator *= n; + + let base = numerator.inverse(); + let g = F::generator(); + let mut a = F::one(); + let mut nonresidue = F::one() - base; + let legendre_exp = (F::order() - BigUint::one()) / BigUint::from(2u8); + + while nonresidue.exp_u64(legendre_exp.to_u64_digits()[0]) == F::one() { + a *= g; + nonresidue = a.square() - base; + } + + let order = F::order(); + let cipolla_pow = (&order + BigUint::one()) / BigUint::from(2u8); + let mut x = CipollaExtension::new(a, F::one()); + x = x.pow(&cipolla_pow, nonresidue); + + Some(denominator * x.real) + } +} + +impl SepticExtension { + /// Returns whether the extension field element viewed as an y-coordinate of a digest represents a receive interaction. + pub fn is_receive(&self) -> bool { + BigUint::from(1u32) <= self.0[6].as_canonical_biguint() + && self.0[6].as_canonical_biguint() + <= (F::order() - BigUint::from(1u32)) / BigUint::from(2u32) + } + + /// Returns whether the extension field element viewed as an y-coordinate of a digest represents a send interaction. + pub fn is_send(&self) -> bool { + (F::order() + BigUint::from(1u32)) / BigUint::from(2u32) <= self.0[6].as_canonical_biguint() + && self.0[6].as_canonical_biguint() <= (F::order() - BigUint::from(1u32)) + } + + /// Returns whether the extension field element viewed as an y-coordinate of a digest cannot represent anything. + pub fn is_exception(&self) -> bool { + self.0[6].as_canonical_biguint() == BigUint::from(0u32) + } +} + +/// Extension field for Cipolla's algorithm, taken from . +#[derive(Clone, Copy, Debug)] +struct CipollaExtension { + real: F, + imag: F, +} + +impl CipollaExtension { + fn new(real: F, imag: F) -> Self { + Self { real, imag } + } + + fn one() -> Self { + Self::new(F::one(), F::zero()) + } + + fn mul_ext(&self, other: Self, nonresidue: F) -> Self { + Self::new( + self.real * other.real + nonresidue * self.imag * other.imag, + self.real * other.imag + self.imag * other.real, + ) + } + + fn pow(&self, exp: &BigUint, nonresidue: F) -> Self { + let mut result = Self::one(); + let mut base = *self; + let bits = exp.bits(); + + for i in 0..bits { + if exp.bit(i) { + result = result.mul_ext(base, nonresidue); + } + base = base.mul_ext(base, nonresidue); + } + result + } +} + +/// A block of columns for septic extension. +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[repr(C)] +pub struct SepticBlock(pub [T; 7]); + +impl SepticBlock { + /// Maps a `SepticBlock` to `SepticBlock` based on a map from `T` to `U`. + pub fn map(self, f: F) -> SepticBlock + where + F: FnMut(T) -> U, + { + SepticBlock(self.0.map(f)) + } + + /// A function similar to `core:array::from_fn`. + pub fn from_base_fn T>(f: G) -> Self { + Self(array::from_fn(f)) + } +} + +impl SepticBlock { + /// Takes a `SepticBlock` into a `SepticExtension` of expressions. 
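+    ///
+    /// This is intended for constraint evaluation, where a block of seven trace columns
+    /// is reinterpreted as a single `F_{p^7}` element inside an AIR builder.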
+ pub fn as_extension>( + &self, + ) -> SepticExtension { + let arr: [AB::Expr; 7] = self.0.clone().map(|x| AB::Expr::zero() + x); + SepticExtension(arr) + } + + /// Takes a single expression into a `SepticExtension` of expressions. + pub fn as_extension_from_base>( + &self, + base: AB::Expr, + ) -> SepticExtension { + let mut arr: [AB::Expr; 7] = self.0.clone().map(|_| AB::Expr::zero()); + arr[0] = base; + + SepticExtension(arr) + } +} + +impl From<[T; 7]> for SepticBlock { + fn from(arr: [T; 7]) -> Self { + Self(arr) + } +} + +impl From for SepticBlock { + fn from(value: T) -> Self { + Self([value, T::zero(), T::zero(), T::zero(), T::zero(), T::zero(), T::zero()]) + } +} + +impl From<&[T]> for SepticBlock { + fn from(slice: &[T]) -> Self { + let arr: [T; 7] = slice.try_into().unwrap(); + Self(arr) + } +} + +impl Index for SepticBlock +where + [T]: Index, +{ + type Output = <[T] as Index>::Output; + + #[inline] + fn index(&self, index: I) -> &Self::Output { + Index::index(&self.0, index) + } +} + +impl IndexMut for SepticBlock +where + [T]: IndexMut, +{ + #[inline] + fn index_mut(&mut self, index: I) -> &mut Self::Output { + IndexMut::index_mut(&mut self.0, index) + } +} + +impl IntoIterator for SepticBlock { + type Item = T; + type IntoIter = std::array::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +#[cfg(test)] +mod tests { + use p3_baby_bear::BabyBear; + + use super::*; + + #[test] + fn test_mul() { + let a: SepticExtension = SepticExtension::from_canonical_u32(1); + let b: SepticExtension = SepticExtension::from_canonical_u32(2); + let c = a * b; + println!("{c}"); + } + + #[test] + fn test_inv() { + for i in 0..256 { + let a: SepticExtension = SepticExtension([ + BabyBear::from_canonical_u32(i + 3), + BabyBear::from_canonical_u32(2 * i + 6), + BabyBear::from_canonical_u32(5 * i + 17), + BabyBear::from_canonical_u32(6 * i + 91), + BabyBear::from_canonical_u32(8 * i + 37), + BabyBear::from_canonical_u32(11 * i + 35), + BabyBear::from_canonical_u32(14 * i + 33), + ]); + let b = a.inv(); + assert_eq!(a * b, SepticExtension::::one()); + } + } + + #[test] + fn test_legendre() { + let a: SepticExtension = SepticExtension::generator(); + let mut b = SepticExtension::::one(); + for i in 1..256 { + b *= a; + let (_, c) = b.is_square(); + assert!(c == (i % 2 == 0)); + } + } + + #[test] + fn test_sqrt() { + for i in 0..256 { + let a: SepticExtension = SepticExtension([ + BabyBear::from_canonical_u32(i + 3), + BabyBear::from_canonical_u32(2 * i + 6), + BabyBear::from_canonical_u32(5 * i + 17), + BabyBear::from_canonical_u32(6 * i + 91), + BabyBear::from_canonical_u32(8 * i + 37), + BabyBear::from_canonical_u32(11 * i + 35), + BabyBear::from_canonical_u32(14 * i + 33), + ]); + let b = a * a; + let recovered_a = b.sqrt().unwrap(); + assert_eq!(recovered_a * recovered_a, b); + } + let mut b = SepticExtension::::one(); + for i in 1..256 { + let a: SepticExtension = SepticExtension::generator(); + b *= a; + let c = b.sqrt(); + if i % 2 == 1 { + assert!(c.is_none()); + } else { + let c = c.unwrap(); + assert_eq!(c * c, b); + } + } + } +} diff --git a/crates/stark/src/types.rs b/crates/stark/src/types.rs index 533a8006da..cff129e364 100644 --- a/crates/stark/src/types.rs +++ b/crates/stark/src/types.rs @@ -13,7 +13,7 @@ use p3_matrix::{ use serde::{Deserialize, Serialize}; use super::{Challenge, Com, OpeningProof, StarkGenericConfig, Val}; -use crate::air::InteractionScope; +use crate::septic_digest::SepticDigest; pub type QuotientOpenedValues = Vec; @@ -39,8 +39,7 
@@ impl ShardMainData { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ShardCommitment { - pub global_main_commit: C, - pub local_main_commit: C, + pub main_commit: C, pub permutation_commit: C, pub quotient_commit: C, } @@ -54,33 +53,33 @@ pub struct AirOpenedValues { } #[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(bound(serialize = "T: Serialize"))] -#[serde(bound(deserialize = "T: Deserialize<'de>"))] -pub struct ChipOpenedValues { - pub preprocessed: AirOpenedValues, - pub main: AirOpenedValues, - pub permutation: AirOpenedValues, - pub quotient: Vec>, - pub global_cumulative_sum: T, - pub local_cumulative_sum: T, +#[serde(bound(serialize = "F: Serialize, EF: Serialize"))] +#[serde(bound(deserialize = "F: Deserialize<'de>, EF: Deserialize<'de>"))] +pub struct ChipOpenedValues { + pub preprocessed: AirOpenedValues, + pub main: AirOpenedValues, + pub permutation: AirOpenedValues, + pub quotient: Vec>, + pub global_cumulative_sum: SepticDigest, + pub local_cumulative_sum: EF, pub log_degree: usize, } #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ShardOpenedValues { - pub chips: Vec>, +pub struct ShardOpenedValues { + pub chips: Vec>, } /// The maximum number of elements that can be stored in the public values vec. Both SP1 and /// recursive proofs need to pad their public values vec to this length. This is required since the /// recursion verification program expects the public values vec to be fixed length. -pub const PROOF_MAX_NUM_PVS: usize = 371; +pub const PROOF_MAX_NUM_PVS: usize = 231; #[derive(Serialize, Deserialize, Clone)] #[serde(bound = "")] pub struct ShardProof { pub commitment: ShardCommitment>, - pub opened_values: ShardOpenedValues>, + pub opened_values: ShardOpenedValues, Challenge>, pub opening_proof: OpeningProof, pub chip_ordering: HashMap, pub public_values: Vec>, @@ -93,18 +92,22 @@ pub struct ProofShape { impl ProofShape { #[must_use] - pub fn from_traces( - global_traces: Option<&[(String, RowMajorMatrix)]>, - local_traces: &[(String, RowMajorMatrix)], - ) -> Self { - global_traces - .into_iter() - .flatten() - .chain(local_traces.iter()) + pub fn from_traces(traces: &[(String, RowMajorMatrix)]) -> Self { + traces + .iter() .map(|(name, trace)| (name.clone(), trace.height().ilog2() as usize)) .sorted_by_key(|(_, height)| *height) .collect() } + + #[must_use] + pub fn from_log2_heights(traces: &[(String, usize)]) -> Self { + traces + .iter() + .map(|(name, height)| (name.clone(), *height)) + .sorted_by_key(|(_, height)| *height) + .collect() + } } impl Debug for ShardProof { @@ -123,15 +126,12 @@ impl AirOpenedValues { } impl ShardProof { - pub fn cumulative_sum(&self, scope: InteractionScope) -> Challenge { - self.opened_values - .chips - .iter() - .map(|c| match scope { - InteractionScope::Global => c.global_cumulative_sum, - InteractionScope::Local => c.local_cumulative_sum, - }) - .sum() + pub fn local_cumulative_sum(&self) -> Challenge { + self.opened_values.chips.iter().map(|c| c.local_cumulative_sum).sum() + } + + pub fn global_cumulative_sum(&self) -> SepticDigest> { + self.opened_values.chips.iter().map(|c| c.global_cumulative_sum).sum() } pub fn log_degree_cpu(&self) -> usize { diff --git a/crates/stark/src/verifier.rs b/crates/stark/src/verifier.rs index 2fb3e8cef3..28e80612e2 100644 --- a/crates/stark/src/verifier.rs +++ b/crates/stark/src/verifier.rs @@ -33,7 +33,6 @@ impl>> Verifier { chips: &[&MachineChip], challenger: &mut SC::Challenger, proof: &ShardProof, - global_permutation_challenges: &[SC::Challenge], ) -> 
Result<(), VerificationError> where A: for<'a> Air>, @@ -55,8 +54,6 @@ impl>> Verifier { return Err(VerificationError::ChipOpeningLengthMismatch); } - let chip_scopes = chips.iter().map(|chip| chip.commit_scope()).collect::>(); - // Assert that the byte multiplicities don't overflow. let mut max_byte_lookup_mult = 0u64; chips.iter().zip(opened_values.chips.iter()).for_each(|(chip, val)| { @@ -84,14 +81,9 @@ impl>> Verifier { .map(|log_degree| pcs.natural_domain_for_degree(1 << log_degree)) .collect::>(); - let ShardCommitment { - global_main_commit, - local_main_commit, - permutation_commit, - quotient_commit, - } = commitment; + let ShardCommitment { main_commit, permutation_commit, quotient_commit } = commitment; - challenger.observe(local_main_commit.clone()); + challenger.observe(main_commit.clone()); let local_permutation_challenges = (0..2).map(|_| challenger.sample_ext_element::()).collect::>(); @@ -100,21 +92,19 @@ impl>> Verifier { // Observe the cumulative sums and constrain any sum without a corresponding scope to be // zero. for (opening, chip) in opened_values.chips.iter().zip_eq(chips.iter()) { - let global_sum = opening.global_cumulative_sum; let local_sum = opening.local_cumulative_sum; - challenger.observe_slice(global_sum.as_base_slice()); + let global_sum = opening.global_cumulative_sum; + challenger.observe_slice(local_sum.as_base_slice()); + challenger.observe_slice(&global_sum.0.x.0); + challenger.observe_slice(&global_sum.0.y.0); - let has_global_interactions = chip - .sends() - .iter() - .chain(chip.receives()) - .any(|i| i.scope == InteractionScope::Global); - if !has_global_interactions && !global_sum.is_zero() { + if chip.commit_scope() == InteractionScope::Local && !global_sum.is_zero() { return Err(VerificationError::CumulativeSumsError( - "global cumulative sum is non-zero, but no global interactions", + "global cumulative sum is non-zero, but chip is Local", )); } + let has_local_interactions = chip .sends() .iter() @@ -210,47 +200,19 @@ impl>> Verifier { }) .collect::>(); - // Split the main_domains_points_and_opens to the global and local chips. 
- let mut global_trace_points_and_openings = Vec::new(); - let mut local_trace_points_and_openings = Vec::new(); - for (i, points_and_openings) in - main_domains_points_and_opens.clone().into_iter().enumerate() - { - let scope = chip_scopes[i]; - if scope == InteractionScope::Global { - global_trace_points_and_openings.push(points_and_openings); - } else { - local_trace_points_and_openings.push(points_and_openings); - } - } - - let rounds = if !global_trace_points_and_openings.is_empty() { - vec![ - (vk.commit.clone(), preprocessed_domains_points_and_opens), - (global_main_commit.clone(), global_trace_points_and_openings), - (local_main_commit.clone(), local_trace_points_and_openings), - (permutation_commit.clone(), perm_domains_points_and_opens), - (quotient_commit.clone(), quotient_domains_points_and_opens), - ] - } else { - vec![ - (vk.commit.clone(), preprocessed_domains_points_and_opens), - (local_main_commit.clone(), local_trace_points_and_openings), - (permutation_commit.clone(), perm_domains_points_and_opens), - (quotient_commit.clone(), quotient_domains_points_and_opens), - ] - }; + let rounds = vec![ + (vk.commit.clone(), preprocessed_domains_points_and_opens), + (main_commit.clone(), main_domains_points_and_opens), + (permutation_commit.clone(), perm_domains_points_and_opens), + (quotient_commit.clone(), quotient_domains_points_and_opens), + ]; config .pcs() .verify(rounds, opening_proof, challenger) .map_err(|e| VerificationError::InvalidopeningArgument(e))?; - let permutation_challenges = global_permutation_challenges - .iter() - .chain(local_permutation_challenges.iter()) - .copied() - .collect::>(); + let permutation_challenges = local_permutation_challenges; // Verify the constrtaint evaluations. for (chip, trace_domain, qc_domains, values) in @@ -273,7 +235,7 @@ impl>> Verifier { .map_err(|_| VerificationError::OodEvaluationMismatch(chip.name()))?; } // Verify that the local cumulative sum is zero. - let local_cumulative_sum = proof.cumulative_sum(InteractionScope::Local); + let local_cumulative_sum = proof.local_cumulative_sum(); if local_cumulative_sum != SC::Challenge::zero() { return Err(VerificationError::CumulativeSumsError("local cumulative sum is not zero")); } @@ -283,7 +245,7 @@ impl>> Verifier { fn verify_opening_shape( chip: &MachineChip, - opening: &ChipOpenedValues, + opening: &ChipOpenedValues, SC::Challenge>, ) -> Result<(), OpeningShapeError> { // Verify that the preprocessed width matches the expected value for the chip. if opening.preprocessed.local.len() != chip.preprocessed_width() { @@ -326,7 +288,6 @@ impl>> Verifier { opening.permutation.next.len(), )); } - // Verift that the number of quotient chunks matches the expected value for the chip. if opening.quotient.len() != chip.quotient_width() { return Err(OpeningShapeError::QuotientWidthMismatch( @@ -352,7 +313,7 @@ impl>> Verifier { #[allow(clippy::needless_pass_by_value)] fn verify_constraints( chip: &MachineChip, - opening: &ChipOpenedValues, + opening: &ChipOpenedValues, SC::Challenge>, trace_domain: Domain, qc_domains: Vec>, zeta: SC::Challenge, @@ -389,7 +350,7 @@ impl>> Verifier { /// Evaluates the constraints for a chip and opening. 
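+    ///
+    /// The verifier-side folder is built with both the chip's local cumulative sum (a
+    /// challenge-field element) and its global cumulative sum (a base-field
+    /// `SepticDigest`), mirroring the prover-side `ProverConstraintFolder`.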
pub fn eval_constraints( chip: &MachineChip, - opening: &ChipOpenedValues, + opening: &ChipOpenedValues, SC::Challenge>, selectors: &LagrangeSelectors, alpha: SC::Challenge, permutation_challenges: &[SC::Challenge], @@ -412,14 +373,13 @@ impl>> Verifier { next: unflatten(&opening.permutation.next), }; - let cumulative_sums = [opening.global_cumulative_sum, opening.local_cumulative_sum]; - let cumulative_sums = cumulative_sums.as_slice(); let mut folder = VerifierConstraintFolder:: { preprocessed: opening.preprocessed.view(), main: opening.main.view(), perm: perm_opening.view(), perm_challenges: permutation_challenges, - cumulative_sums, + local_cumulative_sum: &opening.local_cumulative_sum, + global_cumulative_sum: &opening.global_cumulative_sum, is_first_row: selectors.is_first_row, is_last_row: selectors.is_last_row, is_transition: selectors.is_transition, @@ -436,7 +396,7 @@ impl>> Verifier { /// Recomputes the quotient for a chip and opening. pub fn recompute_quotient( - opening: &ChipOpenedValues, + opening: &ChipOpenedValues, SC::Challenge>, qc_domains: &[Domain], zeta: SC::Challenge, ) -> SC::Challenge { diff --git a/crates/test-artifacts/Cargo.toml b/crates/test-artifacts/Cargo.toml new file mode 100644 index 0000000000..6954ca7c0c --- /dev/null +++ b/crates/test-artifacts/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "test-artifacts" +version.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +sp1-build = { workspace = true } + +[build-dependencies] +sp1-build = { workspace = true } \ No newline at end of file diff --git a/crates/test-artifacts/Makefile b/crates/test-artifacts/Makefile new file mode 100644 index 0000000000..772dd9805d --- /dev/null +++ b/crates/test-artifacts/Makefile @@ -0,0 +1,8 @@ +all: + for dir in programs/*/ ; do \ + echo "Building in $${dir}..."; \ + cd $${dir} && cargo prove build || { echo "Failed at command: cd $${dir} && cargo prove build"; exit 1; }; \ + cd ../..; \ + done + +.PHONY: all diff --git a/crates/test-artifacts/build.rs b/crates/test-artifacts/build.rs new file mode 100644 index 0000000000..971d05e860 --- /dev/null +++ b/crates/test-artifacts/build.rs @@ -0,0 +1,20 @@ +use std::{ + io::{Error, Result}, + path::PathBuf, +}; + +use sp1_build::build_program_with_args; + +fn main() -> Result<()> { + let tests_path = + [env!("CARGO_MANIFEST_DIR"), "programs"].iter().collect::().canonicalize()?; + + build_program_with_args( + tests_path + .to_str() + .ok_or_else(|| Error::other(format!("expected {tests_path:?} to be valid UTF-8")))?, + Default::default(), + ); + + Ok(()) +} diff --git a/tests/Cargo.lock b/crates/test-artifacts/programs/Cargo.lock similarity index 91% rename from tests/Cargo.lock rename to crates/test-artifacts/programs/Cargo.lock index d286799538..d433b38993 100644 --- a/tests/Cargo.lock +++ b/crates/test-artifacts/programs/Cargo.lock @@ -107,7 +107,7 @@ version = "1.1.0" dependencies = [ "common-test-utils", "sp1-curves", - "sp1-lib 3.0.0-rc4", + "sp1-lib 4.0.0-rc.1", "sp1-zkvm", ] @@ -115,7 +115,6 @@ dependencies = [ name = "bls12381-double-test" version = "1.1.0" dependencies = [ - "num", "sp1-zkvm", ] @@ -151,7 +150,7 @@ name = "bls12381-mul-test" version = "1.1.0" dependencies = [ "sp1-derive", - "sp1-lib 3.0.0-rc4", + "sp1-lib 4.0.0-rc.1", "sp1-zkvm", ] @@ -161,7 +160,7 @@ version = "1.1.0" dependencies = [ "common-test-utils", "sp1-curves", - "sp1-lib 3.0.0-rc4", + "sp1-lib 4.0.0-rc.1", "sp1-zkvm", ] @@ 
-204,7 +203,7 @@ name = "bn254-mul-test" version = "1.1.0" dependencies = [ "sp1-derive", - "sp1-lib 3.0.0-rc4", + "sp1-lib 4.0.0-rc.1", "sp1-zkvm", ] @@ -246,7 +245,7 @@ name = "common-test-utils" version = "1.1.0" dependencies = [ "num-bigint", - "sp1-lib 3.0.0-rc4", + "sp1-lib 4.0.0-rc.1", ] [[package]] @@ -255,6 +254,12 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" +[[package]] +name = "const-oid" +version = "0.10.0-rc.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68ff6be19477a1bd5441f382916a89bc2a0b2c35db6d41e0f6e8538bf6d6463f" + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -319,9 +324,11 @@ version = "0.6.0-rc.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "040a95c58773f47c92f5f17814702bfd68e8ace9ddce4690c982d0019cac32e2" dependencies = [ + "hybrid-array", "num-traits", "rand_core", "subtle", + "zeroize", ] [[package]] @@ -374,7 +381,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.87", ] [[package]] @@ -384,20 +391,20 @@ source = "git+https://github.com/sp1-patches/curve25519-dalek?branch=patch-curve dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.87", ] [[package]] name = "curve25519-dalek-ng" version = "4.1.1" -source = "git+https://github.com/sp1-patches/curve25519-dalek-ng.git?branch=patch-v4.1.1#8dd77b20f3e78965a0cc57070a04465b9d52c49e" +source = "git+https://github.com/sp1-patches/curve25519-dalek-ng.git?branch=patch-v4.1.1#3fb3e7f6047ddeef0f0c9212f4604bd30d64bd28" dependencies = [ "anyhow", "byteorder", "cfg-if", "digest 0.9.0", "rand_core", - "sp1-lib 1.2.0", + "sp1-lib 3.2.0", "subtle-ng", "zeroize", ] @@ -494,11 +501,21 @@ version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ - "const-oid", + "const-oid 0.9.6", "pem-rfc7468", "zeroize", ] +[[package]] +name = "der" +version = "0.8.0-rc.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82db698b33305f0134faf590b9d1259dc171b5481ac41d5c8146c3b3ee7d4319" +dependencies = [ + "const-oid 0.10.0-rc.3", + "zeroize", +] + [[package]] name = "deranged" version = "0.3.11" @@ -516,7 +533,7 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.87", ] [[package]] @@ -535,7 +552,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", - "const-oid", + "const-oid 0.9.6", "crypto-common", "subtle", ] @@ -546,9 +563,9 @@ version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der", + "der 0.7.9", "digest 0.10.7", - "elliptic-curve", + "elliptic-curve 0.13.8", "rfc6979", "signature", "spki", @@ -558,8 +575,6 @@ dependencies = [ name = "ed-add-test" version = "1.1.0" dependencies = [ - "hex-literal", - "num", "sp1-zkvm", ] @@ -636,7 +651,22 @@ dependencies = [ "pem-rfc7468", "pkcs8", "rand_core", - "sec1", + "sec1 0.7.3", + "subtle", + "zeroize", +] + +[[package]] +name = "elliptic-curve" +version = "0.14.0-rc.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc43715037532dc2d061e5c97e81b684c28993d52a4fa4eb7d2ce2826d78f2f2" +dependencies = [ + "base16ct", + "crypto-bigint 0.6.0-rc.5", + "hybrid-array", + "rand_core", + "sec1 0.8.0-rc.3", "subtle", "zeroize", ] @@ -840,6 +870,16 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "hybrid-array" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45a9a965bb102c1c891fb017c09a05c965186b1265a207640f323ddd009f9deb" +dependencies = [ + "typenum", + "zeroize", +] + [[package]] name = "impl-trait-for-tuples" version = "0.2.2" @@ -893,7 +933,7 @@ checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", "ecdsa", - "elliptic-curve", + "elliptic-curve 0.13.8", "once_cell", "sha2 0.10.8", "signature", @@ -1074,16 +1114,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ "ecdsa", - "elliptic-curve", + "elliptic-curve 0.13.8", "primeorder", "sha2 0.10.8", ] [[package]] name = "p3-air" -version = "0.1.4-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "066f571b2e645505ed5972dd0e1e252ba03352150830c9566769ca711c0f1e9b" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" dependencies = [ "p3-field", "p3-matrix", @@ -1091,9 +1130,8 @@ dependencies = [ [[package]] name = "p3-baby-bear" -version = "0.1.4-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff00f571044d299310d9659c6e51c98422de3bf94b8577f7f30cf59cf2043e40" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" dependencies = [ "num-bigint", "p3-field", @@ -1106,9 +1144,8 @@ dependencies = [ [[package]] name = "p3-challenger" -version = "0.1.4-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be7e4fbce4566a93091107eadfafa0b5374bd1ffd3e0f6b850da3ff72eb183f" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" dependencies = [ "p3-field", "p3-maybe-rayon", @@ -1120,13 +1157,11 @@ dependencies = [ [[package]] name = "p3-commit" -version = "0.1.4-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a03eb0f99d68a712c41e658e9a7782a0705d4ffcfb6232a43bd3f1ef9591002" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" dependencies = [ "itertools 0.12.1", "p3-challenger", - "p3-dft", "p3-field", "p3-matrix", "p3-util", @@ -1135,9 +1170,8 @@ dependencies = [ [[package]] name = "p3-dft" -version = "0.1.4-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1556de968523fbe5d804ab50600ea306fcceea3500cfd7601e40882480524664" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" dependencies = [ "p3-field", "p3-matrix", @@ -1148,9 +1182,8 @@ dependencies = [ [[package]] name = "p3-field" -version = "0.1.4-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cec2af6e1ac47a2035af5165e668d64612c4b9ccabd06df37fc1fd381fdf8a71" +version = "0.1.0" +source = 
"git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" dependencies = [ "itertools 0.12.1", "num-bigint", @@ -1162,9 +1195,8 @@ dependencies = [ [[package]] name = "p3-fri" -version = "0.1.4-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f351ee9f9d4256455164565cd91e3e6d2487cc2a5355515fa2b6d479269188dd" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" dependencies = [ "itertools 0.12.1", "p3-challenger", @@ -1181,9 +1213,8 @@ dependencies = [ [[package]] name = "p3-interpolation" -version = "0.1.4-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d24d0f2907a374ebe4545fcff3120d6376d9630cf0bef30feedcfc5908ea2c37" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" dependencies = [ "p3-field", "p3-matrix", @@ -1192,9 +1223,8 @@ dependencies = [ [[package]] name = "p3-matrix" -version = "0.1.4-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa272f3ae77ed8d73478aa7c89e712efb15bda3ff4aff10fadfe11a012cd5389" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" dependencies = [ "itertools 0.12.1", "p3-field", @@ -1207,18 +1237,16 @@ dependencies = [ [[package]] name = "p3-maybe-rayon" -version = "0.1.4-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eecad6292021858f282d643d9d1284ab112a200494d589863a9c4080e578ef0" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" dependencies = [ "rayon", ] [[package]] name = "p3-mds" -version = "0.1.4-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "716c4dbe68a02f1541eb09149d07b8663a3a5951b1864a31cd67ff3bb0826e57" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" dependencies = [ "itertools 0.12.1", "p3-dft", @@ -1231,9 +1259,8 @@ dependencies = [ [[package]] name = "p3-merkle-tree" -version = "0.1.4-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad7ebab52a03c26025988663a135aed62f5084a2e2ea262176dc8748efb593e5" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" dependencies = [ "itertools 0.12.1", "p3-commit", @@ -1248,9 +1275,8 @@ dependencies = [ [[package]] name = "p3-poseidon2" -version = "0.1.4-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39c042efa15beab7a8c4d0ca9b9e4cbda7582be0c08e121e830fec45f082935b" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" dependencies = [ "gcd", "p3-field", @@ -1262,9 +1288,8 @@ dependencies = [ [[package]] name = "p3-symmetric" -version = "0.1.4-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9896a831f5b688adc13f6fbe1dcf66ecfaa4622a500f81aa745610e777acb72" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" dependencies = [ "itertools 0.12.1", "p3-field", @@ -1273,9 +1298,8 @@ dependencies = [ [[package]] name = "p3-uni-stark" -version = "0.1.4-succinct" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8437ebcd060c8a5479898030b114a93da8a86eb4c2e5f313d9eeaaf40c6e6f61" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" dependencies = [ "itertools 0.12.1", "p3-air", @@ -1292,9 +1316,8 @@ dependencies = [ [[package]] name = "p3-util" -version = "0.1.4-succinct" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dedb9d27ba47ac314c6fac4ca54e55c3e486c864d51ec5ba55dbe47b75121157" +version = "0.1.0" +source = "git+https://github.com/Plonky3/Plonky3?branch=sp1-v4#db3d45d4ec899efaf8f7234a8573f285fbdda5db" dependencies = [ "serde", ] @@ -1363,7 +1386,7 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der", + "der 0.7.9", "spki", ] @@ -1394,7 +1417,7 @@ version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" dependencies = [ - "elliptic-curve", + "elliptic-curve 0.13.8", ] [[package]] @@ -1435,7 +1458,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.87", ] [[package]] @@ -1472,7 +1495,6 @@ name = "rand-test" version = "1.1.0" dependencies = [ "rand", - "sp1-derive", "sp1-zkvm", ] @@ -1586,20 +1608,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct", - "der", + "der 0.7.9", "generic-array 0.14.7", "pkcs8", "subtle", "zeroize", ] +[[package]] +name = "sec1" +version = "0.8.0-rc.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1988446eff153796413a73669dfaa4caa3f5ce8b25fac89e3821a39c611772e" +dependencies = [ + "base16ct", + "der 0.8.0-rc.1", + "hybrid-array", + "subtle", + "zeroize", +] + [[package]] name = "secp256k1-add-test" version = "1.1.0" dependencies = [ "common-test-utils", "sp1-curves", - "sp1-lib 3.0.0-rc4", + "sp1-lib 4.0.0-rc.1", "sp1-zkvm", ] @@ -1625,6 +1660,41 @@ dependencies = [ "sp1-zkvm", ] +[[package]] +name = "secp256r1-add-test" +version = "1.1.0" +dependencies = [ + "common-test-utils", + "elliptic-curve 0.14.0-rc.1", + "hex-literal", + "num", + "p256", + "sp1-curves", + "sp1-lib 4.0.0-rc.1", + "sp1-zkvm", +] + +[[package]] +name = "secp256r1-decompress-test" +version = "1.1.0" +dependencies = [ + "sp1-zkvm", +] + +[[package]] +name = "secp256r1-double-test" +version = "1.1.0" +dependencies = [ + "common-test-utils", + "elliptic-curve 0.14.0-rc.1", + "hex-literal", + "num", + "p256", + "sp1-curves", + "sp1-lib 4.0.0-rc.1", + "sp1-zkvm", +] + [[package]] name = "semver" version = "1.0.23" @@ -1657,14 +1727,14 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.87", ] [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "itoa", "memchr", @@ -1680,7 +1750,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.87", ] [[package]] @@ 
-1762,12 +1832,12 @@ dependencies = [ [[package]] name = "sp1-curves" -version = "3.0.0-rc4" +version = "4.0.0-rc.1" dependencies = [ "cfg-if", "curve25519-dalek 4.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "dashu", - "elliptic-curve", + "elliptic-curve 0.13.8", "generic-array 1.1.0", "itertools 0.13.0", "k256", @@ -1783,7 +1853,7 @@ dependencies = [ [[package]] name = "sp1-derive" -version = "3.0.0-rc4" +version = "4.0.0-rc.1" dependencies = [ "quote", "syn 1.0.109", @@ -1805,7 +1875,17 @@ dependencies = [ [[package]] name = "sp1-lib" -version = "3.0.0-rc4" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1aa18834c58df127706eb2fb2ea6e2892dbf0361d6b2485bf7b3fbd5f8b8c3c" +dependencies = [ + "bincode", + "serde", +] + +[[package]] +name = "sp1-lib" +version = "4.0.0-rc.1" dependencies = [ "bincode", "serde", @@ -1813,7 +1893,7 @@ dependencies = [ [[package]] name = "sp1-primitives" -version = "3.0.0-rc4" +version = "4.0.0-rc.1" dependencies = [ "bincode", "hex", @@ -1829,12 +1909,12 @@ dependencies = [ [[package]] name = "sp1-stark" -version = "3.0.0-rc4" +version = "4.0.0-rc.1" dependencies = [ "arrayref", - "getrandom", "hashbrown 0.14.5", "itertools 0.13.0", + "num-bigint", "num-traits", "p3-air", "p3-baby-bear", @@ -1857,13 +1937,12 @@ dependencies = [ "strum", "strum_macros", "sysinfo", - "thiserror", "tracing", ] [[package]] name = "sp1-zkvm" -version = "3.0.0-rc4" +version = "3.0.1" dependencies = [ "cfg-if", "getrandom", @@ -1873,7 +1952,7 @@ dependencies = [ "p3-field", "rand", "sha2 0.10.8", - "sp1-lib 3.0.0-rc4", + "sp1-lib 4.0.0-rc.1", "sp1-primitives", ] @@ -1884,7 +1963,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der", + "der 0.7.9", ] [[package]] @@ -1909,7 +1988,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.79", + "syn 2.0.87", ] [[package]] @@ -1946,9 +2025,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.79" +version = "2.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" dependencies = [ "proc-macro2", "quote", @@ -2041,26 +2120,6 @@ dependencies = [ "time", ] -[[package]] -name = "thiserror" -version = "1.0.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - [[package]] name = "time" version = "0.3.36" @@ -2135,7 +2194,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.87", ] [[package]] @@ -2153,6 +2212,18 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "u256x2048-mul" +version = "1.0.0" +dependencies = [ + "bytemuck", + "num", + "num-bigint", + "rand", + "sp1-derive", + "sp1-zkvm", +] + [[package]] name = 
"uint256-arith-program" version = "1.1.0" @@ -2321,7 +2392,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.87", ] [[package]] @@ -2341,5 +2412,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.87", ] diff --git a/tests/Cargo.toml b/crates/test-artifacts/programs/Cargo.toml similarity index 90% rename from tests/Cargo.toml rename to crates/test-artifacts/programs/Cargo.toml index aed2a6f4ef..0a02636733 100644 --- a/tests/Cargo.toml +++ b/crates/test-artifacts/programs/Cargo.toml @@ -35,12 +35,18 @@ members = [ "sha-extend", "sha2", "tendermint-benchmark", + "u256x2048-mul", "uint256-arith", "uint256-mul", "verify-proof", + "u256x2048-mul", ] resolver = "2" +[workspace.dependencies] +serde = "1.0.204" +serde_json = "1.0.132" + [patch.crates-io] sha2-v0-9-8 = { git = "https://github.com/sp1-patches/RustCrypto-hashes", package = "sha2", branch = "patch-v0.9.8" } ed25519-consensus = { git = "https://github.com/sp1-patches/ed25519-consensus", branch = "patch-v2.1.0" } diff --git a/tests/Makefile b/crates/test-artifacts/programs/Makefile similarity index 100% rename from tests/Makefile rename to crates/test-artifacts/programs/Makefile diff --git a/crates/test-artifacts/programs/bls12381-add/Cargo.toml b/crates/test-artifacts/programs/bls12381-add/Cargo.toml new file mode 100644 index 0000000000..eba457a03d --- /dev/null +++ b/crates/test-artifacts/programs/bls12381-add/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "bls12381-add-test" +version = "1.1.0" +edition = "2021" +publish = false + +[dependencies] +common-test-utils = { path = "../common" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } +sp1-lib = { path = "../../../../crates/zkvm/lib" } +sp1-curves = { path = "../../../../crates/curves" } diff --git a/tests/bls12381-add/src/main.rs b/crates/test-artifacts/programs/bls12381-add/src/main.rs similarity index 100% rename from tests/bls12381-add/src/main.rs rename to crates/test-artifacts/programs/bls12381-add/src/main.rs diff --git a/tests/bls12381-decompress/Cargo.toml b/crates/test-artifacts/programs/bls12381-decompress/Cargo.toml similarity index 64% rename from tests/bls12381-decompress/Cargo.toml rename to crates/test-artifacts/programs/bls12381-decompress/Cargo.toml index 1da8e9b5d3..d68befe785 100644 --- a/tests/bls12381-decompress/Cargo.toml +++ b/crates/test-artifacts/programs/bls12381-decompress/Cargo.toml @@ -5,4 +5,4 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } diff --git a/tests/bls12381-decompress/src/main.rs b/crates/test-artifacts/programs/bls12381-decompress/src/main.rs similarity index 100% rename from tests/bls12381-decompress/src/main.rs rename to crates/test-artifacts/programs/bls12381-decompress/src/main.rs diff --git a/tests/bls12381-double/Cargo.toml b/crates/test-artifacts/programs/bls12381-double/Cargo.toml similarity index 64% rename from tests/bls12381-double/Cargo.toml rename to crates/test-artifacts/programs/bls12381-double/Cargo.toml index f93909483e..d3c7e77b25 100644 --- a/tests/bls12381-double/Cargo.toml +++ b/crates/test-artifacts/programs/bls12381-double/Cargo.toml @@ -5,4 +5,4 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = 
"../../../../crates/zkvm/entrypoint" } diff --git a/tests/bls12381-double/src/main.rs b/crates/test-artifacts/programs/bls12381-double/src/main.rs similarity index 100% rename from tests/bls12381-double/src/main.rs rename to crates/test-artifacts/programs/bls12381-double/src/main.rs diff --git a/tests/bls12381-fp/Cargo.toml b/crates/test-artifacts/programs/bls12381-fp/Cargo.toml similarity index 70% rename from tests/bls12381-fp/Cargo.toml rename to crates/test-artifacts/programs/bls12381-fp/Cargo.toml index 1fe608fc9e..11fcf35676 100644 --- a/tests/bls12381-fp/Cargo.toml +++ b/crates/test-artifacts/programs/bls12381-fp/Cargo.toml @@ -6,6 +6,6 @@ publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } num-bigint = "0.4.6" rand = "0.8.5" diff --git a/tests/bls12381-fp/src/main.rs b/crates/test-artifacts/programs/bls12381-fp/src/main.rs similarity index 100% rename from tests/bls12381-fp/src/main.rs rename to crates/test-artifacts/programs/bls12381-fp/src/main.rs diff --git a/tests/bls12381-fp2-addsub/Cargo.toml b/crates/test-artifacts/programs/bls12381-fp2-addsub/Cargo.toml similarity index 71% rename from tests/bls12381-fp2-addsub/Cargo.toml rename to crates/test-artifacts/programs/bls12381-fp2-addsub/Cargo.toml index 2d4d21f905..670ece6d23 100644 --- a/tests/bls12381-fp2-addsub/Cargo.toml +++ b/crates/test-artifacts/programs/bls12381-fp2-addsub/Cargo.toml @@ -6,6 +6,6 @@ publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } num-bigint = "0.4.6" rand = "0.8.5" diff --git a/tests/bls12381-fp2-addsub/src/main.rs b/crates/test-artifacts/programs/bls12381-fp2-addsub/src/main.rs similarity index 100% rename from tests/bls12381-fp2-addsub/src/main.rs rename to crates/test-artifacts/programs/bls12381-fp2-addsub/src/main.rs diff --git a/tests/bls12381-fp2-mul/Cargo.toml b/crates/test-artifacts/programs/bls12381-fp2-mul/Cargo.toml similarity index 71% rename from tests/bls12381-fp2-mul/Cargo.toml rename to crates/test-artifacts/programs/bls12381-fp2-mul/Cargo.toml index dee3919614..ee15a0f2cd 100644 --- a/tests/bls12381-fp2-mul/Cargo.toml +++ b/crates/test-artifacts/programs/bls12381-fp2-mul/Cargo.toml @@ -6,6 +6,6 @@ publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } num-bigint = "0.4.6" rand = "0.8.5" diff --git a/tests/bls12381-fp2-mul/src/main.rs b/crates/test-artifacts/programs/bls12381-fp2-mul/src/main.rs similarity index 100% rename from tests/bls12381-fp2-mul/src/main.rs rename to crates/test-artifacts/programs/bls12381-fp2-mul/src/main.rs diff --git a/crates/test-artifacts/programs/bls12381-mul/Cargo.toml b/crates/test-artifacts/programs/bls12381-mul/Cargo.toml new file mode 100644 index 0000000000..e74d3a21ea --- /dev/null +++ b/crates/test-artifacts/programs/bls12381-mul/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "bls12381-mul-test" +version = "1.1.0" +edition = "2021" +publish = false + +[dependencies] +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } +sp1-lib = { path = "../../../../crates/zkvm/lib" } +sp1-derive = { path = "../../../../crates/derive" } diff --git a/tests/bls12381-mul/src/main.rs b/crates/test-artifacts/programs/bls12381-mul/src/main.rs similarity index 100% rename from tests/bls12381-mul/src/main.rs rename to crates/test-artifacts/programs/bls12381-mul/src/main.rs diff --git 
a/crates/test-artifacts/programs/bn254-add/Cargo.toml b/crates/test-artifacts/programs/bn254-add/Cargo.toml new file mode 100644 index 0000000000..78051d3f5a --- /dev/null +++ b/crates/test-artifacts/programs/bn254-add/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "bn254-add-test" +version = "1.1.0" +edition = "2021" +publish = false + +[dependencies] +common-test-utils = { path = "../common" } +sp1-lib = { path = "../../../../crates/zkvm/lib" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } +sp1-curves = { path = "../../../../crates/curves" } diff --git a/tests/bn254-add/src/main.rs b/crates/test-artifacts/programs/bn254-add/src/main.rs similarity index 100% rename from tests/bn254-add/src/main.rs rename to crates/test-artifacts/programs/bn254-add/src/main.rs diff --git a/tests/bn254-double/Cargo.toml b/crates/test-artifacts/programs/bn254-double/Cargo.toml similarity index 63% rename from tests/bn254-double/Cargo.toml rename to crates/test-artifacts/programs/bn254-double/Cargo.toml index 43b6de30b9..4f00539395 100644 --- a/tests/bn254-double/Cargo.toml +++ b/crates/test-artifacts/programs/bn254-double/Cargo.toml @@ -5,4 +5,4 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } diff --git a/tests/bn254-double/src/main.rs b/crates/test-artifacts/programs/bn254-double/src/main.rs similarity index 100% rename from tests/bn254-double/src/main.rs rename to crates/test-artifacts/programs/bn254-double/src/main.rs diff --git a/tests/bn254-fp/Cargo.toml b/crates/test-artifacts/programs/bn254-fp/Cargo.toml similarity index 69% rename from tests/bn254-fp/Cargo.toml rename to crates/test-artifacts/programs/bn254-fp/Cargo.toml index 9959fe0357..a2b789730c 100644 --- a/tests/bn254-fp/Cargo.toml +++ b/crates/test-artifacts/programs/bn254-fp/Cargo.toml @@ -6,6 +6,6 @@ publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } num-bigint = "0.4.6" rand = "0.8.5" diff --git a/tests/bn254-fp/src/main.rs b/crates/test-artifacts/programs/bn254-fp/src/main.rs similarity index 100% rename from tests/bn254-fp/src/main.rs rename to crates/test-artifacts/programs/bn254-fp/src/main.rs diff --git a/tests/bn254-fp2-addsub/Cargo.toml b/crates/test-artifacts/programs/bn254-fp2-addsub/Cargo.toml similarity index 71% rename from tests/bn254-fp2-addsub/Cargo.toml rename to crates/test-artifacts/programs/bn254-fp2-addsub/Cargo.toml index 3f1b8a399c..d6a8351978 100644 --- a/tests/bn254-fp2-addsub/Cargo.toml +++ b/crates/test-artifacts/programs/bn254-fp2-addsub/Cargo.toml @@ -6,6 +6,6 @@ publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } num-bigint = "0.4.6" rand = "0.8.5" diff --git a/tests/bn254-fp2-addsub/src/main.rs b/crates/test-artifacts/programs/bn254-fp2-addsub/src/main.rs similarity index 100% rename from tests/bn254-fp2-addsub/src/main.rs rename to crates/test-artifacts/programs/bn254-fp2-addsub/src/main.rs diff --git a/tests/bn254-fp2-mul/Cargo.toml b/crates/test-artifacts/programs/bn254-fp2-mul/Cargo.toml similarity index 70% rename from tests/bn254-fp2-mul/Cargo.toml rename to crates/test-artifacts/programs/bn254-fp2-mul/Cargo.toml index aa5d276fd7..41aba97a83 100644 --- a/tests/bn254-fp2-mul/Cargo.toml +++ b/crates/test-artifacts/programs/bn254-fp2-mul/Cargo.toml @@ -6,6 +6,6 @@ publish = false [dependencies] 
-sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } num-bigint = "0.4.6" rand = "0.8.5" diff --git a/tests/bn254-fp2-mul/src/main.rs b/crates/test-artifacts/programs/bn254-fp2-mul/src/main.rs similarity index 100% rename from tests/bn254-fp2-mul/src/main.rs rename to crates/test-artifacts/programs/bn254-fp2-mul/src/main.rs diff --git a/crates/test-artifacts/programs/bn254-mul/Cargo.toml b/crates/test-artifacts/programs/bn254-mul/Cargo.toml new file mode 100644 index 0000000000..c9f5a74e66 --- /dev/null +++ b/crates/test-artifacts/programs/bn254-mul/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "bn254-mul-test" +version = "1.1.0" +edition = "2021" +publish = false + +[dependencies] +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } +sp1-lib = { path = "../../../../crates/zkvm/lib" } +sp1-derive = { path = "../../../../crates/derive" } diff --git a/tests/bn254-mul/src/main.rs b/crates/test-artifacts/programs/bn254-mul/src/main.rs similarity index 100% rename from tests/bn254-mul/src/main.rs rename to crates/test-artifacts/programs/bn254-mul/src/main.rs diff --git a/tests/common/Cargo.toml b/crates/test-artifacts/programs/common/Cargo.toml similarity index 70% rename from tests/common/Cargo.toml rename to crates/test-artifacts/programs/common/Cargo.toml index e320ad9f2b..11ecf9a146 100644 --- a/tests/common/Cargo.toml +++ b/crates/test-artifacts/programs/common/Cargo.toml @@ -5,5 +5,5 @@ edition = "2021" publish = false [dependencies] -sp1-lib = { path = "../../crates/zkvm/lib" } +sp1-lib = { path = "../../../../crates/zkvm/lib" } num-bigint = "0.4" diff --git a/tests/common/src/lib.rs b/crates/test-artifacts/programs/common/src/lib.rs similarity index 100% rename from tests/common/src/lib.rs rename to crates/test-artifacts/programs/common/src/lib.rs diff --git a/tests/common/src/weierstrass_add.rs b/crates/test-artifacts/programs/common/src/weierstrass_add.rs similarity index 100% rename from tests/common/src/weierstrass_add.rs rename to crates/test-artifacts/programs/common/src/weierstrass_add.rs diff --git a/crates/test-artifacts/programs/cycle-tracker/Cargo.toml b/crates/test-artifacts/programs/cycle-tracker/Cargo.toml new file mode 100644 index 0000000000..4b7d3ccc5b --- /dev/null +++ b/crates/test-artifacts/programs/cycle-tracker/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "cycle-tracker-test" +version = "1.1.0" +edition = "2021" +publish = false + +[dependencies] +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } +sp1-derive = { path = "../../../../crates/derive" } diff --git a/tests/cycle-tracker/src/main.rs b/crates/test-artifacts/programs/cycle-tracker/src/main.rs similarity index 100% rename from tests/cycle-tracker/src/main.rs rename to crates/test-artifacts/programs/cycle-tracker/src/main.rs diff --git a/tests/ed-add/Cargo.toml b/crates/test-artifacts/programs/ed-add/Cargo.toml similarity index 62% rename from tests/ed-add/Cargo.toml rename to crates/test-artifacts/programs/ed-add/Cargo.toml index 0beb43f4f6..18003aeebd 100644 --- a/tests/ed-add/Cargo.toml +++ b/crates/test-artifacts/programs/ed-add/Cargo.toml @@ -5,4 +5,4 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } diff --git a/tests/ed-add/src/main.rs b/crates/test-artifacts/programs/ed-add/src/main.rs similarity index 100% rename from tests/ed-add/src/main.rs rename to crates/test-artifacts/programs/ed-add/src/main.rs diff 
--git a/tests/ed-decompress/Cargo.toml b/crates/test-artifacts/programs/ed-decompress/Cargo.toml similarity index 68% rename from tests/ed-decompress/Cargo.toml rename to crates/test-artifacts/programs/ed-decompress/Cargo.toml index c6e0c436f7..38163b84d7 100644 --- a/tests/ed-decompress/Cargo.toml +++ b/crates/test-artifacts/programs/ed-decompress/Cargo.toml @@ -5,5 +5,5 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } hex-literal = "0.4.1" diff --git a/tests/ed-decompress/src/main.rs b/crates/test-artifacts/programs/ed-decompress/src/main.rs similarity index 100% rename from tests/ed-decompress/src/main.rs rename to crates/test-artifacts/programs/ed-decompress/src/main.rs diff --git a/tests/ed25519/Cargo.toml b/crates/test-artifacts/programs/ed25519/Cargo.toml similarity index 80% rename from tests/ed25519/Cargo.toml rename to crates/test-artifacts/programs/ed25519/Cargo.toml index 0c22ff9d2e..fe2a1d2821 100644 --- a/tests/ed25519/Cargo.toml +++ b/crates/test-artifacts/programs/ed25519/Cargo.toml @@ -5,6 +5,6 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } ed25519-dalek = { git = "https://github.com/sp1-patches/curve25519-dalek", branch = "patch-curve25519-v4.1.3" } hex-literal = "0.4.1" diff --git a/tests/ed25519/src/main.rs b/crates/test-artifacts/programs/ed25519/src/main.rs similarity index 100% rename from tests/ed25519/src/main.rs rename to crates/test-artifacts/programs/ed25519/src/main.rs diff --git a/tests/fibonacci/Cargo.toml b/crates/test-artifacts/programs/fibonacci/Cargo.toml similarity index 65% rename from tests/fibonacci/Cargo.toml rename to crates/test-artifacts/programs/fibonacci/Cargo.toml index 28f3f76e9f..c15bb2f381 100644 --- a/tests/fibonacci/Cargo.toml +++ b/crates/test-artifacts/programs/fibonacci/Cargo.toml @@ -5,4 +5,4 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } diff --git a/tests/fibonacci/src/main.rs b/crates/test-artifacts/programs/fibonacci/src/main.rs similarity index 100% rename from tests/fibonacci/src/main.rs rename to crates/test-artifacts/programs/fibonacci/src/main.rs diff --git a/tests/hint-io/Cargo.toml b/crates/test-artifacts/programs/hint-io/Cargo.toml similarity index 62% rename from tests/hint-io/Cargo.toml rename to crates/test-artifacts/programs/hint-io/Cargo.toml index 8f0a538469..ab21da2e7d 100644 --- a/tests/hint-io/Cargo.toml +++ b/crates/test-artifacts/programs/hint-io/Cargo.toml @@ -5,4 +5,4 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } diff --git a/tests/hint-io/src/main.rs b/crates/test-artifacts/programs/hint-io/src/main.rs similarity index 100% rename from tests/hint-io/src/main.rs rename to crates/test-artifacts/programs/hint-io/src/main.rs diff --git a/tests/keccak-permute/Cargo.toml b/crates/test-artifacts/programs/keccak-permute/Cargo.toml similarity index 64% rename from tests/keccak-permute/Cargo.toml rename to crates/test-artifacts/programs/keccak-permute/Cargo.toml index d47b917ddc..22f9baa875 100644 --- a/tests/keccak-permute/Cargo.toml +++ b/crates/test-artifacts/programs/keccak-permute/Cargo.toml @@ -5,4 +5,4 @@ edition = "2021" publish = false 
[dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } diff --git a/tests/keccak-permute/src/main.rs b/crates/test-artifacts/programs/keccak-permute/src/main.rs similarity index 100% rename from tests/keccak-permute/src/main.rs rename to crates/test-artifacts/programs/keccak-permute/src/main.rs diff --git a/tests/keccak256/Cargo.toml b/crates/test-artifacts/programs/keccak256/Cargo.toml similarity index 79% rename from tests/keccak256/Cargo.toml rename to crates/test-artifacts/programs/keccak256/Cargo.toml index c76b886d2f..5bbd18b60a 100644 --- a/tests/keccak256/Cargo.toml +++ b/crates/test-artifacts/programs/keccak256/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } tiny-keccak = { git = "https://github.com/sp1-patches/tiny-keccak", branch = "patch-v2.0.2", features = [ "keccak", ] } diff --git a/tests/keccak256/src/main.rs b/crates/test-artifacts/programs/keccak256/src/main.rs similarity index 100% rename from tests/keccak256/src/main.rs rename to crates/test-artifacts/programs/keccak256/src/main.rs diff --git a/tests/panic/Cargo.toml b/crates/test-artifacts/programs/panic/Cargo.toml similarity index 62% rename from tests/panic/Cargo.toml rename to crates/test-artifacts/programs/panic/Cargo.toml index 6b6fbb65f0..c8f0270dcf 100644 --- a/tests/panic/Cargo.toml +++ b/crates/test-artifacts/programs/panic/Cargo.toml @@ -5,4 +5,4 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } diff --git a/tests/panic/src/main.rs b/crates/test-artifacts/programs/panic/src/main.rs similarity index 100% rename from tests/panic/src/main.rs rename to crates/test-artifacts/programs/panic/src/main.rs diff --git a/tests/rand/Cargo.toml b/crates/test-artifacts/programs/rand/Cargo.toml similarity index 65% rename from tests/rand/Cargo.toml rename to crates/test-artifacts/programs/rand/Cargo.toml index fed3efb4cf..980392d94a 100644 --- a/tests/rand/Cargo.toml +++ b/crates/test-artifacts/programs/rand/Cargo.toml @@ -5,5 +5,5 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } rand = "0.8.5" diff --git a/tests/rand/src/main.rs b/crates/test-artifacts/programs/rand/src/main.rs similarity index 100% rename from tests/rand/src/main.rs rename to crates/test-artifacts/programs/rand/src/main.rs diff --git a/crates/test-artifacts/programs/secp256k1-add/Cargo.toml b/crates/test-artifacts/programs/secp256k1-add/Cargo.toml new file mode 100644 index 0000000000..f21057e88b --- /dev/null +++ b/crates/test-artifacts/programs/secp256k1-add/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "secp256k1-add-test" +version = "1.1.0" +edition = "2021" +publish = false + +[dependencies] +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } +sp1-lib = { path = "../../../../crates/zkvm/lib" } +sp1-curves = { path = "../../../../crates/curves" } +common-test-utils = { path = "../common" } diff --git a/tests/secp256k1-add/src/main.rs b/crates/test-artifacts/programs/secp256k1-add/src/main.rs similarity index 100% rename from tests/secp256k1-add/src/main.rs rename to crates/test-artifacts/programs/secp256k1-add/src/main.rs diff --git a/tests/secp256k1-decompress/Cargo.toml 
b/crates/test-artifacts/programs/secp256k1-decompress/Cargo.toml similarity index 65% rename from tests/secp256k1-decompress/Cargo.toml rename to crates/test-artifacts/programs/secp256k1-decompress/Cargo.toml index a803f47734..8f504400e7 100644 --- a/tests/secp256k1-decompress/Cargo.toml +++ b/crates/test-artifacts/programs/secp256k1-decompress/Cargo.toml @@ -5,4 +5,4 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } diff --git a/tests/secp256k1-decompress/src/main.rs b/crates/test-artifacts/programs/secp256k1-decompress/src/main.rs similarity index 100% rename from tests/secp256k1-decompress/src/main.rs rename to crates/test-artifacts/programs/secp256k1-decompress/src/main.rs diff --git a/tests/secp256k1-double/Cargo.toml b/crates/test-artifacts/programs/secp256k1-double/Cargo.toml similarity index 64% rename from tests/secp256k1-double/Cargo.toml rename to crates/test-artifacts/programs/secp256k1-double/Cargo.toml index 72f332f741..08979dd5f3 100644 --- a/tests/secp256k1-double/Cargo.toml +++ b/crates/test-artifacts/programs/secp256k1-double/Cargo.toml @@ -5,4 +5,4 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } diff --git a/tests/secp256k1-double/src/main.rs b/crates/test-artifacts/programs/secp256k1-double/src/main.rs similarity index 100% rename from tests/secp256k1-double/src/main.rs rename to crates/test-artifacts/programs/secp256k1-double/src/main.rs diff --git a/crates/test-artifacts/programs/secp256k1-mul/Cargo.toml b/crates/test-artifacts/programs/secp256k1-mul/Cargo.toml new file mode 100644 index 0000000000..4ed08c1f9b --- /dev/null +++ b/crates/test-artifacts/programs/secp256k1-mul/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "secp256k1-mul-test" +version = "1.1.0" +edition = "2021" +publish = false + +[dependencies] +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } +sp1-derive = { path = "../../../../crates/derive" } diff --git a/tests/secp256k1-mul/src/main.rs b/crates/test-artifacts/programs/secp256k1-mul/src/main.rs similarity index 100% rename from tests/secp256k1-mul/src/main.rs rename to crates/test-artifacts/programs/secp256k1-mul/src/main.rs diff --git a/tests/secp256r1-add/Cargo.toml b/crates/test-artifacts/programs/secp256r1-add/Cargo.toml similarity index 71% rename from tests/secp256r1-add/Cargo.toml rename to crates/test-artifacts/programs/secp256r1-add/Cargo.toml index aa89d51815..03926de266 100644 --- a/tests/secp256r1-add/Cargo.toml +++ b/crates/test-artifacts/programs/secp256r1-add/Cargo.toml @@ -5,9 +5,9 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } -sp1-lib = { path = "../../crates/zkvm/lib" } -sp1-curves = { path = "../../crates/curves" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } +sp1-lib = { path = "../../../../crates/zkvm/lib" } +sp1-curves = { path = "../../../../crates/curves" } common-test-utils = { path = "../common" } hex-literal = "0.4.1" num = { version = "0.4.1", default-features = false } diff --git a/tests/secp256r1-add/src/main.rs b/crates/test-artifacts/programs/secp256r1-add/src/main.rs similarity index 100% rename from tests/secp256r1-add/src/main.rs rename to crates/test-artifacts/programs/secp256r1-add/src/main.rs diff --git a/tests/secp256r1-decompress/Cargo.toml 
b/crates/test-artifacts/programs/secp256r1-decompress/Cargo.toml similarity index 65% rename from tests/secp256r1-decompress/Cargo.toml rename to crates/test-artifacts/programs/secp256r1-decompress/Cargo.toml index dd6984ced9..21bf76d699 100644 --- a/tests/secp256r1-decompress/Cargo.toml +++ b/crates/test-artifacts/programs/secp256r1-decompress/Cargo.toml @@ -5,4 +5,4 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } diff --git a/tests/secp256r1-decompress/src/main.rs b/crates/test-artifacts/programs/secp256r1-decompress/src/main.rs similarity index 100% rename from tests/secp256r1-decompress/src/main.rs rename to crates/test-artifacts/programs/secp256r1-decompress/src/main.rs diff --git a/tests/secp256r1-double/Cargo.toml b/crates/test-artifacts/programs/secp256r1-double/Cargo.toml similarity index 71% rename from tests/secp256r1-double/Cargo.toml rename to crates/test-artifacts/programs/secp256r1-double/Cargo.toml index 88c4299cad..9797ad1864 100644 --- a/tests/secp256r1-double/Cargo.toml +++ b/crates/test-artifacts/programs/secp256r1-double/Cargo.toml @@ -5,9 +5,9 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } -sp1-lib = { path = "../../crates/zkvm/lib" } -sp1-curves = { path = "../../crates/curves" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } +sp1-lib = { path = "../../../../crates/zkvm/lib" } +sp1-curves = { path = "../../../../crates/curves" } common-test-utils = { path = "../common" } hex-literal = "0.4.1" num = { version = "0.4.1", default-features = false } diff --git a/tests/secp256r1-double/src/main.rs b/crates/test-artifacts/programs/secp256r1-double/src/main.rs similarity index 100% rename from tests/secp256r1-double/src/main.rs rename to crates/test-artifacts/programs/secp256r1-double/src/main.rs diff --git a/tests/sha-compress/Cargo.toml b/crates/test-artifacts/programs/sha-compress/Cargo.toml similarity index 63% rename from tests/sha-compress/Cargo.toml rename to crates/test-artifacts/programs/sha-compress/Cargo.toml index c2c76a4411..2938af28b0 100644 --- a/tests/sha-compress/Cargo.toml +++ b/crates/test-artifacts/programs/sha-compress/Cargo.toml @@ -5,4 +5,4 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } diff --git a/tests/sha-compress/src/main.rs b/crates/test-artifacts/programs/sha-compress/src/main.rs similarity index 100% rename from tests/sha-compress/src/main.rs rename to crates/test-artifacts/programs/sha-compress/src/main.rs diff --git a/tests/sha-extend/Cargo.toml b/crates/test-artifacts/programs/sha-extend/Cargo.toml similarity index 63% rename from tests/sha-extend/Cargo.toml rename to crates/test-artifacts/programs/sha-extend/Cargo.toml index ddf51c1756..d5c383a16f 100644 --- a/tests/sha-extend/Cargo.toml +++ b/crates/test-artifacts/programs/sha-extend/Cargo.toml @@ -5,4 +5,4 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } diff --git a/tests/sha-extend/src/main.rs b/crates/test-artifacts/programs/sha-extend/src/main.rs similarity index 100% rename from tests/sha-extend/src/main.rs rename to crates/test-artifacts/programs/sha-extend/src/main.rs diff --git a/tests/sha2/Cargo.toml b/crates/test-artifacts/programs/sha2/Cargo.toml similarity index 
77% rename from tests/sha2/Cargo.toml rename to crates/test-artifacts/programs/sha2/Cargo.toml index 88440b8c01..4cbde32cac 100644 --- a/tests/sha2/Cargo.toml +++ b/crates/test-artifacts/programs/sha2/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } hex-literal = "0.4.1" sha2 = { git = "https://github.com/succinctbot/RustCrypto-hashes.git" } hex = "0.4.3" diff --git a/tests/sha2/src/main.rs b/crates/test-artifacts/programs/sha2/src/main.rs similarity index 100% rename from tests/sha2/src/main.rs rename to crates/test-artifacts/programs/sha2/src/main.rs diff --git a/tests/tendermint-benchmark/Cargo.toml b/crates/test-artifacts/programs/tendermint-benchmark/Cargo.toml similarity index 57% rename from tests/tendermint-benchmark/Cargo.toml rename to crates/test-artifacts/programs/tendermint-benchmark/Cargo.toml index 3bf23b4031..a8e89486cc 100644 --- a/tests/tendermint-benchmark/Cargo.toml +++ b/crates/test-artifacts/programs/tendermint-benchmark/Cargo.toml @@ -5,9 +5,9 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } -serde_json = { version = "1.0", default-features = false, features = ["alloc"] } -serde = { version = "1.0", default-features = false, features = ["derive"] } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } +serde_json = { workspace = true, default-features = false, features = ["alloc"] } +serde = { workspace = true, default-features = false, features = ["derive"] } tendermint = { version = "0.34.0", default-features = false } tendermint-light-client-verifier = { version = "0.34.0", default-features = false, features = [ "rust-crypto", diff --git a/tests/tendermint-benchmark/src/fixtures/1/next_validators.json b/crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/1/next_validators.json similarity index 100% rename from tests/tendermint-benchmark/src/fixtures/1/next_validators.json rename to crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/1/next_validators.json diff --git a/tests/tendermint-benchmark/src/fixtures/1/signed_header.json b/crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/1/signed_header.json similarity index 100% rename from tests/tendermint-benchmark/src/fixtures/1/signed_header.json rename to crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/1/signed_header.json diff --git a/tests/tendermint-benchmark/src/fixtures/1/validators.json b/crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/1/validators.json similarity index 100% rename from tests/tendermint-benchmark/src/fixtures/1/validators.json rename to crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/1/validators.json diff --git a/tests/tendermint-benchmark/src/fixtures/2/next_validators.json b/crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/2/next_validators.json similarity index 100% rename from tests/tendermint-benchmark/src/fixtures/2/next_validators.json rename to crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/2/next_validators.json diff --git a/tests/tendermint-benchmark/src/fixtures/2/signed_header.json b/crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/2/signed_header.json similarity index 100% rename from tests/tendermint-benchmark/src/fixtures/2/signed_header.json rename to crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/2/signed_header.json diff 
--git a/tests/tendermint-benchmark/src/fixtures/2/validators.json b/crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/2/validators.json similarity index 100% rename from tests/tendermint-benchmark/src/fixtures/2/validators.json rename to crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/2/validators.json diff --git a/tests/tendermint-benchmark/src/fixtures/small-1/next_validators.json b/crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/small-1/next_validators.json similarity index 100% rename from tests/tendermint-benchmark/src/fixtures/small-1/next_validators.json rename to crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/small-1/next_validators.json diff --git a/tests/tendermint-benchmark/src/fixtures/small-1/signed_header.json b/crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/small-1/signed_header.json similarity index 100% rename from tests/tendermint-benchmark/src/fixtures/small-1/signed_header.json rename to crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/small-1/signed_header.json diff --git a/tests/tendermint-benchmark/src/fixtures/small-1/validators.json b/crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/small-1/validators.json similarity index 100% rename from tests/tendermint-benchmark/src/fixtures/small-1/validators.json rename to crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/small-1/validators.json diff --git a/tests/tendermint-benchmark/src/fixtures/small-2/next_validators.json b/crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/small-2/next_validators.json similarity index 100% rename from tests/tendermint-benchmark/src/fixtures/small-2/next_validators.json rename to crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/small-2/next_validators.json diff --git a/tests/tendermint-benchmark/src/fixtures/small-2/signed_header.json b/crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/small-2/signed_header.json similarity index 100% rename from tests/tendermint-benchmark/src/fixtures/small-2/signed_header.json rename to crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/small-2/signed_header.json diff --git a/tests/tendermint-benchmark/src/fixtures/small-2/validators.json b/crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/small-2/validators.json similarity index 100% rename from tests/tendermint-benchmark/src/fixtures/small-2/validators.json rename to crates/test-artifacts/programs/tendermint-benchmark/src/fixtures/small-2/validators.json diff --git a/tests/tendermint-benchmark/src/main.rs b/crates/test-artifacts/programs/tendermint-benchmark/src/main.rs similarity index 100% rename from tests/tendermint-benchmark/src/main.rs rename to crates/test-artifacts/programs/tendermint-benchmark/src/main.rs diff --git a/crates/test-artifacts/programs/u256x2048-mul/Cargo.toml b/crates/test-artifacts/programs/u256x2048-mul/Cargo.toml new file mode 100644 index 0000000000..cac74cafa8 --- /dev/null +++ b/crates/test-artifacts/programs/u256x2048-mul/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "u256x2048-mul" +version = "1.0.0" +edition = "2021" +publish = false + +[dependencies] +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } +sp1-derive = { path = "../../../../crates/derive" } +num-bigint = "0.4.6" +num = { version = "0.4.1" } +rand = "0.8.5" +bytemuck = "1.15.0" \ No newline at end of file diff --git a/crates/test-artifacts/programs/u256x2048-mul/src/main.rs 
b/crates/test-artifacts/programs/u256x2048-mul/src/main.rs new file mode 100644 index 0000000000..87f8e106cb --- /dev/null +++ b/crates/test-artifacts/programs/u256x2048-mul/src/main.rs @@ -0,0 +1,84 @@ +#![no_main] +sp1_zkvm::entrypoint!(main); + +use num::BigUint; +use rand::Rng; +use sp1_zkvm::syscalls::syscall_u256x2048_mul; + +fn u256_to_bytes_le(x: &BigUint) -> [u8; 32] { + let mut bytes = x.to_bytes_le(); + bytes.resize(32, 0); + bytes.try_into().unwrap() +} + +fn u2048_to_bytes_le(x: &BigUint) -> [u8; 256] { + let mut bytes = x.to_bytes_le(); + bytes.resize(256, 0); + bytes.try_into().unwrap() +} + +pub fn main() { + let mut a_max: [u8; 32] = [0xff; 32]; + let mut b_max: [u8; 256] = [0xff; 256]; + + let a_max_big = BigUint::from_bytes_le(&a_max); + a_max = u256_to_bytes_le(&a_max_big); + let b_max_big = BigUint::from_bytes_le(&b_max); + b_max = u2048_to_bytes_le(&b_max_big); + + let mut lo_max: [u32; 64] = [0; 64]; + let mut hi_max: [u32; 8] = [0; 8]; + + syscall_u256x2048_mul( + a_max.as_ptr() as *const [u32; 8], + b_max.as_ptr() as *const [u32; 64], + lo_max.as_mut_ptr() as *mut [u32; 64], + hi_max.as_mut_ptr() as *mut [u32; 8], + ); + + let lo_max_bytes: [u8; 256] = bytemuck::cast::<[u32; 64], [u8; 256]>(lo_max); + let hi_max_bytes: [u8; 32] = bytemuck::cast::<[u32; 8], [u8; 32]>(hi_max); + + let lo_max_big = BigUint::from_bytes_le(&lo_max_bytes); + let hi_max_big = BigUint::from_bytes_le(&hi_max_bytes); + + let result_max_syscall = (hi_max_big << 2048) + lo_max_big; + let result_max = a_max_big * b_max_big; + assert_eq!(result_max, result_max_syscall); + + // Test 10 random pairs of a and b. + let mut rng = rand::thread_rng(); + for _ in 0..10 { + let a: [u8; 32] = rng.gen(); + let mut b = [0u8; 256]; + rng.fill(&mut b); + + let a_big = BigUint::from_bytes_le(&a); + let b_big = BigUint::from_bytes_le(&b); + + let a = u256_to_bytes_le(&a_big); + let b = u2048_to_bytes_le(&b_big); + + let mut lo: [u32; 64] = [0; 64]; + let mut hi: [u32; 8] = [0; 8]; + + syscall_u256x2048_mul( + a.as_ptr() as *const [u32; 8], + b.as_ptr() as *const [u32; 64], + lo.as_mut_ptr() as *mut [u32; 64], + hi.as_mut_ptr() as *mut [u32; 8], + ); + + let lo_bytes: [u8; 256] = bytemuck::cast::<[u32; 64], [u8; 256]>(lo); + let hi_bytes: [u8; 32] = bytemuck::cast::<[u32; 8], [u8; 32]>(hi); + + let lo_big = BigUint::from_bytes_le(&lo_bytes); + let hi_big = BigUint::from_bytes_le(&hi_bytes); + + let result_syscall = (hi_big << 2048) + lo_big; + let result = a_big * b_big; + assert_eq!(result, result_syscall); + } + + println!("All tests passed successfully!"); +} diff --git a/tests/uint256-arith/Cargo.toml b/crates/test-artifacts/programs/uint256-arith/Cargo.toml similarity index 55% rename from tests/uint256-arith/Cargo.toml rename to crates/test-artifacts/programs/uint256-arith/Cargo.toml index 8a3bd274d8..7ea91e234b 100644 --- a/tests/uint256-arith/Cargo.toml +++ b/crates/test-artifacts/programs/uint256-arith/Cargo.toml @@ -5,6 +5,6 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } -sp1-derive = { path = "../../crates/derive" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } +sp1-derive = { path = "../../../../crates/derive" } crypto-bigint = "0.6.0-pre.12" diff --git a/tests/uint256-arith/src/main.rs b/crates/test-artifacts/programs/uint256-arith/src/main.rs similarity index 100% rename from tests/uint256-arith/src/main.rs rename to crates/test-artifacts/programs/uint256-arith/src/main.rs diff --git a/tests/uint256-mul/Cargo.toml 
b/crates/test-artifacts/programs/uint256-mul/Cargo.toml similarity index 59% rename from tests/uint256-mul/Cargo.toml rename to crates/test-artifacts/programs/uint256-mul/Cargo.toml index 17a4e6b0ba..f867081005 100644 --- a/tests/uint256-mul/Cargo.toml +++ b/crates/test-artifacts/programs/uint256-mul/Cargo.toml @@ -7,6 +7,6 @@ publish = false [dependencies] rand = "0.8" num = { version = "0.4.1" } -sp1-zkvm = { path = "../../crates/zkvm/entrypoint" } -sp1-derive = { path = "../../crates/derive" } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint" } +sp1-derive = { path = "../../../../crates/derive" } bytemuck = "1.15.0" diff --git a/tests/uint256-mul/src/main.rs b/crates/test-artifacts/programs/uint256-mul/src/main.rs similarity index 100% rename from tests/uint256-mul/src/main.rs rename to crates/test-artifacts/programs/uint256-mul/src/main.rs diff --git a/tests/verify-proof/Cargo.toml b/crates/test-artifacts/programs/verify-proof/Cargo.toml similarity index 61% rename from tests/verify-proof/Cargo.toml rename to crates/test-artifacts/programs/verify-proof/Cargo.toml index 7551bd81f3..77c04e4664 100644 --- a/tests/verify-proof/Cargo.toml +++ b/crates/test-artifacts/programs/verify-proof/Cargo.toml @@ -5,6 +5,6 @@ edition = "2021" publish = false [dependencies] -sp1-zkvm = { path = "../../crates/zkvm/entrypoint", features = ["verify"] } +sp1-zkvm = { path = "../../../../crates/zkvm/entrypoint", features = ["verify"] } hex = "0.4.3" sha2 = "0.10.8" diff --git a/tests/verify-proof/src/main.rs b/crates/test-artifacts/programs/verify-proof/src/main.rs similarity index 100% rename from tests/verify-proof/src/main.rs rename to crates/test-artifacts/programs/verify-proof/src/main.rs diff --git a/crates/test-artifacts/src/lib.rs b/crates/test-artifacts/src/lib.rs new file mode 100644 index 0000000000..8707526193 --- /dev/null +++ b/crates/test-artifacts/src/lib.rs @@ -0,0 +1,79 @@ +#![warn(clippy::pedantic)] + +//! The goal of this crate is to compile all the programs in the `programs` folder to ELF files +//! and to give other crates easy access to these ELFs through the constants below. +//! +//! **Note:** If you add a new program, don't forget to add it to the workspace in the +//! `programs` folder so that it gets compiled to an ELF file.
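+//!
+//! For example, a test in another crate could pull in one of these ELFs roughly like this
+//! (a minimal sketch, assuming this crate is imported as `test_artifacts` and reusing the
+//! `sp1_sdk::ProverClient` setup flow shown elsewhere in this patch):
+//!
+//! ```ignore
+//! use sp1_sdk::ProverClient;
+//! use test_artifacts::FIBONACCI_ELF;
+//!
+//! let client = ProverClient::new();
+//! let (pk, vk) = client.setup(FIBONACCI_ELF);
+//! ```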
+ +use sp1_build::include_elf; + +pub const FIBONACCI_ELF: &[u8] = include_elf!("fibonacci-program-tests"); + +pub const ED25519_ELF: &[u8] = include_elf!("ed25519-program"); + +pub const CYCLE_TRACKER_ELF: &[u8] = include_elf!("cycle-tracker-test"); + +pub const ED_ADD_ELF: &[u8] = include_elf!("ed-add-test"); + +pub const ED_DECOMPRESS_ELF: &[u8] = include_elf!("ed-decompress-test"); + +pub const KECCAK_PERMUTE_ELF: &[u8] = include_elf!("keccak-permute-test"); + +pub const KECCAK256_ELF: &[u8] = include_elf!("keccak256-test"); + +pub const SECP256K1_ADD_ELF: &[u8] = include_elf!("secp256k1-add-test"); + +pub const SECP256K1_DECOMPRESS_ELF: &[u8] = include_elf!("secp256k1-decompress-test"); + +pub const SECP256K1_DOUBLE_ELF: &[u8] = include_elf!("secp256k1-double-test"); + +pub const SECP256R1_ADD_ELF: &[u8] = include_elf!("secp256r1-add-test"); + +pub const SECP256R1_DECOMPRESS_ELF: &[u8] = include_elf!("secp256r1-decompress-test"); + +pub const SECP256R1_DOUBLE_ELF: &[u8] = include_elf!("secp256r1-double-test"); + +pub const SHA_COMPRESS_ELF: &[u8] = include_elf!("sha-compress-test"); + +pub const SHA_EXTEND_ELF: &[u8] = include_elf!("sha-extend-test"); + +pub const SHA2_ELF: &[u8] = include_elf!("sha2-test"); + +pub const BN254_ADD_ELF: &[u8] = include_elf!("bn254-add-test"); + +pub const BN254_DOUBLE_ELF: &[u8] = include_elf!("bn254-double-test"); + +pub const BN254_MUL_ELF: &[u8] = include_elf!("bn254-mul-test"); + +pub const SECP256K1_MUL_ELF: &[u8] = include_elf!("secp256k1-mul-test"); + +pub const BLS12381_ADD_ELF: &[u8] = include_elf!("bls12381-add-test"); + +pub const BLS12381_DOUBLE_ELF: &[u8] = include_elf!("bls12381-double-test"); + +pub const BLS12381_MUL_ELF: &[u8] = include_elf!("bls12381-mul-test"); + +pub const UINT256_MUL_ELF: &[u8] = include_elf!("biguint-mul-test"); + +pub const BLS12381_DECOMPRESS_ELF: &[u8] = include_elf!("bls-decompress-test"); + +pub const VERIFY_PROOF_ELF: &[u8] = include_elf!("verify-proof"); + +pub const PANIC_ELF: &[u8] = include_elf!("panic-test"); + +pub const BLS12381_FP_ELF: &[u8] = include_elf!("bls12381-fp-test"); + +pub const BLS12381_FP2_MUL_ELF: &[u8] = include_elf!("bls12381-fp2-mul-test"); + +pub const BLS12381_FP2_ADDSUB_ELF: &[u8] = include_elf!("bls12381-fp2-addsub-test"); + +pub const BN254_FP_ELF: &[u8] = include_elf!("bn254-fp-test"); + +pub const BN254_FP2_ADDSUB_ELF: &[u8] = include_elf!("bn254-fp2-addsub-test"); + +pub const BN254_FP2_MUL_ELF: &[u8] = include_elf!("bn254-fp2-mul-test"); + +pub const TENDERMINT_BENCHMARK_ELF: &[u8] = include_elf!("tendermint-benchmark-program"); + +pub const U256XU2048_MUL_ELF: &[u8] = include_elf!("u256x2048-mul"); diff --git a/crates/verifier/Cargo.toml b/crates/verifier/Cargo.toml index 63690ec55a..c2b861b16d 100644 --- a/crates/verifier/Cargo.toml +++ b/crates/verifier/Cargo.toml @@ -12,10 +12,17 @@ categories = { workspace = true } [dependencies] bn = { version = "0.6.0", package = "substrate-bn-succinct" } sha2 = { version = "0.10.8", default-features = false } -thiserror-no-std = "2.0.2" +thiserror = { version = "2", default-features = false } hex = { version = "0.4.3", default-features = false, features = ["alloc"] } lazy_static = { version = "1.5.0", default-features = false } +# arkworks +ark-bn254 = { version = "0.4.0", optional = true } +ark-serialize = { version = "0.4.2", optional = true } +ark-ff = { version = "0.4.2", optional = true } +ark-groth16 = { version = "0.4.0", optional = true } +ark-ec = { version = "0.4.0", optional = true } + [dev-dependencies] sp1-sdk = { 
workspace = true } num-bigint = "0.4.6" @@ -23,4 +30,5 @@ num-traits = "0.2.19" [features] default = ["std"] -std = ["thiserror-no-std/std"] +std = ["thiserror/std"] +ark = ["ark-bn254", "ark-serialize", "ark-ff", "ark-groth16", "ark-ec"] diff --git a/crates/verifier/README.md b/crates/verifier/README.md index 6a4042e04e..8755cfa7c1 100644 --- a/crates/verifier/README.md +++ b/crates/verifier/README.md @@ -6,7 +6,7 @@ to be generated using the [SP1 SDK](../sdk). ## Features Groth16 and Plonk proof verification are supported in `no-std` environments. Verification in the -SP1 ZKVM context is patched, in order to make use of the +SP1 zkVM context is patched, in order to make use of the [bn254 precompiles](https://blog.succinct.xyz/succinctshipsprecompiles/). ### Pre-generated verification keys diff --git a/crates/verifier/src/error.rs b/crates/verifier/src/error.rs index 2d37bceac9..1f30633fde 100644 --- a/crates/verifier/src/error.rs +++ b/crates/verifier/src/error.rs @@ -1,5 +1,5 @@ use bn::{CurveError, FieldError, GroupError}; -use thiserror_no_std::Error; +use thiserror::Error; #[derive(Error, Debug)] pub enum Error { diff --git a/crates/verifier/src/groth16/ark_converter.rs b/crates/verifier/src/groth16/ark_converter.rs new file mode 100644 index 0000000000..2554585597 --- /dev/null +++ b/crates/verifier/src/groth16/ark_converter.rs @@ -0,0 +1,196 @@ +use ark_bn254::{Bn254, Fr, G1Affine, G2Affine}; +use ark_ec::AffineRepr; +use ark_ff::PrimeField; +use ark_groth16::{Proof, VerifyingKey}; +use ark_serialize::{CanonicalDeserialize, Compress, Validate}; +use thiserror::Error; + +const GNARK_MASK: u8 = 0b11 << 6; +const GNARK_COMPRESSED_POSITIVE: u8 = 0b10 << 6; +const GNARK_COMPRESSED_NEGATIVE: u8 = 0b11 << 6; +const GNARK_COMPRESSED_INFINITY: u8 = 0b01 << 6; + +const ARK_MASK: u8 = 0b11 << 6; +const ARK_COMPRESSED_POSITIVE: u8 = 0b00 << 6; +const ARK_COMPRESSED_NEGATIVE: u8 = 0b10 << 6; +const ARK_COMPRESSED_INFINITY: u8 = 0b01 << 6; + +#[derive(Error, Debug)] +pub enum ArkGroth16Error { + #[error("G1 compression error")] + G1CompressionError, + #[error("G2 compression error")] + G2CompressionError, + #[error("Invalid input")] + InvalidInput, +} + +/// Convert the endianness of a byte array, chunk by chunk. +/// +/// Taken from https://github.com/anza-xyz/agave/blob/c54d840/curves/bn254/src/compression.rs#L176-L189 +fn convert_endianness( + bytes: &[u8; ARRAY_SIZE], +) -> [u8; ARRAY_SIZE] { + let reversed: [_; ARRAY_SIZE] = bytes + .chunks_exact(CHUNK_SIZE) + .flat_map(|chunk| chunk.iter().rev().copied()) + .enumerate() + .fold([0u8; ARRAY_SIZE], |mut acc, (i, v)| { + acc[i] = v; + acc + }); + reversed +} + +/// Decompress a G1 point. +/// +/// Taken from https://github.com/anza-xyz/agave/blob/c54d840/curves/bn254/src/compression.rs#L219 +fn decompress_g1(g1_bytes: &[u8; 32]) -> Result { + let g1_bytes = gnark_compressed_x_to_ark_compressed_x(g1_bytes)?; + let g1_bytes = convert_endianness::<32, 32>(&g1_bytes.as_slice().try_into().unwrap()); + let decompressed_g1 = G1Affine::deserialize_with_mode( + convert_endianness::<32, 32>(&g1_bytes).as_slice(), + Compress::Yes, + Validate::No, + ) + .map_err(|_| ArkGroth16Error::G1CompressionError)?; + Ok(decompressed_g1) +} + +/// Decompress a G2 point. 
+/// +/// Adapted from https://github.com/anza-xyz/agave/blob/c54d840/curves/bn254/src/compression.rs#L255 +fn decompress_g2(g2_bytes: &[u8; 64]) -> Result { + let g2_bytes = gnark_compressed_x_to_ark_compressed_x(g2_bytes)?; + let g2_bytes = convert_endianness::<64, 64>(&g2_bytes.as_slice().try_into().unwrap()); + let decompressed_g2 = G2Affine::deserialize_with_mode( + convert_endianness::<64, 64>(&g2_bytes).as_slice(), + Compress::Yes, + Validate::No, + ) + .map_err(|_| ArkGroth16Error::G2CompressionError)?; + Ok(decompressed_g2) +} + +fn gnark_flag_to_ark_flag(msb: u8) -> Result { + let gnark_flag = msb & GNARK_MASK; + + let ark_flag = match gnark_flag { + GNARK_COMPRESSED_POSITIVE => ARK_COMPRESSED_POSITIVE, + GNARK_COMPRESSED_NEGATIVE => ARK_COMPRESSED_NEGATIVE, + GNARK_COMPRESSED_INFINITY => ARK_COMPRESSED_INFINITY, + _ => { + return Err(ArkGroth16Error::InvalidInput); + } + }; + + Ok(msb & !ARK_MASK | ark_flag) +} + +fn gnark_compressed_x_to_ark_compressed_x(x: &[u8]) -> Result, ArkGroth16Error> { + if x.len() != 32 && x.len() != 64 { + return Err(ArkGroth16Error::InvalidInput); + } + let mut x_copy = x.to_owned(); + + let msb = gnark_flag_to_ark_flag(x_copy[0])?; + x_copy[0] = msb; + + x_copy.reverse(); + Ok(x_copy) +} + +/// Deserialize a gnark decompressed affine G1 point to an arkworks decompressed affine G1 point. +fn gnark_decompressed_g1_to_ark_decompressed_g1( + buf: &[u8; 64], +) -> Result { + let buf = convert_endianness::<32, 64>(buf); + if buf == [0u8; 64] { + return Ok(G1Affine::zero()); + } + let g1 = G1Affine::deserialize_with_mode( + &*[&buf[..], &[0u8][..]].concat(), + Compress::No, + Validate::Yes, + ) + .map_err(|_| ArkGroth16Error::G1CompressionError)?; + Ok(g1) +} + +/// Deserialize a gnark decompressed affine G2 point to an arkworks decompressed affine G2 point. +fn gnark_decompressed_g2_to_ark_decompressed_g2( + buf: &[u8; 128], +) -> Result { + let buf = convert_endianness::<64, 128>(buf); + if buf == [0u8; 128] { + return Ok(G2Affine::zero()); + } + let g2 = G2Affine::deserialize_with_mode( + &*[&buf[..], &[0u8][..]].concat(), + Compress::No, + Validate::Yes, + ) + .map_err(|_| ArkGroth16Error::G2CompressionError)?; + Ok(g2) +} + +/// Load a Groth16 proof from bytes in the arkworks format. +pub fn load_ark_proof_from_bytes(buffer: &[u8]) -> Result, ArkGroth16Error> { + Ok(Proof:: { + a: gnark_decompressed_g1_to_ark_decompressed_g1(buffer[..64].try_into().unwrap())?, + b: gnark_decompressed_g2_to_ark_decompressed_g2(buffer[64..192].try_into().unwrap())?, + c: gnark_decompressed_g1_to_ark_decompressed_g1(&buffer[192..256].try_into().unwrap())?, + }) +} + +/// Load a Groth16 verifying key from bytes in the arkworks format. +pub fn load_ark_groth16_verifying_key_from_bytes( + buffer: &[u8], +) -> Result, ArkGroth16Error> { + // Note that g1_beta and g1_delta are not used in the verification process. 
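+    // Assumed gnark byte layout, inferred from the offsets used below: alpha_g1 at [0..32],
+    // g1_beta at [32..64] (skipped), beta_g2 at [64..128], gamma_g2 at [128..192],
+    // g1_delta at [192..224] (skipped), delta_g2 at [224..288], then a big-endian u32 count
+    // of gamma_abc_g1 points followed by the 32-byte compressed points themselves.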
+ let alpha_g1 = decompress_g1(buffer[..32].try_into().unwrap())?; + let beta_g2 = decompress_g2(buffer[64..128].try_into().unwrap())?; + let gamma_g2 = decompress_g2(buffer[128..192].try_into().unwrap())?; + let delta_g2 = decompress_g2(buffer[224..288].try_into().unwrap())?; + + let num_k = u32::from_be_bytes([buffer[288], buffer[289], buffer[290], buffer[291]]); + let mut k = Vec::new(); + let mut offset = 292; + for _ in 0..num_k { + let point = decompress_g1(&buffer[offset..offset + 32].try_into().unwrap())?; + k.push(point); + offset += 32; + } + + let num_of_array_of_public_and_commitment_committed = u32::from_be_bytes([ + buffer[offset], + buffer[offset + 1], + buffer[offset + 2], + buffer[offset + 3], + ]); + offset += 4; + for _ in 0..num_of_array_of_public_and_commitment_committed { + let num = u32::from_be_bytes([ + buffer[offset], + buffer[offset + 1], + buffer[offset + 2], + buffer[offset + 3], + ]); + offset += 4; + for _ in 0..num { + offset += 4; + } + } + + Ok(VerifyingKey { alpha_g1, beta_g2, gamma_g2, delta_g2, gamma_abc_g1: k }) +} + +/// Load the public inputs from the bytes in the arkworks format. +/// +/// This reads the vkey hash and the committed values digest as big endian Fr elements. +pub fn load_ark_public_inputs_from_bytes( + vkey_hash: &[u8; 32], + committed_values_digest: &[u8; 32], +) -> [Fr; 2] { + [Fr::from_be_bytes_mod_order(vkey_hash), Fr::from_be_bytes_mod_order(committed_values_digest)] +} diff --git a/crates/verifier/src/groth16/converter.rs b/crates/verifier/src/groth16/converter.rs index 6c7a5e3b97..3b1898356d 100644 --- a/crates/verifier/src/groth16/converter.rs +++ b/crates/verifier/src/groth16/converter.rs @@ -13,7 +13,7 @@ use super::error::Groth16Error; /// Load the Groth16 proof from the given byte slice. /// /// The byte slice is represented as 2 uncompressed g1 points, and one uncompressed g2 point, -/// as outputted from gnark. +/// as outputted from Gnark. pub(crate) fn load_groth16_proof_from_bytes(buffer: &[u8]) -> Result { let ar = uncompressed_bytes_to_g1_point(&buffer[..64])?; let bs = uncompressed_bytes_to_g2_point(&buffer[64..192])?; @@ -24,8 +24,8 @@ pub(crate) fn load_groth16_proof_from_bytes(buffer: &[u8]) -> Result Result { diff --git a/crates/verifier/src/groth16/error.rs b/crates/verifier/src/groth16/error.rs index 36952cb749..18d8e2dcbe 100644 --- a/crates/verifier/src/groth16/error.rs +++ b/crates/verifier/src/groth16/error.rs @@ -1,4 +1,4 @@ -use thiserror_no_std::Error; +use thiserror::Error; #[derive(Debug, Error)] pub enum Groth16Error { diff --git a/crates/verifier/src/groth16/mod.rs b/crates/verifier/src/groth16/mod.rs index c6cf98a23a..afd8b603ba 100644 --- a/crates/verifier/src/groth16/mod.rs +++ b/crates/verifier/src/groth16/mod.rs @@ -2,26 +2,31 @@ mod converter; pub mod error; mod verify; +use bn::Fr; pub(crate) use converter::{load_groth16_proof_from_bytes, load_groth16_verifying_key_from_bytes}; -use sha2::{Digest, Sha256}; pub(crate) use verify::*; use error::Groth16Error; -use crate::{bn254_public_values, decode_sp1_vkey_hash, error::Error}; +use crate::{decode_sp1_vkey_hash, error::Error, hash_public_inputs}; + +use alloc::vec::Vec; +use sha2::{Digest, Sha256}; + +#[cfg(feature = "ark")] +pub mod ark_converter; /// A verifier for Groth16 zero-knowledge proofs. #[derive(Debug)] pub struct Groth16Verifier; impl Groth16Verifier { - /// Verifies a Groth16 proof. + /// Verifies an SP1 Groth16 proof, as generated by the SP1 SDK. /// /// # Arguments /// /// * `proof` - The proof bytes. 
/// * `public_inputs` - The SP1 public inputs. - /// * `sp1_vkey_hash` - The SP1 vkey hash. - /// This is generated in the following manner: + /// * `sp1_vkey_hash` - The SP1 vkey hash. This is generated in the following manner: /// /// ```ignore /// use sp1_sdk::ProverClient; @@ -29,9 +34,9 @@ impl Groth16Verifier { /// let (pk, vk) = client.setup(ELF); /// let sp1_vkey_hash = vk.bytes32(); /// ``` - /// * `groth16_vk` - The Groth16 verifying key bytes. - /// Usually this will be the [`static@crate::GROTH16_VK_BYTES`] constant, which is the Groth16 - /// verifying key for the current SP1 version. + /// * `groth16_vk` - The Groth16 verifying key bytes. Usually this will be the + /// [`static@crate::GROTH16_VK_BYTES`] constant, which is the Groth16 verifying key for the + /// current SP1 version. /// /// # Returns /// @@ -47,8 +52,8 @@ impl Groth16Verifier { .try_into() .map_err(|_| Groth16Error::GeneralError(Error::InvalidData))?; - // Check to make sure that this proof was generated by the groth16 proving key corresponding to - // the given groth16_vk. + // Check to make sure that this proof was generated by the groth16 proving key corresponding + // to the given groth16_vk. // // SP1 prepends the raw Groth16 proof with the first 4 bytes of the groth16 vkey to // facilitate this check. @@ -57,11 +62,45 @@ impl Groth16Verifier { } let sp1_vkey_hash = decode_sp1_vkey_hash(sp1_vkey_hash)?; - let public_inputs = bn254_public_values(&sp1_vkey_hash, sp1_public_inputs); - let proof = load_groth16_proof_from_bytes(&proof[4..])?; - let groth16_vk = load_groth16_verifying_key_from_bytes(groth16_vk)?; + Self::verify_gnark_proof( + &proof[4..], + &[sp1_vkey_hash, hash_public_inputs(sp1_public_inputs)], + groth16_vk, + ) + } + + /// Verifies a Gnark Groth16 proof using raw byte inputs. + /// + /// WARNING: if you're verifying an SP1 proof, you should use [`verify`] instead. + /// This is a lower-level verification method that works directly with raw bytes rather than + /// the SP1 SDK's data structures. + /// + /// # Arguments + /// + /// * `proof` - The raw Groth16 proof bytes (without the 4-byte vkey hash prefix) + /// * `public_inputs` - The public inputs to the circuit + /// * `groth16_vk` - The Groth16 verifying key bytes + /// + /// # Returns + /// + /// A [`Result`] containing unit `()` if the proof is valid, + /// or a [`Groth16Error`] if verification fails. + /// + /// # Note + /// + /// This method expects the raw proof bytes without the 4-byte vkey hash prefix that + /// [`verify`] checks. If you have a complete proof with the prefix, use [`verify`] instead. 
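+    ///
+    /// # Example
+    ///
+    /// A minimal sketch of calling this lower-level entry point for an SP1-generated proof,
+    /// mirroring what [`verify`] does internally. `proof_bytes`, `sp1_public_inputs` and
+    /// `sp1_vkey_hash` are caller-supplied placeholders, and this assumes `decode_sp1_vkey_hash`,
+    /// `hash_public_inputs`, `Groth16Verifier` and `GROTH16_VK_BYTES` are all exported from the
+    /// crate root:
+    ///
+    /// ```ignore
+    /// use sp1_verifier::{Groth16Verifier, GROTH16_VK_BYTES};
+    ///
+    /// let vkey_hash = sp1_verifier::decode_sp1_vkey_hash(sp1_vkey_hash).unwrap();
+    /// let committed_values_digest = sp1_verifier::hash_public_inputs(sp1_public_inputs);
+    /// Groth16Verifier::verify_gnark_proof(
+    ///     &proof_bytes[4..],
+    ///     &[vkey_hash, committed_values_digest],
+    ///     &GROTH16_VK_BYTES,
+    /// )
+    /// .unwrap();
+    /// ```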
+ pub fn verify_gnark_proof( + proof: &[u8], + public_inputs: &[[u8; 32]], + groth16_vk: &[u8], + ) -> Result<(), Groth16Error> { + let proof = load_groth16_proof_from_bytes(proof).unwrap(); + let groth16_vk = load_groth16_verifying_key_from_bytes(groth16_vk).unwrap(); - verify_groth16_raw(&groth16_vk, &proof, &public_inputs) + let public_inputs = + public_inputs.iter().map(|input| Fr::from_slice(input).unwrap()).collect::>(); + verify_groth16_algebraic(&groth16_vk, &proof, &public_inputs) } } diff --git a/crates/verifier/src/groth16/verify.rs b/crates/verifier/src/groth16/verify.rs index 686e62ff61..636a7f66d6 100644 --- a/crates/verifier/src/groth16/verify.rs +++ b/crates/verifier/src/groth16/verify.rs @@ -46,11 +46,11 @@ fn prepare_inputs(vk: Groth16VerifyingKey, public_inputs: &[Fr]) -> Result Result<(), PlonkError> { + let plonk_vk = load_plonk_verifying_key_from_bytes(plonk_vk).unwrap(); + let proof = load_plonk_proof_from_bytes(proof, plonk_vk.qcp.len()).unwrap(); - verify_plonk_raw(&plonk_vk, &proof, &public_inputs) + let public_inputs = + public_inputs.iter().map(|input| Fr::from_slice(input).unwrap()).collect::>(); + verify_plonk_algebraic(&plonk_vk, &proof, &public_inputs) } } diff --git a/crates/verifier/src/plonk/verify.rs b/crates/verifier/src/plonk/verify.rs index 4cfbcc884f..3ba28282a9 100644 --- a/crates/verifier/src/plonk/verify.rs +++ b/crates/verifier/src/plonk/verify.rs @@ -33,7 +33,7 @@ pub(crate) struct PlonkVerifyingKey { pub(crate) commitment_constraint_indexes: Vec, } -/// Verifies a PLONK proof +/// Verifies a PLONK proof using algebraic inputs. /// /// # Arguments /// @@ -44,7 +44,7 @@ pub(crate) struct PlonkVerifyingKey { /// # Returns /// /// * `Result` - Returns true if the proof is valid, or an error if verification fails -pub(crate) fn verify_plonk_raw( +pub(crate) fn verify_plonk_algebraic( vk: &PlonkVerifyingKey, proof: &PlonkProof, public_inputs: &[Fr], @@ -54,7 +54,8 @@ pub(crate) fn verify_plonk_raw( return Err(PlonkError::Bsb22CommitmentMismatch); } - // Check if the number of public inputs matches the number of public variables in the verifying key + // Check if the number of public inputs matches the number of public variables in the verifying + // key if public_inputs.len() != vk.nb_public_variables { return Err(PlonkError::InvalidWitness); } @@ -266,8 +267,8 @@ pub(crate) fn verify_plonk_raw( scalars.push(zeta_n_plus_two_square_zh); // Compute the linearized polynomial digest: - // α²*L₁(ζ)*[Z] + _s1*[s3]+_s2*[Z] + l(ζ)*[Ql] + l(ζ)r(ζ)*[Qm] + r(ζ)*[Qr] + o(ζ)*[Qo] + [Qk] + ∑ᵢQcp_(ζ)[Pi_i] - - // Z_{H}(ζ)*(([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾*[H₂]) + // α²*L₁(ζ)*[Z] + _s1*[s3]+_s2*[Z] + l(ζ)*[Ql] + l(ζ)r(ζ)*[Qm] + r(ζ)*[Qr] + o(ζ)*[Qo] + [Qk] + + // ∑ᵢQcp_(ζ)[Pi_i] - Z_{H}(ζ)*(([H₀] + ζᵐ⁺²*[H₁] + ζ²⁽ᵐ⁺²⁾*[H₂]) let linearized_polynomial_digest = AffineG1::msm(&points, &scalars); // Prepare digests for folding diff --git a/crates/verifier/src/tests.rs b/crates/verifier/src/tests.rs index e99d71ebd9..c6d9a853c6 100644 --- a/crates/verifier/src/tests.rs +++ b/crates/verifier/src/tests.rs @@ -1,7 +1,5 @@ use sp1_sdk::{install::try_install_circuit_artifacts, SP1ProofWithPublicValues}; -extern crate std; - #[test] fn test_verify_groth16() { // Location of the serialized SP1ProofWithPublicValues. See README.md for more information. 
@@ -50,3 +48,35 @@ fn test_vkeys() { let s3_vkey_bytes = std::fs::read(s3_vkey_path).unwrap(); assert_eq!(s3_vkey_bytes, *crate::PLONK_VK_BYTES); } + +#[test] +#[cfg(feature = "ark")] +fn test_ark_groth16() { + use ark_bn254::Bn254; + use ark_groth16::{r1cs_to_qap::LibsnarkReduction, Groth16}; + + use crate::{decode_sp1_vkey_hash, groth16::ark_converter::*, hash_public_inputs}; + + // Location of the serialized SP1ProofWithPublicValues. See README.md for more information. + let proof_file = "test_binaries/fibonacci-groth16.bin"; + + // Load the saved proof and extract the proof and public inputs. + let sp1_proof_with_public_values = SP1ProofWithPublicValues::load(proof_file).unwrap(); + + let proof = sp1_proof_with_public_values.bytes(); + let public_inputs = sp1_proof_with_public_values.public_values.to_vec(); + + // This vkey hash was derived by calling `vk.bytes32()` on the verifying key. + let vkey_hash = "0x00e60860c07bfc6e4c480286c0ddbb879674eb47f84b4ef041cf858b17aa0ed1"; + + let proof = load_ark_proof_from_bytes(&proof[4..]).unwrap(); + let vkey = load_ark_groth16_verifying_key_from_bytes(&crate::GROTH16_VK_BYTES).unwrap(); + + let public_inputs = load_ark_public_inputs_from_bytes( + &decode_sp1_vkey_hash(vkey_hash).unwrap(), + &hash_public_inputs(&public_inputs), + ); + + Groth16::::verify_proof(&vkey.into(), &proof, &public_inputs) + .unwrap(); +} diff --git a/crates/zkvm/entrypoint/src/syscalls/mod.rs b/crates/zkvm/entrypoint/src/syscalls/mod.rs index a25434370e..cc51f4d8ee 100644 --- a/crates/zkvm/entrypoint/src/syscalls/mod.rs +++ b/crates/zkvm/entrypoint/src/syscalls/mod.rs @@ -12,6 +12,7 @@ mod secp256r1; mod sha_compress; mod sha_extend; mod sys; +mod u256x2048_mul; mod uint256_mul; mod unconstrained; #[cfg(feature = "verify")] @@ -31,6 +32,7 @@ pub use secp256r1::*; pub use sha_compress::*; pub use sha_extend::*; pub use sys::*; +pub use u256x2048_mul::*; pub use uint256_mul::*; pub use unconstrained::*; #[cfg(feature = "verify")] @@ -84,6 +86,9 @@ pub const SECP256R1_DOUBLE: u32 = 0x00_00_01_2D; /// Executes `SECP256R1_DECOMPRESS`. pub const SECP256R1_DECOMPRESS: u32 = 0x00_00_01_2E; +/// Executes `U256XU2048_MUL`. +pub const U256XU2048_MUL: u32 = 0x00_01_01_2F; + /// Executes `BN254_ADD`. pub const BN254_ADD: u32 = 0x00_01_01_0E; diff --git a/crates/zkvm/entrypoint/src/syscalls/sys.rs b/crates/zkvm/entrypoint/src/syscalls/sys.rs index 520d58ef90..48bd40713a 100644 --- a/crates/zkvm/entrypoint/src/syscalls/sys.rs +++ b/crates/zkvm/entrypoint/src/syscalls/sys.rs @@ -26,7 +26,7 @@ static SYS_RAND_WARNING: std::sync::Once = std::sync::Once::new(); #[no_mangle] pub unsafe extern "C" fn sys_rand(recv_buf: *mut u8, words: usize) { SYS_RAND_WARNING.call_once(|| { - println!("WARNING: Using insecure random number generator."); + eprintln!("WARNING: Using insecure random number generator."); }); let mut rng = RNG.lock().unwrap(); for i in 0..words { diff --git a/crates/zkvm/entrypoint/src/syscalls/u256x2048_mul.rs b/crates/zkvm/entrypoint/src/syscalls/u256x2048_mul.rs new file mode 100644 index 0000000000..ec75362f29 --- /dev/null +++ b/crates/zkvm/entrypoint/src/syscalls/u256x2048_mul.rs @@ -0,0 +1,29 @@ +#[cfg(target_os = "zkvm")] +use core::arch::asm; + +/// Multiplication operation between a 256-bit and a 2048-bit unsigned integer. +/// +/// The low 2048-bit result is written to the `lo` pointer, and the high 256-bit overflow is written to the `hi` pointer. 
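+///
+/// A minimal usage sketch, mirroring the `u256x2048-mul` test program added in this patch
+/// (operands are passed as little-endian byte arrays reinterpreted as `u32` limb arrays):
+///
+/// ```ignore
+/// let a: [u8; 32] = [0xff; 32]; // 256-bit operand
+/// let b: [u8; 256] = [0xff; 256]; // 2048-bit operand
+/// let mut lo = [0u32; 64]; // receives the low 2048 bits of the product
+/// let mut hi = [0u32; 8]; // receives the high 256 bits of the product
+/// syscall_u256x2048_mul(
+///     a.as_ptr() as *const [u32; 8],
+///     b.as_ptr() as *const [u32; 64],
+///     lo.as_mut_ptr() as *mut [u32; 64],
+///     hi.as_mut_ptr() as *mut [u32; 8],
+/// );
+/// ```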
+#[allow(unused_variables)] +#[no_mangle] +pub extern "C" fn syscall_u256x2048_mul( + a: *const [u32; 8], + b: *const [u32; 64], + lo: *mut [u32; 64], + hi: *mut [u32; 8], +) { + #[cfg(target_os = "zkvm")] + unsafe { + asm!( + "ecall", + in("t0") crate::syscalls::U256XU2048_MUL, + in("a0") a, + in("a1") b, + in("a2") lo, + in("a3") hi, + ); + } + + #[cfg(not(target_os = "zkvm"))] + unreachable!() +} diff --git a/crates/zkvm/lib/Cargo.toml b/crates/zkvm/lib/Cargo.toml index 368c52faf1..d54765f4c3 100644 --- a/crates/zkvm/lib/Cargo.toml +++ b/crates/zkvm/lib/Cargo.toml @@ -11,7 +11,7 @@ categories = { workspace = true } [dependencies] bincode = "1.3.3" -serde = { version = "1.0.204", features = ["derive"] } +serde = { workspace = true, features = ["derive"] } [features] default = [] diff --git a/crates/zkvm/lib/src/lib.rs b/crates/zkvm/lib/src/lib.rs index ffa6bb64c6..e43c4d0d68 100644 --- a/crates/zkvm/lib/src/lib.rs +++ b/crates/zkvm/lib/src/lib.rs @@ -72,6 +72,13 @@ extern "C" { /// Executes an uint256 multiplication on the given inputs. pub fn syscall_uint256_mulmod(x: *mut [u32; 8], y: *const [u32; 8]); + /// Executes a 256-bit by 2048-bit multiplication on the given inputs. + pub fn syscall_u256x2048_mul( + x: *const [u32; 8], + y: *const [u32; 64], + lo: *mut [u32; 64], + hi: *mut [u32; 8], + ); /// Enters unconstrained mode. pub fn syscall_enter_unconstrained() -> bool; diff --git a/crates/zkvm/lib/src/utils.rs b/crates/zkvm/lib/src/utils.rs index 94ae422159..77853230b2 100644 --- a/crates/zkvm/lib/src/utils.rs +++ b/crates/zkvm/lib/src/utils.rs @@ -8,7 +8,8 @@ pub trait AffinePoint: Clone + Sized { /// Returns a reference to the limbs. fn limbs_ref(&self) -> &[u32; N]; - /// Returns a mutable reference to the limbs. If the point is the infinity point, this will panic. + /// Returns a mutable reference to the limbs. If the point is the infinity point, this will + /// panic. fn limbs_mut(&mut self) -> &mut [u32; N]; /// Creates a new [`AffinePoint`] from the given x and y coordinates. @@ -48,7 +49,8 @@ pub trait AffinePoint: Clone + Sized { fn add_assign(&mut self, other: &Self); /// Adds the given [`AffinePoint`] to `self`. Can be optionally overridden to use a different - /// implementation of addition in multi-scalar multiplication, which is used in secp256k1 recovery. + /// implementation of addition in multi-scalar multiplication, which is used in secp256k1 + /// recovery. 
fn complete_add_assign(&mut self, other: &Self) { self.add_assign(other); } diff --git a/examples/Cargo.lock b/examples/Cargo.lock index 9acc219708..5edbafa237 100644 --- a/examples/Cargo.lock +++ b/examples/Cargo.lock @@ -219,7 +219,7 @@ dependencies = [ "alloy-sol-types", "serde", "serde_json", - "thiserror", + "thiserror 1.0.68", "tracing", ] @@ -241,7 +241,7 @@ dependencies = [ "async-trait", "auto_impl", "futures-utils-wasm", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -426,7 +426,7 @@ dependencies = [ "auto_impl", "elliptic-curve", "k256", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -442,7 +442,7 @@ dependencies = [ "async-trait", "k256", "rand 0.8.5", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -467,7 +467,7 @@ checksum = "06318f1778e57f36333e850aa71bd1bb5e560c10279e236622faae0470c50412" dependencies = [ "alloy-sol-macro-input", "const-hex", - "heck", + "heck 0.5.0", "indexmap 2.6.0", "proc-macro-error2", "proc-macro2", @@ -485,7 +485,7 @@ checksum = "eaebb9b0ad61a41345a22c9279975c0cdd231b97947b10d7aad1cf0a7181e4a5" dependencies = [ "const-hex", "dunce", - "heck", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.87", @@ -1053,7 +1053,7 @@ version = "1.1.0" dependencies = [ "rand 0.8.5", "sp1-zkvm", - "substrate-bn 0.6.0 (git+https://github.com/sp1-patches/bn?tag=substrate_bn-v0.6.0-patch-v1)", + "substrate-bn", ] [[package]] @@ -1155,7 +1155,26 @@ dependencies = [ "semver 1.0.23", "serde", "serde_json", - "thiserror", + "thiserror 1.0.68", +] + +[[package]] +name = "cbindgen" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fce8dd7fcfcbf3a0a87d8f515194b49d6135acab73e18bd380d1d93bb1a15eb" +dependencies = [ + "clap", + "heck 0.4.1", + "indexmap 2.6.0", + "log", + "proc-macro2", + "quote", + "serde", + "serde_json", + "syn 2.0.87", + "tempfile", + "toml", ] [[package]] @@ -1270,7 +1289,7 @@ version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.87", @@ -1377,6 +1396,15 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" +[[package]] +name = "crossbeam-channel" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-deque" version = "0.8.5" @@ -1834,7 +1862,7 @@ dependencies = [ "rand_core 0.6.4", "serde", "sha2 0.9.9", - "thiserror", + "thiserror 1.0.68", "zeroize", ] @@ -2368,6 +2396,12 @@ dependencies = [ "serde", ] +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + [[package]] name = "heck" version = "0.5.0" @@ -3815,6 +3849,12 @@ dependencies = [ "sp1-sdk", ] +[[package]] +name = "pathdiff" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d61c5ce1153ab5b689d0c074c4e7fc613e942dfb7dd9eea5ab202d2ad91fe361" + [[package]] name = "pem-rfc7468" version = "0.3.1" @@ -3846,7 +3886,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" dependencies = [ "memchr", - 
"thiserror", + "thiserror 1.0.68", "ucd-trie", ] @@ -4106,7 +4146,7 @@ dependencies = [ "rustc-hash 2.0.0", "rustls", "socket2", - "thiserror", + "thiserror 1.0.68", "tokio", "tracing", ] @@ -4123,7 +4163,7 @@ dependencies = [ "rustc-hash 2.0.0", "rustls", "slab", - "thiserror", + "thiserror 1.0.68", "tinyvec", "tracing", ] @@ -4289,7 +4329,7 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom", "libredox", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -4407,7 +4447,7 @@ dependencies = [ "http", "reqwest", "serde", - "thiserror", + "thiserror 1.0.68", "tower-service", ] @@ -4420,7 +4460,7 @@ dependencies = [ "reth-execution-errors", "reth-primitives", "reth-storage-errors", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -4514,7 +4554,7 @@ dependencies = [ "reth-execution-errors", "reth-fs-util", "reth-storage-errors", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -4598,7 +4638,7 @@ dependencies = [ "reth-revm", "revm", "revm-primitives", - "thiserror", + "thiserror 1.0.68", "tracing", ] @@ -4637,7 +4677,7 @@ source = "git+https://github.com/sp1-patches/reth?tag=rsp-20240830#260c7ed2c9374 dependencies = [ "serde", "serde_json", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -4649,7 +4689,7 @@ dependencies = [ "alloy-rlp", "enr", "serde_with", - "thiserror", + "thiserror 1.0.68", "url", ] @@ -4706,7 +4746,7 @@ dependencies = [ "reth-trie-common", "revm-primitives", "serde", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -4741,7 +4781,7 @@ dependencies = [ "modular-bitfield", "reth-codecs", "serde", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -4891,7 +4931,7 @@ dependencies = [ "ripemd", "secp256k1", "sha2 0.10.8", - "substrate-bn 0.6.0 (git+https://github.com/sp1-patches/bn?tag=substrate_bn-v0.6.0-patch-v1)", + "substrate-bn", ] [[package]] @@ -5090,7 +5130,7 @@ dependencies = [ "rlp", "rsp-primitives", "serde", - "thiserror", + "thiserror 1.0.68", ] [[package]] @@ -5478,6 +5518,15 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -5679,7 +5728,7 @@ dependencies = [ [[package]] name = "sp1-build" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "anyhow", "cargo_metadata", @@ -5690,7 +5739,7 @@ dependencies = [ [[package]] name = "sp1-core-executor" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "bincode", "bytemuck", @@ -5713,7 +5762,7 @@ dependencies = [ "sp1-stark", "strum", "strum_macros", - "thiserror", + "thiserror 1.0.68", "tiny-keccak", "tracing", "typenum", @@ -5722,12 +5771,15 @@ dependencies = [ [[package]] name = "sp1-core-machine" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "bincode", + "cbindgen", + "cc", "cfg-if", "elliptic-curve", "generic-array 1.1.0", + "glob", "hashbrown 0.14.5", "hex", "itertools 0.13.0", @@ -5745,7 +5797,10 @@ dependencies = [ "p3-maybe-rayon", "p3-uni-stark", "p3-util", + "pathdiff", "rand 0.8.5", + "rayon", + "rayon-scan", "serde", "size", "snowbridge-amcl", @@ -5758,7 +5813,7 @@ dependencies = [ "strum", "strum_macros", "tempfile", - "thiserror", + "thiserror 1.0.68", "tracing", "tracing-forest", "tracing-subscriber", @@ -5768,7 +5823,7 @@ dependencies = [ [[package]] name = "sp1-cuda" -version = "3.0.0" +version = "4.0.0-rc.1" 
dependencies = [ "bincode", "ctrlc", @@ -5783,7 +5838,7 @@ dependencies = [ [[package]] name = "sp1-curves" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "cfg-if", "curve25519-dalek", @@ -5804,7 +5859,7 @@ dependencies = [ [[package]] name = "sp1-derive" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "quote", "syn 1.0.109", @@ -5826,7 +5881,9 @@ dependencies = [ [[package]] name = "sp1-lib" -version = "3.0.0" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14deb700469a37ec075bcf88dac3815b026dd9c4b9cb175980826f1fbb2e4e80" dependencies = [ "bincode", "serde", @@ -5834,9 +5891,7 @@ dependencies = [ [[package]] name = "sp1-lib" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14deb700469a37ec075bcf88dac3815b026dd9c4b9cb175980826f1fbb2e4e80" +version = "4.0.0-rc.1" dependencies = [ "bincode", "serde", @@ -5844,7 +5899,7 @@ dependencies = [ [[package]] name = "sp1-primitives" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "bincode", "hex", @@ -5860,7 +5915,7 @@ dependencies = [ [[package]] name = "sp1-prover" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "anyhow", "bincode", @@ -5888,14 +5943,15 @@ dependencies = [ "sp1-recursion-core", "sp1-recursion-gnark-ffi", "sp1-stark", - "thiserror", + "thiserror 1.0.68", "tracing", + "tracing-appender", "tracing-subscriber", ] [[package]] name = "sp1-recursion-circuit" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "hashbrown 0.14.5", "itertools 0.13.0", @@ -5927,7 +5983,7 @@ dependencies = [ [[package]] name = "sp1-recursion-compiler" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "backtrace", "itertools 0.13.0", @@ -5947,12 +6003,16 @@ dependencies = [ [[package]] name = "sp1-recursion-core" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "backtrace", + "cbindgen", + "cc", "ff 0.13.0", + "glob", "hashbrown 0.14.5", "itertools 0.13.0", + "num_cpus", "p3-air", "p3-baby-bear", "p3-bn254-fr", @@ -5967,13 +6027,14 @@ dependencies = [ "p3-poseidon2", "p3-symmetric", "p3-util", + "pathdiff", "serde", "sp1-core-machine", "sp1-derive", "sp1-primitives", "sp1-stark", "static_assertions", - "thiserror", + "thiserror 1.0.68", "tracing", "vec_map", "zkhash", @@ -5981,7 +6042,7 @@ dependencies = [ [[package]] name = "sp1-recursion-derive" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "quote", "syn 1.0.109", @@ -5989,7 +6050,7 @@ dependencies = [ [[package]] name = "sp1-recursion-gnark-ffi" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "anyhow", "bincode", @@ -6013,7 +6074,7 @@ dependencies = [ [[package]] name = "sp1-sdk" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "alloy-signer", "alloy-signer-local", @@ -6036,6 +6097,7 @@ dependencies = [ "reqwest", "reqwest-middleware", "serde", + "sp1-build", "sp1-core-executor", "sp1-core-machine", "sp1-cuda", @@ -6045,7 +6107,7 @@ dependencies = [ "strum", "strum_macros", "tempfile", - "thiserror", + "thiserror 1.0.68", "tokio", "tracing", "twirp-rs", @@ -6054,11 +6116,12 @@ dependencies = [ [[package]] name = "sp1-stark" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "arrayref", "hashbrown 0.14.5", "itertools 0.13.0", + "num-bigint 0.4.6", "num-traits", "p3-air", "p3-baby-bear", @@ -6086,13 +6149,13 @@ dependencies = [ [[package]] name = "sp1-verifier" -version = "3.0.0" +version = "4.0.0-rc.1" dependencies = [ "hex", "lazy_static", "sha2 0.10.8", - "substrate-bn 0.6.0 
(git+https://github.com/sp1-patches/bn?tag=substrate_bn-v0.6.0-patch-v2)", - "thiserror-no-std", + "substrate-bn-succinct", + "thiserror 2.0.3", ] [[package]] @@ -6107,7 +6170,7 @@ dependencies = [ "p3-field", "rand 0.8.5", "sha2 0.10.8", - "sp1-lib 3.0.0", + "sp1-lib 4.0.0-rc.1", "sp1-primitives", ] @@ -6235,7 +6298,7 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", "rustversion", @@ -6258,9 +6321,10 @@ dependencies = [ ] [[package]] -name = "substrate-bn" +name = "substrate-bn-succinct" version = "0.6.0" -source = "git+https://github.com/sp1-patches/bn?tag=substrate_bn-v0.6.0-patch-v2#8ef05d3969312eca34fa9f1f566a469022badda6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "114c855c26ad0594c830129cb868552fb41415603a6133276c2ecdd9e5ef4255" dependencies = [ "bytemuck", "byteorder", @@ -6486,7 +6550,16 @@ version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02dd99dc800bbb97186339685293e1cc5d9df1f8fae2d0aecd9ff1c77efea892" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.68", +] + +[[package]] +name = "thiserror" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +dependencies = [ + "thiserror-impl 2.0.3", ] [[package]] @@ -6500,6 +6573,17 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "thiserror-impl" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "thiserror-impl-no-std" version = "2.0.2" @@ -6659,11 +6743,26 @@ dependencies = [ "tokio", ] +[[package]] +name = "toml" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.22.22", +] + [[package]] name = "toml_datetime" version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +dependencies = [ + "serde", +] [[package]] name = "toml_edit" @@ -6683,6 +6782,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap 2.6.0", + "serde", + "serde_spanned", "toml_datetime", "winnow 0.6.20", ] @@ -6727,6 +6828,18 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-appender" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" +dependencies = [ + "crossbeam-channel", + "thiserror 1.0.68", + "time", + "tracing-subscriber", +] + [[package]] name = "tracing-attributes" version = "0.1.27" @@ -6756,7 +6869,7 @@ checksum = "ee40835db14ddd1e3ba414292272eddde9dad04d3d4b65509656414d1c42592f" dependencies = [ "ansi_term", "smallvec", - "thiserror", + "thiserror 1.0.68", "tracing", "tracing-subscriber", ] @@ -6812,7 +6925,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror", + "thiserror 1.0.68", "tokio", "tower", "url", diff --git 
a/examples/Cargo.toml b/examples/Cargo.toml index f5d06c2216..d10ee9741e 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -60,6 +60,11 @@ sp1-sdk = { path = "../crates/sdk" } sp1-lib = { path = "../crates/zkvm/lib", default-features = false } sp1-zkvm = { path = "../crates/zkvm/entrypoint", default-features = false } +# misc +serde = "1.0.204" +serde_json = "1.0.132" +tracing = "0.1.40" + [patch.crates-io] curve25519-dalek = { git = "https://github.com/sp1-patches/curve25519-dalek", tag = "curve25519_dalek-v4.1.3-patch-v1" } curve25519-dalek-ng = { git = "https://github.com/sp1-patches/curve25519-dalek-ng", tag = "curve25519_dalek_ng-v4.1.1-patch-v1" } diff --git a/examples/aggregation/program/elf/riscv32im-succinct-zkvm-elf b/examples/aggregation/program/elf/riscv32im-succinct-zkvm-elf deleted file mode 100755 index a2da621a86a4b5529e8cccf665de40aa77819cf0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 141956 [base85-encoded binary patch data for the deleted pre-built ELF omitted]
z$V6n2-19Q}1LQEe(TCgpp_Q+-$G4#m@_mNz@jd+ZQ<1}deV+n%baBT6dOBmm;nORH zPs4tn5%)u*m3xZg0caz0EM+yXMyC`z*Q#UfZF^ro8TL~LY>1H?wt3_5b2IkeSb(|W zmDjwo-sbLgmv?KiYh&Yed82rPF2_e2KQi;k(agxxe#Y42+QvmYQ`?$2?@b$QzES%n ze)A>hLPw7n@JA0>4@c^Dc+JT}^*T7T3huwXFu*oE;xKZdd*+c;OR}=iK@?Q^W8mU6EcE4pdIwJ=QMpc>6baf!Pxvv9oc-C z^`*>r=VEtZEh9V{^R?a_@Yp=um&jOW zEFtT(oXPE8&Twy@Z9`z5*Z!tWOId$pH9xWj8*Zwm7vpizl;4`qPqc?BD7cG@< zh@qct!PZaN+SZS}6PvrxR__G}t^K<^UCUARtc%bEccz$Irf@zsg?Xiwel%#OZBH;| z9Xb?U-1YhUXTFQUcMxp)Cbb2hw&tNy_LRi-L3c%6^t$P^YY_U^UTZjd4folp(5zC% zZPxG*dx(*WV|-5<9VW2Jv%TN;-^Etl(^xoye1pMnZ_mcAvwFuRF%LfZImlGA`O@ue zM|`p7k1|8}dWQ3lc=%qBif8!Dc*dVfLO;qdLUYF$;mkp1Z0;cb&NM=|{PmXj_vU)y z-#u9p&TKQ9FCV00KOAVp{*+;umuIq1eyly7@v_nU+kvY2#SD07PkT=0yX_CCV@8g8 z&EUHt?QQQp^S4kS*B`n0L2sy^kNv&j#`b>HZPoTm!hvI+(5!w&EKux^^LN;Hq?qqp zux=rq|DE>j!{OIet1b<CgEc+^Qd9# zuJj}F*IDbnB{ox;0}|4UE%-CXc1}0zU;WLWyIP@fsftIp%i4c*efYiY$xA}%#s2LgC)T!Hy8Z1VzR>N=>Ffi`n325{ zO#@5o;v2&S7IRO&8TY?Y9J%$?D}_aiLY8^##bIK4zGHjKJ15_ z?~6^;Z?hL>79cqw-4d+pA-b^3lQd(Tea z`|rROJ=1ugX%BtB)z&$E!b{8{w|>I@F8G!iveq@_E_E^hPOG%7U9Z7tp^eC#?u^?*uwPW{5D_W`tG;Rebf{;>Z%eFZO!2 z{H@*cN%ZlUku2O9)-Z~LF`eA0I|512^biuvF zZDszyGiPfU8@VSk|Jn3dzhSn0Z)NF<_^PAqxnk?uH^Yczze67$Mqbp8k~ORO;3dep zSz2Ea9VXOwnCLgiP(NoUv8T`6reb{SPQG^=S>g9GR!7OWiX4Q;{rnwJO7}ne28Fa0 zzvZ=&)!RJb;5KDWrj4u6I~E>+R)6ufn6Gw}jD1J9 zNf+J5!7UH@vP1{+IP-X_;AAXAOYMi}=<<3`$F7I}7db=Fe|4Ssn|X&h4EtO>-9?9@xIy~)R3 zQ#g7y^PnL*O2@p;T*~@D>SC{BWF7A-ed_bq5N+`|r~KaLW?DV6GS2uphk~-QBGxYaXpXm584~=bA_E|CYL+ zjV{g_z@f)Z(s@H#$8Sot!&;m&YBldk8Es9zPs}Zsur>?G{xh_F9G&7>@T=Gdzl42o z>f+d$a*+9a=M}niSEpTCPbqA*gl-E(cOhQFnxgd%shh5+v!-y`{j^=rVretq;xt5` zlsjVex`I4>u?~N-raw9~{zCp5IYX9pxpOvzec85&A`93{72D-@U;D|>tw;X4y-#Xt z?A9$C>B&SA5jvan|Lf@B_ToGEuM3?0dsY|0p~g>#=Q%9Xj&2Onm+OMwZ;`)AvB!&VGWIz4R65^)s=v zMfetpn-+Pl(>LQ2>e!`why4vD@s8}l6MK$Ff?K`um6XGLh|Hob zQr}kXry%-YeC6FnY%O8OhkX!V`HC`I*{AOHzhe_zXLwxM*mD+p2%I|I7`kZs8 zbm7aD z7t*0*FT*UQp7;WY&6Rb7C&yD^Jm6{2DcK@S!;pgd~{@`FH6RM%-6V^zQ~H)E&DC(fo@?hbgR1$_T+A{AqkDNEx51msN^Mo zKaf_A%$j1DTi0L*N7gcz#C+^IU_)u%l46*9@YC2zA3Qh(o?XKo($C3Q)^kWsI&y3z zHmK2mX8rtu*v8F}wsjy=(f!bO=7}tnF!%l!doJ0z13Tqk?0Vm|Tkx`ObLPhFZGC<3 zo5N%sxlFJ5^q%bi-+PQ*_8NClAGcj^(=JQ53;7?g^DEumOk3>s?;+n|o$c=Ldnb`L zZk;;l(%*M55x-sUAyPl53{5-yFQjgIjo4zZ|3d5yF-An!?${5sd0_@R9X?Xr<0pKH zZNx9WQXPAi_t96U-Lpg%vS$g;!2i(4(I-xg-?@L8K}UgisJBgzTFYuGMx z&V|=Tl4i8Qm@;Hu^q3+KTz+GYjiHl>zl!h;<$IflE|YoSG<;KR+U?12ZKhs!JA`lg z+A@W+cap#4Cw^d(r`Y^+y!f|c%g(W;yJP#b@{c}n{n$GALwxLWtikrV9p=gAQhb7B zeI$Lrc$fTzr{nC4qRX;(x1KuK`>}F2jJrps_hU;L%kF-x<5ST&M(q7q$yfNCarcSq zchMQAb3fK@+h)z*F7GuDDZ%g3=-@#|7jEsc27OukCzR3FB>ixg?!&J7p&@qEME&r2 ztsnR-t*;?RM2AF2dR*J(&|R=GqaSJ;_s+{5-Q~-a^P4YTPVtm4Urx)5U%Z^17dhqV zvjyl{L3kqwZ={HCNAnie;YV4c!gG8-)Dt@5^DsvJ<`!A|G0(`H>}M~;w*S9{&XW$` z4Al4lbGhGe&L^l&|9XFte|?6}D`yk1C!1~f*N61kgt>qJhw^aFHzefI)eru?Eq(2E zYfI<(2B|yq?>#T+b;>+=xtEMZY@{!v_v*IgoIRMih_wl89sEr>%ZqQff6^=VngE{k z8&3MBBGPBz4~x9=@eLDXZX>kH7Fsc0gjUcAp5_}e%YyK95T4d&DxAHc7j?XUnr4*s zMf3PPnjP3;+eZBfH0z|-<=U?Vxzt^oMAz9aGI6DIPK322XSDWcUh^Yo0_d#Ao^6jW zf3P@mXhz@o${EF>*M`YGV0~jp3g`oc9{nu#hNH!yAo~D8_6!~ho`~(uR$*Ts_8j`b zFWKA$7C0IA%+zfb|Fh6h(Sx7H#*dtId}ZUSUSs~3bHC`D>wPD;BcE9_XBGO}>64?G z+v!8mU&TI$tTVN(z%yHgSM@82eN?2vH$S67S2e3}{WI7Ko-xAa%bw6xrV;i%pdvps z(<7;Y^w4VT3IW#N0hJzO&mwa0T{&xmUmj!NwGpb#ANVBH?-KOyLwwgGsG1+}sL+ca zwTG^`S;bzCsOFzuq~ejARs5s43O7eo?3t78v6t(W`P1bp_NQtUZ+qDgTXe|gbp{^`@y?^i0`EcL4INEgy+p0U#`_h?+(-EYY~Odo36@?G3S8i_w~B8{}J z3LnLe@6C*D7{OX0+lcK5q{o9r3h}}t-<>)T4UW}b|eD{Lb z65)Z+%gc>WU;2i%MC_yEnaCC+od2>Bez8@BzA{jStC3GFLHwIDyv;8%SH9SXb_~UK z(`vNcUwtC-&O;fIvVJ|~`v{Tr4Ap#@0eETP%#;$gnP@`Tqbj)#5ZgL!3HkH1z 
zU1ZeC(m%-lma|{~o{TFe{EZmxv2|Jd48Bh1FMUJWnXq58Ued&T$`e`4UOKea{op?w z5Lp#j41HTTzr#3=VaMjaD39b1ZBs-R#{9!F^lzZA#X1$)ez+%lWW5|)a9eB!-~aAN zQ~F0c&7K2x8vp;WcSD-u^ia?mV5hnGK#cPgVh82y=vDGPZP78d&%~!><@~j>HyGPC zpK~$%&g`QK7a)5B$9BqhxMjUtScuo<1TEc{xZwS8OPIYIgURw=*mUG|v*fqn69 z@91xg>$6A58JBj2{=c?kuY*RuL2>b)ap)H>d`X3huw5U+Ubi-o8u9oCMs9d@VC;3j z5#PR7h4%K-d=)vkmtV$HHsk5{tJpJsT8X@i9OQQu@?`_E=upsz{r-|}aEwv*mt(&l z-VH8W#a^TQ?a!*%v;4lxdY(0XSX**&RD$^>>uuTjQSjF553Hon~jWH z5WweaXLgK^iFn`13KzX&r(GY&ip`e#AF}JuzIx=~p``l1<OVKlD2F zA4@(@_rfoU-+;&$c;e1pP8;5J+VCMh&d2%tLnHQjUlo6iHf7J!ZNoOK;u~hG_?Ftik>bc@CbDQoaofPQ6X9+9?05svbMV=Y{l1q9#f}-` zV=eR(TQc(VM3}vNpi!O(`>7{tmys&lC5*?m^ZSgHMZOy_z0> z6`TIbJ%qIu3tz|{d1y8Enu2V-eMH&H?|qQ_?-a*9R~5JI^qh>h^eG7+f(KWU4=?^UC$w!y z>iU}0^)+4B@Iq{eL%6fk@?l$TWsP&hmJPzMvpC1S`ZZ73-?t=^9rT7?^C6#+b@Le$ z;KD({`AcGV$-ah-i$}Mq@T-zPX`w}|Z$R&>#1|Zc-g_9AV*48Ow}_uS;4iW12#t}) zy1mf+nDlX|-!b^HpAozBsIfh$nsmFRk3zJYKGN-m4tD>(qx-k5*reSvie=nH=u_!e zx83|6X1$>8+9Bk+-j~$-S29OnzqWOj-OWRHq5CZpJ*VTojn7Y~b~R%+G_m(3rW=`Q zDKGiT`*9CT&1jfkR9aV8Q@6Ohsnt%_EYCJ{G1!&2=$k(!F= zrIpnc*YRFevnXm^aG}*u*BHI5raEfzB&Hccnu=(Bw62o!ZYXc4tf{u{w!aHJyl6~S<-9TV zb&)ZZHKXh5)!`n4RUvhq$!qbJS6k(Eb>&N~n)w#3SkSQ0Dw5cX$}6iS{?v@Y3O7_$ zz{@nGHdFR)-h;f}!IrXki+ShpF5z9zTUo6M=H@rjiY}>*MjE0OmQriUFHBPBsVR@Y!#|fHVzQ7Hk zE{Ww!zfbJwgJ?l-GQFOE*t zq`MfXW>J^?c2bWev`-#`cOm&Jucad8RrIS>c6qII^MnbF)z{aR*N!-+%%aoFtuo<{ zG7CC3M!$GhN2?p^me$r(R>KAKVg1s2DW-nR!t$yHI`fa2gVhx3a3*grUKc)xzo+w( zE?QSzUS(;fo?ww(w2n$PR@X(#BMZytRYfh~XE)vXq`@Tof440*8>}x4=ATO?zr?%@$ zm0bx#H85oBN9B&r8#_8r7rmgarm=S3(vG54S+BvWk}`uHrF!%0!lnbCLSv^c3o0AN zL>5LP*VQyO)Q_pHs;RziX?WDw@}fvVcwVF;KRUlCJTG@l?);*Pyxj1Z+M4=kWkpT( zSX~Rmq;9z3F}*&2AefSxmfj;%y-!`fNgej^7V*09ulZZ}--UNL_{$LrR%JExl|L4i z*ITu9(Z!WDjrCPat$ERCwZ-_XXK*9aSBD0xb4dp@s-cp(p|ZNNAwl*pLM&I;G?2`y zjn>Vtsapg~*F__Zb@i2tqg6|vBE8hN5<4p zc})#9y!T+u+jmjUSl-jo{vYGPFUAK8DO>m;OuvufcWU`kde)wiqY34#an>RK*F2A6#XnZ-G6rHa&)LYcZmg)EN_bGJw zg7a}xAA3F)+RD7_Oi)%mGmSMt|I}NH=qPJGf~`{KhDMDWkxT2XbM}&+V+d*=zara? 
z@hkI8a<|r&S64<*LKe`i{{X*SRI}uQilx;S^Wax5XsoU-hXucKc(J9@DMR$6`4zr#}Zuea3u#FIzr^U(d4QukUai#SE!wNwLtKf$~A`<8<6lWH2P zE2c9BDlTtqutv;Viuz(rjFwka)kLmqSjhZSVGSQ{*{xs{ls8xtQgL@z`}+=7EJ4&k zelz&}kYCtEeax?v%lL^-u<9DCt?Mh%ZOX2kan7nNLm>GUg0E-t+=uMDPGTv=CB zy@=V4`4-(vra4GkQCVMG#Rx37=9N>2y7EYr0<-%KR+o`)7Woh0cLjxsj7+3SIyBjQ zy`>J_WpxRE0{9R#k@^blYJb#~MrDs$Dz&qT^AVPIiziGdue!c`Y5m;td8i>H&KYUx zKcqK>22vM!_ENVzejR#sYD0N zXeNxA(taf_$V;WIRYcI=ey07DZInzz_2p=g*JU4d`$4aX;ccmOz*Lx1h^EV5t{2TS{i0 z@&@5Xn-+6OlS%v$ycgG0SEJh_NUw~}7oJHBsE;;GuBmUB(WyUP7Hzn`rtZ3gkk0re z!rajcq=6G=i{&ZREOnf86Kk%ozNoI|x@h&Z%Gzj2mB@vqrAyF$>Sxt7R#jXiEFyAz zVkPrr0~+C!`p&;Q0*O(&sJ3D0l_bBsx@zep#s@@gh%V~y&BHbO^8t=-6%r%2<<7h7FJ?GppVJkb*BCy3M( znJF4zq`bNs6*)mGw#p;lZe;kQH8n(+P^>ymeipyUG(DXh6(LeTMkm(-eaN7}+Wyg- zJcL)PfaO8SXIARa4kie{H-Hmfr&8!0+Tv3YZ9uE6$IcS9YFM5!2uGe?BbkrbK<{LZ zE4Nq*&SR0%S;Xknb>u&r{5PXJ$k-6R5j`Ri4&G`h1#KT9T*jotS-09!tJlDLTRQt7 zQN|~RC-zMZD-YWeB?>JQDD|sj8G}BAz9iE+^ssE*%h7?mSzJVF>Y{pe5s6k+b*wH{ zkZufh`vdPqyd}B-JsUgSTCPK&#B^H)oLYnKYB>VdwvJd8jde1$3c<9-gar))o1T)$ z^WcLP|JypiqYs@ru55j03|(GcC7nHD@L+9II7%9>#W;m$ocQ)!-#M`^h*k@oqS#6* zu?)eXT4kLwae_6kvZ1~flrK;Ve=|kK<@GEk$3mVwP9|tdaCiA?`oQ7tfjm!f-59LUBx~pFNMJ++ex!a1R z*bf#v`5z%~k>lO!bqU3}6*?HMOZ~BYA{Rm$q|-XOm`vbzKx?z1~?K4ZhI2 zwDcQuuPVK4`plB4scJuYT})n@yZD{9qjMa*@^jWLzd+adrKQSc=!v!?JOz`>qH9rY zRa7QayHDz(4UKiI-z`k1wmwuvUh+UnRmKOAh zeoS@aBD>sMiGL>XDtTp2z&4kcJ2p3*J1#drw;;DLwC@3r_ zDi~jwTbNfkwlG{ct}wr_ps=v8sBnByZc$#**rIUJxT5@`f}+BrqN4HRsp5EwA5Yff zNj9FKqTTf0=e)DQS0u!#P5Ars(tc6=l5X-`OW5&eI(co{Axpjf9`nKBP9EvfTds7g zs*e^8v->L_AtCb6g+~LQ4SZ1zE4k~x?wDoDZH=6XT0)~06Ree_m+?P=cO!qxBfm0M z+}{b7QcpQJ`Gu9#H%E^i{jGTzHmhaFL<~10%JzX1nfDNT7&51|V;_?Ft|ev}{g4U#4x{WB(EVf{ zE(7)_)J2kZ(OQzyuOq(b!EW7888u8|?uv3Jab?V;K(vwkb}a0*6{K_QcJv{Wkl5*2 z9O~b>6)aB}ekaIh1nH;o%C8I0;cuynGPys%YpFYYW$Ig@jjC5aYf!pXHt4S-H=cKH zt~Ih|>w%G;hss6`{IqPeI%yUBu)VC{@5Y}CjMTw}LwfwV@FyANmFlWc;OXkXpJ)8^t@2s#9~}IRmkw?E zhNb@Ujl;$*SF2hTP~0c7qday5OX;SG4wbgoh*3iSv)z%!ouxuu+COg6k&jtwJdTw!A`u1CnYT~#d9Y48Af4-L5aRJV@QfI-)nFufX8!&$7^PJd+?{<$TXn6d6s7&|B5|E zAZU2fQjAgL*XTLhSZsPdDTd$tBSfIK0m;!5^rw1^+(CKXTwtFuA|>5p(MZEA0!c!1 zLeS&cU>ZG)fD~wY_7p4Q864$qH_9yKuk9mV;o8;=jkKQoO^#qN-2-yq|bkDeyRIkUFYYHhLitz)^^!L$}HH^MS zM!@tv6O_jE6ROfz63_7bH+A##d!}bbP(rR1dVr0ZbgGYG;yFUk!x6?h%JUjl>IlE? 
zDZj@Y4Z&3af*4o!r7GldgP#JRIou{i8H(N==fe$+w)r!a@BDx=zUsY#a5dV~PeB8( zFBtR$271?X6^VCj(8w_Q`ix9+?WuG0RTvvdbD*v z8c@4QE3)bheiQZE4%rtKb;aJiI8Gdyv&Qj?onTJmjmiz12b{w@dNgy|D0|NN2j-7c z#I-AOYgvqFvkmgLhJ1%ohSZ^e*M()Bcpk5U3mJPJUm)NK22(t#zBEsUHw!lEdS3IqVI1)N+4HXVkmoPf zVefIzC+4R{`tb9=a@pnI`{56NbmMKYyMO%ezgYRWKaf&z;a6sU^wRIVy|W7nXU@9i zk)QtTKk^UsTD9i8KlJv<=-KO>ym1psCr$aZr%LI&mPubkp*)%)HD2-XSRi zedqZmdNa;#dc;4(JH$I8I42fXQlX;TWrJ;H&}!PI5LuDtZT;JJPK4;#?CU&>_^FtNuOfmHwG z;P8~jw2QuSuK#>rs{abV;mb08O}EV(I60WwwDp>^OVd*QJwg-wsRbjw{hEG$LB&<+ zlT%VBm7X~{cvX)nfz+m>lTyzzFPTzkW&~6H;{&P73eE_eZw{DgWRC68{QdcjX-&It zof_$}A~&n=_a0e($)=w#A0If^JI6mPbyDgGUugMb-;7@B9UtgfEG%))@!*PA&rSL9 zyUW6v###OhZ*bY#HQwudJU6bTqX-SlegP4ZYKQr_V|`qiNN$OU#yw zGW)EUHpuU9+IODsD}#;NQD%RyXIb%}o)dh=vX{T69*Ak=~SmC&S-#U-RqUo@Nj8dhc9+y4T1|_ZHE(5y7)B zTQ)uYEb3Aa%pht?py@Y5Q#qa_cSYgV74QcGDLqpMq@9u8zeiR^dZsta?A0rj(%0zc z%{I<3`v=Z626zVbwaoL(QE8)%Tr2`L`uYZ@c}+ zxwB^9wszfs|H{a`bn2%kMvwXGoNvv2YsKx~z2nY@AOFQKTlYNo{GSfK-LAZvp$jLR zf8ms`eQU*cK|b+|U+#JS#or!$TXnGe`NHqlR76+Y@%_u_AR^gL(6`K424eRIw= zb1R~^-|;X>TAzFK;M<3L^emlH5p7!W?@#^e*ZW>O{6TZeZJW3L>esE${`QSOPTuez z|GDSI-%h#g@>$=wX6~Bre(&*Z+kd@l&$F-f?Az~~bB-Q6+1|A1+kbj9V^DR?fV1X) z=jNaO?3Q0X)wkckL6a`I>~i7%n{Rot^_6}54}Wl^uKs%sjdu?lJ?6omZU6PNzkTh^ z`-<<`ko&zszklV$_RB86`kR4ZX4ddAfB9>5P2q)Ky{L5E9ak-A-22>1zkB@;@1AT| z*4(p~zv*2*F?goe-*efc8BIU&4N6&drg=ut@Q(40^9D>K;1BdnotD`vFe6}k2c)K$ zK{H^Y7n$i^pPA-2GWz%~3!E94g}&7!F$%wxz-}@wL_cs`j$Ut z_V+LUyZMbk-<0eWVYqAkss8@{Zv@WsO-dcMsgT3a0xT(gvDWd%uy=)SNvawQtH)Z_};* zpKMO==gr&XUG~PXK)TP@v^8tlk$_>H>j!(gx9L~rnPz4WnJeWnZn#nzHyOs6=^j0Q z%KE(M!;|fz)5|0AMCV?8aeKR00(c~>n0HB1*mS}c5+>_mdE|E?Z!KXPBw%7j*0g2D zYR?+;PRUE~t9cKBUnmQri5XTh{vm>MOWWJm@hkZ0Zu*!{Sp@Cn-3Wd&zk;9LiC-`4 zks-YA^IDUrmt96~MPi)00(S@RNzrZU4Zim!?Hz5mNXPLC6(npUe|ML^&|)faRuCs$ z;_ygV8DTdi%g73w7q?VEdC~Dl$9mdkzk?M4vF{m z_V!|aiC3IVuh$75aCv)s)oJj@312wv^!RMbf8vVn^B4PX5%^cGY;T{&-;|#80^SVnesGgnmkCYg zbft;*joC|_owM58Z?e-DOMQg?2M8ZByKA3(OxOUzgpTq^ydZf?JmGP9By0d-7Z4`h zB#(sU5;ig^?L@-HCB&0_XFK_I$M2HQ67ctv#^P^zB+XjF9wMxP`X%?N)az03C$4U9 z-)ZBgHFTwe@b8Ayd(-Yq|9%gh-+tn+{Z@PX0n(CRmoX)EI01g@HSO(1g6EO2EE-!& znCOb~NZ1g<))AISuOh-W5SB=rsQ{}J;t8LWIcaqn4xdOqO9+3AxThQM8^9l*+x>W# zI_&_zkZe*pY`8=sq4@15W_ zMo+~FjrM~po8PrB4-?iz7^KjT#N(P8RZG|qC(PnEn=pwd@=JIzS)NM0E+EcR3*blo zCQe7aq)%ppD_iLFkJIl`RxROk2v3(ZJQ5cjNlhnAWIXv;GIk~We!>qDe!vdbGDhh2 z1mTZX!nbzaPT}iU!0*1cE59Bh?1h9d$>#)NuMoDu$v=_qf*%l|Pp|8q2c>=&fPV`7 z2uVNLmVp+3PAB{Y!et(j_9XL*q+KFutDrA`6GyjA=(CP+@1m}C-%MCKVQ!xZ?ROA% zg7hxWxI7_wzXEPE@r(G|&0E6XCww>IlkIR#XKAN5h~Ej}wRU}{CDK9C4FNx~#^Ix~ z&bnSecp2fA+n3~y;ro-qg>H`#e!)`u%FaK_rJLYh0JjdD zQ+5~JA#m@58)?(K%UI<)c6H(ghvyP_aeyq)Z|us8xr7A?b9qtdJdv>DZhBy$<7_AG zWT$@m18`>`eX4d6F`?{0pOGIxMK@SU!8dx5Y+31O1Y0m2RwwvNALEGGB2 z7(~i$K7BbB_=j%k+TL-5Z6HkYmq+@2GGQADTP*>e*?N#jpOg`v)pV+S7TitXUOEkK z1Gsg|+S{L!{1WCWDeDPvxy!qbV+ntO@G`~CE*LJk03lB)SUJPh!!Drd|S>5T}($0~@Yxo}V z_*)+NEhem%FiQe>q+h2Kb`xRKB_N^iMgZ1=djgy@cjYcTwT;61dJNr9+@0&-O(&n^ zG4vGp;2p>}XN)ECw$yDu_`~4GNqU}pOYuC4SkaPCk@2?qFyRglcl6B?!iyhhZ?CXvru#tJy^ip;n>*yj z=dN*f693@V4%v3f*ggQh;=#`Kbj7K3)MGZtCVs`k?d|LB`ghEo7Vr>o`@yZX>pCr& z--UN46KCR2+S~U!=@Z8)RZt7TzYji<&u=2^W5SYYvtIIg^wjW2316QSF8S>y{2<}7 zzNSys83~0;*$2Vr{tP}8JdgC_aS3~zF=Mx>gEz9+m{|AkPQMUO(hni*Kpgog18qYn;#{B5Nw&*o@w=jjy)Py3GT`aJsAL-d6Mnx1%yg{b zef%y=h~EypBq2W6%&6%ZmMSCMLyB($??{M$zf>sG((+#Lp8}5rma#3c*ls2!#J}Y0 zAcB^{c{_zifQ_PXhh1D64hbK+lnHS=qzvwr)5*9(1|_LO~-@d19E13v-07kUXhuzjnJ z03UGa2V5`*U6%gNa`2A;kL=T#KKd=0cWqxcNk7jSheW4OK4`xkc($9K9@+uyo;MYK z`Ri2Xy@8$dI{|zcSobINOr!rk25urhq2IX6pf~x+Bjx>``Yh~gsj8rjZ%ZmagK_Pi z{S^L~3cO@cC%-%md=u~q$shdRfbZuFrEV`FVjp@aq5hWvyX8xYS-_huORB?TsJXy9 
zh%eQU_N@VaDj|Iwcy~hlUjn}XKIFuI8~8v%{7h^b2NUA=13sJ(zXaHuP~YnW9&C}D zJcd4>&>Os){|~@hgDpYw7-|dfaPWvWUEi00FG$GmQ{dvk7Fc<3VZg316?}gUv0V;4 zXRrk%kD&^H=Yx00e>w1y!ImI-3{?qS177nR^brI0`oWeUc?@kAX#r0#kViesA8Uc# z{`gQ}+Mns*#g5^gGnV*08LxW>Tk0wLQTW9Rf3k0_rWrOoiTLg#{3(C07-~5|hMJXx zzdi|WNrL5`fpzD0rvDM}h6MO0z>iXYx>dKwU(WcOf&aR|9>2N>xOOIf=??tIr2Ibx zevI_zIq8SO=R-Ib>(b+L;Bg7?4ZstDM>z4f09Pg8UjlZ|_6of|0d^muM}O%!=Mi`U z@GCt#%ZmcLXI3Ts19RbL+N0a&@u{pb(J5?yDH)$v0^daW@(3IQUgG8l{2#Pm@Vb4# zm&}8|-TDH*1niyzmh`i%VpDJ*Y0rNq!S5x(0a>6pkHjC6 z1eYYi*CfF=Cc%2alaSw&N%-FEDv z>{`Yj{I>!=6MoFP4!xf9u$sT1r>{zMEc=|W(07PTRL&#y`O!l9dxE8Q6DH&Bec(gD zFFEk|O61}Bo$%L^($6LS>I*C-y0WCNO~U_ZKJqc4K5qiQf1#zOJMl*>P^$Q=mdbMA zX5eYXmO6|8mi*oX-k*?u5aqdNjio(zCzbaU@W{(7Zrme1BX%)s%E_F;b>+=Q;3dHB z`1=j;!UXsz@azQmZuqnKVoSYGze;`n1bhPgdC*77A6!9v$H!6N3g9Qe2Oa!_z#k{T z?*U7FJAms~NOL_kVT-&F!Ke&oq?MQ(0fZg-A691dhan2*{xgiO@7x*~ovxy_| zp8Bb7D)4efHxcii*_9T?z{|N-m%frtVEciIc+#)8^smG}KzxBW5F-5}Pa`i?Z<%kZFn64Zu~^N#FO*L_G74Cy={0i0dD+6yu_2U z%l2caHN+QK_Q~D&iFk=8=bY`wP^o=vSjM9pKN0_3@Nyn{V**}a`?2XGXQo{^r7v^VG__x5%0x$UszDGaq4gi*EPGGLVQdPhs z95@2Jv&m9oi;?;~3A{c5Pc)5}U&%opiElsf1M!a&UqU4RGzQMz8H069L-|w{@IuZk zYk3AroK-D*HV9m zy>1^r`ka;gy7iZHSN3D5{~>-8@%hp5v(G@DC8d`$T5f##O@{T^E*-(9cOrZ-`8|}B zU&F1Ij_uYb5w0fwj)eI40-s2LcLNu5zS`}dzXBgmfcy7nzDt0mpr>x{T3#YtO8gzf zcj;9NEO!AU^1HzHW2l$F3qOne5&roE*gfYj@X#|m&-)8J4cI+LF7S=O?sgk8q<>&$;6VeR>tJ;3XR=pU;?gJd&RPWeMp8 zmNV}5WAjfkye&mN~v#9?Wo&0kd z@NQt2KbHc(kbvI|oKAX|-=rM5m%@JR_9w$~Pe39*5&kXtm2rkw<`c~i1CiI9sdeGk zf!(wFf;R_I9`zaS#2*dpp3N8hH-O#q_X4i~mb1$;jSKuUU^&N}>A-IQ%b8{u_F1fd zfO|RkTwpot?56)Fu$+5#>9-PC&Op2J{&8SA3+?Jl`+?zw6EJG4hQ|WS zd1)8_Enqo2?c!Gh%QU0$czrXSbP_bpGYQa*o^0e=V?_>2~4m zz;eFZt?ygFa?abWZ;v79LkaPVfaUzR+dmOtIScOESH24@=fd6k{Tx`%h`aDXU^y@D z(lc`?XJQlZ6M*F$xzDM86j;ubyYWZe41Y16c6w}iFdta>`vCDpJ`7uiyr4hz{Dc1f z1hB}5kq-Wx<+gnAI`EsoA`e{oGO-zZ1+a_%H((k68=UyXE0CXzZ_9xn16J_AJhEO` z)MCR_Ps9HPyaD^5 z&%xILPfWnaf$vYq|6|~?1bqH#?9IS_C;ejJs}u4oU4y;^JvF~j-<7~3FN%mS^qzMc z`XTsCC;bD!B5z ZiDQ4JD%Uxbj=>jIkd>|H$0{F8nC*cOZXUeL}*$cUv|Z4u1{2 z+%KT}4_F?7?Z<|Pdn9trIBU-}66H361#wBAY1RI|S%Xa|XyCg(@&jWVvm=O4O zVE3*Ffp-AA_dy8!lX|=S1LQCL`NDTt4^qEuV37w$kY7U21_ysG{V((?cHnOaKLE@A zo}|C%R=a64&3rRyS{)r|7U?;pgec{z6GPYcTY(9L+|LCzK#03k2^j; zzTa|!Z2Ii@!Dqwkf7lhD49i^(_Je&I`bd8J36S<(0W9%dc>8MRXUgC0;BALIJGIpB z%ySsO3Hszx2)WUPJzn+VV)PI0lL(ST+S`-yCuh*)5%?NZF4~zhwA@&0X<{@VCf+1ntiz18I*ua$k)77-|*x;spHTz;cg? 
z9`B@(M_~K0VY&0`1v zPtTY1$4OwJR}od0{>i(I{-pf_NH6e~wa7cl@9p4INhtLl;oxrumi{>4z>fn9{oM2; zM}cr2>A%UqkGb+ek4ofULj5J`-p_#FKt2iH&3`=n_5%3{97Ccu*J{zokN;j!zN3_q2GmwV;x$579apL<`6)c<2(_g)u) z2aQ3#QlB0=caQoSuzPQd;A7X=<-6neYUH`lZ#VIUKQfmn^;_sONz%h#R|03d?E}6G zcya>#I`B=zcgJ`CrHp@Ic_jTD;3t4JJ%F!YuhapTU+CZGfQ9~(i7(~%xZCbup})Wf zfFGqiH~*Y_$SvC!0sJE0>20B-p?cOs11xC(x2tjuNK(7TSxGX(93;X z`IJb1!O0z5_G9Obf{+HDi%*7KpDY)j3|rg&pX$y9*s`oF%%^!di5StKg@Ss) z22t>2zaNR3>6z}HZl#@&1`<#3BzV`x_r>5uJ|E#tDy`KO2*Z*9R z*YG(udLiw3koNcrdd%p5?Xv#q&&z0<%Ofv{sTXV ze1P`f4nA2QucbfsW7kK!zf$&wR{s~lxBQ0{`I!&$$36N>{VgCFkEcS9mB%wz!C>aomU3G!1|-# zXJb3b`~Q&#eWF~^GgH4^{!5fg{HHcPfi1oj7S6L-50B>udtSZ1RsKB69m=0#%J+cJ z0NeHUAb-3X_(`Vx7l9vY!SWu7Wu-sIMt@4Vz;ECOJHG95sV8x-^ap($3sKzb|8%Q=&kCWh;A@YU!1|-dU*cj}c!&A|xBJsB zmwFOIOMi}yK16+i+x=;mOFbJiOJ?Nz|CR2ahmVo&SFV?6hEc{-M_MKSjBX z^Cj>5AHeow=cip?%I^QU6;nlze;0nqBeUgQ_$lw6EpNk*^8PK_Yw7tjlF{f!tNrJI zZCo*F?_1xb+w)OcB=qD{z|vj|Kacs6_rJ@O{{pbYBa@HNw_Dz<;inn+KJeR*?f=W( zy<&bs-`y$y$qpkv(kbu4FM03m^IcftcInTt(OLTQPWtyg zevtnC67a)z{D80h5cgSN$iFK8q6bGK8*fe3U(la6{y(JrGrUiHS1i2{SbvUv+zB@67$T$F9R-H@G7vxK(pmrz!JaA!Z&~=cA16c`?ddh zMT2JiyYPdZ@<-n{TfYsz2K>){|H_DE)$~_j{n7K=g@3+N-i1H>f!XJ~@W(slkNeH{vDBV4lGO}934Bir=I7DHkIdrl!mFL~ zr~UWY`d#?Bt@5w_`>ZG4_f*~^??3+__aD}uy+4FMJQ}S~{uC)^y?^Ihxlgp@_wNS2 zO!?zY`9~ibjpTjOAE6(|e}wxVuskpD8Qy0Tz`&@D#~ww@9*$~v`0QKYn6Ww z6w)ObgTSlDYx+D z`HdBMoX@e*b18p>ayx(W>%*;bf#21FUqJm2QeTZ1_`VDHVPeF|_(*%-4*YS-R}B2? zz#paD;(zQnaUKEN^Jxsc3jAzSpRbX`y3!x`XUc7SKf(W6U>nm<;CBGqcz6Q;2C$8{ zC-CFHnK**re-;P@c7R_7`~(BP7}&?QXY6V2Nj@KlG3Kj{-lAAMAN2uq~JG&$QsX{vrF7xN7o|`Mz#o zyC3BDZ~DCz`QpPM@_B6wehc+~p6A>3KhY|GKjpthxm{0x2K?a`{ES;cNK7-kKkowm zb;`ehpQSxFfR9tYVqm^T60=QzbbGq6!~t!Wci|g_UH8)e->v%Rfv>d6-vE3W*tYj= zw}IGvgg*YG4%iVH%{Mw3i!#uYJTbeV;_Q_Yr($)ejD#k#^=2q{J#zS zP1Kk1l=1!Bx6@u=`3NkZkG9r>z-KH!|z<&( z_sH8RzZdw4WAY{#`0cCUx7dTnHBlsb8PfU`Y+|;Un=9f(|P{Q9r(X=;7@np zuen2Gmj6qCpVxu+I`D%X_!m3ypLXD@5EC9r$Jk zey9U~pacJT2Y%-BX7OL_z_04SZ|lGx>cF4rz+ZRgEPjF7^Zn1>tl@;;Wxg*hsQ<*j zT#?84V2_@&Ux01>Z|~H9xC8%c2mY%L{PpMHm%n^P(Yr4N{}_1dIV+=&^EQDa;N#x3 z{50^>TJX05f67H(YwEuk__?j}3iu~|ecuv%_W?fw{6tg#b-?F%pU`V5e-QX3z)v>t zzXyIl^*;`Ml=2S%2jG7-^g-a?1QvVji>Cfp-lh31Sw>R+ch8ST??zujb!-kFKl)DK zAO8mUSyTQ=;5Y7SeW}!cDTp54Lq2HAzvx>TpLef}Rt)_1%gomYrtSTPEASg1WIjxL zeg;_hZ`=N_zMKBON6Qlg|NX!s&v}!<_YKe2{JgFIdf@wc--D+7kAOvW^Op zKk<7j%D+mU|GF2$ue@99?*x7w@H=-_M(;GwUw;Ym#^2ELd?|n1x3NBW-}_DZM}S|? z_}cb8>7}eM;HR1L6Tl+xK4bcG16cekZT&X^Khm1Np9i-7lTYG(zX<%mw>=$f^b-pA zo4_A5`Z1aB;}h-v8MEb^1Ulz%V#@t%bK9@FJ#U(R}uRz?pS_|m=bZ|`TlJXV*#23YvXRrCCNfQA3G z@Ez;=eqiD60v7p#ZST8)MV|802LEHPaUW+rt{8j?aC`mzAaHyA{RMD){k?bt{t$dm zG0(phSnh)s-xD@9|7_cHC$PxVBrc7N=QgnT4@f*hfqw#6G=x-Qr zu|JUZe$977U;b>m|L*%<=;>QlMvs{1Jvu7-f!7)MXTOjA$a=p8_)A#--|_v>w}