diff --git a/Cargo.toml b/Cargo.toml index e410fba71..d05c704d2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,10 +52,6 @@ name = "field" harness = false name = "matrix" -[[bench]] -name = "merkle_bench" -harness = false - [[bench]] name = "merkle" harness = false diff --git a/benches/merkle_bench.rs b/benches/merkle_bench.rs deleted file mode 100644 index afeb16bfc..000000000 --- a/benches/merkle_bench.rs +++ /dev/null @@ -1,118 +0,0 @@ -use blake2::{Blake2s256, Digest}; -// TODO(Ohad): write better benchmarks. Reduce the variance in sample size. -use criterion::measurement::WallTime; -use criterion::{ - black_box, criterion_group, criterion_main, BatchSize, BenchmarkGroup, BenchmarkId, Criterion, - Throughput, -}; -use stwo::commitment_scheme::blake2_hash::Blake2sHasher; -use stwo::commitment_scheme::blake3_hash::Blake3Hasher; -use stwo::commitment_scheme::hasher::{Hasher, Name}; -use stwo::commitment_scheme::merkle_tree::MerkleTree; -use stwo::core::fields::m31::M31; -use stwo::core::fields::IntoSlice; - -static N_BYTES_U32: usize = 4; - -fn prepare_element_vector(size: usize) -> Vec { - (0..size as u32).map(M31::from_u32_unchecked).collect() -} - -fn merkle_bench(group: &mut BenchmarkGroup<'_, WallTime>, elems: &[M31]) -where - M31: IntoSlice<::NativeType>, -{ - let size = elems.len(); - const LOG_N_COLS: usize = 7; - let cols: Vec<_> = elems - .chunks(size >> LOG_N_COLS) - .map(|chunk| chunk.to_vec()) - .collect(); - assert_eq!(cols.len(), 1 << LOG_N_COLS); - group.sample_size(10); - group.throughput(Throughput::Bytes((size * N_BYTES_U32) as u64)); - group.bench_function(BenchmarkId::new(H::Hash::NAME, size), |b| { - b.iter_batched( - || cols.clone(), - |cols| { - black_box(MerkleTree::::commit(black_box(cols))); - }, - BatchSize::LargeInput, - ) - }); -} - -fn merkle_blake3_benchmark(c: &mut Criterion) { - let mut group = c.benchmark_group("Blake3_Tree"); - for exp in 15u32..20u32 { - // Set Up. - let elems: Vec = prepare_element_vector(2usize.pow(exp)); - - // Benchmark Loop. - merkle_bench::(&mut group, &elems); - } - group.finish(); -} - -fn merkle_blake2s_benchmark(c: &mut Criterion) { - let mut group = c.benchmark_group("Blake2s_Tree"); - for exp in 15u32..20u32 { - // Set up. - let size = 2usize.pow(exp); - let elems: Vec = (0..(size as u32)).map(M31::from_u32_unchecked).collect(); - - // Benchmark Loop. - merkle_bench::(&mut group, &elems); - } - group.finish(); -} - -// Compare Blake2s256 w. Blake3. -fn compare_blakes(c: &mut Criterion) { - let mut group = c.benchmark_group("Comparison of hashing algorithms and caching overhead"); - for exp in 15u32..20u32 { - // Set up. - let size = 2usize.pow(exp); - let elems: Vec = (0..(size as u32)).map(M31::from_u32_unchecked).collect(); - - // Benchmark Loop. 
- merkle_bench::(&mut group, &elems); - merkle_bench::(&mut group, &elems); - } - group.finish(); -} - -fn single_blake2s_hash_benchmark(c: &mut Criterion) { - let input = [0u8; 1]; - c.bench_function("Single blake2s hash", |b| { - b.iter_batched( - || -> Blake2s256 { Blake2s256::new() }, - |mut h| { - h.update(&input[..]); - h.finalize() - }, - BatchSize::SmallInput, - ) - }); -} - -fn single_blake3_hash_benchmark(c: &mut Criterion) { - let input = [0u8; 1]; - c.bench_function("Single blake3 hash", |b| b.iter(|| blake3::hash(&input))); -} - -criterion_group!( - merkle_benches, - merkle_blake2s_benchmark, - merkle_blake3_benchmark, -); - -criterion_group!(comparisons, compare_blakes,); - -criterion_group!( - single_hash, - single_blake2s_hash_benchmark, - single_blake3_hash_benchmark, -); - -criterion_main!(comparisons); diff --git a/src/commitment_scheme/blake2_hash.rs b/src/commitment_scheme/blake2_hash.rs index 8b29bb96a..b530a083a 100644 --- a/src/commitment_scheme/blake2_hash.rs +++ b/src/commitment_scheme/blake2_hash.rs @@ -1,7 +1,6 @@ use std::fmt; -use blake2::digest::{Update, VariableOutput}; -use blake2::{Blake2s256, Blake2sVar, Digest}; +use blake2::{Blake2s256, Digest}; // Wrapper for the blake2s hash type. #[derive(Clone, Copy, PartialEq, Default, Eq)] @@ -96,24 +95,6 @@ impl super::hasher::Hasher for Blake2sHasher { fn finalize_reset(&mut self) -> Blake2sHash { Blake2sHash(self.state.finalize_reset().into()) } - - unsafe fn hash_many_in_place( - data: &[*const u8], - single_input_length_bytes: usize, - dst: &[*mut u8], - ) { - data.iter() - .map(|p| std::slice::from_raw_parts(*p, single_input_length_bytes)) - .zip( - dst.iter() - .map(|p| std::slice::from_raw_parts_mut(*p, Self::OUTPUT_SIZE)), - ) - .for_each(|(input, out)| { - let mut hasher = Blake2sVar::new(Self::OUTPUT_SIZE).unwrap(); - hasher.update(input); - hasher.finalize_variable(out).unwrap(); - }) - } } #[cfg(test)] @@ -131,19 +112,6 @@ mod tests { ); } - #[test] - fn hash_many_xof_test() { - let input1 = "a"; - let input2 = "b"; - let input_arr = [input1.as_ptr(), input2.as_ptr()]; - - let mut out = [0_u8; 96]; - let out_ptrs = [out.as_mut_ptr(), unsafe { out.as_mut_ptr().add(42) }]; - unsafe { Blake2sHasher::hash_many_in_place(&input_arr, 1, &out_ptrs) }; - - assert_eq!("4a0d129873403037c2cd9b9048203687f6233fb6738956e0349bd4320fec3e900000000000000000000004449e92c9a7657ef2d677b8ef9da46c088f13575ea887e4818fc455a2bca50000000000000000000000000000000000000000000000", hex::encode(out)); - } - #[test] fn hash_state_test() { let mut state = Blake2sHasher::new(); diff --git a/src/commitment_scheme/blake3_hash.rs b/src/commitment_scheme/blake3_hash.rs index e4dad5554..62e820ce9 100644 --- a/src/commitment_scheme/blake3_hash.rs +++ b/src/commitment_scheme/blake3_hash.rs @@ -91,26 +91,6 @@ impl super::hasher::Hasher for Blake3Hasher { self.state.reset(); res } - - unsafe fn hash_many_in_place( - data: &[*const u8], - single_input_length_bytes: usize, - dst: &[*mut u8], - ) { - let mut hasher = blake3::Hasher::new(); - data.iter() - .map(|p| std::slice::from_raw_parts(*p, single_input_length_bytes)) - .zip( - dst.iter() - .map(|p| std::slice::from_raw_parts_mut(*p, Self::OUTPUT_SIZE)), - ) - .for_each(|(input, out)| { - hasher.update(input); - let mut output_reader = hasher.finalize_xof(); - output_reader.fill(&mut out[..Self::OUTPUT_SIZE]); - hasher.reset(); - }) - } } #[cfg(test)] @@ -127,19 +107,6 @@ mod tests { ); } - #[test] - fn hash_many_xof_test() { - let input1 = "a"; - let input2 = "b"; - let input_arr = 
[input1.as_ptr(), input2.as_ptr()]; - - let mut out = [0_u8; 96]; - let out_ptrs = [out.as_mut_ptr(), unsafe { out.as_mut_ptr().add(42) }]; - unsafe { Blake3Hasher::hash_many_in_place(&input_arr, 1, &out_ptrs) }; - - assert_eq!("17762fddd969a453925d65717ac3eea21320b66b54342fde15128d6caf21215f0000000000000000000010e5cf3d3c8a4f9f3468c8cc58eea84892a22fdadbc1acb22410190044c1d55300000000000000000000000000000000000000000000", hex::encode(out)); - } - #[test] fn hash_state_test() { let mut state = Blake3Hasher::new(); diff --git a/src/commitment_scheme/hasher.rs b/src/commitment_scheme/hasher.rs index caf527a19..116dfa0bb 100644 --- a/src/commitment_scheme/hasher.rs +++ b/src/commitment_scheme/hasher.rs @@ -52,20 +52,6 @@ pub trait Hasher: Sized + Default { hasher.update(data); hasher.finalize() } - - /// Hash many inputs of the same length. - /// Writes output directly to corresponding pointers in dst. - /// - /// # Safety - /// - /// Inputs must be of the same size. output locations must all point to valid, allocated and - /// distinct locations in memory. - // TODO(Ohad): make redundent and delete. - unsafe fn hash_many_in_place( - data: &[*const Self::NativeType], - single_input_length_bytes: usize, - dst: &[*mut Self::NativeType], - ); } pub trait Hash: diff --git a/src/commitment_scheme/merkle_decommitment.rs b/src/commitment_scheme/merkle_decommitment.rs deleted file mode 100644 index 4c0c129e0..000000000 --- a/src/commitment_scheme/merkle_decommitment.rs +++ /dev/null @@ -1,244 +0,0 @@ -use std::fmt::{self, Display}; -use std::iter::Peekable; - -use itertools::Itertools; - -use super::hasher::Hasher; -use crate::core::fields::IntoSlice; - -/// Merkle proof of queried indices. -/// Used for storing a merkle proof of a given tree and a set of queries. -/// # Attributes -/// * `leaf_blocks` - The blocks of the bottom layer of the tree. -/// * `layers` - Internal nodes(hashes) of a specific layer in the tree. nodes that are not in a -/// queried path, or nodes with both children in the queried path are excluded. -/// * `n_rows_in_leaf_block` - The number of trace-rows packed in each leaf block. -// TODO(Ohad): derive Debug. -#[derive(Default, Debug)] -pub struct MerkleDecommitment { - pub leaf_blocks: Vec>, - pub layers: Vec>, - pub n_rows_in_leaf_block: usize, - queries: Vec, -} - -impl MerkleDecommitment -where - T: IntoSlice, -{ - pub fn new( - leaf_blocks: Vec>, - layers: Vec>, - n_rows_in_leaf_block: usize, - queries: Vec, - ) -> Self { - Self { - leaf_blocks, - layers, - n_rows_in_leaf_block, - queries, - } - } - - pub fn height(&self) -> usize { - self.layers.len() + 1 - } - - // TODO(Ohad): Implement more verbose error handling. - /// Verifies the decommitment against a given root. Queries are assumed to be sorted. - pub fn verify(&self, root: H::Hash, queries: &[usize]) -> bool { - let leaf_block_queries = queries - .iter() - .map(|q| q / self.n_rows_in_leaf_block) - .dedup() - .collect::>(); - assert_eq!(self.leaf_blocks.len(), leaf_block_queries.len()); - - let mut curr_hashes = self - .leaf_blocks - .iter() - .map(|leaf_block| H::hash(>::into_slice(leaf_block))) - .collect::>(); - - let mut layer_queries = leaf_block_queries.clone(); - for layer in self.layers.iter() { - let mut next_layer_hashes = Vec::::new(); - let mut query_iter = layer_queries.iter().enumerate().peekable(); - let mut layer_iter = layer.iter(); - - while let Some((i, q)) = query_iter.next() { - let mut f = || -> Option<_> { - if *q % 2 != 0 { - // Right child. 
- return Some(H::concat_and_hash(layer_iter.next()?, curr_hashes.get(i)?)); - } - match query_iter.peek() { - Some((_, next_q)) if *q + 1 == **next_q => { - query_iter.next(); - Some(H::concat_and_hash( - curr_hashes.get(i)?, - curr_hashes.get(i + 1)?, - )) - } - _ => Some(H::concat_and_hash(curr_hashes.get(i)?, layer_iter.next()?)), - } - }; - next_layer_hashes.push(f().expect("Error verifying proof!")); - } - assert!(layer_iter.next().is_none()); - curr_hashes = next_layer_hashes; - layer_queries = layer_queries - .iter() - .map(|q| q / 2) - .dedup() - .collect::>(); - } - assert_eq!( - layer_queries.into_iter().collect::>(), - vec![0_usize] - ); - assert_eq!(curr_hashes.len(), 1); - curr_hashes[0].into() == root.into() - } - - pub fn values(&self) -> impl Iterator> + '_ { - QueriedValuesIterator { - query_iterator: self.queries.iter(), - leaf_block_iterator: self.leaf_blocks.iter().peekable(), - current_leaf_block_index: self.queries[0] / self.n_rows_in_leaf_block, - n_elements_in_row: self.leaf_blocks[0].len() / self.n_rows_in_leaf_block, - n_rows_in_leaf_block: self.n_rows_in_leaf_block, - } - } -} - -pub struct QueriedValuesIterator<'a, T: Sized + Display> { - query_iterator: std::slice::Iter<'a, usize>, - leaf_block_iterator: Peekable>>, - current_leaf_block_index: usize, - n_elements_in_row: usize, - n_rows_in_leaf_block: usize, -} - -impl<'a, T: Sized + Display + Clone> Iterator for QueriedValuesIterator<'a, T> { - type Item = Vec; - - fn next(&mut self) -> Option { - match self.query_iterator.next() { - Some(query) => { - let leaf_block_index = self.get_leaf_block_index(*query); - if leaf_block_index != self.current_leaf_block_index { - self.leaf_block_iterator.next(); - self.current_leaf_block_index = leaf_block_index; - } - let row_start_index = (query % self.n_rows_in_leaf_block) * self.n_elements_in_row; - let row_end_index = row_start_index + self.n_elements_in_row; - Some( - self.leaf_block_iterator.peek().unwrap().to_vec() - [row_start_index..row_end_index] - .to_vec(), - ) - } - None => None, - } - } -} - -impl<'a, T: Sized + Display> QueriedValuesIterator<'a, T> { - pub fn get_leaf_block_index(&self, query: usize) -> usize { - query / self.n_rows_in_leaf_block - } -} - -impl fmt::Display for MerkleDecommitment { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.layers.last() { - Some(_) => { - self.leaf_blocks.iter().enumerate().for_each(|(i, leaf)| { - f.write_str(&std::format!("\nLeaf #[{:}]: ", i)).unwrap(); - leaf.iter() - .for_each(|node| f.write_str(&std::format!("{} ", node)).unwrap()); - }); - for (i, layer) in self.layers.iter().enumerate().take(self.layers.len()) { - f.write_str(&std::format!("\nLayer #[{}]:", i))?; - for (j, node) in layer.iter().enumerate() { - f.write_str(&std::format!("\n\tNode #[{}]: {}", j, node))?; - } - } - } - None => f.write_str("Empty Path!")?, - } - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use crate::commitment_scheme::blake3_hash::Blake3Hasher; - use crate::commitment_scheme::hasher::Hasher; - use crate::commitment_scheme::merkle_tree::MerkleTree; - use crate::commitment_scheme::utils::tests::generate_test_queries; - use crate::commitment_scheme::utils::ColumnArray; - use crate::core::fields::m31::M31; - - #[test] - pub fn verify_test() { - let trace: ColumnArray = vec![(0..4096).map(M31::from_u32_unchecked).collect(); 7]; - let tree = MerkleTree::::commit(trace); - let queries = generate_test_queries(100, 4096); - let decommitment = tree.generate_decommitment(queries.clone()); - - 
assert!(decommitment.verify(tree.root(), &queries)); - } - - #[test] - pub fn verify_false_proof_test() { - let trace_column_length = 1 << 12; - let trace: ColumnArray = vec![ - (0..trace_column_length) - .map(M31::from_u32_unchecked) - .collect(); - 4 - ]; - let tree = MerkleTree::::commit(trace); - let queries = generate_test_queries(10, trace_column_length as usize); - let mut wrong_internal_node_decommitment = tree.generate_decommitment(queries.clone()); - let mut wrong_leaf_block_decommitment = tree.generate_decommitment(queries.clone()); - - wrong_internal_node_decommitment.layers[0][0] = Blake3Hasher::hash(&[0]); - wrong_leaf_block_decommitment.leaf_blocks[0][0] += M31::from_u32_unchecked(1); - - assert!( - !wrong_internal_node_decommitment.verify(tree.root(), &queries), - "Wrong internal node decommitment passed!" - ); - assert!( - !wrong_leaf_block_decommitment.verify(tree.root(), &queries), - "Wrong leaf block decommitment passed!" - ); - } - - #[test] - fn values_test() { - let trace_column_length = 1 << 6; - let trace_column = (0..trace_column_length) - .map(M31::from_u32_unchecked) - .collect::>(); - let reversed_trace_column = trace_column.iter().rev().cloned().collect::>(); - let trace: ColumnArray = vec![trace_column, reversed_trace_column]; - let tree = MerkleTree::::commit(trace.clone()); - let random_queries = generate_test_queries(10, trace_column_length as usize); - let test_skip_queries = vec![17, 50]; - let random_query_decommitment = tree.generate_decommitment(random_queries.clone()); - let test_skip_decommitment = tree.generate_decommitment(test_skip_queries.clone()); - - assert!(random_queries - .iter() - .zip(random_query_decommitment.values()) - .all(|(q, v)| v == vec![trace[0][*q], trace[1][*q]])); - assert!(test_skip_queries - .iter() - .zip(test_skip_decommitment.values()) - .all(|(q, v)| v == vec![trace[0][*q], trace[1][*q]])); - } -} diff --git a/src/commitment_scheme/merkle_input.rs b/src/commitment_scheme/merkle_input.rs deleted file mode 100644 index 6c1180e61..000000000 --- a/src/commitment_scheme/merkle_input.rs +++ /dev/null @@ -1,304 +0,0 @@ -use super::utils::get_column_chunk; -use crate::core::fields::Field; - -/// The Input of a Merkle-Tree Mixed-Degree commitment scheme. -/// A map from the depth of the tree requested to be injected to the to-be-injected columns. -/// A layer of depth 'd' in a merkle tree, holds 2^(d-1) hash buckets, each containing 2 sibling -/// hashes and the injected values of that depth. -/// -/// # Example -/// -/// ```rust -/// use stwo::commitment_scheme::merkle_input::MerkleTreeInput; -/// use stwo::core::fields::m31::M31; -/// -/// let mut input = MerkleTreeInput::::new(); -/// let column = vec![M31::from_u32_unchecked(0); 1024]; -/// input.insert_column(2, &column); -/// input.insert_column(3, &column); -/// input.insert_column(3, &column); -/// -/// assert_eq!(input.get_columns(2).len(), 1); -/// assert_eq!(input.get_columns(3).len(), 2); -/// assert_eq!(input.max_injected_depth(), 3); -/// ```` -// `columns_to_inject` - A vector of columns to be injected to the merkle tree, ordered as -// inserted. -// `injected_depths_map` - A mapping from a depth of the tree to the columns injected at that -// depth, ordered as inserted. 
-#[derive(Default)] -pub struct MerkleTreeInput<'a, F: Field> { - columns_to_inject: Vec<&'a [F]>, - injected_depths_map: Vec>, -} - -pub type LayerColumns<'a, F> = Vec<&'a [F]>; - -impl<'a, F: Field> MerkleTreeInput<'a, F> { - pub fn new() -> Self { - Self { - columns_to_inject: vec![], - injected_depths_map: vec![], - } - } - - pub fn insert_column(&mut self, depth: usize, column: &'a [F]) { - assert_ne!(depth, 0, "Injection to layer 0 undefined!"); - assert!( - column.len().is_power_of_two(), - "Column is of size: {}, not a power of 2!", - column.len() - ); - - // Column is spread over 'hash buckets' in the layer, every layer holds 2^(depth-1) buckets. - // TODO(Ohad): implement embedd by repeatition and remove assert. - assert!( - column.len() >= 2usize.pow((depth - 1) as u32), - "Column of size: {} is too small for injection at layer:{}", - column.len(), - depth - ); - - if self.injected_depths_map.len() < depth { - self.injected_depths_map.resize(depth, vec![]); - } - self.injected_depths_map[depth - 1].push(self.columns_to_inject.len()); - self.columns_to_inject.push(column); - } - - pub fn get_columns(&'a self, depth: usize) -> Vec<&[F]> { - match self.injected_depths_map.get(depth - 1) { - Some(injected_column_indices) => injected_column_indices - .iter() - .map(|&index| self.columns_to_inject[index]) - .collect::>(), - _ => panic!( - "Attempted extraction of columns from depth: {}, but max injected depth is: {}", - depth, - self.max_injected_depth() - ), - } - } - - pub fn max_injected_depth(&self) -> usize { - self.injected_depths_map.len() - } - - pub fn get_injected_elements(&self, depth: usize, bag_index: usize) -> Vec { - let n_bags_in_layer = 1 << (depth - 1); - let mut injected_elements = Vec::::new(); - for column in self.get_columns(depth).iter() { - let col_chunk = get_column_chunk(column, bag_index, n_bags_in_layer); - injected_elements.extend(col_chunk); - } - injected_elements - } - - pub fn n_injected_columns(&self) -> usize { - self.columns_to_inject.len() - } - - // Returns the column layout of the merkle tree. i.e. for each depth, the length of the columns - // assigned to it. - // TODO(Ohad): implement this logic for the verifier. - pub fn column_layout(&self) -> MerkleTreeColumnLayout { - let column_sizes = self - .columns_to_inject - .iter() - .map(|col| col.len()) - .collect::>(); - MerkleTreeColumnLayout { - column_sizes, - injected_depths_map: self.injected_depths_map.clone(), - } - } -} - -/// The column layout of a mixed degree merkle tree. -/// The sizes of columns assigned to every layer, ordered as they were inserted & injected into hash -/// blocks. 
-#[derive(Debug, Default)] -pub struct MerkleTreeColumnLayout { - column_sizes: Vec, - injected_depths_map: Vec>, -} - -impl MerkleTreeColumnLayout { - pub fn sort_queries_by_layer(&self, queries: &[Vec]) -> Vec>> { - let mut queries_to_layers = vec![vec![]; self.height()]; - (1..=self.height()).for_each(|i| { - let columns_in_layer = self.column_indices_at(i); - columns_in_layer.iter().for_each(|&column_index| { - queries_to_layers[i - 1].push(queries[column_index].clone()); - }); - }); - queries_to_layers - } - - pub fn column_lengths_at_depth(&self, depth: usize) -> Vec { - self.column_indices_at(depth) - .iter() - .map(|&index| self.column_sizes[index]) - .collect::>() - } - - pub fn height(&self) -> usize { - self.injected_depths_map.len() - } - - fn column_indices_at(&self, depth: usize) -> &[usize] { - &self.injected_depths_map[depth - 1] - } - - pub fn build_input<'a, F: Field>(&self, columns: &[&'a [F]]) -> MerkleTreeInput<'a, F> { - MerkleTreeInput { - columns_to_inject: columns.to_vec(), - injected_depths_map: self.injected_depths_map.clone(), - } - } -} - -#[cfg(test)] -mod tests { - use std::vec; - - use crate::core::fields::m31::M31; - use crate::m31; - - #[test] - pub fn md_input_insert_test() { - let mut input = super::MerkleTreeInput::::new(); - let column = vec![M31::from_u32_unchecked(0); 1024]; - - input.insert_column(3, &column); - input.insert_column(3, &column); - input.insert_column(2, &column); - - assert_eq!(input.get_columns(3).len(), 2); - assert_eq!(input.get_columns(2).len(), 1); - } - - #[test] - pub fn md_input_max_depth_test() { - let mut input = super::MerkleTreeInput::::new(); - let column = vec![M31::from_u32_unchecked(0); 1024]; - - input.insert_column(3, &column); - input.insert_column(2, &column); - - assert_eq!(input.max_injected_depth(), 3); - } - - #[test] - #[should_panic] - pub fn get_invalid_depth_test() { - let mut input = super::MerkleTreeInput::::new(); - let column = vec![M31::from_u32_unchecked(0); 1024]; - input.insert_column(3, &column); - - input.get_columns(4); - } - - #[test] - pub fn merkle_tree_input_empty_vec_test() { - let mut input = super::MerkleTreeInput::::new(); - let column = vec![M31::from_u32_unchecked(0); 1024]; - input.insert_column(3, &column); - - assert_eq!(input.get_columns(2), Vec::>::new().as_slice()); - } - #[test] - #[should_panic] - pub fn mt_input_column_too_short_test() { - let mut input = super::MerkleTreeInput::::new(); - let column = vec![M31::from_u32_unchecked(0); 1024]; - - input.insert_column(12, &column); - } - - #[test] - #[should_panic] - pub fn mt_input_wrong_size_test() { - let mut input = super::MerkleTreeInput::::default(); - let not_pow_2_column = vec![M31::from_u32_unchecked(0); 1023]; - - input.insert_column(2, ¬_pow_2_column); - } - - #[test] - fn get_injected_elements_test() { - let trace_column = (0..4).map(M31::from_u32_unchecked).collect::>(); - let mut merkle_input = super::MerkleTreeInput::::new(); - merkle_input.insert_column(3, &trace_column); - merkle_input.insert_column(2, &trace_column); - - let injected_elements_30 = merkle_input.get_injected_elements(3, 0); - let injected_elements_31 = merkle_input.get_injected_elements(3, 1); - let injected_elements_32 = merkle_input.get_injected_elements(3, 2); - let injected_elements_33 = merkle_input.get_injected_elements(3, 3); - let injected_elements_20 = merkle_input.get_injected_elements(2, 0); - let injected_elements_21 = merkle_input.get_injected_elements(2, 1); - - assert_eq!(injected_elements_30, vec![m31!(0)]); - 
assert_eq!(injected_elements_31, vec![m31!(1)]); - assert_eq!(injected_elements_32, vec![m31!(2)]); - assert_eq!(injected_elements_33, vec![m31!(3)]); - assert_eq!(injected_elements_20, vec![m31!(0), m31!(1)]); - assert_eq!(injected_elements_21, vec![m31!(2), m31!(3)]); - } - - #[test] - fn n_injected_columns_test() { - let mut merkle_input = super::MerkleTreeInput::::new(); - let trace_column = (0..4).map(M31::from_u32_unchecked).collect::>(); - merkle_input.insert_column(3, &trace_column); - merkle_input.insert_column(2, &trace_column); - merkle_input.insert_column(2, &trace_column); - - assert_eq!(merkle_input.n_injected_columns(), 3); - } - - #[test] - fn config_length_at_depth_test() { - let mut merkle_input = super::MerkleTreeInput::::new(); - let column_length_4 = (0..4).map(M31::from_u32_unchecked).collect::>(); - let column_length_8 = (0..8).map(M31::from_u32_unchecked).collect::>(); - let column_length_16 = (0..16).map(M31::from_u32_unchecked).collect::>(); - merkle_input.insert_column(3, &column_length_4); - merkle_input.insert_column(2, &column_length_8); - merkle_input.insert_column(2, &column_length_4); - merkle_input.insert_column(3, &column_length_16); - - let merkle_config = merkle_input.column_layout(); - - assert_eq!(merkle_config.column_lengths_at_depth(3), vec![4, 16]); - assert_eq!(merkle_config.column_lengths_at_depth(2), vec![8, 4]); - } - - #[test] - fn sort_queries_by_layer_test() { - let mut merkle_input = super::MerkleTreeInput::::new(); - let column = [M31::from_u32_unchecked(0); 64]; - merkle_input.insert_column(3, &column); - merkle_input.insert_column(2, &column); - merkle_input.insert_column(4, &column); - merkle_input.insert_column(2, &column); - merkle_input.insert_column(4, &column); - merkle_input.insert_column(3, &column); - - let queries = vec![vec![0], vec![1], vec![2], vec![3], vec![4], vec![5]]; - - let merkle_config = merkle_input.column_layout(); - let sorted_queries = merkle_config.sort_queries_by_layer(&queries); - - assert_eq!( - sorted_queries, - vec![ - vec![], - vec![vec![1], vec![3]], - vec![vec![0], vec![5]], - vec![vec![2], vec![4]] - ] - ); - } -} diff --git a/src/commitment_scheme/merkle_multilayer.rs b/src/commitment_scheme/merkle_multilayer.rs deleted file mode 100644 index 5614c2387..000000000 --- a/src/commitment_scheme/merkle_multilayer.rs +++ /dev/null @@ -1,436 +0,0 @@ -use std::fmt::{self, Display}; - -use super::hasher::Hasher; -use super::merkle_input::MerkleTreeInput; -use super::utils::{get_column_chunk, inject_and_hash_layer}; -use crate::core::fields::{Field, IntoSlice}; - -/// A MerkleMultiLayer represents multiple sequential merkle-tree layers, as a SubTreeMajor array of -/// hash values. Each SubTree is a balanced binary tree of height `sub_trees_height`. -/// Intended to be used as a layer of L1/L2 cache-sized sub-trees and commited on serially, and -/// multithreaded within the multilayer. -// TODO(Ohad): Implement get_layer_view() and get_layer_mut() for subtrees. -// TODO(Ohad): Implement .commit(), .decommit() for MerkleMultiLayer. -// TODO(Ohad): Add as an attribute of the merkle tree. -// TODO(Ohad): Implement Iterator for MerkleMultiLayer. 
-pub struct MerkleMultiLayer { - pub data: Vec, - pub config: MerkleMultiLayerConfig, -} - -pub struct MerkleMultiLayerConfig { - pub n_sub_trees: usize, - pub sub_tree_height: usize, - pub sub_tree_size: usize, -} - -impl MerkleMultiLayerConfig { - pub fn new(sub_tree_height: usize, n_sub_trees: usize) -> Self { - let sub_tree_size = (1 << sub_tree_height) - 1; - Self { - n_sub_trees, - sub_tree_height, - sub_tree_size, - } - } -} - -impl MerkleMultiLayer { - pub fn new(config: MerkleMultiLayerConfig) -> Self { - // TODO(Ohad): investigate if this is the best way to initialize the vector. Consider unsafe - // implementation. - let data = vec![H::Hash::default(); config.sub_tree_size * config.n_sub_trees]; - Self { data, config } - } - - /// Returns the roots of the sub-trees. - pub fn get_roots(&self) -> impl ExactSizeIterator { - self.data - .iter() - .skip(self.config.sub_tree_size - 1) - .step_by(self.config.sub_tree_size) - } - - pub fn commit_layer, const IS_INTERMEDIATE: bool>( - &mut self, - input: &MerkleTreeInput<'_, F>, - prev_hashes: &[H::Hash], - ) { - // TODO(Ohad): implement multithreading (rayon par iter). - let tree_iter = self.data.chunks_mut(self.config.sub_tree_size); - tree_iter.enumerate().for_each(|(i, tree_data)| { - let prev_hashes = if IS_INTERMEDIATE { - let sub_layer_size = 1 << self.config.sub_tree_height; - &prev_hashes[i * sub_layer_size..(i + 1) * sub_layer_size] - } else { - &[] - }; - hash_subtree::( - tree_data, - input, - self.config.n_sub_trees.ilog2() as usize, - prev_hashes, - &self.config, - i, - ); - }); - } - - pub fn get_hash_value(&self, layer: usize, node_idx: usize) -> H::Hash { - assert!(layer < self.config.sub_tree_height); - assert!(node_idx < (1 << layer) * self.config.n_sub_trees); - let layer_len = 1 << layer; - let tree_idx = node_idx >> layer; - let sub_tree_data = self - .data - .chunks(self.config.sub_tree_size) - .nth(tree_idx) - .unwrap(); - let layer_view = &sub_tree_data[sub_tree_data.len() - (layer_len * 2 - 1) - ..sub_tree_data.len() - (layer_len * 2 - 1) + layer_len]; - let layer_mask = layer_len - 1; - layer_view[node_idx & layer_mask] - } -} - -// Hashes a single sub-tree. -fn hash_subtree( - sub_tree_data: &mut [H::Hash], - input: &MerkleTreeInput<'_, F>, - relative_depth: usize, - prev_hashes: &[H::Hash], - config: &MerkleMultiLayerConfig, - index_in_layer: usize, -) where - F: IntoSlice, -{ - // First layer is special, as it is the only layer that might have inputs from the previous - // MultiLayer, and does not need to look at the current sub_tree for previous hash values. - let dst = sub_tree_data - .split_at_mut(1 << (config.sub_tree_height - 1)) - .0; - inject_and_hash_layer::( - prev_hashes, - dst, - &input - .get_columns(config.sub_tree_height + relative_depth) - .iter() - .map(|c| get_column_chunk(c, index_in_layer, config.n_sub_trees)) - .collect::>() - .iter(), - ); - - // Rest of the layers. 
- let mut offset_idx = 0; - for hashed_layer_idx in (1..(config.sub_tree_height)).rev() { - let hashed_layer_len = 1 << hashed_layer_idx; - let produced_layer_len = hashed_layer_len / 2; - let (s1, s2) = sub_tree_data.split_at_mut(offset_idx + hashed_layer_len); - let (prev_hashes, dst) = (&s1[offset_idx..], &mut s2[..produced_layer_len]); - offset_idx += hashed_layer_len; - - inject_and_hash_layer::( - prev_hashes, - dst, - &input - .get_columns(hashed_layer_idx + relative_depth) - .iter() - .map(|c| get_column_chunk(c, index_in_layer, config.n_sub_trees)) - .collect::>() - .iter(), - ); - } -} - -// TODO(Ohad): change according to the future implementation of get_layer_view() and -// get_root(). -impl Display for MerkleMultiLayer { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.data - .chunks(self.config.sub_tree_size) - .enumerate() - .for_each(|(i, c)| { - f.write_str(&std::format!("\nSubTree #[{}]:", i)).unwrap(); - for (i, h) in c.iter().enumerate() { - f.write_str(&std::format!("\nNode #[{}]: {}", i, h)) - .unwrap(); - } - }); - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use crate::commitment_scheme::blake3_hash::Blake3Hasher; - use crate::commitment_scheme::hasher::Hasher; - use crate::commitment_scheme::merkle_input::MerkleTreeInput; - use crate::commitment_scheme::merkle_multilayer; - use crate::core::fields::m31::M31; - - #[test] - pub fn multi_layer_init_test() { - let (sub_trees_height, n_sub_trees) = (4, 4); - let config = super::MerkleMultiLayerConfig::new(sub_trees_height, n_sub_trees); - let sub_tree_layer = super::MerkleMultiLayer::::new(config); - assert_eq!( - sub_tree_layer.data.len(), - ((1 << sub_trees_height) - 1) * n_sub_trees - ); - } - - #[test] - pub fn multi_layer_display_test() { - let (sub_trees_height, n_sub_trees) = (8, 8); - let config = super::MerkleMultiLayerConfig::new(sub_trees_height, n_sub_trees); - let multi_layer = super::MerkleMultiLayer::::new(config); - println!("{}", multi_layer); - } - - #[test] - pub fn get_roots_test() { - let (sub_trees_height, n_sub_trees) = (4, 4); - let config = super::MerkleMultiLayerConfig::new(sub_trees_height, n_sub_trees); - let mut multi_layer = super::MerkleMultiLayer::::new(config); - multi_layer - .data - .chunks_mut(multi_layer.config.sub_tree_size) - .enumerate() - .for_each(|(i, sub_tree)| { - sub_tree[sub_tree.len() - 1] = Blake3Hasher::hash(&i.to_le_bytes()); - }); - - let roots = multi_layer.get_roots(); - - assert_eq!(roots.len(), n_sub_trees); - roots - .enumerate() - .for_each(|(i, r)| assert_eq!(r, &Blake3Hasher::hash(&i.to_le_bytes()))); - } - - fn gen_example_column() -> Vec { - let mut trace_column = std::iter::repeat(M31::from_u32_unchecked(1)) - .take(8) - .collect::>(); - trace_column.extend( - std::iter::repeat(M31::from_u32_unchecked(2)) - .take(8) - .collect::>(), - ); - trace_column - } - - fn hash_symmetric_path( - initial_value: &[H::NativeType], - path_length: usize, - ) -> H::Hash { - (1..path_length).fold(H::hash(initial_value), |curr_hash, _| { - H::concat_and_hash(&curr_hash, &curr_hash) - }) - } - - fn assert_correct_roots( - initial_value_0: &[H::NativeType], - initial_value_1: &[H::NativeType], - path_length: usize, - roots: &[H::Hash], - ) { - let expected_root0 = hash_symmetric_path::(initial_value_0, path_length); - let expected_root1 = hash_symmetric_path::(initial_value_1, path_length); - assert_eq!(roots[0], expected_root0); - assert_eq!(roots[1], expected_root1); - } - - fn prepare_intermediate_initial_values() -> (Vec, Vec) { - // Column will get 
spread to one value per leaf. - let mut leaf_0_input: Vec = vec![]; - leaf_0_input.extend(Blake3Hasher::hash(b"a").as_ref()); - leaf_0_input.extend(Blake3Hasher::hash(b"a").as_ref()); - leaf_0_input.extend(&u32::to_le_bytes(1)); - let mut leaf_1_input: Vec = vec![]; - leaf_1_input.extend(Blake3Hasher::hash(b"b").as_ref()); - leaf_1_input.extend(Blake3Hasher::hash(b"b").as_ref()); - leaf_1_input.extend(&u32::to_le_bytes(2)); - (leaf_0_input, leaf_1_input) - } - - #[test] - pub fn hash_sub_tree_non_intermediate_test() { - // trace_column: [M31;16] = [1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2] - let trace_column = gen_example_column(); - let sub_trees_height = 4; - let mut input = MerkleTreeInput::new(); - input.insert_column(sub_trees_height, &trace_column); - let config = super::MerkleMultiLayerConfig::new(sub_trees_height, 2); - let mut multi_layer = super::MerkleMultiLayer::::new(config); - - merkle_multilayer::hash_subtree::( - &mut multi_layer.data[..multi_layer.config.sub_tree_size], - &input, - 0, - &[], - &multi_layer.config, - 0, - ); - merkle_multilayer::hash_subtree::( - &mut multi_layer.data[multi_layer.config.sub_tree_size..], - &input, - 0, - &[], - &multi_layer.config, - 1, - ); - let roots = multi_layer.get_roots(); - - assert_correct_roots::( - &u32::to_le_bytes(1), - &u32::to_le_bytes(2), - sub_trees_height, - &roots.copied().collect::>(), - ); - } - - #[test] - fn hash_sub_tree_intermediate_test() { - // trace_column: [M31;16] = [1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2] - let trace_column = gen_example_column(); - let mut prev_hash_values = vec![Blake3Hasher::hash(b"a"); 16]; - prev_hash_values.extend(vec![Blake3Hasher::hash(b"b"); 16]); - let sub_trees_height = 4; - let mut input = MerkleTreeInput::new(); - input.insert_column(sub_trees_height, &trace_column); - let config = super::MerkleMultiLayerConfig::new(sub_trees_height, 2); - let mut multi_layer = super::MerkleMultiLayer::::new(config); - let (leaf_0_input, leaf_1_input) = prepare_intermediate_initial_values(); - - merkle_multilayer::hash_subtree::( - &mut multi_layer.data[..multi_layer.config.sub_tree_size], - &input, - 0, - &prev_hash_values[..prev_hash_values.len() / 2], - &multi_layer.config, - 0, - ); - merkle_multilayer::hash_subtree::( - &mut multi_layer.data[multi_layer.config.sub_tree_size..], - &input, - 0, - &prev_hash_values[prev_hash_values.len() / 2..], - &multi_layer.config, - 1, - ); - let roots = multi_layer.get_roots(); - - assert_correct_roots::( - leaf_0_input.as_slice(), - leaf_1_input.as_slice(), - sub_trees_height, - &roots.copied().collect::>(), - ); - } - - #[test] - fn commit_layer_non_intermediate_test() { - // trace_column: [M31;16] = [1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2] - let trace_column = gen_example_column(); - let sub_trees_height = 4; - let n_sub_trees: usize = 2; - let mut input = MerkleTreeInput::new(); - input.insert_column( - sub_trees_height + n_sub_trees.ilog2() as usize, - &trace_column, - ); - let config = super::MerkleMultiLayerConfig::new(sub_trees_height, n_sub_trees); - let mut multi_layer = super::MerkleMultiLayer::::new(config); - - multi_layer.commit_layer::(&input, &[]); - let roots = multi_layer.get_roots(); - - assert_correct_roots::( - &u32::to_le_bytes(1), - &u32::to_le_bytes(2), - sub_trees_height, - &roots.copied().collect::>(), - ); - } - - #[test] - fn commit_layer_intermediate_test() { - // trace_column: [M31;16] = [1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2] - let trace_column = gen_example_column(); - let mut prev_hash_values = vec![Blake3Hasher::hash(b"a"); 16]; - 
prev_hash_values.extend(vec![Blake3Hasher::hash(b"b"); 16]); - let sub_trees_height = 4; - let n_sub_trees: usize = 2; - let mut input = MerkleTreeInput::new(); - input.insert_column( - sub_trees_height + n_sub_trees.ilog2() as usize, - &trace_column, - ); - let config = super::MerkleMultiLayerConfig::new(sub_trees_height, n_sub_trees); - let mut multi_layer = super::MerkleMultiLayer::::new(config); - let (leaf_0_input, leaf_1_input) = prepare_intermediate_initial_values(); - - multi_layer.commit_layer::(&input, &prev_hash_values); - let roots = multi_layer.get_roots(); - - assert_correct_roots::( - leaf_0_input.as_slice(), - leaf_1_input.as_slice(), - sub_trees_height, - &roots.copied().collect::>(), - ) - } - - #[test] - fn get_hash_at_test() { - let trace_column = (0..16).map(M31::from_u32_unchecked).collect::>(); - let sub_trees_height = 4; - let n_sub_trees: usize = 2; - let mut input = MerkleTreeInput::new(); - input.insert_column( - sub_trees_height + n_sub_trees.ilog2() as usize, - &trace_column, - ); - let config = super::MerkleMultiLayerConfig::new(sub_trees_height, 2); - let mut multi_layer = super::MerkleMultiLayer::::new(config); - multi_layer.commit_layer::(&input, &[]); - - let mut hasher = Blake3Hasher::new(); - hasher.update(&0_u32.to_le_bytes()); - let expected_hash_result = hasher.finalize_reset(); - let most_left_hash = multi_layer.get_hash_value(3, 0); - assert_eq!(most_left_hash, expected_hash_result); - hasher.update(&1_u32.to_le_bytes()); - let expected_hash_result = hasher.finalize_reset(); - let most_left_hash_sibling = multi_layer.get_hash_value(3, 1); - assert_eq!(most_left_hash_sibling, expected_hash_result); - - let expected_most_left_parent = - Blake3Hasher::concat_and_hash(&most_left_hash, &most_left_hash_sibling); - assert_eq!(multi_layer.get_hash_value(2, 0), expected_most_left_parent); - - hasher.update(&15_u32.to_le_bytes()); - let expected_hash_result = hasher.finalize_reset(); - let most_right_hash = multi_layer.get_hash_value(3, 15); - assert_eq!(most_right_hash, expected_hash_result); - } - - #[test] - #[should_panic] - fn get_hash_at_index_out_of_range_test() { - let sub_trees_height = 4; - let config = super::MerkleMultiLayerConfig::new(sub_trees_height, 2); - let multi_layer = super::MerkleMultiLayer::::new(config); - multi_layer.get_hash_value(3, 16); - } - - #[test] - #[should_panic] - fn get_hash_at_layer_index_out_of_range_test() { - let sub_trees_height = 4; - let config = super::MerkleMultiLayerConfig::new(sub_trees_height, 2); - let multi_layer = super::MerkleMultiLayer::::new(config); - multi_layer.get_hash_value(4, 0); - } -} diff --git a/src/commitment_scheme/merkle_tree.rs b/src/commitment_scheme/merkle_tree.rs deleted file mode 100644 index 34000d775..000000000 --- a/src/commitment_scheme/merkle_tree.rs +++ /dev/null @@ -1,238 +0,0 @@ -use std::cmp::max; -use std::collections::BTreeSet; -use std::fmt::{Debug, Display}; - -use itertools::Itertools; - -use super::hasher::Hasher; -use super::merkle_decommitment::MerkleDecommitment; -use crate::commitment_scheme::utils::{ - allocate_balanced_tree, column_to_row_major, hash_merkle_tree_from_bottom_layer, - tree_data_as_mut_ref, ColumnArray, TreeData, -}; -use crate::core::fields::{Field, IntoSlice}; -use crate::math::utils::{prev_pow_two, usize_div_ceil}; - -pub struct MerkleTree { - pub bottom_layer: Vec, - pub bottom_layer_block_size: usize, - pub bottom_layer_n_rows_in_node: usize, - pub data: TreeData, - pub height: usize, - phantom: std::marker::PhantomData, -} - -impl MerkleTree 
-where - T: IntoSlice, -{ - /// Commits on a given trace(matrix). - pub fn commit(trace: ColumnArray) -> Self { - let mut tree = Self::init_from_column_array(trace); - - hash_merkle_tree_from_bottom_layer::( - &tree.bottom_layer[..], - tree.bottom_layer_block_size * std::mem::size_of::(), - &mut tree_data_as_mut_ref(&mut tree.data)[..], - ); - - tree - } - - /// Builds the base layer of the tree from the given trace. - /// Allocates the rest of the tree. - // TODO(Ohad): add support for columns of different lengths. - fn init_from_column_array(trace: ColumnArray) -> Self { - assert!(!trace.is_empty()); - assert!(trace[0].len().is_power_of_two()); - trace.iter().for_each(|column| { - assert_eq!(column.len(), trace[0].len()); - }); - - let n_rows_in_node = std::cmp::min( - prev_pow_two(max( - H::BLOCK_SIZE / (trace.len() * std::mem::size_of::()), - 1, - )), - trace[0].len(), - ); - - let bottom_layer_block_size = n_rows_in_node * trace.len(); - let bottom_layer = column_to_row_major(trace); - - // Allocate rest of the tree. - let bottom_layer_length_nodes = usize_div_ceil(bottom_layer.len(), bottom_layer_block_size); - let tree_data = - allocate_balanced_tree(bottom_layer_length_nodes, H::BLOCK_SIZE, H::OUTPUT_SIZE); - - Self { - bottom_layer, - bottom_layer_block_size, - bottom_layer_n_rows_in_node: n_rows_in_node, - height: tree_data.len() + 1, // +1 for the bottom layer. - data: tree_data, - phantom: std::marker::PhantomData, - } - } - - pub fn root(&self) -> H::Hash { - (&self.data.last().unwrap()[..]).into() - } - - /// Generates a merkle decommitment for the given queries. Queries must be sorted. - pub fn generate_decommitment(&self, queries: Vec) -> MerkleDecommitment { - let leaf_block_indices: Vec = queries - .iter() - .map(|query| query / self.bottom_layer_n_rows_in_node) - .dedup() - .collect(); - let mut leaf_blocks = Vec::>::new(); - - // Input layer, every leaf-block holds 'bottom_layer_block_size' elements. - leaf_block_indices.iter().for_each(|block_index| { - leaf_blocks.push(self.get_leaf_block(*block_index)); - }); - - // TODO(Ohad): Change to Vec. - // Sorted indices of the current layer. - let mut curr_layer_indices = leaf_block_indices - .iter() - .map(|index| index ^ 1) - .collect::>(); - let mut layers = Vec::>::new(); - for i in 0..self.height - 2 { - let mut proof_layer = Vec::::with_capacity(curr_layer_indices.len()); - let mut indices_iterator = curr_layer_indices.iter().peekable(); - while let Some(q) = indices_iterator.next() { - let mut f = || -> Option<_> { - match indices_iterator.peek() { - // If both childs are in the layer, no extra data is needed to calculate - // parent. - Some(next_q) if *q % 2 == 0 && *q + 1 == **next_q => { - indices_iterator.next(); - None - } - _ => { - let node: H::Hash = - self.data[i][*q * H::OUTPUT_SIZE..(*q + 1) * H::OUTPUT_SIZE].into(); - Some(node) - } - } - }; - if let Some(node) = f() { - proof_layer.push(node); - } - } - layers.push(proof_layer); - - // Next layer indices are the parents' siblings. 
- curr_layer_indices = curr_layer_indices - .iter() - .map(|index| (index / 2) ^ 1) - .collect(); - } - MerkleDecommitment::new( - leaf_blocks, - layers, - self.bottom_layer_n_rows_in_node, - queries, - ) - } - - fn get_leaf_block(&self, block_index: usize) -> Vec { - assert!(block_index * self.bottom_layer_block_size < self.bottom_layer.len()); - Vec::from( - &self.bottom_layer[block_index * self.bottom_layer_block_size - ..(block_index + 1) * self.bottom_layer_block_size], - ) - } -} -#[cfg(test)] -mod tests { - use crate::commitment_scheme::blake3_hash::*; - use crate::commitment_scheme::hasher::Hasher; - use crate::commitment_scheme::utils::tests::generate_test_queries; - use crate::core::fields::m31::M31; - use crate::core::fields::IntoSlice; - - fn init_m31_test_trace(len: usize) -> Vec { - assert!(len.is_power_of_two()); - (0..len as u32).map(M31::from_u32_unchecked).collect() - } - - #[test] - pub fn from_matrix_test() { - const TRACE_LEN: usize = 16; - let trace = init_m31_test_trace(TRACE_LEN); - let matrix = vec![trace; 2]; - - let tree = super::MerkleTree::::init_from_column_array(matrix); - - assert_eq!(tree.bottom_layer.len(), 32); - assert_eq!(tree.height, 3); - (0..TRACE_LEN).for_each(|i| { - assert_eq!(tree.bottom_layer[i * 2], M31::from_u32_unchecked(i as u32)); - assert_eq!( - tree.bottom_layer[i * 2 + 1], - M31::from_u32_unchecked(i as u32) - ); - }); - } - - #[test] - pub fn commit_test() { - let trace = init_m31_test_trace(64); - let matrix = vec![trace; 4]; - - let tree_from_matrix = super::MerkleTree::::commit(matrix); - - assert_eq!( - hex::encode(tree_from_matrix.root()), - "c07e98e8a5d745ea99c3c3eac4c43b9df5ceb9e78973a785d90b3ffe4d5fcf5e" - ); - } - - #[test] - pub fn get_leaf_block_test() { - let trace = vec![init_m31_test_trace(128)]; - const BLOCK_LEN: usize = Blake3Hasher::BLOCK_SIZE / std::mem::size_of::(); - let tree_from_matrix = super::MerkleTree::::commit(trace); - let queries = generate_test_queries(100, 128); - - for query in queries { - let leaf_block_index = query / BLOCK_LEN; - assert_eq!( - tree_from_matrix.get_leaf_block(leaf_block_index), - tree_from_matrix.bottom_layer - [leaf_block_index * BLOCK_LEN..(leaf_block_index + 1) * BLOCK_LEN] - ); - } - } - - #[test] - pub fn test_decommitment() { - let trace = vec![init_m31_test_trace(128)]; - - let tree = super::MerkleTree::::commit(trace); - let queries: Vec = (16..64).collect(); - let decommitment = tree.generate_decommitment(queries); - - assert_eq!(decommitment.leaf_blocks.len(), 3); - - // Every leaf block in the first half of the trace is queried except for the first one, - // therefore it should be the only one who's hash is in the decommitment's first - // layer. - assert_eq!(decommitment.layers[0].len(), 1); - assert_eq!( - decommitment.layers[0][0], - Blake3Hasher::hash(>::into_slice(&tree.get_leaf_block(0))) - ); - - // The queried leaves' parents can be computed by verifer therefore excluded from the proof. - assert!(decommitment.layers[1].is_empty()); - - // A verifer can compute the left child of the root from the previous layer, therefore - // the proof only needs to contain the right child. 
- assert_eq!(decommitment.layers[2].len(), 1); - } -} diff --git a/src/commitment_scheme/mixed_degree_decommitment.rs b/src/commitment_scheme/mixed_degree_decommitment.rs deleted file mode 100644 index cabd4613d..000000000 --- a/src/commitment_scheme/mixed_degree_decommitment.rs +++ /dev/null @@ -1,221 +0,0 @@ -use std::iter::Peekable; - -use itertools::Itertools; -use merging_iterator::MergeIter; - -use super::hasher::Hasher; -use super::merkle_input::MerkleTreeColumnLayout; -use super::mixed_degree_merkle_tree::queried_nodes_in_layer; -use crate::core::fields::{Field, IntoSlice}; - -/// A Merkle proof of queried indices. -/// Used for storing a all the paths from the query leaves to the root. -/// A correctly generated decommitment should hold all the information needed to generate the root -/// of the tree, proving the queried values and the tree's column layout. -// TODO(Ohad): write printing functions. -#[derive(Debug, Default)] -pub struct MixedDecommitment { - pub hashes: Vec, - pub witness_elements: Vec, - - // TODO(Ohad): remove these in non-debug builds. - pub queried_values: Vec, - pub column_layout: MerkleTreeColumnLayout, -} - -impl MixedDecommitment { - pub fn new() -> Self { - Self::default() - } - - pub fn verify( - &self, - root: H::Hash, - queries: &[Vec], - mut queried_values: impl Iterator, - ) -> bool - where - F: IntoSlice, - { - let mut witness_hashes = self.hashes.iter(); - let sorted_queries_by_layer = self.column_layout.sort_queries_by_layer(queries); - - let mut next_layer_hashes = vec![]; - let mut ancestor_indices = vec![]; - let mut witness_elements = self.witness_elements.iter().copied(); - for i in (1..=self.column_layout.height()).rev() { - (next_layer_hashes, ancestor_indices) = Self::verify_single_layer( - i, - &sorted_queries_by_layer[i - 1], - &self.column_layout, - ancestor_indices.iter().copied().peekable(), - queried_values.by_ref(), - &mut witness_elements, - &mut witness_hashes, - next_layer_hashes.into_iter(), - ); - } - debug_assert_eq!(next_layer_hashes.len(), 1); - next_layer_hashes[0] == root - } - - #[allow(clippy::too_many_arguments)] - fn verify_single_layer<'a>( - layer_depth: usize, - queries_to_layer: &[Vec], - column_layout: &MerkleTreeColumnLayout, - mut previous_layers_indices: Peekable + Clone>, - mut queried_values: impl Iterator, - mut witness_elements: impl Iterator, - mut witness_hashes_iter: impl Iterator, - mut produced_hashes: impl Iterator, - ) -> (Vec, Vec) - where - F: IntoSlice, - { - let directly_queried_node_indices = - queried_nodes_in_layer(queries_to_layer.iter(), column_layout, layer_depth); - let mut node_indices = MergeIter::new( - directly_queried_node_indices.iter().copied(), - previous_layers_indices.clone().map(|q| q / 2), - ) - .collect_vec(); - node_indices.dedup(); - - // Instead of iterating over every query for every column in the layer, we advance the - // specific column query-iterator only when it's in the current node. - let mut column_query_iterators = queries_to_layer - .iter() - .map(|column_queries| column_queries.iter().peekable()) - .collect_vec(); - let mut next_layer_hashes = vec![]; - let mut hasher = H::new(); - for &node_index in &node_indices { - // Push correct child hashes to the hasher. 
- match previous_layers_indices.next_if(|hash_index| *hash_index / 2 == node_index) { - None if layer_depth < column_layout.height() => { - hasher.update(witness_hashes_iter.next().unwrap().as_ref()); - hasher.update(witness_hashes_iter.next().unwrap().as_ref()); - } - Some(hash_index) => { - if previous_layers_indices - .next_if(|&next_h| next_h ^ 1 == hash_index) - .is_some() - { - hasher.update(produced_hashes.next().unwrap().as_ref()); - hasher.update(produced_hashes.next().unwrap().as_ref()); - } else { - let (left_hash, right_hash) = if hash_index % 2 == 0 { - ( - produced_hashes.next().unwrap(), - *witness_hashes_iter.next().unwrap(), - ) - } else { - ( - *witness_hashes_iter.next().unwrap(), - produced_hashes.next().unwrap(), - ) - }; - hasher.update(left_hash.as_ref()); - hasher.update(right_hash.as_ref()); - } - } - _ => {} - } - - // Chunk size - according to the column's length and the current depth, we calculate the - // number of elements from that column 'injected' to the current node. - for (chunk_size, column_queries) in column_layout - .column_lengths_at_depth(layer_depth) - .iter() - .map(|&column_length| column_length >> (layer_depth - 1)) - .zip(&mut column_query_iterators) - { - for i in 0..chunk_size { - let column_chunk_start_index = chunk_size * node_index; - match column_queries.next_if(|&&q| q == i + column_chunk_start_index) { - Some(_) => hasher.update(F::into_slice(&[queried_values.next().unwrap()])), - None => hasher.update(F::into_slice(&[witness_elements.next().unwrap()])), - } - } - } - next_layer_hashes.push(hasher.finalize_reset()); - } - (next_layer_hashes, node_indices) - } -} - -#[cfg(test)] -mod tests { - use crate::commitment_scheme::blake3_hash::Blake3Hasher; - use crate::commitment_scheme::hasher::Hasher; - use crate::commitment_scheme::merkle_input::MerkleTreeInput; - use crate::commitment_scheme::mixed_degree_merkle_tree::MixedDegreeMerkleTree; - use crate::core::fields::m31::M31; - - #[test] - fn verify_test() { - const TREE_HEIGHT: usize = 4; - let mut input = MerkleTreeInput::::new(); - let column_length_8 = (80..88).map(M31::from_u32_unchecked).collect::>(); - let column_length_4 = (40..44).map(M31::from_u32_unchecked).collect::>(); - input.insert_column(TREE_HEIGHT, &column_length_8); - input.insert_column(TREE_HEIGHT - 1, &column_length_4); - input.insert_column(TREE_HEIGHT - 1, &column_length_8); - let (tree, commitment) = MixedDegreeMerkleTree::::commit_default(&input); - let queries: Vec> = vec![vec![2], vec![0_usize], vec![4, 7]]; - - let decommitment = tree.decommit(&input, &queries); - assert!(decommitment.verify( - commitment, - &queries, - decommitment.queried_values.iter().copied(), - )); - } - - #[test] - #[should_panic] - fn verify_proof_invalid_commitment_fails_test() { - const TREE_HEIGHT: usize = 4; - let mut input = MerkleTreeInput::::new(); - let column_length_8 = (80..88).map(M31::from_u32_unchecked).collect::>(); - let column_length_4 = (40..44).map(M31::from_u32_unchecked).collect::>(); - input.insert_column(TREE_HEIGHT, &column_length_8); - input.insert_column(TREE_HEIGHT - 1, &column_length_4); - input.insert_column(TREE_HEIGHT - 1, &column_length_8); - let (tree, _) = MixedDegreeMerkleTree::::commit_default(&input); - let false_commitment = Blake3Hasher::hash(b"false_commitment"); - - let queries: Vec> = vec![vec![2], vec![0_usize], vec![4, 7]]; - let decommitment = tree.decommit(&input, &queries); - - assert!(decommitment.verify( - false_commitment, - &queries, - decommitment.queried_values.iter().copied(), - )); - } 
- - #[test] - #[should_panic] - fn verify_amended_hash_witness_proof_fails_test() { - const TREE_HEIGHT: usize = 4; - let mut input = MerkleTreeInput::::new(); - let column_length_8 = (80..88).map(M31::from_u32_unchecked).collect::>(); - let column_length_4 = (40..44).map(M31::from_u32_unchecked).collect::>(); - input.insert_column(TREE_HEIGHT, &column_length_8); - input.insert_column(TREE_HEIGHT - 1, &column_length_4); - input.insert_column(TREE_HEIGHT - 1, &column_length_8); - let (tree, commitment) = MixedDegreeMerkleTree::::commit_default(&input); - - let queries: Vec> = vec![vec![2], vec![0_usize], vec![4, 7]]; - let mut decommitment = tree.decommit(&input, &queries); - decommitment.hashes[0] = Blake3Hasher::hash(b"false_hash"); - - assert!(decommitment.verify( - commitment, - &queries, - decommitment.queried_values.iter().copied(), - )); - } -} diff --git a/src/commitment_scheme/mixed_degree_merkle_tree.rs b/src/commitment_scheme/mixed_degree_merkle_tree.rs deleted file mode 100644 index 4d0890687..000000000 --- a/src/commitment_scheme/mixed_degree_merkle_tree.rs +++ /dev/null @@ -1,663 +0,0 @@ -use std::iter::Peekable; - -use itertools::Itertools; -use merging_iterator::MergeIter; - -use super::hasher::Hasher; -use super::merkle_input::{MerkleTreeColumnLayout, MerkleTreeInput}; -use super::merkle_multilayer::MerkleMultiLayer; -use super::mixed_degree_decommitment::MixedDecommitment; -use crate::commitment_scheme::merkle_multilayer::MerkleMultiLayerConfig; -use crate::commitment_scheme::utils::get_column_chunk; -use crate::core::fields::{Field, IntoSlice}; - -/// A mixed degree merkle tree. -/// Stored as a vector of ['MerkleMultiLayer']s, each with a configurable height. -/// Only stores the generated hash values. -/// -/// # Example -/// -/// ```rust -/// use stwo::commitment_scheme::merkle_input::MerkleTreeInput; -/// use stwo::commitment_scheme::mixed_degree_merkle_tree::*; -/// use stwo::commitment_scheme::blake3_hash::Blake3Hasher; -/// use stwo::core::fields::m31::M31; -/// -/// let mut input = MerkleTreeInput::::new(); -/// let column = vec![M31::from_u32_unchecked(0); 1024]; -/// input.insert_column(7, &column); -/// -/// -/// let (tree, commitment) = MixedDegreeMerkleTree::::commit_default(&input); -pub struct MixedDegreeMerkleTree { - column_layout: MerkleTreeColumnLayout, - multi_layers: Vec>, - _field: std::marker::PhantomData, -} - -/// Sets the heights of the multi layers in the tree in ascending order. -pub struct MixedDegreeMerkleTreeConfig { - pub multi_layer_sizes: Vec, -} - -impl<'a, F: Field, H: Hasher> MixedDegreeMerkleTree -where - F: IntoSlice, -{ - pub fn commit_default(input: &MerkleTreeInput<'a, F>) -> (Self, H::Hash) { - // Default configuration is a single tree. - let config = MixedDegreeMerkleTreeConfig { - multi_layer_sizes: vec![input.max_injected_depth()], - }; - - Self::commit(input, config) - } - - /// Generate a mixed degree merkle tree with configured ['MerkleMultiLayer'] sizes. - /// The sum of layer sizes must match the depth of the deepest column of the input. 
- /// - /// # Example - /// - /// ```rust - /// use stwo::commitment_scheme::merkle_input::MerkleTreeInput; - /// use stwo::commitment_scheme::mixed_degree_merkle_tree::*; - /// use stwo::commitment_scheme::blake3_hash::Blake3Hasher; - /// use stwo::core::fields::m31::M31; - /// - /// let mut input = MerkleTreeInput::::new(); - /// let column = vec![M31::from_u32_unchecked(0); 1024]; - /// input.insert_column(7, &column); - /// let config = MixedDegreeMerkleTreeConfig {multi_layer_sizes: vec![4,2,1]}; - /// - /// - /// let (tree, commitment) = MixedDegreeMerkleTree::::commit(&input, config); - pub fn commit( - input: &MerkleTreeInput<'a, F>, - config: MixedDegreeMerkleTreeConfig, - ) -> (Self, H::Hash) { - let mut tree = Self { - column_layout: input.column_layout(), - multi_layers: vec![], - _field: std::marker::PhantomData, - }; - tree.init_memory(config); - let root = tree.hash(input); - (tree, root) - } - - fn init_memory(&mut self, config: MixedDegreeMerkleTreeConfig) { - let tree_height = self.height(); - Self::validate_config(&config, tree_height); - - let mut layers = Vec::>::new(); - let mut current_depth = tree_height; - for layer_height in config.multi_layer_sizes.into_iter() { - let layer_config = - MerkleMultiLayerConfig::new(layer_height, 1 << (current_depth - layer_height)); - layers.push(MerkleMultiLayer::::new(layer_config)); - current_depth -= layer_height; - } - - self.multi_layers = layers; - } - - fn hash(&mut self, input: &MerkleTreeInput<'a, F>) -> H::Hash { - let mut curr_layer = self.height() - self.multi_layer_height(0); - // Bottom layer. - self.multi_layers[0].commit_layer::(input, &[]); - // Rest of the tree. - for i in 1..self.multi_layers.len() { - // TODO(Ohad): implement Hash oracle and avoid these copies. - let prev_hashes = self.multi_layers[i - 1] - .get_roots() - .copied() - .collect::>(); - debug_assert_eq!(prev_hashes.len(), 1 << (curr_layer)); - curr_layer -= self.multi_layer_height(i); - self.multi_layers[i].commit_layer::(input, &prev_hashes); - } - let mut top_layer_roots = self.multi_layers.last().unwrap().get_roots(); - let root = top_layer_roots - .next() - .expect("Top layer should have exactly one root") - .to_owned(); - debug_assert_eq!(top_layer_roots.count(), 0); - root - } - - pub fn height(&self) -> usize { - self.column_layout.height() - } - - /// Generates a mixed degree merkle decommitment. - /// - /// # Arguments - /// - /// * 'queries' - A sequence of queries to each of the columns. Expected to be ordered by the - /// order in which the columns were inserted to the tree. - /// - /// # Example - /// - /// ```rust - /// use stwo::commitment_scheme::blake3_hash::Blake3Hasher; - /// use stwo::commitment_scheme::merkle_input::MerkleTreeInput; - /// use stwo::commitment_scheme::mixed_degree_merkle_tree::MixedDegreeMerkleTree; - /// use stwo::core::fields::m31::M31; - /// - /// let mut input = MerkleTreeInput::::new(); - /// let column_0 = vec![M31::from_u32_unchecked(0); 1024]; - /// let column_1 = vec![M31::from_u32_unchecked(0); 512]; - /// input.insert_column(7, &column_0); - /// input.insert_column(6, &column_1); - /// let (tree, commitment) = MixedDegreeMerkleTree::::commit_default(&input); - /// - /// let queries = vec![vec![0], vec![300, 511]]; - /// let decommitment = tree.decommit(&input, queries.as_ref()); - /// ``` - // TODO(Ohad): introduce a proper query struct, then deprecate 'drain' usage and accepting vecs. 
- pub fn decommit( - &self, - input: &MerkleTreeInput<'a, F>, - queries: &[Vec], - ) -> MixedDecommitment { - assert_eq!( - queries.len(), - input.n_injected_columns(), - "Number of query vectors does not match number of injected columns." - ); - let mut decommitment = MixedDecommitment::::new(); - let queries_to_layers = input.column_layout().sort_queries_by_layer(queries); - - // Decommitment layers are built from the bottom up, excluding the root. - let mut ancestor_indices = vec![]; - (1..=input.max_injected_depth()).rev().for_each(|i| { - ancestor_indices = self.decommit_single_layer( - input, - i, - &queries_to_layers[i - 1], - ancestor_indices.iter().copied().peekable(), - &mut decommitment, - ); - }); - decommitment.column_layout = input.column_layout(); - decommitment - } - - pub fn get_hash_at(&self, layer_depth: usize, position: usize) -> H::Hash { - // Determine correct multilayer - let mut depth_accumulator = layer_depth; - for multi_layer in self.multi_layers.iter().rev() { - let multi_layer_height = multi_layer.config.sub_tree_height; - if multi_layer_height > depth_accumulator { - return multi_layer.get_hash_value(depth_accumulator, position); - } - depth_accumulator -= multi_layer_height; - } - panic!() - } - - pub fn root(&self) -> H::Hash { - match &self.multi_layers.last() { - Some(top_layer) => { - let mut roots = top_layer.get_roots(); - assert_eq!(roots.len(), 1, "Top layer should have exactly one root"); - *roots.next().unwrap() - } - None => panic!("Empty tree!"), - } - } - - fn validate_config(config: &MixedDegreeMerkleTreeConfig, tree_height: usize) { - let config_tree_height = config.multi_layer_sizes.iter().sum::(); - assert_eq!( - config.multi_layer_sizes.iter().sum::(), - tree_height, - "Sum of the layer heights {} does not match merkle input size {}.", - config_tree_height, - tree_height - ); - } - - fn multi_layer_height(&self, layer_index: usize) -> usize { - assert!(layer_index < self.multi_layers.len()); - self.multi_layers[layer_index].config.sub_tree_height - } - - // Generates the witness of a single layer and adds it to the decommitment. - // 'previous_layer_indices' - node indices that are part of the witness for a query below . - // 'queries_to_layer'- queries to columns at this layer. - fn decommit_single_layer( - &self, - input: &MerkleTreeInput<'a, F>, - layer_depth: usize, - queries_to_layer: &[Vec], - mut previous_layers_indices: Peekable + Clone>, - decommitment: &mut MixedDecommitment, - ) -> Vec { - let directly_queried_node_indices = - queried_nodes_in_layer(queries_to_layer.iter(), &self.column_layout, layer_depth); - let mut index_value_iterator = directly_queried_node_indices - .iter() - .copied() - .zip(Self::layer_felt_witnesses_and_queried_elements( - input, - layer_depth, - queries_to_layer.iter(), - directly_queried_node_indices.iter().copied(), - )) - .peekable(); - let mut node_indices = MergeIter::new( - directly_queried_node_indices.iter().copied(), - previous_layers_indices.clone().map(|q| q / 2), - ) - .collect_vec(); - node_indices.dedup(); - - for &node_index in node_indices.iter() { - match previous_layers_indices.next_if(|&q| q / 2 == node_index) { - None if layer_depth < self.height() => { - // If the node is not a direct query, include both hashes. 
- let (l_hash, r_hash) = self.child_hashes(node_index, layer_depth); - decommitment.hashes.push(l_hash); - decommitment.hashes.push(r_hash); - } - Some(q) - if previous_layers_indices - .next_if(|&next_q| next_q ^ 1 == q) - .is_none() => - { - decommitment.hashes.push(self.sibling_hash(q, layer_depth)); - } - _ => {} - } - - if let Some((_, (witness, queried))) = - index_value_iterator.next_if(|(n, _)| *n == node_index) - { - decommitment.witness_elements.extend(witness); - decommitment.queried_values.extend(queried); - } else { - let injected_elements = input.get_injected_elements(layer_depth, node_index); - decommitment.witness_elements.extend(injected_elements); - } - } - node_indices - } - - // Returns the felt witnesses and queried elements for the given node indices in the specified - // layer. Assumes that the queries & node indices are sorted in ascending order. - fn layer_felt_witnesses_and_queried_elements( - input: &MerkleTreeInput<'a, F>, - layer_depth: usize, - queries: impl Iterator>, - node_indices: impl ExactSizeIterator, - ) -> Vec<(Vec, Vec)> { - let mut witnesses_and_queried_values_by_node = vec![(vec![], vec![]); node_indices.len()]; - let mut column_query_iterators = queries - .map(|column_queries| column_queries.iter().peekable()) - .collect_vec(); - - // For every node --> For every column --> For every column chunk --> Append - // queried/witness elements according to that layer's queries. - for (node_index, (witness_elements, queried_elements)) in - node_indices.zip(witnesses_and_queried_values_by_node.iter_mut()) - { - for (column, column_queries) in input - .get_columns(layer_depth) - .iter() - .zip(column_query_iterators.iter_mut()) - { - let column_chunk = get_column_chunk(column, node_index, 1 << (layer_depth - 1)); - let column_chunk_start_index = column_chunk.len() * node_index; - for (i, &felt) in column_chunk.iter().enumerate() { - match column_queries.next_if(|&&q| q == i + column_chunk_start_index) { - Some(_) => queried_elements.push(felt), - None => witness_elements.push(felt), - } - } - } - } - - witnesses_and_queried_values_by_node - } - - fn sibling_hash(&self, query: usize, layer_depth: usize) -> H::Hash { - self.get_hash_at(layer_depth, query ^ 1) - } - - fn child_hashes(&self, node_index: usize, layer_depth: usize) -> (H::Hash, H::Hash) { - ( - self.get_hash_at(layer_depth, node_index * 2), - self.get_hash_at(layer_depth, node_index * 2 + 1), - ) - } -} - -/// Translates queries of the form to the form -/// Input queries are per column, i.e `queries[0]` is a vector of queries for the first column that -/// was inserted to the tree's input in that layer. 
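The translation described in the doc comment above reduces to shifting each element query right by the per-node bag size, then sorting and deduplicating. A self-contained sketch of the same arithmetic, operating on raw column lengths instead of a `MerkleTreeColumnLayout` (names are illustrative):

```rust
/// Translates per-column element queries into the set of queried node indices
/// in a layer that holds `1 << (layer_depth - 1)` nodes.
fn nodes_queried_in_layer(
    column_queries: &[Vec<usize>],
    column_lengths: &[usize],
    layer_depth: usize,
) -> Vec<usize> {
    let log_n_nodes_in_layer = layer_depth - 1;
    let mut node_queries: Vec<usize> = column_queries
        .iter()
        .zip(column_lengths)
        .flat_map(|(queries, &len)| {
            // Each node of the layer "bags" this many consecutive column elements.
            let log_n_elements_in_bag = (len.ilog2() as usize) - log_n_nodes_in_layer;
            queries.iter().map(move |&q| q >> log_n_elements_in_bag)
        })
        .collect();
    node_queries.sort();
    node_queries.dedup();
    node_queries
}

fn main() {
    // A length-8 column and a length-4 column injected at depth 3 (4 nodes in the layer):
    // queries [3, 7] and [1, 2] map to nodes [1, 2, 3].
    assert_eq!(
        nodes_queried_in_layer(&[vec![3, 7], vec![1, 2]], &[8, 4], 3),
        vec![1, 2, 3]
    );
}
```

The worked example matches the depth-3 expectation in `translate_queries_test` further down ([1, 3] union [1, 2] = [1, 2, 3]).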
-pub fn queried_nodes_in_layer<'a>( - queries: impl Iterator>, - column_layout: &MerkleTreeColumnLayout, - layer_depth: usize, -) -> Vec { - let columns_lengths = column_layout.column_lengths_at_depth(layer_depth); - let column_log_lengths = columns_lengths.iter().map(|c_len| c_len.ilog2() as usize); - let mut node_queries = queries - .into_iter() - .zip(column_log_lengths) - .flat_map(|(column_queries, log_column_length)| { - let log_n_bags_in_layer = layer_depth - 1; - let log_n_elements_in_bag = log_column_length - log_n_bags_in_layer; - column_queries - .iter() - .map(move |q| q >> log_n_elements_in_bag) - }) - .collect::>(); - node_queries.sort(); - node_queries.dedup(); - node_queries -} - -#[cfg(test)] -mod tests { - use std::vec; - - use itertools::Itertools; - - use super::{MixedDegreeMerkleTree, MixedDegreeMerkleTreeConfig}; - use crate::commitment_scheme::blake3_hash::Blake3Hasher; - use crate::commitment_scheme::hasher::Hasher; - use crate::commitment_scheme::merkle_input::MerkleTreeInput; - use crate::commitment_scheme::mixed_degree_merkle_tree::queried_nodes_in_layer; - use crate::core::fields::m31::M31; - use crate::core::fields::Field; - use crate::m31; - - fn hash_symmetric_path( - initial_value: &[H::NativeType], - path_length: usize, - ) -> H::Hash { - (1..path_length).fold(H::hash(initial_value), |curr_hash, _| { - H::concat_and_hash(&curr_hash, &curr_hash) - }) - } - - #[test] - fn commit_configured_multi_layer_sizes_test() { - let mut input = super::MerkleTreeInput::::new(); - let column = vec![M31::from_u32_unchecked(0); 1 << 12]; - input.insert_column(12, &column); - - let multi_layer_sizes = [5, 4, 3].to_vec(); - let (tree, _root) = MixedDegreeMerkleTree::::commit( - &input, - MixedDegreeMerkleTreeConfig { - multi_layer_sizes: multi_layer_sizes.clone(), - }, - ); - - let mut remaining_height = multi_layer_sizes.iter().sum::(); - multi_layer_sizes - .iter() - .enumerate() - .for_each(|(i, layer_height)| { - assert_eq!(tree.multi_layers[i].config.sub_tree_height, *layer_height); - assert_eq!( - tree.multi_layers[i].config.n_sub_trees, - 1 << (remaining_height - layer_height) - ); - remaining_height -= layer_height; - }); - } - - #[test] - #[should_panic] - fn mixed_degree_merkle_tree_bad_config_test() { - let mut input = super::MerkleTreeInput::::new(); - let column = vec![M31::from_u32_unchecked(0); 4096]; - input.insert_column(12, &column); - - // This should panic because the sum of the layer heights is not equal to the tree height - // deferred by the input. 
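The `hash_symmetric_path` test helper defined above repeatedly hashes a node with itself to climb a path of identical siblings. A standalone sketch of the same idea written directly against the `blake3` crate, assuming a parent is hashed as `blake3(left_bytes || right_bytes)` in place of the removed `concat_and_hash` (requires the `blake3` crate):

```rust
// Sketch only: assumes a parent node is hashed as blake3(left_bytes || right_bytes),
// mirroring how the removed test helper uses `concat_and_hash`.
fn hash_symmetric_path(initial_value: &[u8], path_length: usize) -> blake3::Hash {
    (1..path_length).fold(blake3::hash(initial_value), |curr, _| {
        let mut hasher = blake3::Hasher::new();
        hasher.update(curr.as_bytes());
        hasher.update(curr.as_bytes());
        hasher.finalize()
    })
}

fn main() {
    // Hash 0u32 once, then fold the result with itself five more times.
    let root = hash_symmetric_path(&0u32.to_le_bytes(), 6);
    println!("{}", root.to_hex());
}
```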
- MixedDegreeMerkleTree::::commit( - &input, - MixedDegreeMerkleTreeConfig { - multi_layer_sizes: [5, 4, 2].to_vec(), - }, - ); - } - - #[test] - fn commit_default_test() { - const TREE_HEIGHT: usize = 8; - const INJECT_DEPTH: usize = 3; - let mut input = super::MerkleTreeInput::::new(); - let base_column = vec![M31::from_u32_unchecked(0); 1 << (TREE_HEIGHT)]; - let injected_column = vec![M31::from_u32_unchecked(1); 1 << (INJECT_DEPTH - 1)]; - input.insert_column(TREE_HEIGHT + 1, &base_column); - input.insert_column(INJECT_DEPTH, &injected_column); - - let expected_hash_at_injected_depth = hash_symmetric_path::( - 0_u32.to_le_bytes().as_ref(), - TREE_HEIGHT + 1 - INJECT_DEPTH, - ); - let mut sack_at_injected_depth = expected_hash_at_injected_depth.as_ref().to_vec(); - sack_at_injected_depth.extend(expected_hash_at_injected_depth.as_ref().to_vec()); - sack_at_injected_depth.extend(1u32.to_le_bytes()); - let expected_result = - hash_symmetric_path::(sack_at_injected_depth.as_ref(), INJECT_DEPTH); - - let (_, root) = MixedDegreeMerkleTree::::commit_default(&input); - assert_eq!(root, expected_result); - } - - #[test] - fn commit_configured_test() { - const TREE_HEIGHT: usize = 8; - const INJECT_DEPTH: usize = 3; - let mut input = super::MerkleTreeInput::::new(); - let base_column = vec![M31::from_u32_unchecked(0); 1 << (TREE_HEIGHT)]; - let injected_column = vec![M31::from_u32_unchecked(1); 1 << (INJECT_DEPTH - 1)]; - input.insert_column(TREE_HEIGHT + 1, &base_column); - input.insert_column(INJECT_DEPTH, &injected_column); - - let config = super::MixedDegreeMerkleTreeConfig { - multi_layer_sizes: vec![5, 2, 2], - }; - let (_, expected) = MixedDegreeMerkleTree::::commit_default(&input); - - let (_, root) = MixedDegreeMerkleTree::::commit(&input, config); - - assert_eq!(root, expected); - } - - #[test] - fn get_hash_at_test() { - const TREE_HEIGHT: usize = 3; - let mut input = super::MerkleTreeInput::::new(); - let base_column = (0..4).map(M31::from_u32_unchecked).collect::>(); - input.insert_column(TREE_HEIGHT, &base_column); - - let (tree, root) = MixedDegreeMerkleTree::::commit_default(&input); - assert_eq!(root, tree.get_hash_at(0, 0)); - - let mut hasher = Blake3Hasher::new(); - hasher.update(&0_u32.to_le_bytes()); - // hasher.update(&1_u32.to_le_bytes()); - let expected_hash_at_2_0 = hasher.finalize_reset(); - let hash_at_2_0 = tree.get_hash_at(2, 0); - assert_eq!(hash_at_2_0, expected_hash_at_2_0); - - hasher.update(&2_u32.to_le_bytes()); - let expected_hash_at_2_2 = hasher.finalize_reset(); - let hash_at_2_2 = tree.get_hash_at(2, 2); - assert_eq!(hash_at_2_2, expected_hash_at_2_2); - hasher.update(&3_u32.to_le_bytes()); - let expected_hash_at_2_3 = hasher.finalize_reset(); - let hash_at_2_3 = tree.get_hash_at(2, 3); - assert_eq!(hash_at_2_3, expected_hash_at_2_3); - - let expected_parent_of_2_2_and_2_3 = - Blake3Hasher::concat_and_hash(&expected_hash_at_2_2, &expected_hash_at_2_3); - let parent_of_2_2_and_2_3 = tree.get_hash_at(1, 1); - assert_eq!(parent_of_2_2_and_2_3, expected_parent_of_2_2_and_2_3); - } - - #[test] - #[should_panic] - fn get_hash_at_invalid_layer_test() { - const TREE_HEIGHT: usize = 3; - let mut input = super::MerkleTreeInput::::new(); - let base_column = (0..4).map(M31::from_u32_unchecked).collect::>(); - input.insert_column(TREE_HEIGHT, &base_column); - let (tree, _) = MixedDegreeMerkleTree::::commit_default(&input); - tree.get_hash_at(4, 0); - } - - // TODO(Ohad): remove after test sub-routine is used. 
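`get_hash_at_test` above relies on the standard index arithmetic of a binary tree layer: node `n` has children `2n` and `2n + 1` one layer down, and the sibling of index `q` is `q ^ 1`, exactly as `sibling_hash` and `child_hashes` use earlier in this file. Restated as a tiny standalone sketch:

```rust
/// Index of a node's sibling within the same layer.
fn sibling_index(index: usize) -> usize {
    index ^ 1
}

/// Indices of a node's two children in the layer below.
fn child_indices(node_index: usize) -> (usize, usize) {
    (node_index * 2, node_index * 2 + 1)
}

fn main() {
    assert_eq!(sibling_index(6), 7);
    assert_eq!(sibling_index(7), 6);
    // Node 1 at depth 1 has children 2 and 3 at depth 2, as the test above checks.
    assert_eq!(child_indices(1), (2, 3));
}
```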
- fn translate_queries( - mut queries: Vec>, - input: &MerkleTreeInput<'_, F>, - ) -> Vec> { - (1..=input.max_injected_depth()) - .rev() - .map(|i| { - let n_columns_injected_at_depth = input.get_columns(i).len(); - let column_queries_at_depth = queries - .drain(..n_columns_injected_at_depth) - .collect::>(); - super::queried_nodes_in_layer( - column_queries_at_depth.iter(), - &input.column_layout(), - i, - ) - }) - .collect::>>() - } - - #[test] - fn translate_queries_test() { - let col_length_8 = [m31!(0); 8]; - let col_length_4 = [m31!(0); 4]; - let mut merkle_input = MerkleTreeInput::::new(); - - // Column Length 8 -> depth 4 - // Column Length 8 -> depth 3 - // Column Length 4 -> depth 3 - merkle_input.insert_column(4, &col_length_8); - merkle_input.insert_column(3, &col_length_8); - merkle_input.insert_column(3, &col_length_4); - - let first_column_queries = [0, 7]; - let second_column_queries = [3, 7]; - let third_column_queries = [1, 2]; - - let expeted_queries_at_depth_4 = [0, 7]; - let expeted_queries_at_depth_3 = [1, 2, 3]; // [1,3] U [1,2] - - let translated_queries = translate_queries( - vec![ - first_column_queries.to_vec(), - second_column_queries.to_vec(), - third_column_queries.to_vec(), - ], - &merkle_input, - ); - - assert_eq!(translated_queries[0], expeted_queries_at_depth_4); - assert_eq!(translated_queries[1], expeted_queries_at_depth_3); - assert_eq!(translated_queries[2], vec![]); - assert_eq!(translated_queries[3], vec![]); - } - - #[test] - fn build_node_felt_witness_test() { - let col_length_16 = (0..16).map(M31::from_u32_unchecked).collect::>(); - let col_length_8 = (0..8).map(M31::from_u32_unchecked).collect::>(); - let col_length_4 = (0..4).map(M31::from_u32_unchecked).collect::>(); - let mut merkle_input = MerkleTreeInput::::new(); - - // Column Length 8 -> depth 4 - // Column Length 8 -> depth 3 - // Column Length 4 -> depth 3 - merkle_input.insert_column(4, &col_length_16); - merkle_input.insert_column(4, &col_length_8); - merkle_input.insert_column(3, &col_length_8); - merkle_input.insert_column(3, &col_length_4); - let (tree, _root) = - MixedDegreeMerkleTree::::commit_default(&merkle_input); - - let zero_column_queries = vec![0, 15]; - let first_column_queries = vec![0, 7]; - let second_column_queries = vec![3, 7]; - let third_column_queries = vec![1, 2]; - let queries = vec![ - zero_column_queries, - first_column_queries, - second_column_queries, - third_column_queries, - ]; - - let node_indices = queried_nodes_in_layer(queries.iter().take(2), &tree.column_layout, 4); - let w4 = - MixedDegreeMerkleTree::::layer_felt_witnesses_and_queried_elements( - &merkle_input, - 4, - queries[..2].iter(), - node_indices.iter().copied(), - ); - let node_indices = queried_nodes_in_layer(queries.iter().skip(2), &tree.column_layout, 3); - let w3 = - MixedDegreeMerkleTree::::layer_felt_witnesses_and_queried_elements( - &merkle_input, - 4, - queries[2..4].iter(), - node_indices.iter().copied(), - ); - - assert_eq!( - format!("{:?}", w4), - "[([M31(1)], [M31(0), M31(0)]), ([M31(14)], [M31(15), M31(7)])]" - ); - assert_eq!( - format!("{:?}", w3), - "[([M31(2)], [M31(3), M31(1)]), ([M31(4), M31(5)], [M31(2)]), ([M31(6), M31(3)], [M31(7)])]" - ); - } - - #[test] - fn decommit_test() { - let mut input = MerkleTreeInput::::new(); - let column_0 = (1600..1616).map(M31::from_u32_unchecked).collect_vec(); - let column_1 = (800..808).map(M31::from_u32_unchecked).collect_vec(); - let column_2 = (400..404).map(M31::from_u32_unchecked).collect_vec(); - let column_3 = 
(0..4096).map(M31::from_u32_unchecked).collect_vec(); - - const TREE_HEIGHT: usize = 8; - input.insert_column(TREE_HEIGHT, &column_3); - input.insert_column(TREE_HEIGHT - 5, &column_1); - input.insert_column(TREE_HEIGHT, &column_3); - input.insert_column(TREE_HEIGHT - 4, &column_0); - input.insert_column(TREE_HEIGHT - 6, &column_2); - input.insert_column(TREE_HEIGHT - 4, &column_1); - input.insert_column(TREE_HEIGHT, &column_3); - let (tree, commitment) = MixedDegreeMerkleTree::::commit_default(&input); - let queries: Vec> = vec![ - vec![2], - vec![0], - vec![], - vec![3], - vec![0, 1, 2, 3], - vec![4, 7], - vec![0, 1, 1000, 4095], - ]; - - let test_decommitment = tree.decommit(&input, &queries); - assert!(test_decommitment.verify( - commitment, - &queries, - test_decommitment.queried_values.iter().copied() - )); - } -} diff --git a/src/commitment_scheme/mod.rs b/src/commitment_scheme/mod.rs index c7108e08c..81022e141 100644 --- a/src/commitment_scheme/mod.rs +++ b/src/commitment_scheme/mod.rs @@ -3,13 +3,6 @@ pub mod blake2_merkle; pub mod blake2s_ref; pub mod blake3_hash; pub mod hasher; -pub mod merkle_decommitment; -pub mod merkle_input; -pub mod merkle_multilayer; -pub mod merkle_tree; -pub mod mixed_degree_decommitment; -pub mod mixed_degree_merkle_tree; pub mod ops; pub mod prover; -pub mod utils; pub mod verifier; diff --git a/src/commitment_scheme/ops.rs b/src/commitment_scheme/ops.rs index 2ea09d1da..174c68c31 100644 --- a/src/commitment_scheme/ops.rs +++ b/src/commitment_scheme/ops.rs @@ -11,7 +11,7 @@ use crate::core::fields::m31::BaseField; /// At each layer, the tree may have multiple columns of the same length as the layer. /// Each node in that layer contains one value from each column. pub trait MerkleHasher: Debug { - type Hash: Clone + Eq + std::fmt::Debug; + type Hash: Copy + Clone + Eq + std::fmt::Debug; /// Hashes a single Merkle node. See [MerkleHasher] for more details. fn hash_node( children_hashes: Option<(Self::Hash, Self::Hash)>, diff --git a/src/commitment_scheme/prover.rs b/src/commitment_scheme/prover.rs index c240562cf..aba82b078 100644 --- a/src/commitment_scheme/prover.rs +++ b/src/commitment_scheme/prover.rs @@ -209,7 +209,8 @@ impl MerkleDecommitment { impl Clone for MerkleDecommitment { fn clone(&self) -> Self { Self { - witness: self.witness.clone(), + hash_witness: self.hash_witness.clone(), + column_witness: self.column_witness.clone(), } } } diff --git a/src/commitment_scheme/utils.rs b/src/commitment_scheme/utils.rs deleted file mode 100644 index 34420c297..000000000 --- a/src/commitment_scheme/utils.rs +++ /dev/null @@ -1,693 +0,0 @@ -use std::collections::BTreeMap; -use std::slice::Iter; - -use super::hasher::Hasher; -use crate::core::fields::{Field, IntoSlice}; -use crate::math::utils::{log2_ceil, usize_safe_div}; - -pub type ColumnArray = Vec>; -pub type ColumnLengthMap = BTreeMap>; -pub type TreeLayer = Box<[T]>; -pub type TreeData = Box<[TreeLayer]>; - -pub fn allocate_layer(n_bytes: usize) -> TreeLayer { - // Safe bacuase 0 is a valid u8 value. - unsafe { Box::<[T]>::new_zeroed_slice(n_bytes).assume_init() } -} - -pub fn allocate_balanced_tree( - bottom_layer_length: usize, - size_of_node_bytes: usize, - output_size_bytes: usize, -) -> TreeData { - assert!(output_size_bytes.is_power_of_two()); - let tree_height = log2_ceil(bottom_layer_length * size_of_node_bytes / output_size_bytes); - - // Safe because pointers are initialized later. 
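The `ops.rs` hunk above tightens the `Hash` associated type bound from `Clone` to `Copy`; this is what allows the verifier hunk further down to write `*hash` instead of `hash.clone()`. A toy sketch of the difference (the trait and types here are illustrative stand-ins, not the crate's):

```rust
/// Toy stand-in for a hasher trait whose digest type is Copy.
trait ToyMerkleHasher {
    type Hash: Copy + Eq + std::fmt::Debug;
}

struct ToyBlake;

impl ToyMerkleHasher for ToyBlake {
    // A fixed-size byte array is Copy, so it can be read out of a slice
    // or an iterator of references without cloning.
    type Hash = [u8; 32];
}

fn first_hash<H: ToyMerkleHasher>(hashes: &[H::Hash]) -> Option<H::Hash> {
    // With a Copy bound this is a plain copy out of the slice;
    // with only Clone it would have to be `hashes.first().cloned()`.
    hashes.first().copied()
}

fn main() {
    let hashes: Vec<<ToyBlake as ToyMerkleHasher>::Hash> = vec![[0u8; 32], [1u8; 32]];
    assert_eq!(first_hash::<ToyBlake>(&hashes), Some([0u8; 32]));
}
```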
- let mut data: TreeData = unsafe { TreeData::new_zeroed_slice(tree_height).assume_init() }; - for i in 0..tree_height { - let layer = allocate_layer( - 2_usize.pow((tree_height - i - 1).try_into().expect("Failed cast!")) - * output_size_bytes, - ); - data[i] = layer; - } - data -} - -/// Performes a 2-to-1 hash on a layer of a merkle tree. -pub fn hash_layer(layer: &[H::NativeType], node_size: usize, dst: &mut [H::NativeType]) { - let n_nodes_in_layer = usize_safe_div(layer.len(), node_size); - assert!(n_nodes_in_layer.is_power_of_two()); - assert!(n_nodes_in_layer <= dst.len() / H::OUTPUT_SIZE); - - let src_ptrs: Vec<*const H::NativeType> = (0..n_nodes_in_layer) - .map(|i| unsafe { layer.as_ptr().add(node_size * i) }) - .collect(); - let dst_ptrs: Vec<*mut H::NativeType> = (0..n_nodes_in_layer) - .map(|i| unsafe { dst.as_mut_ptr().add(H::OUTPUT_SIZE * i) }) - .collect(); - - // Safe because pointers are valid and distinct. - unsafe { - H::hash_many_in_place(&src_ptrs, node_size, &dst_ptrs); - } -} - -// Given a data of a tree, hashes the entire tree. -pub fn hash_merkle_tree(data: &mut [&mut [H::NativeType]]) { - (0..data.len() - 1).for_each(|i| { - let (src, dst) = data.split_at_mut(i + 1); - let src = src.get(i).unwrap(); - let dst = dst.get_mut(0).unwrap(); - hash_layer::(src, H::BLOCK_SIZE, dst) - }) -} - -/// Given a data of a tree, and a bottom layer of 'bottom_layer_node_size_bytes' sized nodes, hashes -/// the entire tree. Nodes are hashed individually at the bottom layer. -// TODO(Ohad): Write a similiar function for when F does not implement IntoSlice(Non le platforms). -pub fn hash_merkle_tree_from_bottom_layer<'a, F: Field, H: Hasher>( - bottom_layer: &[F], - bottom_layer_node_size_bytes: usize, - data: &mut [&mut [H::NativeType]], -) where - F: IntoSlice, - H::NativeType: 'a, -{ - // Hash bottom layer. - let dst_slice = data.get_mut(0).expect("Empty tree!"); - let bottom_layer_data: &[H::NativeType] = - >::into_slice(bottom_layer); - hash_layer::(bottom_layer_data, bottom_layer_node_size_bytes, dst_slice); - - // Rest of the sub-tree - hash_merkle_tree::(data); -} - -/// Maps columns by length. -/// Mappings are sorted by length. i.e the first entry is a matrix of the shortest columns. -pub fn map_columns_sorted(cols: ColumnArray) -> ColumnLengthMap { - let mut columns_length_map: ColumnLengthMap = BTreeMap::new(); - for c in cols { - let length_index_entry = columns_length_map.entry(c.len()).or_default(); - length_index_entry.push(c); - } - columns_length_map -} - -/// Given columns of the same length, transforms to bytes and concatenates corresponding column -/// elements. Assumes columns are of the same length. -/// -/// # Safety -/// -/// Pointers in 'dst' should point to pre-allocated memory with enough space to store -/// column_array.len() amount of u32 elements. -// TODO(Ohad): Change tree impl and remove. -pub unsafe fn transpose_to_bytes(column_array: &ColumnArray, dst: &[*mut u8]) { - let column_length = column_array[0].len(); - - for (i, ptr) in dst.iter().enumerate().take(column_length) { - unsafe { - let mut dst_ptr = *ptr; - for c in column_array { - std::ptr::copy_nonoverlapping( - c.as_ptr().add(i) as *mut u8, - dst_ptr, - std::mem::size_of::(), - ); - dst_ptr = dst_ptr.add(std::mem::size_of::()); - } - } - } -} - -pub fn tree_data_as_mut_ref(tree_data: &mut TreeData) -> Vec<&mut [T]> { - tree_data.iter_mut().map(|layer| &mut layer[..]).collect() -} - -/// Inject columns to pre-allocated arrays. 
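`map_columns_sorted` above groups columns by length into a `BTreeMap`, so iteration visits the shortest columns first and `pop_last` yields the longest ones, matching `sort_columns_and_extract_remainder_test` below. A standalone sketch over plain `Vec<u32>` columns:

```rust
use std::collections::BTreeMap;

/// Groups columns by length; BTreeMap keeps the keys (lengths) sorted ascending.
fn map_columns_by_length(cols: Vec<Vec<u32>>) -> BTreeMap<usize, Vec<Vec<u32>>> {
    let mut map: BTreeMap<usize, Vec<Vec<u32>>> = BTreeMap::new();
    for c in cols {
        map.entry(c.len()).or_default().push(c);
    }
    map
}

fn main() {
    let cols = vec![vec![1, 2, 3, 4], vec![5, 6], vec![7, 8], vec![9]];
    let mut map = map_columns_by_length(cols);
    // The longest column comes out last.
    let (longest_len, longest_cols) = map.pop_last().unwrap();
    assert_eq!(longest_len, 4);
    assert_eq!(longest_cols, vec![vec![1, 2, 3, 4]]);
    // Two columns share length 2.
    assert_eq!(map.get(&2).unwrap().len(), 2);
}
```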
-/// -/// # Arguments -/// -/// * 'gap_offset' - The offset in bytes between end of target to the beginning of the next. -/// -/// # Safety -/// -/// dst should point to pre-allocated memory with enough space to store the entire column array + -/// offset*(n_rows/n_rows_in_node) amount of T elements. -// TODO(Ohad): Change tree impl and remove. -pub unsafe fn inject( - column_array: &ColumnArray, - dst: &mut [u8], - n_rows_in_node: usize, - gap_offset: usize, -) { - let ptr_offset = column_array.len() * n_rows_in_node * std::mem::size_of::() + gap_offset; - let offseted_pointers: Vec<*mut u8> = (gap_offset..dst.len()) - .step_by(ptr_offset) - .map(|i| unsafe { dst.as_mut_ptr().add(i) }) - .collect(); - transpose_to_bytes::(column_array, &offseted_pointers); -} - -/// Given a matrix, returns a vector of the matrix elements in row-major order. -/// Assumes all columns are of the same length and non-zero. -// TODO(Ohad): Change tree impl and remove. -pub fn column_to_row_major(mut mat: ColumnArray) -> Vec { - if mat.len() == 1 { - return mat.remove(0); - }; - - // Flattening the matrix into a single vector. - let vec_length = mat.len() * mat[0].len(); - let mut row_major_matrix_vec: Vec = Vec::with_capacity(vec_length); - - // Inject(transpose). - // Safe because enough memory is allocated. - unsafe { - let row_major_matrix_byte_slice = std::slice::from_raw_parts_mut( - row_major_matrix_vec.as_mut_ptr() as *mut u8, - vec_length * std::mem::size_of::(), - ); - inject(&mat, row_major_matrix_byte_slice, 1, 0); - row_major_matrix_vec.set_len(vec_length); - } - row_major_matrix_vec -} - -pub fn inject_hash_in_pairs<'a: 'b, 'b, H: Hasher>( - hash_inputs: &'b mut [Vec<&'a [H::NativeType]>], - values_to_inject: &'a [H::Hash], -) { - assert_eq!( - values_to_inject.len(), - hash_inputs.len() * 2, - "Attempted injecting {} hash values into {} hash inputs", - values_to_inject.len(), - hash_inputs.len() - ); - for (j, hashes) in values_to_inject.chunks(2).enumerate() { - // TODO(Ohad): Implement 'IntoSlice' for H::Hash and reduce here to one push. - hash_inputs[j].push(hashes[0].as_ref()); - hash_inputs[j].push(hashes[1].as_ref()); - } -} - -/// Injects Field element values into existing hash inputs. -/// -/// For Large Merkle tree constructions, holding reference-arrays for every node -/// in a layer hinders performance greatly. Hence, the input is traversed in small chunks, -/// and the refrences are discarded upon use. -/// -/// # Arguments -/// -/// * `columns` - an array of injection element columns. -/// * `hash_inputs` - The hash inputs to inject into. -/// * `chunk_idx` - The index of the chunk to inject. -/// * `n_chunks_in_column` - The number of chunks every column is divided into. -pub fn inject_column_chunks<'b, 'a: 'b, H: Hasher, F: Field>( - columns: &'a [&'a [F]], - hash_inputs: &'b mut [Vec<&'a [H::NativeType]>], - chunk_idx: usize, - n_chunks_in_column: usize, -) where - F: IntoSlice, -{ - for column in columns { - let column_slice = get_column_chunk(column, chunk_idx, n_chunks_in_column); - - // TODO(Ohad): consider implementing a 'duplicate' feature and changing or removing this - // assert. 
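`column_to_row_major` above flattens a column-major matrix into row-major order through the unsafe `inject` path. A safe, allocation-based sketch of the same transform (assumes all columns share one non-zero length, as the original documents):

```rust
/// Safe sketch of a column-major to row-major flatten.
/// Assumes all columns have the same, non-zero length.
fn column_to_row_major(cols: &[Vec<u32>]) -> Vec<u32> {
    let n_rows = cols[0].len();
    let mut out = Vec::with_capacity(cols.len() * n_rows);
    for row in 0..n_rows {
        for col in cols {
            out.push(col[row]);
        }
    }
    out
}

fn main() {
    let cols = vec![vec![1, 2, 3, 4], vec![5, 6, 7, 8]];
    // Corresponding elements of each column end up adjacent, row by row.
    assert_eq!(column_to_row_major(&cols), vec![1, 5, 2, 6, 3, 7, 4, 8]);
}
```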
- assert_eq!( - column_slice.len() % hash_inputs.len(), - 0, - "Column of size {} can not be divided into {} hash inputs", - column_slice.len(), - hash_inputs.len() - ); - let n_elements_in_chunk = column_slice.len() / hash_inputs.len(); - column_slice - .chunks(n_elements_in_chunk) - .zip(hash_inputs.iter_mut()) - .for_each(|(chunk, hash_input)| { - hash_input.push(F::into_slice(chunk)); - }); - } -} - -/// Returns the i'th chunk of a column split into n_total_chunks. -// TODO(Ohad): change div to shift-right. -pub fn get_column_chunk(column: &[F], index_to_view: usize, n_total_chunks: usize) -> &[F] { - let slice_length = column.len() / n_total_chunks; - let slice_start_idx = slice_length * index_to_view; - &column[slice_start_idx..slice_start_idx + slice_length] -} - -/// Hashes a layer of a Merkle tree. Nodes are injected with child hashes and chunks from the input -/// columns (if any). -// TODO(Ohad): Consider renaming after current hash_layer function is deprecated. -pub fn inject_and_hash_layer( - child_hashes: &[H::Hash], - dst: &mut [H::Hash], - input_columns: &Iter<'_, &[F]>, -) where - F: IntoSlice, -{ - let produced_layer_length = dst.len(); - if IS_INTERMEDIATE { - assert_eq!( - child_hashes.len(), - produced_layer_length * 2, - "The number of child hashes ({}) must be double the destination size ({})", - child_hashes.len(), - produced_layer_length - ); - } else { - assert_eq!( - child_hashes.len(), - 0, - "The bottom layer must not receive child hashes!" - ); - } - - let mut hasher = H::new(); - match input_columns.clone().peekable().next() { - Some(_) => { - // Match the input columns to corresponding chunk sizes. - let input_columns = input_columns.clone().zip( - input_columns - .clone() - .map(|c| c.len() / produced_layer_length), - ); - dst.iter_mut().enumerate().for_each(|(i, dst_node)| { - // Inject previous hash values if intermediate layer, and input columns. - if IS_INTERMEDIATE { - inject_previous_hash_values::(i, &mut hasher, child_hashes); - } - for (column, n_elements_in_chunk) in input_columns.clone() { - let chunk = &column[i * n_elements_in_chunk..(i + 1) * n_elements_in_chunk]; - hasher.update(F::into_slice(chunk)); - } - *dst_node = hasher.finalize_reset(); - }); - } - None => { - // Intermediate layer with no input columns. 
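`get_column_chunk` above is plain index arithmetic: split the column into `n_total_chunks` equal slices and return the requested one. Restated as a standalone sketch:

```rust
/// Returns the `index_to_view`-th of `n_total_chunks` equal-length chunks of `column`.
fn get_column_chunk<T>(column: &[T], index_to_view: usize, n_total_chunks: usize) -> &[T] {
    let slice_length = column.len() / n_total_chunks;
    let start = slice_length * index_to_view;
    &column[start..start + slice_length]
}

fn main() {
    let column: Vec<u32> = (0..8).collect();
    // The second half of an 8-element column split into two chunks.
    assert_eq!(get_column_chunk(&column, 1, 2), &[4, 5, 6, 7]);
}
```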
- dst.iter_mut().enumerate().for_each(|(i, dst)| { - inject_previous_hash_values::(i, &mut hasher, child_hashes); - *dst = hasher.finalize_reset(); - }); - } - } -} - -fn inject_previous_hash_values( - i: usize, - hash_state: &mut H, - prev_hashes: &[::Hash], -) { - hash_state.update(prev_hashes[i * 2].as_ref()); - hash_state.update(prev_hashes[i * 2 + 1].as_ref()); -} - -#[cfg(test)] -pub mod tests { - use num_traits::One; - use rand::{thread_rng, Rng}; - - use super::{ - allocate_balanced_tree, inject_and_hash_layer, inject_column_chunks, map_columns_sorted, - ColumnArray, - }; - use crate::commitment_scheme::blake3_hash::Blake3Hasher; - use crate::commitment_scheme::hasher::Hasher; - use crate::commitment_scheme::merkle_input::MerkleTreeInput; - use crate::commitment_scheme::utils::{ - allocate_layer, hash_layer, hash_merkle_tree, hash_merkle_tree_from_bottom_layer, inject, - inject_hash_in_pairs, transpose_to_bytes, tree_data_as_mut_ref, - }; - use crate::core::fields::m31::M31; - use crate::math::utils::log2_ceil; - - pub fn generate_test_queries(n_queries: usize, trace_length: usize) -> Vec { - let mut queries: Vec = (0..n_queries) - .map(|_| thread_rng().gen_range(0..trace_length)) - .collect(); - queries.sort(); - queries.dedup(); - queries - } - - fn init_test_trace() -> ColumnArray { - let col0 = std::iter::repeat(0).take(8).collect(); - let col1 = vec![1, 2, 3, 4]; - let col2 = vec![5, 6]; - let col3 = vec![7, 8]; - let col4 = vec![9]; - let cols: ColumnArray = vec![col0, col1, col2, col3, col4]; - cols - } - - fn init_transpose_test_trace() -> ColumnArray { - let col1 = vec![1, 2, 3, 4]; - let col2 = vec![5, 6, 7, 8]; - let col3 = vec![9, 10]; - let col4 = vec![11]; - let cols: ColumnArray = vec![col1, col2, col3, col4]; - cols - } - - #[test] - fn sort_columns_and_extract_remainder_test() { - let cols = init_test_trace(); - let mut col_length_map = map_columns_sorted(cols.clone()); - - // Test map - assert_eq!(col_length_map.get(&4).expect("no such key: 4").len(), 1); - assert_eq!(col_length_map.get(&2).expect("no such key: 2").len(), 2); - assert_eq!(col_length_map.pop_last().expect("Empty map").1[0][0], 0); - assert_eq!(col_length_map.pop_last().expect("Empty map").1[0][0], 1); - assert_eq!(col_length_map.pop_last().expect("Empty map").1[0][0], 5); - } - - #[test] - fn transpose_test() { - let cols = init_transpose_test_trace(); - let mut map = map_columns_sorted(cols); - let columns_to_transpose = map.pop_last().expect("msg").1; - - let mut out1 = [0_u8; 8]; - let mut out2 = [0_u8; 8]; - let mut out3 = [0_u8; 8]; - let mut out4 = [0_u8; 8]; - - let ptrs = [ - out1.as_mut_ptr(), - out2.as_mut_ptr(), - out3.as_mut_ptr(), - out4.as_mut_ptr(), - ]; - unsafe { - transpose_to_bytes(&columns_to_transpose, &ptrs); - } - - let outs = [out1, out2, out3, out4]; - assert_eq!( - format!("{:?}", outs), - "[[1, 0, 0, 0, 5, 0, 0, 0], [2, 0, 0, 0, 6, 0, 0, 0], [3, 0, 0, 0, 7, 0, 0, 0], [4, 0, 0, 0, 8, 0, 0, 0]]" - ); - } - - // TODO(Ohad): generelize over a hash function and use hash-in-place functions - // to initialize output arrays instead of zeros. - #[test] - fn inject_test() { - let cols = init_transpose_test_trace(); - let mut map = map_columns_sorted(cols); - let columns_to_transpose = map.pop_last().expect("msg").1; // [[1, 2, 3, 4],[5, 6, 7, 8]]. 
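In the intermediate-layer branch of `inject_and_hash_layer` above (no injected columns), each destination node is simply the hash of its two children, which is exactly what `inject_previous_hash_values` feeds into the hasher. A simplified standalone sketch with the `blake3` crate, ignoring column injection:

```rust
/// Hashes an intermediate Merkle layer: dst[i] = blake3(child[2i] || child[2i + 1]).
fn hash_intermediate_layer(child_hashes: &[blake3::Hash]) -> Vec<blake3::Hash> {
    assert_eq!(child_hashes.len() % 2, 0, "layer must have an even number of children");
    child_hashes
        .chunks(2)
        .map(|pair| {
            let mut hasher = blake3::Hasher::new();
            hasher.update(pair[0].as_bytes());
            hasher.update(pair[1].as_bytes());
            hasher.finalize()
        })
        .collect()
}

fn main() {
    let leaves: Vec<blake3::Hash> = (0u32..4).map(|i| blake3::hash(&i.to_le_bytes())).collect();
    let parents = hash_intermediate_layer(&leaves);
    // Four children fold into two parents.
    assert_eq!(parents.len(), 2);
}
```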
- let gap_offset: usize = 1; - let mut out = [0_u8; 36]; - unsafe { - inject::(&columns_to_transpose, &mut out[..], 1, gap_offset); - } - - assert_eq!( - hex::encode(&out[..]), - hex::encode(0u8.to_le_bytes()) - + &hex::encode(1u32.to_le_bytes()) - + &hex::encode(5u32.to_le_bytes()) - + &hex::encode(0u8.to_le_bytes()) - + &hex::encode(2u32.to_le_bytes()) - + &hex::encode(6u32.to_le_bytes()) - + &hex::encode(0u8.to_le_bytes()) - + &hex::encode(3u32.to_le_bytes()) - + &hex::encode(7u32.to_le_bytes()) - + &hex::encode(0u8.to_le_bytes()) - + &hex::encode(4u32.to_le_bytes()) - + &hex::encode(8u32.to_le_bytes()) - ); - } - - #[test] - fn allocate_layer_test() { - let layer = allocate_layer::(10); - assert_eq!(layer.len(), 10); - } - - #[test] - fn allocate_empty_layer_test() { - let layer = allocate_layer::(0); - assert_eq!(layer.len(), 0); - } - - #[test] - fn allocate_balanced_tree_test() { - let n_nodes = 8; - let node_size = Blake3Hasher::BLOCK_SIZE; - let output_size = Blake3Hasher::OUTPUT_SIZE; - let tree = allocate_balanced_tree::(n_nodes, node_size, output_size); - - assert_eq!(tree.len(), log2_ceil(n_nodes) + 1); - assert_eq!(tree[0].len(), n_nodes * output_size); - assert_eq!(tree[1].len(), 4 * output_size); - assert_eq!(tree[2].len(), 2 * output_size); - assert_eq!(tree[3].len(), output_size); - } - - #[test] - fn hash_layer_test() { - let layer = allocate_layer(16); - let mut res_layer = allocate_layer(64); - hash_layer::(&layer, 8, &mut res_layer); - assert_eq!( - hex::encode(&res_layer[..Blake3Hasher::OUTPUT_SIZE]), - Blake3Hasher::hash(&0u64.to_le_bytes()).to_string() - ); - } - - #[test] - fn hash_tree_test() { - let mut tree_data = - allocate_balanced_tree(16, Blake3Hasher::BLOCK_SIZE, Blake3Hasher::OUTPUT_SIZE); - - hash_merkle_tree::(&mut tree_data_as_mut_ref(&mut tree_data)[..]); - - assert_eq!( - hex::encode(tree_data.last().unwrap()), - "31b471b27b22b57b1ac82c9ed537231d53faf017fbe0c903c9668f47dc4151e1" - ) - } - - #[test] - fn hash_tree_from_bottom_layer_test() { - const TEST_SIZE: usize = 512; - - let bottom_layer = [M31::one(); TEST_SIZE]; - let mut tree_data = allocate_balanced_tree( - TEST_SIZE * std::mem::size_of::() / Blake3Hasher::BLOCK_SIZE, - Blake3Hasher::BLOCK_SIZE, - Blake3Hasher::OUTPUT_SIZE, - ); - - hash_merkle_tree_from_bottom_layer::( - &bottom_layer[..], - Blake3Hasher::BLOCK_SIZE, - &mut tree_data_as_mut_ref(&mut tree_data)[..], - ); - - assert_eq!( - hex::encode(tree_data.last().unwrap()), - "234d7011f24adb0fec6604ff1fdfe4745340886418b6e2cd0633f6ad1c7e52d9" - ) - } - - #[test] - fn inject_hash_in_pairs_test() { - let mut hash_inputs = vec![vec![], vec![]]; - let values_to_inject = vec![ - Blake3Hasher::hash(b"a"), - Blake3Hasher::hash(b"b"), - Blake3Hasher::hash(b"c"), - Blake3Hasher::hash(b"d"), - ]; - - inject_hash_in_pairs::(&mut hash_inputs, &values_to_inject); - - assert_eq!( - hex::encode(hash_inputs[0][0]), - Blake3Hasher::hash(b"a").to_string() - ); - assert_eq!( - hex::encode(hash_inputs[0][1]), - Blake3Hasher::hash(b"b").to_string() - ); - assert_eq!( - hex::encode(hash_inputs[1][0]), - Blake3Hasher::hash(b"c").to_string() - ); - assert_eq!( - hex::encode(hash_inputs[1][1]), - Blake3Hasher::hash(b"d").to_string() - ); - } - - #[test] - fn inject_column_single_chunk_test() { - let col1: Vec = (0..4).map(M31::from_u32_unchecked).collect(); - let col2: Vec = (4..8).map(M31::from_u32_unchecked).collect(); - let columns = vec![&col1[..], &col2[..]]; - let mut hash_inputs = vec![vec![], vec![]]; - - inject_column_chunks::(&columns, &mut hash_inputs, 
0, 1); - - assert_eq!( - format!("{:?}", hash_inputs[0]), - "[[0, 0, 0, 0, 1, 0, 0, 0], [4, 0, 0, 0, 5, 0, 0, 0]]" - ); - assert_eq!( - format!("{:?}", hash_inputs[1]), - "[[2, 0, 0, 0, 3, 0, 0, 0], [6, 0, 0, 0, 7, 0, 0, 0]]" - ); - } - - #[test] - fn inject_column_multiple_chunks_test() { - let col1: Vec = (0..4).map(M31::from_u32_unchecked).collect(); - let col2: Vec = (4..8).map(M31::from_u32_unchecked).collect(); - let columns = vec![&col1[..], &col2[..]]; - let mut hash_inputs = vec![vec![], vec![]]; - - // Inject the second half of the columns. - inject_column_chunks::(&columns, &mut hash_inputs, 1, 2); - - assert_eq!( - format!("{:?}", hash_inputs[0]), - "[[2, 0, 0, 0], [6, 0, 0, 0]]" - ); - assert_eq!( - format!("{:?}", hash_inputs[1]), - "[[3, 0, 0, 0], [7, 0, 0, 0]]" - ); - } - - #[test] - #[should_panic] - fn inject_columns_too_short_test() { - let col1: Vec = (0..1).map(M31::from_u32_unchecked).collect(); - let columns = vec![&col1[..]]; - let mut hash_inputs = vec![vec![], vec![]]; - - inject_column_chunks::(&columns, &mut hash_inputs, 0, 1); - } - - #[test] - #[should_panic] - fn inject_columns_size_not_divisible_test() { - let col1: Vec = (0..3).map(M31::from_u32_unchecked).collect(); - let columns = vec![&col1[..]]; - let mut hash_inputs = vec![vec![], vec![]]; - - inject_column_chunks::(&columns, &mut hash_inputs, 0, 1); - } - - #[test] - fn inject_and_hash_layer_test() { - // trace_column: [M31;16] = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] - let trace_column_0 = (0..16).map(M31::from_u32_unchecked).collect::>(); - let trace_column_1 = (0..8).map(M31::from_u32_unchecked).collect::>(); - let sub_trees_height = 3; - let mut input = MerkleTreeInput::new(); - input.insert_column(sub_trees_height, &trace_column_0); - input.insert_column(sub_trees_height - 1, &trace_column_1); - let mut leaf_layer = vec![::Hash::default(); 1 << sub_trees_height]; - let mut hashed_leaf_layer_injected = - vec![::Hash::default(); 1 << (sub_trees_height - 1)]; - let mut hashed_leaf_layer_not_injected = - vec![::Hash::default(); 1 << (sub_trees_height - 1)]; - - inject_and_hash_layer::( - &[], - &mut leaf_layer, - &input.get_columns(sub_trees_height).iter(), - ); - inject_and_hash_layer::( - &leaf_layer[..], - &mut hashed_leaf_layer_injected, - &input.get_columns(sub_trees_height - 1).iter(), - ); - inject_and_hash_layer::( - &leaf_layer[..], - &mut hashed_leaf_layer_not_injected, - &input.get_columns(sub_trees_height - 2).iter(), - ); - - // leaf_layer: [Blake3Hash;8] = [h(0,1),h(2,3),...h(14,15)] - leaf_layer.iter().enumerate().for_each(|(i, h)| { - let mut hasher = Blake3Hasher::new(); - hasher.update(&(2 * i as u32).to_le_bytes()); - hasher.update(&((2 * i + 1) as u32).to_le_bytes()); - assert_eq!(h, &hasher.finalize()); - }); - - // hashed_leaf_layer_injected: [Blake3Hash;4] = - // [h(h(0,1),h(2,3),0,1),,...h(h(12,13),h(14,15),6,7)] - hashed_leaf_layer_injected - .iter() - .enumerate() - .for_each(|(i, h)| { - let mut hasher = Blake3Hasher::new(); - hasher.update(leaf_layer[i * 2].as_ref()); - hasher.update(leaf_layer[i * 2 + 1].as_ref()); - hasher.update(&((2 * i) as u32).to_le_bytes()); - hasher.update(&((2 * i + 1) as u32).to_le_bytes()); - assert_eq!(h, &hasher.finalize()); - }); - - // hashed_leaf_layer_not_injected: [Blake3Hash;4] = - // [h(h(0,1),h(2,3)),...h(h(12,13),h(14,15))] - hashed_leaf_layer_not_injected - .iter() - .enumerate() - .for_each(|(i, h)| { - let mut hasher = Blake3Hasher::new(); - hasher.update(leaf_layer[i * 2].as_ref()); - hasher.update(leaf_layer[i * 2 + 
1].as_ref()); - assert_eq!(h, &hasher.finalize()); - }) - } - - #[test] - #[should_panic] - fn inject_and_hash_layer_wrong_size_test() { - let layer_length = 1 << 3; - let trace_column_0 = (0..16).map(M31::from_u32_unchecked).collect::>(); - let prev_hashes = vec![::Hash::default(); layer_length]; - let mut dst = vec![::Hash::default(); layer_length / 4]; - let mut input = MerkleTreeInput::new(); - - input.insert_column(1, &trace_column_0); - inject_and_hash_layer::( - &prev_hashes[..], - &mut dst, - &input.get_columns(1).iter(), - ); - } - - #[test] - #[should_panic] - fn inject_and_hash_bottom_layer_with_child_hashes_test() { - let layer_length = 1 << 3; - let prev_hashes = vec![::Hash::default(); layer_length]; - let mut dst = vec![::Hash::default(); layer_length / 2]; - let mut input = MerkleTreeInput::new(); - let trace_column_0 = (0..16).map(M31::from_u32_unchecked).collect::>(); - - input.insert_column(1, &trace_column_0); - inject_and_hash_layer::( - &prev_hashes[..], - &mut dst, - &input.get_columns(1).iter(), - ); - } -} diff --git a/src/commitment_scheme/verifier.rs b/src/commitment_scheme/verifier.rs index 8b8a362ff..48f3af02c 100644 --- a/src/commitment_scheme/verifier.rs +++ b/src/commitment_scheme/verifier.rs @@ -121,7 +121,7 @@ impl MerkleVerifier { // If the left child was not computed, read it from the witness. let left_hash = prev_layer_hashes .next_if(|(index, _)| *index == 2 * node_index) - .map(|(_, hash)| Ok(hash.clone())) + .map(|(_, hash)| Ok(*hash)) .unwrap_or_else(|| { hash_witness .next() @@ -131,7 +131,7 @@ impl MerkleVerifier { // If the right child was not computed, read it to from the witness. let right_hash = prev_layer_hashes .next_if(|(index, _)| *index == 2 * node_index + 1) - .map(|(_, hash)| Ok(hash.clone())) + .map(|(_, hash)| Ok(*hash)) .unwrap_or_else(|| { hash_witness .next() diff --git a/src/core/commitment_scheme/prover.rs b/src/core/commitment_scheme/prover.rs index acb39ceab..6a92aaed5 100644 --- a/src/core/commitment_scheme/prover.rs +++ b/src/core/commitment_scheme/prover.rs @@ -18,7 +18,7 @@ use super::super::prover::{ use super::super::ColumnVec; use super::quotients::{compute_fri_quotients, PointSample}; use super::utils::TreeVec; -use crate::commitment_scheme::blake2_hash::{Blake2sHash, Blake2sHasher}; +use crate::commitment_scheme::blake2_hash::Blake2sHash; use crate::commitment_scheme::blake2_merkle::Blake2sMerkleHasher; use crate::commitment_scheme::prover::{MerkleDecommitment, MerkleProver}; use crate::core::channel::Channel; @@ -91,7 +91,7 @@ impl CommitmentSchemeProver { // Run FRI commitment phase on the oods quotients. let fri_config = FriConfig::new(LOG_LAST_LAYER_DEGREE_BOUND, LOG_BLOWUP_FACTOR, N_QUERIES); let fri_prover = - FriProver::::commit(channel, fri_config, "ients); + FriProver::::commit(channel, fri_config, "ients); // Proof of work. let proof_of_work = ProofOfWork::new(PROOF_OF_WORK_BITS).prove(channel); @@ -127,7 +127,7 @@ pub struct CommitmentSchemeProof { pub decommitments: TreeVec>, pub queried_values: TreeVec>>, pub proof_of_work: ProofOfWorkProof, - pub fri_proof: FriProof, + pub fri_proof: FriProof, } /// Prover data for a single commitment tree in a commitment scheme. 
The commitment scheme allows to diff --git a/src/core/fri.rs b/src/core/fri.rs index 96a5bd48e..0166c97e7 100644 --- a/src/core/fri.rs +++ b/src/core/fri.rs @@ -11,7 +11,7 @@ use thiserror::Error; use super::backend::{Backend, CPUBackend}; use super::channel::Channel; use super::fields::qm31::SecureField; -use super::fields::secure_column::SecureColumn; +use super::fields::secure_column::{SecureColumn, SECURE_EXTENSION_DEGREE}; use super::poly::circle::{CircleEvaluation, SecureEvaluation}; use super::poly::line::{LineEvaluation, LinePoly}; use super::poly::BitReversedOrder; @@ -19,7 +19,7 @@ use super::poly::BitReversedOrder; use super::queries::{Queries, SparseSubCircleDomain}; use crate::commitment_scheme::ops::{MerkleHasher, MerkleOps}; use crate::commitment_scheme::prover::{MerkleDecommitment, MerkleProver}; -use crate::commitment_scheme::verifier::MerkleTreeVerifier; +use crate::commitment_scheme::verifier::{MerkleVerificationError, MerkleVerifier}; use crate::core::circle::Coset; use crate::core::poly::line::LineDomain; use crate::core::utils::bit_reverse_index; @@ -112,7 +112,7 @@ pub struct FriProver, H: MerkleHasher> { column_log_sizes: Vec, } -impl, H: MerkleHasher> FriProver { +impl, H: MerkleHasher> FriProver { /// Commits to multiple [CircleEvaluation]s. /// /// `columns` must be provided in descending order by size. @@ -289,7 +289,7 @@ pub struct FriVerifier { queries: Option, } -impl> FriVerifier { +impl FriVerifier { /// Verifies the commitment stage of FRI. /// /// `column_bounds` should be the committed circle polynomial degree bounds in descending order. @@ -532,7 +532,10 @@ pub enum FriVerificationError { #[error("proof contains an invalid number of FRI layers")] InvalidNumFriLayers, #[error("queries do not resolve to their commitment in layer {layer}")] - InnerLayerCommitmentInvalid { layer: usize }, + InnerLayerCommitmentInvalid { + layer: usize, + error: MerkleVerificationError, + }, #[error("evaluations are invalid in layer {layer}")] InnerLayerEvaluationsInvalid { layer: usize }, #[error("degree of last layer is invalid")] @@ -623,7 +626,7 @@ struct FriLayerVerifier { proof: FriLayerProof, } -impl> FriLayerVerifier { +impl FriLayerVerifier { /// Verifies the layer's merkle decommitment and returns the the folded queries and query evals. /// /// # Errors @@ -665,24 +668,23 @@ impl> FriLayerVerifier { }) .collect::>(); - let merkle_verifier = MerkleTreeVerifier { root: commitment }; + let merkle_verifier = MerkleVerifier::new( + commitment, + vec![self.domain.log_size(); SECURE_EXTENSION_DEGREE], + ); // TODO(spapini): Propagate error. - if merkle_verifier + merkle_verifier .verify( - decommitment_positions, - actual_decommitment_evals - .columns + [(self.domain.log_size(), decommitment_positions)] .into_iter() - .map(|e| (self.domain.log_size(), e)) - .collect_vec(), + .collect(), + actual_decommitment_evals.columns.to_vec(), decommitment, ) - .is_err() - { - return Err(FriVerificationError::InnerLayerCommitmentInvalid { + .map_err(|e| FriVerificationError::InnerLayerCommitmentInvalid { layer: self.layer_index, - }); - } + error: e, + })?; let evals_at_folded_queries = sparse_evaluation.fold(self.folding_alpha); @@ -766,7 +768,7 @@ struct FriLayerProver, H: MerkleHasher> { merkle_tree: MerkleProver, } -impl, H: MerkleHasher> FriLayerProver { +impl, H: MerkleHasher> FriLayerProver { fn new(evaluation: LineEvaluation) -> Self { // TODO(spapini): Commit on slice. // TODO(spapini): Merkle tree in backend. 
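The `fri.rs` hunk above enriches the `InnerLayerCommitmentInvalid` variant with the underlying `MerkleVerificationError`; callers that only care about the layer can keep matching with `..`, as the updated test later in this hunk does. A toy sketch of that pattern (illustrative types, using `thiserror` as `fri.rs` itself does):

```rust
use thiserror::Error;

#[derive(Debug, Error)]
#[error("merkle verification failed")]
struct ToyMerkleError;

#[derive(Debug, Error)]
enum ToyFriError {
    #[error("queries do not resolve to their commitment in layer {layer}")]
    InnerLayerCommitmentInvalid {
        layer: usize,
        error: ToyMerkleError,
    },
}

fn main() {
    let err = ToyFriError::InnerLayerCommitmentInvalid {
        layer: 1,
        error: ToyMerkleError,
    };
    // Callers that only care about the layer can ignore the inner error with `..`.
    assert!(matches!(
        err,
        ToyFriError::InnerLayerCommitmentInvalid { layer: 1, .. }
    ));
    println!("{err}");
}
```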
@@ -806,7 +808,13 @@ impl, H: MerkleHasher> FriLayerProver< } let commitment = self.merkle_tree.root(); - let decommitment = self.merkle_tree.decommit(decommit_positions); + // TODO(spapini): Use _evals. + let (_evals, decommitment) = self.merkle_tree.decommit( + [(self.evaluation.len().ilog2(), decommit_positions)] + .into_iter() + .collect(), + self.evaluation.values.columns.iter().collect_vec(), + ); FriLayerProof { evals_subset, @@ -897,7 +905,7 @@ mod tests { use num_traits::{One, Zero}; use super::{get_opening_positions, FriVerificationError, SparseCircleEvaluation}; - use crate::commitment_scheme::blake2_hash::Blake2sHasher; + use crate::commitment_scheme::blake2_merkle::Blake2sMerkleHasher; use crate::core::backend::cpu::{CPUCircleEvaluation, CPUCirclePoly}; use crate::core::backend::{CPUBackend, Col, Column, ColumnOps}; use crate::core::circle::{CirclePointIndex, Coset}; @@ -918,7 +926,7 @@ mod tests { /// Default blowup factor used for tests. const LOG_BLOWUP_FACTOR: u32 = 2; - type FriProver = super::FriProver; + type FriProver = super::FriProver; #[test] fn fold_line_works() { @@ -1151,7 +1159,7 @@ mod tests { assert!(matches!( verification_result, - Err(FriVerificationError::InnerLayerCommitmentInvalid { layer: 1 }) + Err(FriVerificationError::InnerLayerCommitmentInvalid { layer: 1, .. }) )); } diff --git a/src/examples/fibonacci/mod.rs b/src/examples/fibonacci/mod.rs index b958fef85..ab577b594 100644 --- a/src/examples/fibonacci/mod.rs +++ b/src/examples/fibonacci/mod.rs @@ -111,9 +111,10 @@ impl MultiFibonacci { mod tests { use itertools::Itertools; use num_traits::One; + use rand::rngs::StdRng; + use rand::{Rng, SeedableRng}; use super::{Fibonacci, MultiFibonacci}; - use crate::commitment_scheme::utils::tests::generate_test_queries; use crate::core::air::accumulation::PointEvaluationAccumulator; use crate::core::air::{AirExt, Component, ComponentTrace}; use crate::core::circle::CirclePoint; @@ -125,6 +126,16 @@ mod tests { use crate::core::utils::bit_reverse; use crate::{m31, qm31}; + pub fn generate_test_queries(n_queries: usize, trace_length: usize) -> Vec { + let rng = &mut StdRng::seed_from_u64(0); + let mut queries: Vec = (0..n_queries) + .map(|_| rng.gen_range(0..trace_length)) + .collect(); + queries.sort(); + queries.dedup(); + queries + } + #[test] fn test_composition_polynomial_is_low_degree() { let fib = Fibonacci::new(5, m31!(443693538)); diff --git a/src/hash_functions/poseidon.rs b/src/hash_functions/poseidon.rs index 0ba236bac..99af45989 100644 --- a/src/hash_functions/poseidon.rs +++ b/src/hash_functions/poseidon.rs @@ -215,14 +215,6 @@ impl Hasher for PoseidonHasher { res } - unsafe fn hash_many_in_place( - _data: &[*const Self::NativeType], - _single_input_length_bytes: usize, - _dst: &[*mut Self::NativeType], - ) { - unimplemented!("hash_many_in_place for PoseidonHasher") - } - fn concat_and_hash(_v1: &PoseidonHash, _v2: &PoseidonHash) -> PoseidonHash { unimplemented!("concat_and_hash for PoseidonHasher") }
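The relocated `generate_test_queries` above replaces `thread_rng` with a seeded `StdRng`, making the test queries deterministic across runs. A standalone usage sketch mirroring that helper (requires the `rand` crate):

```rust
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};

/// Deterministic, sorted, deduplicated query positions in `0..trace_length`.
fn generate_test_queries(n_queries: usize, trace_length: usize) -> Vec<usize> {
    let rng = &mut StdRng::seed_from_u64(0);
    let mut queries: Vec<usize> = (0..n_queries)
        .map(|_| rng.gen_range(0..trace_length))
        .collect();
    queries.sort();
    queries.dedup();
    queries
}

fn main() {
    // The same seed always yields the same query set, unlike the old thread_rng version.
    assert_eq!(
        generate_test_queries(8, 1 << 10),
        generate_test_queries(8, 1 << 10)
    );
}
```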