-
Notifications
You must be signed in to change notification settings - Fork 93
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
1 parent
b8b4d6b
commit 7b36b0e
Showing
5 changed files
with
456 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,193 @@ | ||
use itertools::Itertools; | ||
use num_traits::Zero; | ||
|
||
use super::blake2s_ref::compress; | ||
use super::ops::{MerkleHasher, MerkleOps}; | ||
use crate::core::backend::CPUBackend; | ||
use crate::core::fields::m31::BaseField; | ||
|
||
/// A [MerkleHasher] backed by the Blake2s compression function.
pub struct Blake2Hasher;
impl MerkleHasher for Blake2Hasher {
    // A Blake2s chaining value: eight 32-bit words (256 bits).
    type Hash = [u32; 8];

    /// Hashes one Merkle node: optionally absorbs the two child hashes, then absorbs the
    /// node's column values in zero-padded chunks of 16 field elements.
    fn hash_node(
        children_hashes: Option<(Self::Hash, Self::Hash)>,
        column_values: &[BaseField],
    ) -> Self::Hash {
        // Start from an all-zero state rather than the Blake2s IV.
        // NOTE(review): counter/finalization arguments to `compress` are all 0 — confirm this
        // fixed-length sponge-like usage is the intended domain separation for the tree.
        let mut state = [0; 8];
        if let Some((left, right)) = children_hashes {
            state = compress(
                state,
                // SAFETY: `[[u32; 8]; 2]` and `[u32; 16]` have identical size and layout, so
                // this reinterprets the two child digests as one 16-word message block.
                unsafe { std::mem::transmute([left, right]) },
                0,
                0,
                0,
                0,
            );
        }
        // Number of zero elements needed to round the value count up to a multiple of 16
        // (one compress call consumes exactly 16 words); 0 when already a multiple of 16.
        let rem = 15 - ((column_values.len() + 15) % 16);
        let padded_values = column_values
            .iter()
            .copied()
            .chain(std::iter::repeat(BaseField::zero()).take(rem));
        for chunk in padded_values.array_chunks::<16>() {
            // SAFETY: assumes `BaseField` (M31) is a transparent wrapper around a single u32,
            // so `[BaseField; 16]` reinterprets as `[u32; 16]` — TODO confirm repr.
            state = compress(state, unsafe { std::mem::transmute(chunk) }, 0, 0, 0, 0);
        }
        state
    }
}
|
||
impl MerkleOps<Blake2Hasher> for CPUBackend { | ||
fn commit_on_layer( | ||
log_size: u32, | ||
prev_layer: Option<&Vec<[u32; 8]>>, | ||
columns: &[&Vec<BaseField>], | ||
) -> Vec<[u32; 8]> { | ||
(0..(1 << log_size)) | ||
.map(|i| { | ||
Blake2Hasher::hash_node( | ||
prev_layer.map(|prev_layer| (prev_layer[2 * i], prev_layer[2 * i + 1])), | ||
&columns.iter().map(|column| column[i]).collect_vec(), | ||
) | ||
}) | ||
.collect() | ||
} | ||
} | ||
|
||
#[cfg(test)]
mod tests {
    use itertools::Itertools;
    use num_traits::Zero;
    use rand::rngs::StdRng;
    use rand::{Rng, SeedableRng};

    use crate::commitment_scheme::blake2_merkle::Blake2Hasher;
    use crate::commitment_scheme::prover::{Decommitment, MerkleProver};
    use crate::commitment_scheme::verifier::{MerkleTreeVerifier, MerkleVerificationError};
    use crate::core::backend::CPUBackend;
    use crate::core::fields::m31::BaseField;

    // (queries, decommitment, per-column (log_size, queried values), verifier).
    type TestData = (
        Vec<usize>,
        Decommitment<Blake2Hasher>,
        Vec<(u32, Vec<BaseField>)>,
        MerkleTreeVerifier<Blake2Hasher>,
    );
    /// Builds a deterministic (seeded) random Merkle commitment over N_COLS columns of mixed
    /// sizes, plus N_QUERIES random queries, the matching decommitment, the queried column
    /// values, and a verifier holding the root. Each mutation test corrupts one piece of
    /// this data and asserts the expected verification error.
    fn prepare_merkle() -> TestData {
        const N_COLS: usize = 400;
        const N_QUERIES: usize = 7;

        // Fixed seed so every test sees the same tree and queries.
        let rng = &mut StdRng::seed_from_u64(0);
        // Column log-sizes in 6..9, sorted descending as `commit` requires.
        let log_sizes = (0..N_COLS)
            .map(|_| rng.gen_range(6..9))
            .sorted()
            .rev()
            .collect_vec();
        let max_log_size = *log_sizes.iter().max().unwrap();
        // Random column contents.
        let cols = log_sizes
            .iter()
            .map(|&log_size| {
                (0..(1 << log_size))
                    .map(|_| BaseField::from(rng.gen_range(0..(1 << 30))))
                    .collect_vec()
            })
            .collect_vec();
        let merkle = MerkleProver::<CPUBackend, Blake2Hasher>::commit(cols.iter().collect_vec());

        // Sorted, deduped query indices into the largest layer.
        let queries = (0..N_QUERIES)
            .map(|_| rng.gen_range(0..(1 << max_log_size)))
            .sorted()
            .dedup()
            .collect_vec();
        let decommitment = merkle.decommit(queries.clone());
        // For each column, the values at the queried positions. A query into the largest
        // layer is shifted right to index a smaller column's layer; dedup collapses queries
        // that land on the same position there.
        let values = cols
            .iter()
            .map(|col| {
                let layer_queries = queries
                    .iter()
                    .map(|&q| q >> (max_log_size - col.len().ilog2()))
                    .dedup();
                layer_queries.map(|q| col[q]).collect_vec()
            })
            .collect_vec();
        let values = log_sizes.into_iter().zip(values).collect_vec();

        let verifier = MerkleTreeVerifier {
            root: merkle.root(),
        };
        (queries, decommitment, values, verifier)
    }

    /// Untampered data verifies against the root.
    #[test]
    fn test_merkle_success() {
        let (queries, decommitment, values, verifier) = prepare_merkle();

        verifier.verify(queries, values, decommitment).unwrap();
    }

    /// Corrupting one witness hash must surface as a root mismatch.
    #[test]
    fn test_merkle_invalid_witness() {
        let (queries, mut decommitment, values, verifier) = prepare_merkle();
        decommitment.witness[20] = [0; 8];

        assert_eq!(
            verifier.verify(queries, values, decommitment).unwrap_err(),
            MerkleVerificationError::RootMismatch
        );
    }

    /// Corrupting one queried column value must surface as a root mismatch.
    #[test]
    fn test_merkle_invalid_value() {
        let (queries, decommitment, mut values, verifier) = prepare_merkle();
        values[3].1[6] = BaseField::zero();

        assert_eq!(
            verifier.verify(queries, values, decommitment).unwrap_err(),
            MerkleVerificationError::RootMismatch
        );
    }

    /// Dropping the last witness hash must be detected before any root comparison.
    #[test]
    fn test_merkle_witness_too_short() {
        let (queries, mut decommitment, values, verifier) = prepare_merkle();
        decommitment.witness.pop();

        assert_eq!(
            verifier.verify(queries, values, decommitment).unwrap_err(),
            MerkleVerificationError::WitnessTooShort
        );
    }

    /// An extra column value must be rejected.
    #[test]
    fn test_merkle_column_values_too_long() {
        let (queries, decommitment, mut values, verifier) = prepare_merkle();
        values[3].1.push(BaseField::zero());

        assert_eq!(
            verifier.verify(queries, values, decommitment).unwrap_err(),
            MerkleVerificationError::ColumnValuesTooLong
        );
    }

    /// A missing column value must be rejected.
    #[test]
    fn test_merkle_column_values_too_short() {
        let (queries, decommitment, mut values, verifier) = prepare_merkle();
        values[3].1.pop();

        assert_eq!(
            verifier.verify(queries, values, decommitment).unwrap_err(),
            MerkleVerificationError::ColumnValuesTooShort
        );
    }

    /// An extra witness hash must be rejected.
    #[test]
    fn test_merkle_witness_too_long() {
        let (queries, mut decommitment, values, verifier) = prepare_merkle();
        decommitment.witness.push([0; 8]);

        assert_eq!(
            verifier.verify(queries, values, decommitment).unwrap_err(),
            MerkleVerificationError::WitnessTooLong
        );
    }
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,30 @@ | ||
use crate::core::backend::{Col, ColumnOps}; | ||
use crate::core::fields::m31::BaseField; | ||
|
||
/// A hash function for Merkle tree nodes.
pub trait MerkleHasher {
    /// The digest type produced for each node.
    type Hash: Clone + Eq + std::fmt::Debug;
    /// Hashes a single Merkle node.
    /// The node may or may not need to hash 2 hashes from the previous layer - depending on
    /// whether it is a leaf or not.
    /// In addition, the node may have extra column values that need to be hashed.
    fn hash_node(
        children_hashes: Option<(Self::Hash, Self::Hash)>,
        column_values: &[BaseField],
    ) -> Self::Hash;
}
|
||
/// Backend operations required to commit to a Merkle tree with hasher `H`.
pub trait MerkleOps<H: MerkleHasher>: ColumnOps<BaseField> + ColumnOps<H::Hash> {
    /// Commits on an entire layer of the Merkle tree.
    /// The layer has 2^`log_size` nodes that need to be hashed. The topmost layer has 1 node,
    /// which is a hash of 2 children and some columns.
    /// `prev_layer` is the previous layer of the Merkle tree, if this is not the leaf layer.
    /// That layer is assumed to have 2^(`log_size`+1) nodes.
    /// `columns` are the extra columns that need to be hashed in each node.
    /// They are assumed to be of size 2^`log_size`.
    /// Returns the next Merkle layer hashes.
    fn commit_on_layer(
        log_size: u32,
        prev_layer: Option<&Col<Self, H::Hash>>,
        columns: &[&Col<Self, BaseField>],
    ) -> Col<Self, H::Hash>;
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,65 @@ | ||
use std::cmp::Reverse; | ||
|
||
use itertools::Itertools; | ||
|
||
use super::ops::{MerkleHasher, MerkleOps}; | ||
use crate::core::backend::{Col, Column}; | ||
use crate::core::fields::m31::BaseField; | ||
|
||
/// Prover side of a Merkle commitment, holding every computed layer of the tree.
pub struct MerkleProver<B: MerkleOps<H>, H: MerkleHasher> {
    // layers[0] is the largest (bottom) layer; the last layer is the single-node root layer.
    pub layers: Vec<Col<B, H::Hash>>,
}
impl<B: MerkleOps<H>, H: MerkleHasher> MerkleProver<B, H> { | ||
/// Commits to columns. | ||
/// Columns must be of power of 2 sizes and sorted in descending order. | ||
pub fn commit(columns: Vec<&Col<B, BaseField>>) -> Self { | ||
// Check that columns are of descending order. | ||
assert!(!columns.is_empty()); | ||
assert!(columns.is_sorted_by_key(|c| Reverse(c.len()))); | ||
|
||
let mut columns = &mut columns.into_iter().peekable(); | ||
let mut layers: Vec<Col<B, H::Hash>> = Vec::new(); | ||
|
||
let max_log_size = columns.peek().unwrap().len().ilog2(); | ||
for log_size in (0..=max_log_size).rev() { | ||
// Take columns of the current log_size. | ||
let layer_columns = (&mut columns) | ||
.take_while(|column| column.len().ilog2() == log_size) | ||
.collect_vec(); | ||
|
||
layers.push(B::commit_on_layer(log_size, layers.last(), &layer_columns)); | ||
} | ||
Self { layers } | ||
} | ||
|
||
/// Decommits to columns on the given queries. | ||
/// Queries are given as indices to the largest column. | ||
pub fn decommit(&self, mut queries: Vec<usize>) -> Decommitment<H> { | ||
let mut witness = Vec::new(); | ||
for layer in &self.layers { | ||
let mut queries_iter = queries.into_iter().peekable(); | ||
|
||
// Propagate queries and hashes to the next layer. | ||
let mut next_queries = Vec::new(); | ||
while let Some(query) = queries_iter.next() { | ||
next_queries.push(query / 2); | ||
if queries_iter.next_if_eq(&(query ^ 1)).is_some() { | ||
continue; | ||
} | ||
if layer.len() > 1 { | ||
witness.push(layer.at(query ^ 1)); | ||
} | ||
} | ||
queries = next_queries; | ||
} | ||
Decommitment { witness } | ||
} | ||
|
||
pub fn root(&self) -> H::Hash { | ||
self.layers.last().unwrap().at(0) | ||
} | ||
} | ||
|
||
/// A Merkle decommitment for a set of queries.
pub struct Decommitment<H: MerkleHasher> {
    // Sibling hashes needed by the verifier, in consumption order: bottom layer to root.
    pub witness: Vec<H::Hash>,
}
Oops, something went wrong.
7b36b0e
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Possible performance regression was detected for benchmark.
Benchmark result of this commit is worse than the previous benchmark result exceeding threshold
2
.SecureField add
212093570
ns/iter (± 35993734
)105246889
ns/iter (± 1780394
)2.02
This comment was automatically generated by workflow using github-action-benchmark.
CC: @spapinistarkware