(
lv: &[P; NUM_ARITH_COLUMNS],
yield_constr: &mut ConstraintConsumer<P>,
) {
@@ -236,7 +236,7 @@ pub(crate) fn eval_ext_circuit_addcy<F: RichField + Extendable<D>, const D: usize>(
}
}
-pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
+pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
diff --git a/evm/src/arithmetic/arithmetic_stark.rs b/evm/src/arithmetic/arithmetic_stark.rs
index 3d281c868c..5e3f039cdf 100644
--- a/evm/src/arithmetic/arithmetic_stark.rs
+++ b/evm/src/arithmetic/arithmetic_stark.rs
@@ -1,5 +1,5 @@
-use std::marker::PhantomData;
-use std::ops::Range;
+use core::marker::PhantomData;
+use core::ops::Range;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
@@ -11,19 +11,19 @@ use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::util::transpose;
use static_assertions::const_assert;
-use super::columns::NUM_ARITH_COLUMNS;
+use super::columns::{op_flags, NUM_ARITH_COLUMNS};
use super::shift;
use crate::all_stark::Table;
-use crate::arithmetic::columns::{RANGE_COUNTER, RC_FREQUENCIES, SHARED_COLS};
+use crate::arithmetic::columns::{NUM_SHARED_COLS, RANGE_COUNTER, RC_FREQUENCIES, SHARED_COLS};
use crate::arithmetic::{addcy, byte, columns, divmod, modular, mul, Operation};
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
-use crate::cross_table_lookup::{Column, TableWithColumns};
+use crate::cross_table_lookup::TableWithColumns;
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
-use crate::lookup::Lookup;
+use crate::lookup::{Column, Filter, Lookup};
use crate::stark::Stark;
-/// Link the 16-bit columns of the arithmetic table, split into groups
-/// of N_LIMBS at a time in `regs`, with the corresponding 32-bit
+/// Creates a vector of `Columns` to link the 16-bit columns of the arithmetic table,
+/// split into groups of N_LIMBS at a time in `regs`, with the corresponding 32-bit
/// columns of the CPU table. Does this for all ops in `ops`.
///
/// This is done by taking pairs of columns (x, y) of the arithmetic
@@ -57,11 +57,18 @@ fn cpu_arith_data_link<F: Field>(
res
}
-pub fn ctl_arithmetic_rows<F: Field>() -> TableWithColumns<F> {
+/// Returns the `TableWithColumns` for `ArithmeticStark` rows where one of the arithmetic operations has been called.
+pub(crate) fn ctl_arithmetic_rows<F: Field>() -> TableWithColumns<F> {
// We scale each filter flag with the associated opcode value.
// If an arithmetic operation is happening on the CPU side,
// the CTL will enforce that the reconstructed opcode value
// from the opcode bits matches.
+ // These opcodes are missing the syscall and prover_input opcodes,
+ // since `IS_RANGE_CHECK` can be associated with multiple opcodes.
+ // For `IS_RANGE_CHECK`, the opcode is written in `OPCODE_COL`,
+ // and we use that column for scaling and the CTL checks.
+ // Note that we ensure in the STARK's constraints that the
+ // value in `OPCODE_COL` is 0 if `IS_RANGE_CHECK` = 0.
const COMBINED_OPS: [(usize, u8); 16] = [
(columns::IS_ADD, 0x01),
(columns::IS_MUL, 0x02),
@@ -88,30 +95,38 @@ pub fn ctl_arithmetic_rows<F: Field>() -> TableWithColumns<F> {
columns::OUTPUT_REGISTER,
];
- let filter_column = Some(Column::sum(COMBINED_OPS.iter().map(|(c, _v)| *c)));
+ let mut filter_cols = COMBINED_OPS.to_vec();
+ filter_cols.push((columns::IS_RANGE_CHECK, 0x01));
+ let filter = Some(Filter::new_simple(Column::sum(
+ filter_cols.iter().map(|(c, _v)| *c),
+ )));
+
+ let mut all_combined_cols = COMBINED_OPS.to_vec();
+ all_combined_cols.push((columns::OPCODE_COL, 0x01));
// Create the Arithmetic Table whose columns are those of the
// operations listed in `ops` whose inputs and outputs are given
// by `regs`, where each element of `regs` is a range of columns
// corresponding to a 256-bit input or output register (also `ops`
// is used as the operation filter).
TableWithColumns::new(
- Table::Arithmetic,
- cpu_arith_data_link(&COMBINED_OPS, ®ISTER_MAP),
- filter_column,
+ *Table::Arithmetic,
+ cpu_arith_data_link(&all_combined_cols, ®ISTER_MAP),
+ filter,
)
}
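A standalone sketch of why the opcode scaling works (plain `u64` arithmetic standing in for field elements; `reconstructed_opcode` is a hypothetical helper, not part of this PR): with the flags constrained to be boolean and at most one active, the scaled sum collapses to the opcode of the active operation, which the CTL can then match against the opcode recomputed from its bits on the CPU side.

    // Hypothetical helper: with at most one flag set, the scaled sum
    // equals the active operation's opcode (and 0 on padding rows).
    fn reconstructed_opcode(flags: &[u64], opcodes: &[u64]) -> u64 {
        debug_assert!(flags.iter().all(|&f| f <= 1));
        debug_assert!(flags.iter().sum::<u64>() <= 1);
        flags.iter().zip(opcodes).map(|(f, op)| f * op).sum()
    }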
+/// Structure representing the `Arithmetic` STARK, which carries out all the arithmetic operations.
#[derive(Copy, Clone, Default)]
-pub struct ArithmeticStark<F, const D: usize> {
+pub(crate) struct ArithmeticStark<F, const D: usize> {
pub f: PhantomData<F>,
}
-const RANGE_MAX: usize = 1usize << 16; // Range check strict upper bound
+pub(crate) const RANGE_MAX: usize = 1usize << 16; // Range check strict upper bound
impl<F: RichField, const D: usize> ArithmeticStark<F, D> {
/// Expects input in *column*-major layout
- fn generate_range_checks(&self, cols: &mut Vec<Vec<F>>) {
+ fn generate_range_checks(&self, cols: &mut [Vec<F>]) {
debug_assert!(cols.len() == columns::NUM_ARITH_COLUMNS);
let n_rows = cols[0].len();
@@ -193,6 +208,20 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticStark<F, D> {
let lv: &[P; NUM_ARITH_COLUMNS] = vars.get_local_values().try_into().unwrap();
let nv: &[P; NUM_ARITH_COLUMNS] = vars.get_next_values().try_into().unwrap();
+ // Flags must be boolean.
+ for flag_idx in op_flags() {
+ let flag = lv[flag_idx];
+ yield_constr.constraint(flag * (flag - P::ONES));
+ }
+
+ // Only a single flag must be activated at once.
+ let all_flags = op_flags().map(|i| lv[i]).sum::<P>();
+ yield_constr.constraint(all_flags * (all_flags - P::ONES));
+
+ // Check that `OPCODE_COL` holds 0 if the operation is not a range_check.
+ let opcode_constraint = (P::ONES - lv[columns::IS_RANGE_CHECK]) * lv[columns::OPCODE_COL];
+ yield_constr.constraint(opcode_constraint);
+
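A quick standalone check of the two constraint shapes added above (not PR code; `u64` values stand in for field elements): `f * (f - 1) = 0` forces each flag to be boolean, and the same shape applied to the sum of boolean flags forces at most one of them to be active.

    fn flags_satisfy_constraints(flags: &[u64]) -> bool {
        // f * (f - 1) == 0, i.e. f * f == f: each flag is 0 or 1.
        let all_boolean = flags.iter().all(|&f| f * f == f);
        // Same shape on the sum: at most one flag is active
        // (a sum of 0 is allowed, e.g. on padding rows).
        let s: u64 = flags.iter().sum();
        all_boolean && s * s == s
    }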
// Check the range column: First value must be 0, last row
// must be 2^16-1, and intermediate rows must increment by 0
// or 1.
@@ -204,11 +233,17 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticStark<F, D> {
let range_max = P::Scalar::from_canonical_u64((RANGE_MAX - 1) as u64);
yield_constr.constraint_last_row(rc1 - range_max);
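For reference, a standalone sketch (not PR code) of a range-counter column satisfying all three constraints, assuming the trace has at least `RANGE_MAX` rows:

    fn range_counter_column(n_rows: usize, range_max: usize) -> Vec<u64> {
        assert!(n_rows >= range_max);
        // First row is 0, the value increments by 1 until it saturates
        // at range_max - 1, then increments by 0 up to the last row.
        (0..n_rows).map(|i| i.min(range_max - 1) as u64).collect()
    }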
+ // Evaluate constraints for the MUL operation.
mul::eval_packed_generic(lv, yield_constr);
+ // Evaluate constraints for ADD, SUB, LT and GT operations.
addcy::eval_packed_generic(lv, yield_constr);
+ // Evaluate constraints for DIV and MOD operations.
divmod::eval_packed(lv, nv, yield_constr);
+ // Evaluate constraints for ADDMOD, SUBMOD, MULMOD and for FP254 modular operations.
modular::eval_packed(lv, nv, yield_constr);
+ // Evaluate constraints for the BYTE operation.
byte::eval_packed(lv, yield_constr);
+ // Evaluate constraints for SHL and SHR operations.
shift::eval_packed_generic(lv, nv, yield_constr);
}
@@ -223,6 +258,31 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticStark<F, D> {
let nv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS] =
vars.get_next_values().try_into().unwrap();
+ // Flags must be boolean.
+ for flag_idx in op_flags() {
+ let flag = lv[flag_idx];
+ let constraint = builder.mul_sub_extension(flag, flag, flag);
+ yield_constr.constraint(builder, constraint);
+ }
+
+ // Only a single flag must be activated at once.
+ let all_flags = builder.add_many_extension(op_flags().map(|i| lv[i]));
+ let constraint = builder.mul_sub_extension(all_flags, all_flags, all_flags);
+ yield_constr.constraint(builder, constraint);
+
+ // Check that `OPCODE_COL` holds 0 if the operation is not a range_check.
+ let opcode_constraint = builder.arithmetic_extension(
+ F::NEG_ONE,
+ F::ONE,
+ lv[columns::IS_RANGE_CHECK],
+ lv[columns::OPCODE_COL],
+ lv[columns::OPCODE_COL],
+ );
+ yield_constr.constraint(builder, opcode_constraint);
+
+ // Check the range column: First value must be 0, last row
+ // must be 2^16-1, and intermediate rows must increment by 0
+ // or 1.
let rc1 = lv[columns::RANGE_COUNTER];
let rc2 = nv[columns::RANGE_COUNTER];
yield_constr.constraint_first_row(builder, rc1);
@@ -234,11 +294,17 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticStark<F, D> {
let t = builder.sub_extension(rc1, range_max);
yield_constr.constraint_last_row(builder, t);
+ // Evaluate constraints for the MUL operation.
mul::eval_ext_circuit(builder, lv, yield_constr);
+ // Evaluate constraints for ADD, SUB, LT and GT operations.
addcy::eval_ext_circuit(builder, lv, yield_constr);
+ // Evaluate constraints for DIV and MOD operations.
divmod::eval_ext_circuit(builder, lv, nv, yield_constr);
+ // Evaluate constraints for ADDMOD, SUBMOD, MULMOD and for FP254 modular operations.
modular::eval_ext_circuit(builder, lv, nv, yield_constr);
+ // Evaluate constraints for the BYTE operation.
byte::eval_ext_circuit(builder, lv, yield_constr);
+ // Evaluate constraints for SHL and SHR operations.
shift::eval_ext_circuit(builder, lv, nv, yield_constr);
}
@@ -246,11 +312,12 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticStark<F, D> {
3
}
- fn lookups(&self) -> Vec<Lookup> {
+ fn lookups(&self) -> Vec<Lookup<F>> {
vec![Lookup {
- columns: SHARED_COLS.collect(),
- table_column: RANGE_COUNTER,
- frequencies_column: RC_FREQUENCIES,
+ columns: Column::singles(SHARED_COLS).collect(),
+ table_column: Column::single(RANGE_COUNTER),
+ frequencies_column: Column::single(RC_FREQUENCIES),
+ filter_columns: vec![None; NUM_SHARED_COLS],
}]
}
}
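For context, the logUp `Lookup` above enforces a multiset relation between the shared columns and the range-counter column. A standalone sketch of that relation (assuming distinct table entries; not PR code):

    fn lookup_holds(values: &[u64], table: &[u64], freqs: &[u64]) -> bool {
        values.iter().all(|v| {
            // Each looked-up value must occur in the table, and the
            // frequencies column must record its exact multiplicity.
            table.iter().position(|t| t == v).map_or(false, |i| {
                freqs[i] == values.iter().filter(|&&x| x == *v).count() as u64
            })
        })
    }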
diff --git a/evm/src/arithmetic/byte.rs b/evm/src/arithmetic/byte.rs
index bb8cd12122..f7581efa77 100644
--- a/evm/src/arithmetic/byte.rs
+++ b/evm/src/arithmetic/byte.rs
@@ -60,7 +60,7 @@
//! y * 256 ∈ {0, 256, 512, ..., 2^16 - 256}
//! 8. Hence y ∈ {0, 1, ..., 255}
-use std::ops::Range;
+use core::ops::Range;
use ethereum_types::U256;
use plonky2::field::extension::Extendable;
@@ -197,7 +197,7 @@ pub(crate) fn generate<F: PrimeField64>(lv: &mut [F], idx: U256, val: U256) {
);
}
-pub fn eval_packed<P: PackedField>(
+pub(crate) fn eval_packed<P: PackedField>(
lv: &[P; NUM_ARITH_COLUMNS],
yield_constr: &mut ConstraintConsumer<P>,
) {
@@ -293,7 +293,7 @@ pub fn eval_packed<P: PackedField>(
}
}
-pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
+pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
@@ -306,6 +306,7 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
let idx_decomp = &lv[AUX_INPUT_REGISTER_0];
let tree = &lv[AUX_INPUT_REGISTER_1];
+ // low 5 bits of the first limb of idx:
let mut idx0_lo5 = builder.zero_extension();
for i in 0..5 {
let bit = idx_decomp[i];
@@ -316,6 +317,9 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
let scale = builder.constant_extension(scale);
idx0_lo5 = builder.mul_add_extension(bit, scale, idx0_lo5);
}
+ // Verify that idx0_hi is the high (11) bits of the first limb of
+ // idx (in particular idx0_hi is at most 11 bits, since idx[0] is
+ // at most 16 bits).
let t = F::Extension::from(F::from_canonical_u64(32));
let t = builder.constant_extension(t);
let t = builder.mul_add_extension(idx_decomp[5], t, idx0_lo5);
@@ -323,6 +327,9 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
let t = builder.mul_extension(is_byte, t);
yield_constr.constraint(builder, t);
+ // Verify the layers of the tree
+ // NB: Each of the bit values is negated in place to account for
+ // the reversed indexing.
let one = builder.one_extension();
let bit = idx_decomp[4];
for i in 0..8 {
@@ -362,6 +369,8 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
let t = builder.mul_extension(is_byte, t);
yield_constr.constraint(builder, t);
+ // Check byte decomposition of last limb:
+
let base8 = F::Extension::from(F::from_canonical_u64(1 << 8));
let base8 = builder.constant_extension(base8);
let lo_byte = lv[BYTE_LAST_LIMB_LO];
@@ -380,19 +389,29 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
yield_constr.constraint(builder, t);
let expected_out_byte = tree[15];
+ // Sum all higher limbs; sum will be non-zero iff idx >= 32.
let mut hi_limb_sum = lv[BYTE_IDX_DECOMP_HI];
for i in 1..N_LIMBS {
hi_limb_sum = builder.add_extension(hi_limb_sum, idx[i]);
}
+ // idx_is_large is 0 or 1
let idx_is_large = lv[BYTE_IDX_IS_LARGE];
let t = builder.mul_sub_extension(idx_is_large, idx_is_large, idx_is_large);
let t = builder.mul_extension(is_byte, t);
yield_constr.constraint(builder, t);
+ // If hi_limb_sum is nonzero, then idx_is_large must be one.
let t = builder.sub_extension(idx_is_large, one);
let t = builder.mul_many_extension([is_byte, hi_limb_sum, t]);
yield_constr.constraint(builder, t);
+ // If idx_is_large is 1, then hi_limb_sum_inv must be the inverse
+ // of hi_limb_sum, hence hi_limb_sum is non-zero, hence idx is
+ // indeed "large".
+ //
+ // Otherwise, if idx_is_large is 0, then hi_limb_sum * hi_limb_sum_inv
+ // is zero, which is only possible if hi_limb_sum is zero, since
+ // hi_limb_sum_inv is non-zero.
let base16 = F::from_canonical_u64(1 << 16);
let hi_limb_sum_inv = builder.mul_const_add_extension(
base16,
@@ -414,6 +433,7 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
let t = builder.mul_extension(is_byte, check);
yield_constr.constraint(builder, t);
+ // Check that the rest of the output limbs are zero
for i in 1..N_LIMBS {
let t = builder.mul_extension(is_byte, out[i]);
yield_constr.constraint(builder, t);
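The `hi_limb_sum` / `idx_is_large` logic above is the standard prover-supplied-inverse test for non-zeroness. A standalone sketch of the pattern, with field arithmetic done as `u64` modulo a prime `p` (not PR code):

    // flag must be boolean; sum * inv == flag makes flag = 1 imply
    // sum != 0; sum * (flag - 1) == 0 makes sum != 0 imply flag = 1.
    fn nonzero_check(sum: u64, inv: u64, flag: u64, p: u64) -> bool {
        let mul = |a: u64, b: u64| ((a as u128 * b as u128) % p as u128) as u64;
        (flag == 0 || flag == 1)
            && mul(sum, inv) == flag
            && mul(sum, (flag + p - 1) % p) == 0
    }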
diff --git a/evm/src/arithmetic/columns.rs b/evm/src/arithmetic/columns.rs
index df2d12476b..e4172bc073 100644
--- a/evm/src/arithmetic/columns.rs
+++ b/evm/src/arithmetic/columns.rs
@@ -1,8 +1,8 @@
//! Arithmetic unit
-use std::ops::Range;
+use core::ops::Range;
-pub const LIMB_BITS: usize = 16;
+pub(crate) const LIMB_BITS: usize = 16;
const EVM_REGISTER_BITS: usize = 256;
/// Return the number of LIMB_BITS limbs that are in an EVM
@@ -20,7 +20,7 @@ const fn n_limbs() -> usize {
}
/// Number of LIMB_BITS limbs that are in an EVM register-sized number.
-pub const N_LIMBS: usize = n_limbs();
+pub(crate) const N_LIMBS: usize = n_limbs();
pub(crate) const IS_ADD: usize = 0;
pub(crate) const IS_MUL: usize = IS_ADD + 1;
@@ -38,8 +38,14 @@ pub(crate) const IS_GT: usize = IS_LT + 1;
pub(crate) const IS_BYTE: usize = IS_GT + 1;
pub(crate) const IS_SHL: usize = IS_BYTE + 1;
pub(crate) const IS_SHR: usize = IS_SHL + 1;
+pub(crate) const IS_RANGE_CHECK: usize = IS_SHR + 1;
+/// Column that stores the opcode if the operation is a range check.
+pub(crate) const OPCODE_COL: usize = IS_RANGE_CHECK + 1;
+pub(crate) const START_SHARED_COLS: usize = OPCODE_COL + 1;
-pub(crate) const START_SHARED_COLS: usize = IS_SHR + 1;
+pub(crate) const fn op_flags() -> Range<usize> {
+ IS_ADD..IS_RANGE_CHECK + 1
+}
/// Within the Arithmetic Unit, there are shared columns which can be
/// used by any arithmetic circuit, depending on which one is active
@@ -109,4 +115,5 @@ pub(crate) const RANGE_COUNTER: usize = START_SHARED_COLS + NUM_SHARED_COLS;
/// The frequencies column used in logUp.
pub(crate) const RC_FREQUENCIES: usize = RANGE_COUNTER + 1;
-pub const NUM_ARITH_COLUMNS: usize = START_SHARED_COLS + NUM_SHARED_COLS + 2;
+/// Number of columns in `ArithmeticStark`.
+pub(crate) const NUM_ARITH_COLUMNS: usize = START_SHARED_COLS + NUM_SHARED_COLS + 2;
diff --git a/evm/src/arithmetic/divmod.rs b/evm/src/arithmetic/divmod.rs
index e143ded6dd..a4599dc721 100644
--- a/evm/src/arithmetic/divmod.rs
+++ b/evm/src/arithmetic/divmod.rs
@@ -1,4 +1,8 @@
-use std::ops::Range;
+//! Support for EVM instructions DIV and MOD.
+//!
+//! The logic for verifying them is detailed in the `modular` submodule.
+
+use core::ops::Range;
use ethereum_types::U256;
use plonky2::field::extension::Extendable;
diff --git a/evm/src/arithmetic/mod.rs b/evm/src/arithmetic/mod.rs
index 7763e98a06..f9a816c1f8 100644
--- a/evm/src/arithmetic/mod.rs
+++ b/evm/src/arithmetic/mod.rs
@@ -1,6 +1,11 @@
use ethereum_types::U256;
use plonky2::field::types::PrimeField64;
+use self::columns::{
+ INPUT_REGISTER_0, INPUT_REGISTER_1, INPUT_REGISTER_2, OPCODE_COL, OUTPUT_REGISTER,
+};
+use self::utils::u256_to_array;
+use crate::arithmetic::columns::IS_RANGE_CHECK;
use crate::extension_tower::BN_BASE;
use crate::util::{addmod, mulmod, submod};
@@ -15,6 +20,9 @@ mod utils;
pub mod arithmetic_stark;
pub(crate) mod columns;
+/// An enum representing different binary operations.
+///
+/// `Shl` and `Shr` are handled differently, by leveraging `Mul` and `Div` respectively.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum BinaryOperator {
Add,
@@ -33,6 +41,7 @@ pub(crate) enum BinaryOperator {
}
impl BinaryOperator {
+ /// Computes the result of a binary arithmetic operation given two inputs.
pub(crate) fn result(&self, input0: U256, input1: U256) -> U256 {
match self {
BinaryOperator::Add => input0.overflowing_add(input1).0,
@@ -81,7 +90,8 @@ impl BinaryOperator {
}
}
- pub(crate) fn row_filter(&self) -> usize {
+ /// Maps a binary arithmetic operation to its associated flag column in the trace.
+ pub(crate) const fn row_filter(&self) -> usize {
match self {
BinaryOperator::Add => columns::IS_ADD,
BinaryOperator::Mul => columns::IS_MUL,
@@ -100,6 +110,7 @@ impl BinaryOperator {
}
}
+/// An enum representing different ternary operations.
#[allow(clippy::enum_variant_names)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum TernaryOperator {
@@ -109,6 +120,7 @@ pub(crate) enum TernaryOperator {
}
impl TernaryOperator {
+ /// Computes the result of a ternary arithmetic operation given three inputs.
pub(crate) fn result(&self, input0: U256, input1: U256, input2: U256) -> U256 {
match self {
TernaryOperator::AddMod => addmod(input0, input1, input2),
@@ -117,7 +129,8 @@ impl TernaryOperator {
}
}
- pub(crate) fn row_filter(&self) -> usize {
+ /// Maps a ternary arithmetic operation to its associated flag column in the trace.
+ pub(crate) const fn row_filter(&self) -> usize {
match self {
TernaryOperator::AddMod => columns::IS_ADDMOD,
TernaryOperator::MulMod => columns::IS_MULMOD,
@@ -127,6 +140,7 @@ impl TernaryOperator {
}
/// An enum representing arithmetic operations that can be either binary or ternary.
+#[allow(clippy::enum_variant_names)]
#[derive(Debug)]
pub(crate) enum Operation {
BinaryOperation {
@@ -142,10 +156,17 @@ pub(crate) enum Operation {
input2: U256,
result: U256,
},
+ RangeCheckOperation {
+ input0: U256,
+ input1: U256,
+ input2: U256,
+ opcode: U256,
+ result: U256,
+ },
}
impl Operation {
- /// Create a binary operator with given inputs.
+ /// Creates a binary operator with given inputs.
///
/// NB: This works as you would expect, EXCEPT for SHL and SHR,
/// whose inputs need a small amount of preprocessing. Specifically,
@@ -170,6 +191,7 @@ impl Operation {
}
}
+ /// Creates a ternary operator with given inputs.
pub(crate) fn ternary(
operator: TernaryOperator,
input0: U256,
@@ -186,10 +208,28 @@ impl Operation {
}
}
+ pub(crate) const fn range_check(
+ input0: U256,
+ input1: U256,
+ input2: U256,
+ opcode: U256,
+ result: U256,
+ ) -> Self {
+ Self::RangeCheckOperation {
+ input0,
+ input1,
+ input2,
+ opcode,
+ result,
+ }
+ }
+
+ /// Gets the result of an arithmetic operation.
pub(crate) fn result(&self) -> U256 {
match self {
Operation::BinaryOperation { result, .. } => *result,
Operation::TernaryOperation { result, .. } => *result,
+ _ => panic!("This function should not be called for range checks."),
}
}
@@ -218,10 +258,18 @@ impl Operation {
input2,
result,
} => ternary_op_to_rows(operator.row_filter(), input0, input1, input2, result),
+ Operation::RangeCheckOperation {
+ input0,
+ input1,
+ input2,
+ opcode,
+ result,
+ } => range_check_to_rows(input0, input1, input2, opcode, result),
}
}
}
+/// Converts a ternary arithmetic operation to one or two rows of the `ArithmeticStark` table.
fn ternary_op_to_rows<F: PrimeField64>(
row_filter: usize,
input0: U256,
@@ -239,6 +287,7 @@ fn ternary_op_to_rows<F: PrimeField64>(
(row1, Some(row2))
}
+/// Converts a binary arithmetic operation to one or two rows of the `ArithmeticStark` table.
fn binary_op_to_rows<F: PrimeField64>(
op: BinaryOperator,
input0: U256,
@@ -281,3 +330,21 @@ fn binary_op_to_rows<F: PrimeField64>(
}
}
}
+
+fn range_check_to_rows<F: PrimeField64>(
+ input0: U256,
+ input1: U256,
+ input2: U256,
+ opcode: U256,
+ result: U256,
+) -> (Vec<F>, Option<Vec<F>>) {
+ let mut row = vec![F::ZERO; columns::NUM_ARITH_COLUMNS];
+ row[IS_RANGE_CHECK] = F::ONE;
+ row[OPCODE_COL] = F::from_canonical_u64(opcode.as_u64());
+ u256_to_array(&mut row[INPUT_REGISTER_0], input0);
+ u256_to_array(&mut row[INPUT_REGISTER_1], input1);
+ u256_to_array(&mut row[INPUT_REGISTER_2], input2);
+ u256_to_array(&mut row[OUTPUT_REGISTER], result);
+
+ (row, None)
+}
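A usage sketch for the new function (illustrative values only; `0xee` is a stand-in opcode and `GoldilocksField` an assumed field instantiation):

    use ethereum_types::U256;
    use plonky2::field::goldilocks_field::GoldilocksField;

    let (_row, extra) = range_check_to_rows::<GoldilocksField>(
        U256::from(3),    // input0
        U256::from(500),  // input1
        U256::zero(),     // input2
        U256::from(0xee), // opcode recorded in OPCODE_COL
        U256::from(1234), // result to be range-checked
    );
    assert!(extra.is_none()); // a range check always occupies a single row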
diff --git a/evm/src/arithmetic/modular.rs b/evm/src/arithmetic/modular.rs
index 4e6e21a632..5a1df5c733 100644
--- a/evm/src/arithmetic/modular.rs
+++ b/evm/src/arithmetic/modular.rs
@@ -1,5 +1,5 @@
-//! Support for the EVM modular instructions ADDMOD, MULMOD and MOD,
-//! as well as DIV.
+//! Support for the EVM modular instructions ADDMOD, SUBMOD, MULMOD and MOD,
+//! as well as DIV and FP254 related modular instructions.
//!
//! This crate verifies an EVM modular instruction, which takes three
//! 256-bit inputs A, B and M, and produces a 256-bit output C satisfying
@@ -108,7 +108,7 @@
//! only require 96 columns, or 80 if the output doesn't need to be
//! reduced.
-use std::ops::Range;
+use core::ops::Range;
use ethereum_types::U256;
use num::bigint::Sign;
@@ -478,7 +478,7 @@ pub(crate) fn modular_constr_poly<P: PackedField>(
let base = P::Scalar::from_canonical_u64(1 << LIMB_BITS);
let offset = P::Scalar::from_canonical_u64(AUX_COEFF_ABS_MAX as u64);
- // constr_poly = c(x) + q(x) * m(x)
+ // constr_poly = c(x) + q(x) * m(x) + (x - β) * s(x)
let mut aux = [P::ZEROS; 2 * N_LIMBS];
for (c, i) in aux.iter_mut().zip(MODULAR_AUX_INPUT_LO) {
// MODULAR_AUX_INPUT elements were offset by 2^20 in
@@ -625,10 +625,13 @@ pub(crate) fn modular_constr_poly_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
) -> [ExtensionTarget<D>; 2 * N_LIMBS] {
let mod_is_zero = nv[MODULAR_MOD_IS_ZERO];
+ // Check that mod_is_zero is zero or one
let t = builder.mul_sub_extension(mod_is_zero, mod_is_zero, mod_is_zero);
let t = builder.mul_extension(filter, t);
yield_constr.constraint_transition(builder, t);
+ // Check that mod_is_zero is zero if modulus is not zero (they
+ // could both be zero)
let limb_sum = builder.add_many_extension(modulus);
let t = builder.mul_extension(limb_sum, mod_is_zero);
let t = builder.mul_extension(filter, t);
@@ -636,13 +639,19 @@ pub(crate) fn modular_constr_poly_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
modulus[0] = builder.add_extension(modulus[0], mod_is_zero);
+ // Is 1 iff the operation is DIV or SHR and the denominator is zero.
let div_denom_is_zero = nv[MODULAR_DIV_DENOM_IS_ZERO];
let div_shr_filter = builder.add_extension(lv[IS_DIV], lv[IS_SHR]);
let t = builder.mul_sub_extension(mod_is_zero, div_shr_filter, div_denom_is_zero);
let t = builder.mul_extension(filter, t);
yield_constr.constraint_transition(builder, t);
+
+ // Needed to compensate for adding mod_is_zero to modulus above,
+ // since the call to eval_packed_generic_addcy() below subtracts modulus
+ // when verifying a DIV or SHR.
output[0] = builder.add_extension(output[0], div_denom_is_zero);
+ // Verify that the output is reduced, i.e. output < modulus.
let out_aux_red = &nv[MODULAR_OUT_AUX_RED];
let one = builder.one_extension();
let zero = builder.zero_extension();
@@ -660,24 +669,31 @@ pub(crate) fn modular_constr_poly_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
&is_less_than,
true,
);
+ // restore output[0]
output[0] = builder.sub_extension(output[0], div_denom_is_zero);
+ // prod = q(x) * m(x)
let prod = pol_mul_wide2_ext_circuit(builder, quot, modulus);
+ // higher order terms must be zero
for &x in prod[2 * N_LIMBS..].iter() {
let t = builder.mul_extension(filter, x);
yield_constr.constraint_transition(builder, t);
}
+ // constr_poly = c(x) + q(x) * m(x)
let mut constr_poly: [_; 2 * N_LIMBS] = prod[0..2 * N_LIMBS].try_into().unwrap();
pol_add_assign_ext_circuit(builder, &mut constr_poly, &output);
let offset =
builder.constant_extension(F::Extension::from_canonical_u64(AUX_COEFF_ABS_MAX as u64));
let zero = builder.zero_extension();
+
+ // constr_poly = c(x) + q(x) * m(x) + (x - β) * s(x)
let mut aux = [zero; 2 * N_LIMBS];
for (c, i) in aux.iter_mut().zip(MODULAR_AUX_INPUT_LO) {
*c = builder.sub_extension(nv[i], offset);
}
+ // add high 16-bits of aux input
let base = F::from_canonical_u64(1u64 << LIMB_BITS);
for (c, j) in aux.iter_mut().zip(MODULAR_AUX_INPUT_HI) {
*c = builder.mul_const_add_extension(base, nv[j], *c);
@@ -700,10 +716,13 @@ pub(crate) fn submod_constr_poly_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
modulus: [ExtensionTarget<D>; N_LIMBS],
mut quot: [ExtensionTarget<D>; 2 * N_LIMBS],
) -> [ExtensionTarget<D>; 2 * N_LIMBS] {
+ // quot was offset by 2^16 - 1 if it was negative; we undo that
+ // offset here:
let (lo, hi) = quot.split_at_mut(N_LIMBS);
let sign = hi[0];
let t = builder.mul_sub_extension(sign, sign, sign);
let t = builder.mul_extension(filter, t);
+ // sign must be 1 (negative) or 0 (positive)
yield_constr.constraint(builder, t);
let offset = F::from_canonical_u16(u16::max_value());
for c in lo {
@@ -712,6 +731,7 @@ pub(crate) fn submod_constr_poly_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
}
hi[0] = builder.zero_extension();
for d in hi {
+ // All higher limbs must be zero
let t = builder.mul_extension(filter, *d);
yield_constr.constraint(builder, t);
}
@@ -737,8 +757,12 @@ pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
bn254_filter,
]);
+ // Ensure that this operation is not the last row of the table;
+ // needed because we access the next row of the table in nv.
yield_constr.constraint_last_row(builder, filter);
+ // Verify that the modulus is the BN254 modulus for the
+ // {ADD,MUL,SUB}FP254 operations.
let modulus = read_value::<N_LIMBS, _>(lv, MODULAR_MODULUS);
for (&mi, bi) in modulus.iter().zip(bn254_modulus_limbs()) {
// bn254_filter * (mi - bi)
@@ -760,6 +784,7 @@ pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
let mul_filter = builder.add_extension(lv[columns::IS_MULMOD], lv[columns::IS_MULFP254]);
let addmul_filter = builder.add_extension(add_filter, mul_filter);
+ // constr_poly has 2*N_LIMBS limbs
let submod_constr_poly = submod_constr_poly_ext_circuit(
lv,
nv,
diff --git a/evm/src/arithmetic/mul.rs b/evm/src/arithmetic/mul.rs
index c09c39d8dc..01c9d5c1c0 100644
--- a/evm/src/arithmetic/mul.rs
+++ b/evm/src/arithmetic/mul.rs
@@ -107,7 +107,7 @@ pub(crate) fn generate_mul<F: PrimeField64>(lv: &mut [F], left_in: [i64; 16], right_in: [i64; 16]) {
.copy_from_slice(&aux_limbs.map(|c| F::from_canonical_u16((c >> 16) as u16)));
}
-pub fn generate<F: PrimeField64>(lv: &mut [F], left_in: U256, right_in: U256) {
+pub(crate) fn generate<F: PrimeField64>(lv: &mut [F], left_in: U256, right_in: U256) {
// TODO: It would probably be clearer/cleaner to read the U256
// into an [i64;N] and then copy that to the lv table.
u256_to_array(&mut lv[INPUT_REGISTER_0], left_in);
@@ -173,7 +173,7 @@ pub(crate) fn eval_packed_generic_mul<P: PackedField>(
}
}
-pub fn eval_packed_generic<P: PackedField>(
+pub(crate) fn eval_packed_generic<P: PackedField>(
lv: &[P; NUM_ARITH_COLUMNS],
yield_constr: &mut ConstraintConsumer<P>,
) {
@@ -195,6 +195,8 @@ pub(crate) fn eval_ext_mul_circuit<F: RichField + Extendable<D>, const D: usize>(
let output_limbs = read_value::<N_LIMBS, _>(lv, OUTPUT_REGISTER);
let aux_limbs = {
+ // MUL_AUX_INPUT was offset by 2^20 in generation, so we undo
+ // that here
let base = builder.constant_extension(F::Extension::from_canonical_u64(1 << LIMB_BITS));
let offset =
builder.constant_extension(F::Extension::from_canonical_u64(AUX_COEFF_ABS_MAX as u64));
@@ -211,17 +213,22 @@ pub(crate) fn eval_ext_mul_circuit<F: RichField + Extendable<D>, const D: usize>(
let mut constr_poly = pol_mul_lo_ext_circuit(builder, left_in_limbs, right_in_limbs);
pol_sub_assign_ext_circuit(builder, &mut constr_poly, &output_limbs);
+ // This subtracts (x - β) * s(x) from constr_poly.
let base = builder.constant_extension(F::Extension::from_canonical_u64(1 << LIMB_BITS));
let rhs = pol_adjoin_root_ext_circuit(builder, aux_limbs, base);
pol_sub_assign_ext_circuit(builder, &mut constr_poly, &rhs);
+ // At this point constr_poly holds the coefficients of the
+ // polynomial a(x)b(x) - c(x) - (x - β)*s(x). The
+ // multiplication is valid if and only if all of those
+ // coefficients are zero.
for &c in &constr_poly {
let filter = builder.mul_extension(filter, c);
yield_constr.constraint(builder, filter);
}
}
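The coefficients being constrained come from schoolbook polynomial multiplication over the limbs; a minimal standalone version of that product (not the crate's `pol_mul_wide` helpers):

    fn pol_mul(a: &[i64], b: &[i64]) -> Vec<i64> {
        // Coefficient k of a(x) * b(x) is the sum of a_i * b_j with i + j = k.
        let mut res = vec![0i64; a.len() + b.len() - 1];
        for (i, &ai) in a.iter().enumerate() {
            for (j, &bj) in b.iter().enumerate() {
                res[i + j] += ai * bj;
            }
        }
        res
    }

With β = 2^16, the identity a(x)b(x) - c(x) - (x - β)s(x) = 0 then holds exactly when the limbs of c are the base-2^16 digits of a*b.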
-pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
+pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
diff --git a/evm/src/arithmetic/shift.rs b/evm/src/arithmetic/shift.rs
index 6600c01e54..bb83798495 100644
--- a/evm/src/arithmetic/shift.rs
+++ b/evm/src/arithmetic/shift.rs
@@ -38,7 +38,7 @@ use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
/// NB: if `shift >= 256`, then the third register holds 0.
/// We leverage the functions in mul.rs and divmod.rs to carry out
/// the computation.
-pub fn generate<F: PrimeField64>(
+pub(crate) fn generate<F: PrimeField64>(
lv: &mut [F],
nv: &mut [F],
is_shl: bool,
@@ -117,7 +117,7 @@ fn eval_packed_shr<P: PackedField>(
);
}
-pub fn eval_packed_generic<P: PackedField>(
+pub(crate) fn eval_packed_generic<P: PackedField>(
lv: &[P; NUM_ARITH_COLUMNS],
nv: &[P; NUM_ARITH_COLUMNS],
yield_constr: &mut ConstraintConsumer<P>,
@@ -168,7 +168,7 @@ fn eval_ext_circuit_shr<F: RichField + Extendable<D>, const D: usize>(
);
}
-pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
+pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
nv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
diff --git a/evm/src/arithmetic/utils.rs b/evm/src/arithmetic/utils.rs
index 6ea375fef3..7350dd3263 100644
--- a/evm/src/arithmetic/utils.rs
+++ b/evm/src/arithmetic/utils.rs
@@ -1,4 +1,4 @@
-use std::ops::{Add, AddAssign, Mul, Neg, Range, Shr, Sub, SubAssign};
+use core::ops::{Add, AddAssign, Mul, Neg, Range, Shr, Sub, SubAssign};
use ethereum_types::U256;
use plonky2::field::extension::Extendable;
@@ -319,6 +319,7 @@ pub(crate) fn read_value_i64_limbs<const N: usize, F: PrimeField64>(
}
#[inline]
+/// Turn a 64-bit integer into 4 16-bit limbs and convert them to field elements.
fn u64_to_array<F: Field>(out: &mut [F], x: u64) {
const_assert!(LIMB_BITS == 16);
debug_assert!(out.len() == 4);
@@ -329,6 +330,7 @@ fn u64_to_array<F: Field>(out: &mut [F], x: u64) {
out[3] = F::from_canonical_u16((x >> 48) as u16);
}
+/// Turn a 256-bit integer into 16 16-bit limbs and convert them to field elements.
// TODO: Refactor/replace u256_limbs in evm/src/util.rs
pub(crate) fn u256_to_array<F: Field>(out: &mut [F], x: U256) {
const_assert!(N_LIMBS == 16);
diff --git a/evm/src/byte_packing/byte_packing_stark.rs b/evm/src/byte_packing/byte_packing_stark.rs
index c28b055a81..ff7a18c06d 100644
--- a/evm/src/byte_packing/byte_packing_stark.rs
+++ b/evm/src/byte_packing/byte_packing_stark.rs
@@ -1,28 +1,22 @@
//! This crate enforces the correctness of reading and writing sequences
//! of bytes in Big-Endian ordering from and to the memory.
//!
-//! The trace layout consists in N consecutive rows for an `N` byte sequence,
-//! with the byte values being cumulatively written to the trace as they are
-//! being processed.
+//! The trace layout consists of one row for an `N` byte sequence (where 32 ≥ `N` > 0).
//!
-//! At row `i` of such a group (starting from 0), the `i`-th byte flag will be activated
-//! (to indicate which byte we are going to be processing), but all bytes with index
-//! 0 to `i` may have non-zero values, as they have already been processed.
+//! In each row, the `i`-th byte flag is activated to indicate a sequence of
+//! length `i+1`.
//!
-//! The length of a sequence is stored within each group of rows corresponding to that
-//! sequence in a dedicated `SEQUENCE_LEN` column. At any row `i`, the remaining length
-//! of the sequence being processed is retrieved from that column and the active byte flag
-//! as:
+//! The length of a sequence can be retrieved for CTLs as:
//!
-//! remaining_length = sequence_length - \sum_{i=0}^31 b[i] * i
+//! sequence_length = \sum_{i=0}^31 b[i] * (i + 1)
//!
//! where b[i] is the `i`-th byte flag.
//!
//! Because of the discrepancy in endianness between the different tables, the byte sequences
//! are actually written in the trace in reverse order from the order they are provided.
-//! As such, the memory virtual address for a group of rows corresponding to a sequence starts
-//! with the final virtual address, corresponding to the final byte being read/written, and
-//! is being decremented at each step.
+//! We only store the virtual address `virt` of the first byte, and the virtual address for byte `i`
+//! can be recovered as:
+//! virt_i = virt + sequence_length - 1 - i
//!
//! Note that, when writing a sequence of bytes to memory, both the `U256` value and the
//! corresponding sequence length are being read from the stack. Because of the endianness
@@ -31,7 +25,7 @@
//! This means that the higher-order bytes will be thrown away during the process, if the value
//! is greater than 256^length, and as a result a different value will be stored in memory.
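A standalone sketch of the two formulas above (assuming `NUM_BYTES = 32` and one-hot length flags; not PR code):

    const NUM_BYTES: usize = 32;

    /// Recovers the sequence length from the one-hot length flags b[i].
    fn sequence_length(b: &[u64; NUM_BYTES]) -> u64 {
        (0..NUM_BYTES).map(|i| b[i] * (i as u64 + 1)).sum()
    }

    /// Virtual address of byte `i`, given the first byte's address `virt`.
    fn virt_of_byte(virt: u64, sequence_length: u64, i: u64) -> u64 {
        virt + sequence_length - 1 - i
    }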
-use std::marker::PhantomData;
+use core::marker::PhantomData;
use itertools::Itertools;
use plonky2::field::extension::{Extendable, FieldExtension};
@@ -46,19 +40,20 @@ use plonky2::util::transpose;
use super::NUM_BYTES;
use crate::byte_packing::columns::{
- index_bytes, value_bytes, ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL, BYTE_INDICES_COLS, IS_READ,
- NUM_COLUMNS, RANGE_COUNTER, RC_FREQUENCIES, SEQUENCE_END, TIMESTAMP,
+ index_len, value_bytes, ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL, IS_READ, LEN_INDICES_COLS,
+ NUM_COLUMNS, RANGE_COUNTER, RC_FREQUENCIES, TIMESTAMP,
};
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
-use crate::cross_table_lookup::Column;
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
-use crate::lookup::Lookup;
+use crate::lookup::{Column, Filter, Lookup};
use crate::stark::Stark;
use crate::witness::memory::MemoryAddress;
/// Strict upper bound for the individual bytes range-check.
const BYTE_RANGE_MAX: usize = 1usize << 8;
+/// Creates the vector of `Columns` for `BytePackingStark` corresponding to the final packed limbs being read/written.
+/// `CpuStark` will look into these columns, as the CPU needs the output of byte packing.
pub(crate) fn ctl_looked_data<F: Field>() -> Vec<Column<F>> {
// Reconstruct the u32 limbs composing the final `U256` word
// being read/written from the underlying byte values. For each,
@@ -66,37 +61,49 @@ pub(crate) fn ctl_looked_data<F: Field>() -> Vec<Column<F>> {
// obtain the corresponding limb.
let outputs: Vec<Column<F>> = (0..8)
.map(|i| {
- let range = (value_bytes(i * 4)..value_bytes(i * 4) + 4).collect_vec();
+ let range = value_bytes(i * 4)..value_bytes(i * 4) + 4;
Column::linear_combination(
range
- .iter()
.enumerate()
- .map(|(j, &c)| (c, F::from_canonical_u64(1 << (8 * j)))),
+ .map(|(j, c)| (c, F::from_canonical_u64(1 << (8 * j)))),
)
})
.collect();
- // This will correspond to the actual sequence length when the `SEQUENCE_END` flag is on.
let sequence_len: Column<F> = Column::linear_combination(
- (0..NUM_BYTES).map(|i| (index_bytes(i), F::from_canonical_usize(i + 1))),
+ (0..NUM_BYTES).map(|i| (index_len(i), F::from_canonical_usize(i + 1))),
);
- Column::singles([ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL])
+ Column::singles([IS_READ, ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL])
.chain([sequence_len])
.chain(Column::singles(&[TIMESTAMP]))
.chain(outputs)
.collect()
}
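A standalone sketch of the limb reconstruction performed by the linear combinations above (not PR code): each u32 limb is the sum of byte_j * 2^(8*j) over its four (endianness-reversed) bytes.

    fn pack_u32_limb(bytes: [u64; 4]) -> u64 {
        bytes.iter().enumerate().map(|(j, &b)| b << (8 * j)).sum()
    }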
-pub fn ctl_looked_filter<F: Field>() -> Column<F> {
+/// CTL filter for the `BytePackingStark` looked table.
+pub(crate) fn ctl_looked_filter<F: Field>() -> Filter<F> {
// The CPU table is only interested in our sequence end rows,
// since those contain the final limbs of our packed int.
- Column::single(SEQUENCE_END)
+ Filter::new_simple(Column::sum((0..NUM_BYTES).map(index_len)))
}
+/// Creates the `Column` linear combinations with which `BytePackingStark` reads/writes the `i`th byte of a sequence from/to `MemoryStark`.
pub(crate) fn ctl_looking_memory<F: Field>(i: usize) -> Vec<Column<F>> {
- let mut res =
- Column::singles([IS_READ, ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL]).collect_vec();
+ let mut res = Column::singles([IS_READ, ADDR_CONTEXT, ADDR_SEGMENT]).collect_vec();
+
+ // Compute the virtual address: `ADDR_VIRTUAL` + `sequence_len` - 1 - i.
+ let sequence_len_minus_one = (0..NUM_BYTES)
+ .map(|j| (index_len(j), F::from_canonical_usize(j)))
.collect::<Vec<_>>();
+ let mut addr_virt_cols = vec![(ADDR_VIRTUAL, F::ONE)];
+ addr_virt_cols.extend(sequence_len_minus_one);
+ let addr_virt = Column::linear_combination_with_constant(
+ addr_virt_cols,
+ F::NEG_ONE * F::from_canonical_usize(i),
+ );
+
+ res.push(addr_virt);
// The i'th input byte being read/written.
res.push(Column::single(value_bytes(i)));
@@ -110,8 +117,8 @@ pub(crate) fn ctl_looking_memory<F: Field>(i: usize) -> Vec<Column<F>> {
}
/// CTL filter for reading/writing the `i`th byte of the byte sequence from/to memory.
-pub(crate) fn ctl_looking_memory_filter<F: Field>(i: usize) -> Column<F> {
- Column::single(index_bytes(i))
+pub(crate) fn ctl_looking_memory_filter<F: Field>(i: usize) -> Filter<F> {
+ Filter::new_simple(Column::sum((i..NUM_BYTES).map(index_len)))
}
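The new filter encodes that byte `i` of a sequence exists exactly when the sequence length is at least `i + 1`; a standalone sketch (one-hot length flags assumed; not PR code):

    fn byte_i_is_active(len_flags: &[u64; 32], i: usize) -> bool {
        // One of index_len(i), ..., index_len(31) is set iff the
        // recorded length is at least i + 1.
        len_flags[i..].iter().sum::<u64>() == 1
    }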
/// Information about a byte packing operation needed for witness generation.
@@ -132,7 +139,7 @@ pub(crate) struct BytePackingOp {
}
#[derive(Copy, Clone, Default)]
-pub struct BytePackingStark<F, const D: usize> {
+pub(crate) struct BytePackingStark<F, const D: usize> {
pub(crate) f: PhantomData<F>,
}
@@ -162,12 +169,14 @@ impl<F: RichField + Extendable<D>, const D: usize> BytePackingStark<F, D> {
ops: Vec,
min_rows: usize,
) -> Vec<[F; NUM_COLUMNS]> {
- let base_len: usize = ops.iter().map(|op| op.bytes.len()).sum();
+ let base_len: usize = ops.iter().map(|op| usize::from(!op.bytes.is_empty())).sum();
let num_rows = core::cmp::max(base_len.max(BYTE_RANGE_MAX), min_rows).next_power_of_two();
let mut rows = Vec::with_capacity(num_rows);
for op in ops {
- rows.extend(self.generate_rows_for_op(op));
+ if !op.bytes.is_empty() {
+ rows.push(self.generate_row_for_op(op));
+ }
}
for _ in rows.len()..num_rows {
@@ -177,7 +186,7 @@ impl<F: RichField + Extendable<D>, const D: usize> BytePackingStark<F, D> {
rows
}
- fn generate_rows_for_op(&self, op: BytePackingOp) -> Vec<[F; NUM_COLUMNS]> {
+ fn generate_row_for_op(&self, op: BytePackingOp) -> [F; NUM_COLUMNS] {
let BytePackingOp {
is_read,
base_address,
@@ -191,40 +200,32 @@ impl, const D: usize> BytePackingStark {
virt,
} = base_address;
- let mut rows = Vec::with_capacity(bytes.len());
let mut row = [F::ZERO; NUM_COLUMNS];
row[IS_READ] = F::from_bool(is_read);
row[ADDR_CONTEXT] = F::from_canonical_usize(context);
row[ADDR_SEGMENT] = F::from_canonical_usize(segment);
- // Because of the endianness, we start by the final virtual address value
- // and decrement it at each step. Similarly, we process the byte sequence
- // in reverse order.
- row[ADDR_VIRTUAL] = F::from_canonical_usize(virt + bytes.len() - 1);
+ // We store the initial virtual address. But in the CTLs,
+ // we start from virt + sequence_len - 1.
+ row[ADDR_VIRTUAL] = F::from_canonical_usize(virt);
row[TIMESTAMP] = F::from_canonical_usize(timestamp);
+ row[index_len(bytes.len() - 1)] = F::ONE;
+
for (i, &byte) in bytes.iter().rev().enumerate() {
- if i == bytes.len() - 1 {
- row[SEQUENCE_END] = F::ONE;
- }
row[value_bytes(i)] = F::from_canonical_u8(byte);
- row[index_bytes(i)] = F::ONE;
-
- rows.push(row);
- row[index_bytes(i)] = F::ZERO;
- row[ADDR_VIRTUAL] -= F::ONE;
}
- rows
+ row
}
- fn generate_padding_row(&self) -> [F; NUM_COLUMNS] {
+ const fn generate_padding_row(&self) -> [F; NUM_COLUMNS] {
[F::ZERO; NUM_COLUMNS]
}
/// Expects input in *column*-major layout
- fn generate_range_checks(&self, cols: &mut Vec<Vec<F>>) {
+ fn generate_range_checks(&self, cols: &mut [Vec<F>]) {
debug_assert!(cols.len() == NUM_COLUMNS);
let n_rows = cols[0].len();
@@ -254,37 +255,6 @@ impl<F: RichField + Extendable<D>, const D: usize> BytePackingStark<F, D> {
}
}
}
-
- /// There is only one `i` for which `local_values[index_bytes(i)]` is non-zero,
- /// and `i+1` is the current position:
- fn get_active_position<FE, P, const D2: usize>(&self, row: &[P; NUM_COLUMNS]) -> P
- where
- FE: FieldExtension<D2, BaseField = F>,
- P: PackedField<Scalar = FE>,
- {
- (0..NUM_BYTES)
- .map(|i| row[index_bytes(i)] * P::Scalar::from_canonical_usize(i + 1))
- .sum()
- }
-
- /// Recursive version of `get_active_position`.
- fn get_active_position_circuit(
- &self,
- builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
- row: &[ExtensionTarget<D>; NUM_COLUMNS],
- ) -> ExtensionTarget<D> {
- let mut current_position = row[index_bytes(0)];
-
- for i in 1..NUM_BYTES {
- current_position = builder.mul_const_add_extension(
- F::from_canonical_usize(i + 1),
- row[index_bytes(i)],
- current_position,
- );
- }
-
- current_position
- }
}
impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for BytePackingStark<F, D> {
@@ -306,11 +276,22 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for BytePackingStark<F, D> {
let local_values: &[P; NUM_COLUMNS] = vars.get_local_values().try_into().unwrap();
let next_values: &[P; NUM_COLUMNS] = vars.get_next_values().try_into().unwrap();
+ // Check the range column: First value must be 0, last row
+ // must be 255, and intermediate rows must increment by 0
+ // or 1.
+ let rc1 = local_values[RANGE_COUNTER];
+ let rc2 = next_values[RANGE_COUNTER];
+ yield_constr.constraint_first_row(rc1);
+ let incr = rc2 - rc1;
+ yield_constr.constraint_transition(incr * incr - incr);
+ let range_max = P::Scalar::from_canonical_u64((BYTE_RANGE_MAX - 1) as u64);
+ yield_constr.constraint_last_row(rc1 - range_max);
+
let one = P::ONES;
// We filter active columns by summing all the byte indices.
// Constraining each of them to be boolean is done later on below.
- let current_filter = local_values[BYTE_INDICES_COLS].iter().copied().sum::<P>();
+ let current_filter = local_values[LEN_INDICES_COLS].iter().copied().sum::<P>();
yield_constr.constraint(current_filter * (current_filter - one));
// The filter column must start by one.
@@ -322,86 +303,20 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for BytePackingStark<F, D> {
// Each byte index must be boolean.
for i in 0..NUM_BYTES {
- let idx_i = local_values[index_bytes(i)];
+ let idx_i = local_values[index_len(i)];
yield_constr.constraint(idx_i * (idx_i - one));
}
- // The sequence start flag column must start by one.
- let current_sequence_start = local_values[index_bytes(0)];
- yield_constr.constraint_first_row(current_sequence_start - one);
-
- // The sequence end flag must be boolean
- let current_sequence_end = local_values[SEQUENCE_END];
- yield_constr.constraint(current_sequence_end * (current_sequence_end - one));
-
- // If filter is off, all flags and byte indices must be off.
- let byte_indices = local_values[BYTE_INDICES_COLS].iter().copied().sum::<P>();
- yield_constr.constraint(
- (current_filter - one) * (current_is_read + current_sequence_end + byte_indices),
- );
-
// Only padding rows have their filter turned off.
- let next_filter = next_values[BYTE_INDICES_COLS].iter().copied().sum::<P>();
+ let next_filter = next_values[LEN_INDICES_COLS].iter().copied().sum::<P>();
yield_constr.constraint_transition(next_filter * (next_filter - current_filter));
- // Unless the current sequence end flag is activated, the is_read filter must remain unchanged.
- let next_is_read = next_values[IS_READ];
- yield_constr
- .constraint_transition((current_sequence_end - one) * (next_is_read - current_is_read));
-
- // If the sequence end flag is activated, the next row must be a new sequence or filter must be off.
- let next_sequence_start = next_values[index_bytes(0)];
- yield_constr.constraint_transition(
- current_sequence_end * next_filter * (next_sequence_start - one),
- );
-
- // The active position in a byte sequence must increase by one on every row
- // or be one on the next row (i.e. at the start of a new sequence).
- let current_position = self.get_active_position(local_values);
- let next_position = self.get_active_position(next_values);
- yield_constr.constraint_transition(
- next_filter * (next_position - one) * (next_position - current_position - one),
- );
-
- // The last row must be the end of a sequence or a padding row.
- yield_constr.constraint_last_row(current_filter * (current_sequence_end - one));
-
- // If the next position is one in an active row, the current end flag must be one.
- yield_constr
- .constraint_transition(next_filter * current_sequence_end * (next_position - one));
-
- // The context, segment and timestamp fields must remain unchanged throughout a byte sequence.
- // The virtual address must decrement by one at each step of a sequence.
- let current_context = local_values[ADDR_CONTEXT];
- let next_context = next_values[ADDR_CONTEXT];
- let current_segment = local_values[ADDR_SEGMENT];
- let next_segment = next_values[ADDR_SEGMENT];
- let current_virtual = local_values[ADDR_VIRTUAL];
- let next_virtual = next_values[ADDR_VIRTUAL];
- let current_timestamp = local_values[TIMESTAMP];
- let next_timestamp = next_values[TIMESTAMP];
- yield_constr.constraint_transition(
- next_filter * (next_sequence_start - one) * (next_context - current_context),
- );
- yield_constr.constraint_transition(
- next_filter * (next_sequence_start - one) * (next_segment - current_segment),
- );
- yield_constr.constraint_transition(
- next_filter * (next_sequence_start - one) * (next_timestamp - current_timestamp),
- );
- yield_constr.constraint_transition(
- next_filter * (next_sequence_start - one) * (current_virtual - next_virtual - one),
- );
-
- // If not at the end of a sequence, each next byte must equal the current one
- // when reading through the sequence, or the next byte index must be one.
- for i in 0..NUM_BYTES {
- let current_byte = local_values[value_bytes(i)];
- let next_byte = next_values[value_bytes(i)];
- let next_byte_index = next_values[index_bytes(i)];
- yield_constr.constraint_transition(
- (current_sequence_end - one) * (next_byte_index - one) * (next_byte - current_byte),
- );
+ // Check that all limbs after final length are 0.
+ for i in 0..NUM_BYTES - 1 {
+ // If the length is i+1, then value_bytes(i+1),...,value_bytes(NUM_BYTES-1) must be 0.
+ for j in i + 1..NUM_BYTES {
+ yield_constr.constraint(local_values[index_len(i)] * local_values[value_bytes(j)]);
+ }
}
}
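A standalone restatement of the zero-padding rule added above (not PR code): if the flag for length `i + 1` is set, every byte value past position `i` must be zero.

    fn padding_ok(len_flags: &[u64; 32], bytes: &[u64; 32]) -> bool {
        (0..31).all(|i| len_flags[i] == 0 || bytes[i + 1..].iter().all(|&b| b == 0))
    }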
@@ -416,9 +331,23 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for BytePackingStark<F, D> {
let next_values: &[ExtensionTarget<D>; NUM_COLUMNS] =
vars.get_next_values().try_into().unwrap();
+ // Check the range column: First value must be 0, last row
+ // must be 255, and intermediate rows must increment by 0
+ // or 1.
+ let rc1 = local_values[RANGE_COUNTER];
+ let rc2 = next_values[RANGE_COUNTER];
+ yield_constr.constraint_first_row(builder, rc1);
+ let incr = builder.sub_extension(rc2, rc1);
+ let t = builder.mul_sub_extension(incr, incr, incr);
+ yield_constr.constraint_transition(builder, t);
+ let range_max =
+ builder.constant_extension(F::Extension::from_canonical_usize(BYTE_RANGE_MAX - 1));
+ let t = builder.sub_extension(rc1, range_max);
+ yield_constr.constraint_last_row(builder, t);
+
// We filter active columns by summing all the byte indices.
// Constraining each of them to be boolean is done later on below.
- let current_filter = builder.add_many_extension(&local_values[BYTE_INDICES_COLS]);
+ let current_filter = builder.add_many_extension(&local_values[LEN_INDICES_COLS]);
let constraint = builder.mul_sub_extension(current_filter, current_filter, current_filter);
yield_constr.constraint(builder, constraint);
@@ -434,119 +363,25 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for BytePackingStark<F, D> {
// Each byte index must be boolean.
for i in 0..NUM_BYTES {
- let idx_i = local_values[index_bytes(i)];
+ let idx_i = local_values[index_len(i)];
let constraint = builder.mul_sub_extension(idx_i, idx_i, idx_i);
yield_constr.constraint(builder, constraint);
}
- // The sequence start flag column must start by one.
- let current_sequence_start = local_values[index_bytes(0)];
- let constraint = builder.add_const_extension(current_sequence_start, F::NEG_ONE);
- yield_constr.constraint_first_row(builder, constraint);
-
- // The sequence end flag must be boolean
- let current_sequence_end = local_values[SEQUENCE_END];
- let constraint = builder.mul_sub_extension(
- current_sequence_end,
- current_sequence_end,
- current_sequence_end,
- );
- yield_constr.constraint(builder, constraint);
-
- // If filter is off, all flags and byte indices must be off.
- let byte_indices = builder.add_many_extension(&local_values[BYTE_INDICES_COLS]);
- let constraint = builder.add_extension(current_sequence_end, byte_indices);
- let constraint = builder.add_extension(constraint, current_is_read);
- let constraint = builder.mul_sub_extension(constraint, current_filter, constraint);
- yield_constr.constraint(builder, constraint);
-
// Only padding rows have their filter turned off.
- let next_filter = builder.add_many_extension(&next_values[BYTE_INDICES_COLS]);
+ let next_filter = builder.add_many_extension(&next_values[LEN_INDICES_COLS]);
let constraint = builder.sub_extension(next_filter, current_filter);
let constraint = builder.mul_extension(next_filter, constraint);
yield_constr.constraint_transition(builder, constraint);
- // Unless the current sequence end flag is activated, the is_read filter must remain unchanged.
- let next_is_read = next_values[IS_READ];
- let diff_is_read = builder.sub_extension(next_is_read, current_is_read);
- let constraint =
- builder.mul_sub_extension(diff_is_read, current_sequence_end, diff_is_read);
- yield_constr.constraint_transition(builder, constraint);
-
- // If the sequence end flag is activated, the next row must be a new sequence or filter must be off.
- let next_sequence_start = next_values[index_bytes(0)];
- let constraint = builder.mul_sub_extension(
- current_sequence_end,
- next_sequence_start,
- current_sequence_end,
- );
- let constraint = builder.mul_extension(next_filter, constraint);
- yield_constr.constraint_transition(builder, constraint);
-
- // The active position in a byte sequence must increase by one on every row
- // or be one on the next row (i.e. at the start of a new sequence).
- let current_position = self.get_active_position_circuit(builder, local_values);
- let next_position = self.get_active_position_circuit(builder, next_values);
-
- let position_diff = builder.sub_extension(next_position, current_position);
- let is_new_or_inactive = builder.mul_sub_extension(next_filter, next_position, next_filter);
- let constraint =
- builder.mul_sub_extension(is_new_or_inactive, position_diff, is_new_or_inactive);
- yield_constr.constraint_transition(builder, constraint);
-
- // The last row must be the end of a sequence or a padding row.
- let constraint =
- builder.mul_sub_extension(current_filter, current_sequence_end, current_filter);
- yield_constr.constraint_last_row(builder, constraint);
-
- // If the next position is one in an active row, the current end flag must be one.
- let constraint = builder.mul_extension(next_filter, current_sequence_end);
- let constraint = builder.mul_sub_extension(constraint, next_position, constraint);
- yield_constr.constraint_transition(builder, constraint);
-
- // The context, segment and timestamp fields must remain unchanged throughout a byte sequence.
- // The virtual address must decrement by one at each step of a sequence.
- let current_context = local_values[ADDR_CONTEXT];
- let next_context = next_values[ADDR_CONTEXT];
- let current_segment = local_values[ADDR_SEGMENT];
- let next_segment = next_values[ADDR_SEGMENT];
- let current_virtual = local_values[ADDR_VIRTUAL];
- let next_virtual = next_values[ADDR_VIRTUAL];
- let current_timestamp = local_values[TIMESTAMP];
- let next_timestamp = next_values[TIMESTAMP];
- let addr_filter = builder.mul_sub_extension(next_filter, next_sequence_start, next_filter);
- {
- let constraint = builder.sub_extension(next_context, current_context);
- let constraint = builder.mul_extension(addr_filter, constraint);
- yield_constr.constraint_transition(builder, constraint);
- }
- {
- let constraint = builder.sub_extension(next_segment, current_segment);
- let constraint = builder.mul_extension(addr_filter, constraint);
- yield_constr.constraint_transition(builder, constraint);
- }
- {
- let constraint = builder.sub_extension(next_timestamp, current_timestamp);
- let constraint = builder.mul_extension(addr_filter, constraint);
- yield_constr.constraint_transition(builder, constraint);
- }
- {
- let constraint = builder.sub_extension(current_virtual, next_virtual);
- let constraint = builder.mul_sub_extension(addr_filter, constraint, addr_filter);
- yield_constr.constraint_transition(builder, constraint);
- }
-
- // If not at the end of a sequence, each next byte must equal the current one
- // when reading through the sequence, or the next byte index must be one.
- for i in 0..NUM_BYTES {
- let current_byte = local_values[value_bytes(i)];
- let next_byte = next_values[value_bytes(i)];
- let next_byte_index = next_values[index_bytes(i)];
- let byte_diff = builder.sub_extension(next_byte, current_byte);
- let constraint = builder.mul_sub_extension(byte_diff, next_byte_index, byte_diff);
- let constraint =
- builder.mul_sub_extension(constraint, current_sequence_end, constraint);
- yield_constr.constraint_transition(builder, constraint);
+ // Check that all limbs after final length are 0.
+ for i in 0..NUM_BYTES - 1 {
+ // If the length is i+1, then value_bytes(i+1),...,value_bytes(NUM_BYTES-1) must be 0.
+ for j in i + 1..NUM_BYTES {
+ let constr =
+ builder.mul_extension(local_values[index_len(i)], local_values[value_bytes(j)]);
+ yield_constr.constraint(builder, constr);
+ }
}
}
@@ -554,11 +389,12 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for BytePackingStark<F, D> {
3
}
- fn lookups(&self) -> Vec<Lookup> {
+ fn lookups(&self) -> Vec<Lookup<F>> {
vec![Lookup {
- columns: (value_bytes(0)..value_bytes(0) + NUM_BYTES).collect(),
- table_column: RANGE_COUNTER,
- frequencies_column: RC_FREQUENCIES,
+ columns: Column::singles(value_bytes(0)..value_bytes(0) + NUM_BYTES).collect(),
+ table_column: Column::single(RANGE_COUNTER),
+ frequencies_column: Column::single(RC_FREQUENCIES),
+ filter_columns: vec![None; NUM_BYTES],
}]
}
}
diff --git a/evm/src/byte_packing/columns.rs b/evm/src/byte_packing/columns.rs
index 4eff0df8f5..cbed53de1d 100644
--- a/evm/src/byte_packing/columns.rs
+++ b/evm/src/byte_packing/columns.rs
@@ -6,28 +6,28 @@ use crate::byte_packing::NUM_BYTES;
/// 1 if this is a READ operation, and 0 if this is a WRITE operation.
pub(crate) const IS_READ: usize = 0;
-/// 1 if this is the end of a sequence of bytes.
-/// This is also used as filter for the CTL.
-pub(crate) const SEQUENCE_END: usize = IS_READ + 1;
-pub(super) const BYTES_INDICES_START: usize = SEQUENCE_END + 1;
-pub(crate) const fn index_bytes(i: usize) -> usize {
+pub(super) const LEN_INDICES_START: usize = IS_READ + 1;
+// There are `NUM_BYTES` columns used to represent the length of
+// the input byte sequence for an (un)packing operation.
+// index_len(i) is 1 iff the length is i+1.
+pub(crate) const fn index_len(i: usize) -> usize {
debug_assert!(i < NUM_BYTES);
- BYTES_INDICES_START + i
+ LEN_INDICES_START + i
}
-// Note: Those are used as filter for distinguishing active vs padding rows,
-// and also to obtain the length of a sequence of bytes being processed.
-pub(crate) const BYTE_INDICES_COLS: Range<usize> =
- BYTES_INDICES_START..BYTES_INDICES_START + NUM_BYTES;
+// Note: Those are used to obtain the length of a sequence of bytes being processed.
+pub(crate) const LEN_INDICES_COLS: Range<usize> = LEN_INDICES_START..LEN_INDICES_START + NUM_BYTES;
-pub(crate) const ADDR_CONTEXT: usize = BYTES_INDICES_START + NUM_BYTES;
+pub(crate) const ADDR_CONTEXT: usize = LEN_INDICES_START + NUM_BYTES;
pub(crate) const ADDR_SEGMENT: usize = ADDR_CONTEXT + 1;
pub(crate) const ADDR_VIRTUAL: usize = ADDR_SEGMENT + 1;
pub(crate) const TIMESTAMP: usize = ADDR_VIRTUAL + 1;
// 32 byte limbs hold a total of 256 bits.
const BYTES_VALUES_START: usize = TIMESTAMP + 1;
+// There are `NUM_BYTES` columns used to store the values of the bytes
+// that are being read/written for an (un)packing operation.
pub(crate) const fn value_bytes(i: usize) -> usize {
debug_assert!(i < NUM_BYTES);
BYTES_VALUES_START + i
@@ -38,4 +38,5 @@ pub(crate) const RANGE_COUNTER: usize = BYTES_VALUES_START + NUM_BYTES;
/// The frequencies column used in logUp.
pub(crate) const RC_FREQUENCIES: usize = RANGE_COUNTER + 1;
+/// Number of columns in `BytePackingStark`.
pub(crate) const NUM_COLUMNS: usize = RANGE_COUNTER + 2;
diff --git a/evm/src/byte_packing/mod.rs b/evm/src/byte_packing/mod.rs
index 7cc93374ca..3767b21ed6 100644
--- a/evm/src/byte_packing/mod.rs
+++ b/evm/src/byte_packing/mod.rs
@@ -6,4 +6,5 @@
pub mod byte_packing_stark;
pub mod columns;
+/// Maximum number of bytes being processed by a byte (un)packing operation.
pub(crate) const NUM_BYTES: usize = 32;
diff --git a/evm/src/config.rs b/evm/src/config.rs
index a593c827c2..3f88d99f5d 100644
--- a/evm/src/config.rs
+++ b/evm/src/config.rs
@@ -1,20 +1,29 @@
use plonky2::fri::reduction_strategies::FriReductionStrategy;
use plonky2::fri::{FriConfig, FriParams};
+/// A configuration containing the different parameters to be used by the STARK prover.
pub struct StarkConfig {
+ /// The targeted security level for the proofs generated with this configuration.
pub security_bits: usize,
/// The number of challenge points to generate, for IOPs that have soundness errors of (roughly)
/// `degree / |F|`.
pub num_challenges: usize,
+ /// The configuration of the FRI sub-protocol.
pub fri_config: FriConfig,
}
+impl Default for StarkConfig {
+ fn default() -> Self {
+ Self::standard_fast_config()
+ }
+}
+
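With the new `Default` impl, both spellings below yield the same parameters (usage sketch):

    let a = StarkConfig::standard_fast_config();
    let b = StarkConfig::default(); // equivalent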
impl StarkConfig {
/// A typical configuration with a rate of 2, resulting in fast but large proofs.
/// Targets ~100 bit conjectured security.
- pub fn standard_fast_config() -> Self {
+ pub const fn standard_fast_config() -> Self {
Self {
security_bits: 100,
num_challenges: 2,
diff --git a/evm/src/constraint_consumer.rs b/evm/src/constraint_consumer.rs
index 49dc018ce3..919b51638a 100644
--- a/evm/src/constraint_consumer.rs
+++ b/evm/src/constraint_consumer.rs
@@ -1,4 +1,4 @@
-use std::marker::PhantomData;
+use core::marker::PhantomData;
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
@@ -29,7 +29,7 @@ pub struct ConstraintConsumer<P: PackedField> {
}
impl<P: PackedField> ConstraintConsumer<P> {
- pub fn new(
+ pub(crate) fn new(
alphas: Vec<P>,
z_last: P,
lagrange_basis_first: P,
@@ -44,17 +44,17 @@ impl<P: PackedField> ConstraintConsumer<P> {
}
}
- pub fn accumulators(self) -> Vec<P> {
+ pub(crate) fn accumulators(self) -> Vec<P> {
self.constraint_accs
}
/// Add one constraint valid on all rows except the last.
- pub fn constraint_transition(&mut self, constraint: P) {
+ pub(crate) fn constraint_transition(&mut self, constraint: P) {
self.constraint(constraint * self.z_last);
}
/// Add one constraint on all rows.
- pub fn constraint(&mut self, constraint: P) {
+ pub(crate) fn constraint(&mut self, constraint: P) {
for (&alpha, acc) in self.alphas.iter().zip(&mut self.constraint_accs) {
*acc *= alpha;
*acc += constraint;
@@ -63,13 +63,13 @@ impl<P: PackedField> ConstraintConsumer<P> {
/// Add one constraint, but first multiply it by a filter such that it will only apply to the
/// first row of the trace.
- pub fn constraint_first_row(&mut self, constraint: P) {
+ pub(crate) fn constraint_first_row(&mut self, constraint: P) {
self.constraint(constraint * self.lagrange_basis_first);
}
/// Add one constraint, but first multiply it by a filter such that it will only apply to the
/// last row of the trace.
- pub fn constraint_last_row(&mut self, constraint: P) {
+ pub(crate) fn constraint_last_row(&mut self, constraint: P) {
self.constraint(constraint * self.lagrange_basis_last);
}
}
@@ -96,7 +96,7 @@ pub struct RecursiveConstraintConsumer<F: RichField + Extendable<D>, const D: usize> {
}
impl<F: RichField + Extendable<D>, const D: usize> RecursiveConstraintConsumer<F, D> {
- pub fn new(
+ pub(crate) fn new(
zero: ExtensionTarget<D>,
alphas: Vec<F>,
z_last: ExtensionTarget<D>,
@@ -113,12 +113,12 @@ impl<F: RichField + Extendable<D>, const D: usize> RecursiveConstraintConsumer<F, D>
- pub fn accumulators(self) -> Vec<ExtensionTarget<D>> {
+ pub(crate) fn accumulators(self) -> Vec<ExtensionTarget<D>> {
self.constraint_accs
}
/// Add one constraint valid on all rows except the last.
- pub fn constraint_transition(
+ pub(crate) fn constraint_transition(
&mut self,
builder: &mut CircuitBuilder<F, D>,
constraint: ExtensionTarget<D>,
@@ -128,7 +128,7 @@ impl<F: RichField + Extendable<D>, const D: usize> RecursiveConstraintConsumer<F, D>
constraint: ExtensionTarget<D>,
@@ -140,7 +140,7 @@ impl<F: RichField + Extendable<D>, const D: usize> RecursiveConstraintConsumer<F, D>
constraint: ExtensionTarget<D>,
@@ -151,7 +151,7 @@ impl<F: RichField + Extendable<D>, const D: usize> RecursiveConstraintConsumer<F, D>
constraint: ExtensionTarget<D>,
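The `constraint` methods above fold each emitted constraint into every accumulator as `acc <- acc * alpha + c`, one accumulator per challenge. A field-free sketch of that folding (plain `u64` stands in for the packed field type `P`; illustrative only):

    // Toy stand-in for ConstraintConsumer: u64 replaces the packed field P.
    struct ToyConsumer {
        alphas: Vec<u64>,
        constraint_accs: Vec<u64>,
    }

    impl ToyConsumer {
        // Mirrors `constraint`: each accumulator keeps a running random
        // linear combination of all constraints emitted so far.
        fn constraint(&mut self, c: u64) {
            for (&alpha, acc) in self.alphas.iter().zip(&mut self.constraint_accs) {
                *acc = acc.wrapping_mul(alpha).wrapping_add(c);
            }
        }
    }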
diff --git a/evm/src/cpu/bootstrap_kernel.rs b/evm/src/cpu/bootstrap_kernel.rs
deleted file mode 100644
index 759c852aae..0000000000
--- a/evm/src/cpu/bootstrap_kernel.rs
+++ /dev/null
@@ -1,161 +0,0 @@
-//! The initial phase of execution, where the kernel code is hashed while being written to memory.
-//! The hash is then checked against a precomputed kernel hash.
-
-use itertools::Itertools;
-use plonky2::field::extension::Extendable;
-use plonky2::field::packed::PackedField;
-use plonky2::field::types::Field;
-use plonky2::hash::hash_types::RichField;
-use plonky2::iop::ext_target::ExtensionTarget;
-use plonky2::plonk::circuit_builder::CircuitBuilder;
-
-use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
-use crate::cpu::columns::CpuColumnsView;
-use crate::cpu::kernel::aggregator::KERNEL;
-use crate::cpu::membus::NUM_GP_CHANNELS;
-use crate::generation::state::GenerationState;
-use crate::memory::segments::Segment;
-use crate::witness::memory::MemoryAddress;
-use crate::witness::util::{keccak_sponge_log, mem_write_gp_log_and_fill};
-
-pub(crate) fn generate_bootstrap_kernel<F: Field>(state: &mut GenerationState<F>) {
- // Iterate through chunks of the code, such that we can write one chunk to memory per row.
- for chunk in &KERNEL.code.iter().enumerate().chunks(NUM_GP_CHANNELS) {
- let mut cpu_row = CpuColumnsView::default();
- cpu_row.clock = F::from_canonical_usize(state.traces.clock());
- cpu_row.is_bootstrap_kernel = F::ONE;
-
- // Write this chunk to memory, while simultaneously packing its bytes into a u32 word.
- for (channel, (addr, &byte)) in chunk.enumerate() {
- let address = MemoryAddress::new(0, Segment::Code, addr);
- let write =
- mem_write_gp_log_and_fill(channel, address, state, &mut cpu_row, byte.into());
- state.traces.push_memory(write);
- }
-
- state.traces.push_cpu(cpu_row);
- }
-
- let mut final_cpu_row = CpuColumnsView::default();
- final_cpu_row.clock = F::from_canonical_usize(state.traces.clock());
- final_cpu_row.is_bootstrap_kernel = F::ONE;
- final_cpu_row.is_keccak_sponge = F::ONE;
- // The Keccak sponge CTL uses memory value columns for its inputs and outputs.
- final_cpu_row.mem_channels[0].value[0] = F::ZERO; // context
- final_cpu_row.mem_channels[1].value[0] = F::from_canonical_usize(Segment::Code as usize); // segment
- final_cpu_row.mem_channels[2].value[0] = F::ZERO; // virt
- final_cpu_row.mem_channels[3].value[0] = F::from_canonical_usize(KERNEL.code.len()); // len
- final_cpu_row.mem_channels[4].value = KERNEL.code_hash.map(F::from_canonical_u32);
- final_cpu_row.mem_channels[4].value.reverse();
- keccak_sponge_log(
- state,
- MemoryAddress::new(0, Segment::Code, 0),
- KERNEL.code.clone(),
- );
- state.traces.push_cpu(final_cpu_row);
- log::info!("Bootstrapping took {} cycles", state.traces.clock());
-}
-
-pub(crate) fn eval_bootstrap_kernel_packed<F: Field, P: PackedField<Scalar = F>>(
- local_values: &CpuColumnsView<P>,
- next_values: &CpuColumnsView<P>,
- yield_constr: &mut ConstraintConsumer<P>,
-) {
- // IS_BOOTSTRAP_KERNEL must have an init value of 1, a final value of 0, and a delta in {0, -1}.
- let local_is_bootstrap = local_values.is_bootstrap_kernel;
- let next_is_bootstrap = next_values.is_bootstrap_kernel;
- yield_constr.constraint_first_row(local_is_bootstrap - P::ONES);
- yield_constr.constraint_last_row(local_is_bootstrap);
- let delta_is_bootstrap = next_is_bootstrap - local_is_bootstrap;
- yield_constr.constraint_transition(delta_is_bootstrap * (delta_is_bootstrap + P::ONES));
-
- // If this is a bootloading row and the i'th memory channel is used, it must have the right
- // address, namely context = 0, segment = Code, virt = clock * NUM_GP_CHANNELS + i.
- let code_segment = F::from_canonical_usize(Segment::Code as usize);
- for (i, channel) in local_values.mem_channels.iter().enumerate() {
- let filter = local_is_bootstrap * channel.used;
- yield_constr.constraint(filter * channel.addr_context);
- yield_constr.constraint(filter * (channel.addr_segment - code_segment));
- let expected_virt = local_values.clock * F::from_canonical_usize(NUM_GP_CHANNELS)
- + F::from_canonical_usize(i);
- yield_constr.constraint(filter * (channel.addr_virtual - expected_virt));
- }
-
- // If this is the final bootstrap row (i.e. delta_is_bootstrap = 1), check that
- // - all memory channels are disabled
- // - the current kernel hash matches a precomputed one
- for channel in local_values.mem_channels.iter() {
- yield_constr.constraint_transition(delta_is_bootstrap * channel.used);
- }
- for (&expected, actual) in KERNEL
- .code_hash
- .iter()
- .rev()
- .zip(local_values.mem_channels.last().unwrap().value)
- {
- let expected = P::from(F::from_canonical_u32(expected));
- let diff = expected - actual;
- yield_constr.constraint_transition(delta_is_bootstrap * diff);
- }
-}
-
-pub(crate) fn eval_bootstrap_kernel_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
- builder: &mut CircuitBuilder<F, D>,
- local_values: &CpuColumnsView<ExtensionTarget<D>>,
- next_values: &CpuColumnsView<ExtensionTarget<D>>,
- yield_constr: &mut RecursiveConstraintConsumer<F, D>,
-) {
- let one = builder.one_extension();
-
- // IS_BOOTSTRAP_KERNEL must have an init value of 1, a final value of 0, and a delta in {0, -1}.
- let local_is_bootstrap = local_values.is_bootstrap_kernel;
- let next_is_bootstrap = next_values.is_bootstrap_kernel;
- let constraint = builder.sub_extension(local_is_bootstrap, one);
- yield_constr.constraint_first_row(builder, constraint);
- yield_constr.constraint_last_row(builder, local_is_bootstrap);
- let delta_is_bootstrap = builder.sub_extension(next_is_bootstrap, local_is_bootstrap);
- let constraint =
- builder.mul_add_extension(delta_is_bootstrap, delta_is_bootstrap, delta_is_bootstrap);
- yield_constr.constraint_transition(builder, constraint);
-
- // If this is a bootloading row and the i'th memory channel is used, it must have the right
- // address, namely context = 0, segment = Code, virt = clock * NUM_GP_CHANNELS + i.
- let code_segment =
- builder.constant_extension(F::Extension::from_canonical_usize(Segment::Code as usize));
- for (i, channel) in local_values.mem_channels.iter().enumerate() {
- let filter = builder.mul_extension(local_is_bootstrap, channel.used);
- let constraint = builder.mul_extension(filter, channel.addr_context);
- yield_constr.constraint(builder, constraint);
-
- let segment_diff = builder.sub_extension(channel.addr_segment, code_segment);
- let constraint = builder.mul_extension(filter, segment_diff);
- yield_constr.constraint(builder, constraint);
-
- let i_ext = builder.constant_extension(F::Extension::from_canonical_usize(i));
- let num_gp_channels_f = F::from_canonical_usize(NUM_GP_CHANNELS);
- let expected_virt =
- builder.mul_const_add_extension(num_gp_channels_f, local_values.clock, i_ext);
- let virt_diff = builder.sub_extension(channel.addr_virtual, expected_virt);
- let constraint = builder.mul_extension(filter, virt_diff);
- yield_constr.constraint(builder, constraint);
- }
-
- // If this is the final bootstrap row (i.e. delta_is_bootstrap = 1), check that
- // - all memory channels are disabled
- // - the current kernel hash matches a precomputed one
- for channel in local_values.mem_channels.iter() {
- let constraint = builder.mul_extension(delta_is_bootstrap, channel.used);
- yield_constr.constraint_transition(builder, constraint);
- }
- for (&expected, actual) in KERNEL
- .code_hash
- .iter()
- .rev()
- .zip(local_values.mem_channels.last().unwrap().value)
- {
- let expected = builder.constant_extension(F::Extension::from_canonical_u32(expected));
- let diff = builder.sub_extension(expected, actual);
- let constraint = builder.mul_extension(delta_is_bootstrap, diff);
- yield_constr.constraint_transition(builder, constraint);
- }
-}
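For reference, the deleted `IS_BOOTSTRAP_KERNEL` constraints rely on a standard trick: writing `delta = next - local`, the polynomial `delta * (delta + 1)` vanishes exactly when `delta` is 0 or -1, so the flag may stay constant or drop from 1 to 0, but can never rise again once cleared. A quick sketch over signed integers (the real constraint is evaluated over the STARK's field):

    // delta * (delta + 1) == 0 iff delta is 0 or -1.
    fn delta_constraint(delta: i64) -> i64 {
        delta * (delta + 1)
    }

    fn main() {
        assert_eq!(delta_constraint(0), 0); // flag unchanged
        assert_eq!(delta_constraint(-1), 0); // flag drops from 1 to 0
        assert_ne!(delta_constraint(1), 0); // the flag may never rise 0 -> 1
    }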
diff --git a/evm/src/cpu/byte_unpacking.rs b/evm/src/cpu/byte_unpacking.rs
new file mode 100644
index 0000000000..39053141d6
--- /dev/null
+++ b/evm/src/cpu/byte_unpacking.rs
@@ -0,0 +1,94 @@
+use plonky2::field::extension::Extendable;
+use plonky2::field::packed::PackedField;
+use plonky2::field::types::Field;
+use plonky2::hash::hash_types::RichField;
+use plonky2::iop::ext_target::ExtensionTarget;
+use plonky2::plonk::circuit_builder::CircuitBuilder;
+
+use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
+use crate::cpu::columns::CpuColumnsView;
+
+pub(crate) fn eval_packed<P: PackedField>(
+ lv: &CpuColumnsView<P>,
+ nv: &CpuColumnsView<P>,
+ yield_constr: &mut ConstraintConsumer<P>,
+) {
+ // The MSTORE_32BYTES opcodes are differentiated from MLOAD_32BYTES
+ // by their 5th bit, which is set to 0 for stores.
+ let filter = lv.op.m_op_32bytes * (lv.opcode_bits[5] - P::ONES);
+
+ // The address to write to is stored in the first memory channel.
+ // It contains virt, segment, ctx in its first 3 limbs, and 0 otherwise.
+ // The new address is identical, except for its `virtual` limb, which is increased by the corresponding `len` offset.
+ let new_addr = nv.mem_channels[0].value;
+ let written_addr = lv.mem_channels[0].value;
+
+ // Read len from opcode bits and constrain the pushed new offset.
+ let len_bits: P = lv.opcode_bits[..5]
+ .iter()
+ .enumerate()
+ .map(|(i, &bit)| bit * P::Scalar::from_canonical_u64(1 << i))
+ .sum();
+ let len = len_bits + P::ONES;
+
+ // Check that `virt` is increased properly.
+ yield_constr.constraint(filter * (new_addr[0] - written_addr[0] - len));
+
+ // Check that `segment` and `ctx` do not change.
+ yield_constr.constraint(filter * (new_addr[1] - written_addr[1]));
+ yield_constr.constraint(filter * (new_addr[2] - written_addr[2]));
+
+ // Check that the rest of the returned address is null.
+ for &limb in &new_addr[3..] {
+ yield_constr.constraint(filter * limb);
+ }
+}
+
+pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
+ builder: &mut CircuitBuilder<F, D>,
+ lv: &CpuColumnsView<ExtensionTarget<D>>,
+ nv: &CpuColumnsView<ExtensionTarget<D>>