Merge branch 'main' into real_prevrandao
# Conflicts:
#	evm/src/cpu/kernel/constants/global_metadata.rs
#	evm/src/get_challenges.rs
#	evm/src/proof.rs
#	evm/src/recursive_verifier.rs
wborgeaud committed Sep 25, 2023
2 parents 3c2adf5 + 0abc3b9; commit 45e6cda
Showing 109 changed files with 4,854 additions and 1,826 deletions.
8 changes: 4 additions & 4 deletions .github/workflows/continuous-integration-workflow.yml
@@ -24,7 +24,7 @@ jobs:
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly-2023-06-30
toolchain: nightly
override: true

- name: rust-cache
@@ -95,7 +95,7 @@ jobs:
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly-2023-06-30
toolchain: nightly
override: true
components: rustfmt, clippy

@@ -124,5 +124,5 @@ jobs:
command: clippy
args: --all-features --all-targets -- -D warnings -A incomplete-features
env:
CARGO_INCREMENTAL: 1

# Seems necessary until https://github.com/rust-lang/rust/pull/115819 is merged.
CARGO_INCREMENTAL: 0
9 changes: 0 additions & 9 deletions Cargo.toml
@@ -10,12 +10,3 @@ incremental = true

[profile.bench]
opt-level = 3

[patch.crates-io]
eth_trie_utils = { git = "https://github.com/mir-protocol/eth_trie_utils.git", rev = "e9ec4ec2aa2ae976b7c699ef40c1ffc716d87ed5" }
plonky2_evm = { path = "evm" }
plonky2_field = { path = "field" }
plonky2_maybe_rayon = { path = "maybe_rayon" }
plonky2 = { path = "plonky2" }
starky = { path = "starky" }
plonky2_util = { path = "util" }
8 changes: 4 additions & 4 deletions evm/Cargo.toml
@@ -13,21 +13,21 @@ edition = "2021"
anyhow = "1.0.40"
bytes = "1.4.0"
env_logger = "0.10.0"
eth_trie_utils = "0.6.0"
eth_trie_utils = { git = "https://github.com/mir-protocol/eth_trie_utils.git", rev = "e9ec4ec2aa2ae976b7c699ef40c1ffc716d87ed5" }
ethereum-types = "0.14.0"
hex = { version = "0.4.3", optional = true }
hex-literal = "0.4.1"
itertools = "0.11.0"
keccak-hash = "0.10.0"
log = "0.4.14"
plonky2_maybe_rayon = "0.1.1"
plonky2_maybe_rayon = { path = "../maybe_rayon" }
num = "0.4.0"
num-bigint = "0.4.3"
once_cell = "1.13.0"
pest = "2.1.3"
pest_derive = "2.1.0"
plonky2 = { version = "0.1.4", default-features = false, features = ["timing"] }
plonky2_util = { version = "0.1.1" }
plonky2 = { path = "../plonky2", default-features = false, features = ["timing"] }
plonky2_util = { path = "../util" }
rand = "0.8.5"
rand_chacha = "0.3.1"
rlp = "0.5.1"
48 changes: 43 additions & 5 deletions evm/src/all_stark.rs
@@ -6,6 +6,7 @@ use plonky2::hash::hash_types::RichField;

use crate::arithmetic::arithmetic_stark;
use crate::arithmetic::arithmetic_stark::ArithmeticStark;
use crate::byte_packing::byte_packing_stark::{self, BytePackingStark};
use crate::config::StarkConfig;
use crate::cpu::cpu_stark;
use crate::cpu::cpu_stark::CpuStark;
@@ -25,6 +26,7 @@ use crate::stark::Stark;
#[derive(Clone)]
pub struct AllStark<F: RichField + Extendable<D>, const D: usize> {
pub arithmetic_stark: ArithmeticStark<F, D>,
pub byte_packing_stark: BytePackingStark<F, D>,
pub cpu_stark: CpuStark<F, D>,
pub keccak_stark: KeccakStark<F, D>,
pub keccak_sponge_stark: KeccakSpongeStark<F, D>,
@@ -37,6 +39,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Default for AllStark<F, D> {
fn default() -> Self {
Self {
arithmetic_stark: ArithmeticStark::default(),
byte_packing_stark: BytePackingStark::default(),
cpu_stark: CpuStark::default(),
keccak_stark: KeccakStark::default(),
keccak_sponge_stark: KeccakSpongeStark::default(),
@@ -51,6 +54,7 @@ impl<F: RichField + Extendable<D>, const D: usize> AllStark<F, D> {
pub(crate) fn nums_permutation_zs(&self, config: &StarkConfig) -> [usize; NUM_TABLES] {
[
self.arithmetic_stark.num_permutation_batches(config),
self.byte_packing_stark.num_permutation_batches(config),
self.cpu_stark.num_permutation_batches(config),
self.keccak_stark.num_permutation_batches(config),
self.keccak_sponge_stark.num_permutation_batches(config),
@@ -62,6 +66,7 @@ impl<F: RichField + Extendable<D>, const D: usize> AllStark<F, D> {
pub(crate) fn permutation_batch_sizes(&self) -> [usize; NUM_TABLES] {
[
self.arithmetic_stark.permutation_batch_size(),
self.byte_packing_stark.permutation_batch_size(),
self.cpu_stark.permutation_batch_size(),
self.keccak_stark.permutation_batch_size(),
self.keccak_sponge_stark.permutation_batch_size(),
@@ -74,11 +79,12 @@ impl<F: RichField + Extendable<D>, const D: usize> AllStark<F, D> {
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Table {
Arithmetic = 0,
Cpu = 1,
Keccak = 2,
KeccakSponge = 3,
Logic = 4,
Memory = 5,
BytePacking = 1,
Cpu = 2,
Keccak = 3,
KeccakSponge = 4,
Logic = 5,
Memory = 6,
}

pub(crate) const NUM_TABLES: usize = Table::Memory as usize + 1;
@@ -87,6 +93,7 @@ impl Table {
pub(crate) fn all() -> [Self; NUM_TABLES] {
[
Self::Arithmetic,
Self::BytePacking,
Self::Cpu,
Self::Keccak,
Self::KeccakSponge,
@@ -99,6 +106,7 @@ impl Table {
pub(crate) fn all_cross_table_lookups<F: Field>() -> Vec<CrossTableLookup<F>> {
vec![
ctl_arithmetic(),
ctl_byte_packing(),
ctl_keccak_sponge(),
ctl_keccak(),
ctl_logic(),
@@ -116,6 +124,28 @@ fn ctl_arithmetic<F: Field>() -> CrossTableLookup<F> {
)
}

fn ctl_byte_packing<F: Field>() -> CrossTableLookup<F> {
let cpu_packing_looking = TableWithColumns::new(
Table::Cpu,
cpu_stark::ctl_data_byte_packing(),
Some(cpu_stark::ctl_filter_byte_packing()),
);
let cpu_unpacking_looking = TableWithColumns::new(
Table::Cpu,
cpu_stark::ctl_data_byte_unpacking(),
Some(cpu_stark::ctl_filter_byte_unpacking()),
);
let byte_packing_looked = TableWithColumns::new(
Table::BytePacking,
byte_packing_stark::ctl_looked_data(),
Some(byte_packing_stark::ctl_looked_filter()),
);
CrossTableLookup::new(
vec![cpu_packing_looking, cpu_unpacking_looking],
byte_packing_looked,
)
}

fn ctl_keccak<F: Field>() -> CrossTableLookup<F> {
let keccak_sponge_looking = TableWithColumns::new(
Table::KeccakSponge,
@@ -184,9 +214,17 @@ fn ctl_memory<F: Field>() -> CrossTableLookup<F> {
Some(keccak_sponge_stark::ctl_looking_memory_filter(i)),
)
});
let byte_packing_ops = (0..32).map(|i| {
TableWithColumns::new(
Table::BytePacking,
byte_packing_stark::ctl_looking_memory(i),
Some(byte_packing_stark::ctl_looking_memory_filter(i)),
)
});
let all_lookers = iter::once(cpu_memory_code_read)
.chain(cpu_memory_gp_ops)
.chain(keccak_sponge_reads)
.chain(byte_packing_ops)
.collect();
let memory_looked = TableWithColumns::new(
Table::Memory,
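A quick aside on the all_stark.rs hunks above: the sketch below is a minimal, self-contained mirror of the enum shown in this diff (not the crate's full code). It shows why inserting `BytePacking` renumbers the later tables and bumps `NUM_TABLES` to 7, which is what forces every fixed-size per-table list in `AllStark` (permutation counts, batch sizes, `Table::all()`, the cross-table-lookup list) to gain a byte-packing entry at the same position.

```rust
// Standalone sketch; the enum values are copied from the diff above.
#[allow(dead_code)]
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Table {
    Arithmetic = 0,
    BytePacking = 1, // newly inserted, shifting everything below it down by one
    Cpu = 2,
    Keccak = 3,
    KeccakSponge = 4,
    Logic = 5,
    Memory = 6,
}

// Same definition as in all_stark.rs: one slot per table.
const NUM_TABLES: usize = Table::Memory as usize + 1;

fn main() {
    assert_eq!(NUM_TABLES, 7);
    // Any [T; NUM_TABLES] array (permutation counts, batch sizes, CTL tables)
    // must now list its byte-packing entry second, right after Arithmetic.
    assert_eq!(Table::BytePacking as usize, 1);
}
```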
78 changes: 51 additions & 27 deletions evm/src/arithmetic/arithmetic_stark.rs
@@ -7,18 +7,20 @@ use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::util::transpose;
use static_assertions::const_assert;

use super::columns::NUM_ARITH_COLUMNS;
use crate::all_stark::Table;
use crate::arithmetic::{addcy, byte, columns, divmod, modular, mul, Operation};
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{Column, TableWithColumns};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::lookup::{eval_lookups, eval_lookups_circuit, permuted_cols};
use crate::permutation::PermutationPair;
use crate::stark::Stark;
use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};

/// Link the 16-bit columns of the arithmetic table, split into groups
/// of N_LIMBS at a time in `regs`, with the corresponding 32-bit
@@ -27,10 +29,17 @@ use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};
/// This is done by taking pairs of columns (x, y) of the arithmetic
/// table and combining them as x + y*2^16 to ensure they equal the
/// corresponding 32-bit number in the CPU table.
fn cpu_arith_data_link<F: Field>(ops: &[usize], regs: &[Range<usize>]) -> Vec<Column<F>> {
fn cpu_arith_data_link<F: Field>(
combined_ops: &[(usize, u8)],
regs: &[Range<usize>],
) -> Vec<Column<F>> {
let limb_base = F::from_canonical_u64(1 << columns::LIMB_BITS);

let mut res = Column::singles(ops).collect_vec();
let mut res = vec![Column::linear_combination(
combined_ops
.iter()
.map(|&(col, code)| (col, F::from_canonical_u8(code))),
)];

// The inner for loop below assumes N_LIMBS is even.
const_assert!(columns::N_LIMBS % 2 == 0);
@@ -49,21 +58,27 @@ fn cpu_arith_data_link<F: Field>(ops: &[usize], regs: &[Range<usize>]) -> Vec<Column<F>> {
}

pub fn ctl_arithmetic_rows<F: Field>() -> TableWithColumns<F> {
const ARITH_OPS: [usize; 14] = [
columns::IS_ADD,
columns::IS_SUB,
columns::IS_MUL,
columns::IS_LT,
columns::IS_GT,
columns::IS_ADDFP254,
columns::IS_MULFP254,
columns::IS_SUBFP254,
columns::IS_ADDMOD,
columns::IS_MULMOD,
columns::IS_SUBMOD,
columns::IS_DIV,
columns::IS_MOD,
columns::IS_BYTE,
// We scale each filter flag with the associated opcode value.
// If an arithmetic operation is happening on the CPU side,
// the CTL will enforce that the reconstructed opcode value
// from the opcode bits matches.
const COMBINED_OPS: [(usize, u8); 16] = [
(columns::IS_ADD, 0x01),
(columns::IS_MUL, 0x02),
(columns::IS_SUB, 0x03),
(columns::IS_DIV, 0x04),
(columns::IS_MOD, 0x06),
(columns::IS_ADDMOD, 0x08),
(columns::IS_MULMOD, 0x09),
(columns::IS_ADDFP254, 0x0c),
(columns::IS_MULFP254, 0x0d),
(columns::IS_SUBFP254, 0x0e),
(columns::IS_SUBMOD, 0x0f),
(columns::IS_LT, 0x10),
(columns::IS_GT, 0x11),
(columns::IS_BYTE, 0x1a),
(columns::IS_SHL, 0x1b),
(columns::IS_SHR, 0x1c),
];

const REGISTER_MAP: [Range<usize>; 4] = [
@@ -73,15 +88,17 @@ pub fn ctl_arithmetic_rows<F: Field>() -> TableWithColumns<F> {
columns::OUTPUT_REGISTER,
];

let filter_column = Some(Column::sum(COMBINED_OPS.iter().map(|(c, _v)| *c)));

// Create the Arithmetic Table whose columns are those of the
// operations listed in `ops` whose inputs and outputs are given
// by `regs`, where each element of `regs` is a range of columns
// corresponding to a 256-bit input or output register (also `ops`
// is used as the operation filter).
TableWithColumns::new(
Table::Arithmetic,
cpu_arith_data_link(&ARITH_OPS, &REGISTER_MAP),
Some(Column::sum(ARITH_OPS)),
cpu_arith_data_link(&COMBINED_OPS, &REGISTER_MAP),
filter_column,
)
}

@@ -153,11 +170,16 @@ impl<F: RichField, const D: usize> ArithmeticStark<F, D> {
}

impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticStark<F, D> {
const COLUMNS: usize = columns::NUM_ARITH_COLUMNS;
type EvaluationFrame<FE, P, const D2: usize> = StarkFrame<P, NUM_ARITH_COLUMNS>
where
FE: FieldExtension<D2, BaseField = F>,
P: PackedField<Scalar = FE>;

type EvaluationFrameTarget = StarkFrame<ExtensionTarget<D>, NUM_ARITH_COLUMNS>;

fn eval_packed_generic<FE, P, const D2: usize>(
&self,
vars: StarkEvaluationVars<FE, P, { Self::COLUMNS }>,
vars: &Self::EvaluationFrame<FE, P, D2>,
yield_constr: &mut ConstraintConsumer<P>,
) where
FE: FieldExtension<D2, BaseField = F>,
@@ -168,8 +190,8 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticStark<F, D> {
eval_lookups(vars, yield_constr, col, col + 1);
}

let lv = vars.local_values;
let nv = vars.next_values;
let lv: &[P; NUM_ARITH_COLUMNS] = vars.get_local_values().try_into().unwrap();
let nv: &[P; NUM_ARITH_COLUMNS] = vars.get_next_values().try_into().unwrap();

// Check the range column: First value must be 0, last row
// must be 2^16-1, and intermediate rows must increment by 0
@@ -192,16 +214,18 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticStark<F, D> {
fn eval_ext_circuit(
&self,
builder: &mut CircuitBuilder<F, D>,
vars: StarkEvaluationTargets<D, { Self::COLUMNS }>,
vars: &Self::EvaluationFrameTarget,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
// Range check all the columns
for col in columns::RC_COLS.step_by(2) {
eval_lookups_circuit(builder, vars, yield_constr, col, col + 1);
}

let lv = vars.local_values;
let nv = vars.next_values;
let lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS] =
vars.get_local_values().try_into().unwrap();
let nv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS] =
vars.get_next_values().try_into().unwrap();

let rc1 = lv[columns::RANGE_COUNTER];
let rc2 = nv[columns::RANGE_COUNTER];
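An aside on the two explanatory comments in this file (the x + y*2^16 limb linking in `cpu_arith_data_link`, and the opcode-scaled `COMBINED_OPS` filter): the snippet below is a rough standalone illustration with toy values and helper names of my own, not the crate's actual `Column` machinery.

```rust
// Illustrative only: shows the arithmetic behind the CTL column construction,
// not the real filter/linear-combination types from the crate.
const LIMB_BITS: u32 = 16;

// Two adjacent 16-bit arithmetic-table limbs (x, y) are linked to a single
// 32-bit CPU-table limb as x + y * 2^16.
fn combine_limb_pair(lo: u16, hi: u16) -> u32 {
    lo as u32 + (hi as u32) * (1u32 << LIMB_BITS)
}

// Each IS_* flag (0 or 1) is scaled by its opcode byte; since at most one flag
// is set per row, summing the scaled flags reconstructs the opcode that the
// CPU side must have decoded (0x01 for ADD, 0x02 for MUL, 0x1b for SHL, ...).
fn scaled_opcode(flags_and_codes: &[(u64, u8)]) -> u64 {
    flags_and_codes
        .iter()
        .map(|&(flag, code)| flag * code as u64)
        .sum()
}

fn main() {
    // 0xdead_beef split into low/high 16-bit limbs and recombined.
    assert_eq!(combine_limb_pair(0xbeef, 0xdead), 0xdead_beef);

    // A row where only the IS_SHL flag is active yields opcode 0x1b.
    let row = [(0u64, 0x01u8), (0, 0x02), (1, 0x1b), (0, 0x1c)];
    assert_eq!(scaled_opcode(&row), 0x1b);
}
```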
4 changes: 3 additions & 1 deletion evm/src/arithmetic/columns.rs
@@ -36,8 +36,10 @@ pub(crate) const IS_SUBMOD: usize = IS_SUBFP254 + 1;
pub(crate) const IS_LT: usize = IS_SUBMOD + 1;
pub(crate) const IS_GT: usize = IS_LT + 1;
pub(crate) const IS_BYTE: usize = IS_GT + 1;
pub(crate) const IS_SHL: usize = IS_BYTE + 1;
pub(crate) const IS_SHR: usize = IS_SHL + 1;

pub(crate) const START_SHARED_COLS: usize = IS_BYTE + 1;
pub(crate) const START_SHARED_COLS: usize = IS_SHR + 1;

/// Within the Arithmetic Unit, there are shared columns which can be
/// used by any arithmetic circuit, depending on which one is active
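One more small aside: a toy version of the "previous index plus one" chain that this hunk extends in columns.rs (the starting value 0 is a placeholder, not the real column number). Appending IS_SHL and IS_SHR pushes START_SHARED_COLS, and every shared column after it, up by two.

```rust
// Toy reproduction of the index chain; IS_BYTE's real index in the crate is
// larger, 0 is only a placeholder here.
const IS_BYTE: usize = 0;
const IS_SHL: usize = IS_BYTE + 1;
const IS_SHR: usize = IS_SHL + 1;
const START_SHARED_COLS: usize = IS_SHR + 1;

fn main() {
    // Before this change START_SHARED_COLS was IS_BYTE + 1; now it is IS_BYTE + 3.
    assert_eq!(START_SHARED_COLS, IS_BYTE + 3);
}
```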