Skip to content

Commit

Permalink
Merge branch 'main' into top_stack_no_cols
Browse files Browse the repository at this point in the history
  • Loading branch information
hratoanina committed Sep 25, 2023
2 parents 9d0cb88 + 0abc3b9 commit 7320731
Show file tree
Hide file tree
Showing 45 changed files with 1,465 additions and 1,360 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/continuous-integration-workflow.yml
Original file line number Diff line number Diff line change
Expand Up @@ -124,5 +124,5 @@ jobs:
command: clippy
args: --all-features --all-targets -- -D warnings -A incomplete-features
env:
CARGO_INCREMENTAL: 1

# Seems necessary until https://github.com/rust-lang/rust/pull/115819 is merged.
CARGO_INCREMENTAL: 0
25 changes: 17 additions & 8 deletions evm/src/arithmetic/arithmetic_stark.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,18 +7,20 @@ use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::util::transpose;
use static_assertions::const_assert;

use super::columns::NUM_ARITH_COLUMNS;
use crate::all_stark::Table;
use crate::arithmetic::{addcy, byte, columns, divmod, modular, mul, Operation};
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{Column, TableWithColumns};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::lookup::{eval_lookups, eval_lookups_circuit, permuted_cols};
use crate::permutation::PermutationPair;
use crate::stark::Stark;
use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};

/// Link the 16-bit columns of the arithmetic table, split into groups
/// of N_LIMBS at a time in `regs`, with the corresponding 32-bit
Expand Down Expand Up @@ -168,11 +170,16 @@ impl<F: RichField, const D: usize> ArithmeticStark<F, D> {
}

impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticStark<F, D> {
const COLUMNS: usize = columns::NUM_ARITH_COLUMNS;
type EvaluationFrame<FE, P, const D2: usize> = StarkFrame<P, NUM_ARITH_COLUMNS>
where
FE: FieldExtension<D2, BaseField = F>,
P: PackedField<Scalar = FE>;

type EvaluationFrameTarget = StarkFrame<ExtensionTarget<D>, NUM_ARITH_COLUMNS>;

fn eval_packed_generic<FE, P, const D2: usize>(
&self,
vars: StarkEvaluationVars<FE, P, { Self::COLUMNS }>,
vars: &Self::EvaluationFrame<FE, P, D2>,
yield_constr: &mut ConstraintConsumer<P>,
) where
FE: FieldExtension<D2, BaseField = F>,
Expand All @@ -183,8 +190,8 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticSta
eval_lookups(vars, yield_constr, col, col + 1);
}

let lv = vars.local_values;
let nv = vars.next_values;
let lv: &[P; NUM_ARITH_COLUMNS] = vars.get_local_values().try_into().unwrap();
let nv: &[P; NUM_ARITH_COLUMNS] = vars.get_next_values().try_into().unwrap();

// Check the range column: First value must be 0, last row
// must be 2^16-1, and intermediate rows must increment by 0
Expand All @@ -207,16 +214,18 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticSta
fn eval_ext_circuit(
&self,
builder: &mut CircuitBuilder<F, D>,
vars: StarkEvaluationTargets<D, { Self::COLUMNS }>,
vars: &Self::EvaluationFrameTarget,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
// Range check all the columns
for col in columns::RC_COLS.step_by(2) {
eval_lookups_circuit(builder, vars, yield_constr, col, col + 1);
}

let lv = vars.local_values;
let nv = vars.next_values;
let lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS] =
vars.get_local_values().try_into().unwrap();
let nv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS] =
vars.get_next_values().try_into().unwrap();

let rc1 = lv[columns::RANGE_COUNTER];
let rc2 = nv[columns::RANGE_COUNTER];
Expand Down
203 changes: 95 additions & 108 deletions evm/src/byte_packing/byte_packing_stark.rs

Large diffs are not rendered by default.

9 changes: 3 additions & 6 deletions evm/src/byte_packing/columns.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,8 @@ pub(crate) const fn index_bytes(i: usize) -> usize {
BYTES_INDICES_START + i
}

// Note: Those are used as filter for distinguishing active vs padding rows.
// Note: These are used as filters for distinguishing active vs padding rows,
// and also to obtain the length of a sequence of bytes being processed.
pub(crate) const BYTE_INDICES_COLS: Range<usize> =
BYTES_INDICES_START..BYTES_INDICES_START + NUM_BYTES;

Expand All @@ -25,12 +26,8 @@ pub(crate) const ADDR_SEGMENT: usize = ADDR_CONTEXT + 1;
pub(crate) const ADDR_VIRTUAL: usize = ADDR_SEGMENT + 1;
pub(crate) const TIMESTAMP: usize = ADDR_VIRTUAL + 1;

/// The total length of a sequence of bytes.
/// Cannot be greater than 32.
pub(crate) const SEQUENCE_LEN: usize = TIMESTAMP + 1;

// 32 byte limbs hold a total of 256 bits.
const BYTES_VALUES_START: usize = SEQUENCE_LEN + 1;
const BYTES_VALUES_START: usize = TIMESTAMP + 1;
pub(crate) const fn value_bytes(i: usize) -> usize {
debug_assert!(i < NUM_BYTES);
BYTES_VALUES_START + i
Expand Down
33 changes: 14 additions & 19 deletions evm/src/cpu/bootstrap_kernel.rs
Original file line number Diff line number Diff line change
@@ -1,22 +1,20 @@
//! The initial phase of execution, where the kernel code is hashed while being written to memory.
//! The hash is then checked against a precomputed kernel hash.
use std::borrow::Borrow;

use itertools::Itertools;
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;

use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::{CpuColumnsView, NUM_CPU_COLUMNS};
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::generation::state::GenerationState;
use crate::memory::segments::Segment;
use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};
use crate::witness::memory::MemoryAddress;
use crate::witness::util::{keccak_sponge_log, mem_write_gp_log_and_fill};

Expand All @@ -25,6 +23,7 @@ pub(crate) fn generate_bootstrap_kernel<F: Field>(state: &mut GenerationState<F>
for chunk in &KERNEL.code.iter().enumerate().chunks(NUM_GP_CHANNELS) {
let mut cpu_row = CpuColumnsView::default();
cpu_row.clock = F::from_canonical_usize(state.traces.clock());
cpu_row.is_bootstrap_kernel = F::ONE;

// Write this chunk to memory, while simultaneously packing its bytes into a u32 word.
for (channel, (addr, &byte)) in chunk.enumerate() {
Expand All @@ -39,6 +38,7 @@ pub(crate) fn generate_bootstrap_kernel<F: Field>(state: &mut GenerationState<F>

let mut final_cpu_row = CpuColumnsView::default();
final_cpu_row.clock = F::from_canonical_usize(state.traces.clock());
final_cpu_row.is_bootstrap_kernel = F::ONE;
final_cpu_row.is_keccak_sponge = F::ONE;
// The Keccak sponge CTL uses memory value columns for its inputs and outputs.
final_cpu_row.mem_channels[0].value[0] = F::ZERO; // context
Expand All @@ -56,16 +56,14 @@ pub(crate) fn generate_bootstrap_kernel<F: Field>(state: &mut GenerationState<F>
log::info!("Bootstrapping took {} cycles", state.traces.clock());
}

pub(crate) fn eval_bootstrap_kernel<F: Field, P: PackedField<Scalar = F>>(
vars: StarkEvaluationVars<F, P, NUM_CPU_COLUMNS>,
pub(crate) fn eval_bootstrap_kernel_packed<F: Field, P: PackedField<Scalar = F>>(
local_values: &CpuColumnsView<P>,
next_values: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
let local_values: &CpuColumnsView<_> = vars.local_values.borrow();
let next_values: &CpuColumnsView<_> = vars.next_values.borrow();

// IS_BOOTSTRAP_KERNEL must have an init value of 1, a final value of 0, and a delta in {0, -1}.
let local_is_bootstrap = P::ONES - local_values.op.into_iter().sum::<P>();
let next_is_bootstrap = P::ONES - next_values.op.into_iter().sum::<P>();
let local_is_bootstrap = local_values.is_bootstrap_kernel;
let next_is_bootstrap = next_values.is_bootstrap_kernel;
yield_constr.constraint_first_row(local_is_bootstrap - P::ONES);
yield_constr.constraint_last_row(local_is_bootstrap);
let delta_is_bootstrap = next_is_bootstrap - local_is_bootstrap;
Expand Down Expand Up @@ -101,20 +99,17 @@ pub(crate) fn eval_bootstrap_kernel<F: Field, P: PackedField<Scalar = F>>(
}
}

pub(crate) fn eval_bootstrap_kernel_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_bootstrap_kernel_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
vars: StarkEvaluationTargets<D, NUM_CPU_COLUMNS>,
local_values: &CpuColumnsView<ExtensionTarget<D>>,
next_values: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let local_values: &CpuColumnsView<_> = vars.local_values.borrow();
let next_values: &CpuColumnsView<_> = vars.next_values.borrow();
let one = builder.one_extension();

// IS_BOOTSTRAP_KERNEL must have an init value of 1, a final value of 0, and a delta in {0, -1}.
let local_is_bootstrap = builder.add_many_extension(local_values.op.iter());
let local_is_bootstrap = builder.sub_extension(one, local_is_bootstrap);
let next_is_bootstrap = builder.add_many_extension(next_values.op.iter());
let next_is_bootstrap = builder.sub_extension(one, next_is_bootstrap);
let local_is_bootstrap = local_values.is_bootstrap_kernel;
let next_is_bootstrap = next_values.is_bootstrap_kernel;
let constraint = builder.sub_extension(local_is_bootstrap, one);
yield_constr.constraint_first_row(builder, constraint);
yield_constr.constraint_last_row(builder, local_is_bootstrap);
Expand Down
3 changes: 3 additions & 0 deletions evm/src/cpu/columns/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,9 @@ pub struct MemoryChannelView<T: Copy> {
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub struct CpuColumnsView<T: Copy> {
/// Filter. 1 if the row is part of bootstrapping the kernel code, 0 otherwise.
pub is_bootstrap_kernel: T,

/// If CPU cycle: Current context.
// TODO: this is currently unconstrained
pub context: T,
Expand Down
72 changes: 24 additions & 48 deletions evm/src/cpu/control_flow.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,14 +34,9 @@ const NATIVE_INSTRUCTIONS: [usize; 18] = [
// not exceptions (also jump)
];

pub(crate) fn get_halt_pcs<F: Field>() -> (F, F) {
let halt_pc0 = KERNEL.global_labels["halt_pc0"];
let halt_pc1 = KERNEL.global_labels["halt_pc1"];

(
F::from_canonical_usize(halt_pc0),
F::from_canonical_usize(halt_pc1),
)
/// Returns the program counter of the kernel's global `halt` label as a field
/// element, for constraining the PC on halting rows.
///
/// Looks the label up by indexing `KERNEL.global_labels["halt"]`; if that is a
/// map index, a missing `halt` label would panic here — presumably the label is
/// guaranteed by kernel assembly (TODO confirm against `KERNEL` construction).
pub(crate) fn get_halt_pc<F: Field>() -> F {
let halt_pc = KERNEL.global_labels["halt"];
F::from_canonical_usize(halt_pc)
}

pub(crate) fn get_start_pc<F: Field>() -> F {
Expand All @@ -57,8 +52,15 @@ pub fn eval_packed_generic<P: PackedField>(
) {
let is_cpu_cycle: P = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum();
let is_cpu_cycle_next: P = COL_MAP.op.iter().map(|&col_i| nv[col_i]).sum();
// Once we start executing instructions, then we continue until the end of the table.
yield_constr.constraint_transition(is_cpu_cycle * (is_cpu_cycle_next - P::ONES));

let next_halt_state = P::ONES - nv.is_bootstrap_kernel - is_cpu_cycle_next;

// Once we start executing instructions, we continue until the end of the table
// or until we reach dummy padding rows. This, along with the constraints on the
// first row, enforces that the operation flags and the halt flag are mutually
// exclusive over the entire CPU trace.
yield_constr
.constraint_transition(is_cpu_cycle * (is_cpu_cycle_next + next_halt_state - P::ONES));

// If a row is a CPU cycle and executing a native instruction (implemented as a table row; not
// microcoded) then the program counter is incremented by 1 to obtain the next row's program
Expand All @@ -79,16 +81,6 @@ pub fn eval_packed_generic<P: PackedField>(
yield_constr.constraint_transition(is_last_noncpu_cycle * pc_diff);
yield_constr.constraint_transition(is_last_noncpu_cycle * (nv.is_kernel_mode - P::ONES));
yield_constr.constraint_transition(is_last_noncpu_cycle * nv.stack_len);

// The last row must be a CPU cycle row.
yield_constr.constraint_last_row(is_cpu_cycle - P::ONES);
// Also, the last row's `program_counter` must be inside the `halt` infinite loop. Note that
// that loop consists of two instructions, so we must check for `halt` and `halt_inner` labels.
let (halt_pc0, halt_pc1) = get_halt_pcs::<P::Scalar>();
yield_constr
.constraint_last_row((lv.program_counter - halt_pc0) * (lv.program_counter - halt_pc1));
// Finally, the last row must be in kernel mode.
yield_constr.constraint_last_row(lv.is_kernel_mode - P::ONES);
}

pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
Expand All @@ -97,11 +89,21 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
nv: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let one = builder.one_extension();

let is_cpu_cycle = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| lv[col_i]));
let is_cpu_cycle_next = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| nv[col_i]));
// Once we start executing instructions, then we continue until the end of the table.

let next_halt_state = builder.add_extension(nv.is_bootstrap_kernel, is_cpu_cycle_next);
let next_halt_state = builder.sub_extension(one, next_halt_state);

// Once we start executing instructions, we continue until the end of the table
// or until we reach dummy padding rows. This, along with the constraints on the
// first row, enforces that the operation flags and the halt flag are mutually
// exclusive over the entire CPU trace.
{
let constr = builder.mul_sub_extension(is_cpu_cycle, is_cpu_cycle_next, is_cpu_cycle);
let constr = builder.add_extension(is_cpu_cycle_next, next_halt_state);
let constr = builder.mul_sub_extension(is_cpu_cycle, constr, is_cpu_cycle);
yield_constr.constraint_transition(builder, constr);
}

Expand Down Expand Up @@ -144,30 +146,4 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
let kernel_constr = builder.mul_extension(is_last_noncpu_cycle, nv.stack_len);
yield_constr.constraint_transition(builder, kernel_constr);
}

// The last row must be a CPU cycle row.
{
let one = builder.one_extension();
let constr = builder.sub_extension(is_cpu_cycle, one);
yield_constr.constraint_last_row(builder, constr);
}
// Also, the last row's `program_counter` must be inside the `halt` infinite loop. Note that
// that loop consists of two instructions, so we must check for `halt` and `halt_inner` labels.
{
let (halt_pc0, halt_pc1) = get_halt_pcs();
let halt_pc0_target = builder.constant_extension(halt_pc0);
let halt_pc1_target = builder.constant_extension(halt_pc1);

let halt_pc0_offset = builder.sub_extension(lv.program_counter, halt_pc0_target);
let halt_pc1_offset = builder.sub_extension(lv.program_counter, halt_pc1_target);
let constr = builder.mul_extension(halt_pc0_offset, halt_pc1_offset);

yield_constr.constraint_last_row(builder, constr);
}
// Finally, the last row must be in kernel mode.
{
let one = builder.one_extension();
let constr = builder.sub_extension(lv.is_kernel_mode, one);
yield_constr.constraint_last_row(builder, constr);
}
}
Loading

0 comments on commit 7320731

Please sign in to comment.