Skip to content

Commit

Permalink
chore!: remove ec module from stdlib (noir-lang/noir#6612)
Browse files Browse the repository at this point in the history
fix(LSP): use generic self type to narrow down methods to complete (noir-lang/noir#6617)
fix!: Disallow `#[export]` on associated methods (noir-lang/noir#6626)
chore: redo typo PR by donatik27 (noir-lang/noir#6575)
chore: redo typo PR by Dimitrolito (noir-lang/noir#6614)
feat: simplify `jmpif`s by reversing branches if condition is negated (noir-lang/noir#5891)
fix: Do not warn on unused functions marked with #[export] (noir-lang/noir#6625)
chore: Add panic for compiler error described in #6620 (noir-lang/noir#6621)
feat(perf): Track last loads per block in mem2reg and remove them if possible (noir-lang/noir#6088)
fix(ssa): Track all local allocations during flattening (noir-lang/noir#6619)
feat(comptime): Implement blackbox functions in comptime interpreter (noir-lang/noir#6551)
chore: derive PartialEq and Hash for FieldElement (noir-lang/noir#6610)
chore: ignore almost-empty directories in nargo_cli tests (noir-lang/noir#6611)
chore: remove temporary allocations from `num_bits` (noir-lang/noir#6600)
chore: Release Noir(1.0.0-beta.0) (noir-lang/noir#6562)
feat: Add `array_refcount` and `slice_refcount` builtins for debugging (noir-lang/noir#6584)
chore!: Require types of globals to be specified (noir-lang/noir#6592)
fix: don't report visibility errors when elaborating comptime value (noir-lang/noir#6498)
fix: preserve newlines between comments when formatting statements (noir-lang/noir#6601)
fix: parse a bit more SSA stuff (noir-lang/noir#6599)
chore!: remove eddsa from stdlib (noir-lang/noir#6591)
chore: Typo in oracles how to (noir-lang/noir#6598)
feat(ssa): Loop invariant code motion (noir-lang/noir#6563)
fix: remove `compiler_version` from new `Nargo.toml` (noir-lang/noir#6590)
feat: Avoid incrementing reference counts in some cases (noir-lang/noir#6568)
chore: fix typo in test name (noir-lang/noir#6589)
fix: consider prereleases to be compatible with pre-1.0.0 releases (noir-lang/noir#6580)
feat: try to inline brillig calls with all constant arguments  (noir-lang/noir#6548)
fix: correct type when simplifying `derive_pedersen_generators` (noir-lang/noir#6579)
feat: Sync from aztec-packages (noir-lang/noir#6576)
  • Loading branch information
AztecBot committed Nov 27, 2024
2 parents a5d4de0 + cc17c3c commit 657adc4
Show file tree
Hide file tree
Showing 53 changed files with 647 additions and 2,028 deletions.
2 changes: 1 addition & 1 deletion .noir-sync-commit
Original file line number Diff line number Diff line change
@@ -1 +1 @@
649117570b95b26776150e337c458d478eb48c2e
1e965bc8b9c4222c7b2ad7502df415781308de7f
2 changes: 1 addition & 1 deletion noir/noir-repo/.github/ACVM_NOT_PUBLISHABLE.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ assignees: TomAFrench, Savio-Sou

The ACVM crates are currently unpublishable, making a release will NOT push our crates to crates.io.

This is likely due to a crate we depend on bumping its MSRV above our own. Our lockfile is not taken into account when publishing to crates.io (as people downloading our crate don't use it) so we need to be able to use the most up to date versions of our dependencies (including transient dependencies) specified.
This is likely due to a crate we depend on bumping its MSRV above our own. Our lockfile is not taken into account when publishing to crates.io (as people downloading our crate don't use it) so we need to be able to use the most up-to-date versions of our dependencies (including transient dependencies) specified.

Check the [MSRV check]({{env.WORKFLOW_URL}}) workflow for details.

Expand Down
29 changes: 29 additions & 0 deletions noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs
Original file line number Diff line number Diff line change
Expand Up @@ -432,6 +432,16 @@ impl<'f> Context<'f> {
};
self.condition_stack.push(cond_context);
self.insert_current_side_effects_enabled();

// We disallow this case as it results in the `else_destination` block
// being inlined before the `then_destination` block due to block deduplication in the work queue.
//
// The `else_destination` block then gets treated as if it were the `then_destination` block
// and has the incorrect condition applied to it.
assert_ne!(
self.branch_ends[if_entry], *then_destination,
"ICE: branches merge inside of `then` branch"
);
vec![self.branch_ends[if_entry], *else_destination, *then_destination]
}

Expand Down Expand Up @@ -1475,4 +1485,23 @@ mod test {
_ => unreachable!("Should have terminator instruction"),
}
}

#[test]
#[should_panic = "ICE: branches merge inside of `then` branch"]
fn panics_if_branches_merge_within_then_branch() {
    //! This is a regression test for https://github.com/noir-lang/noir/issues/6620

    // `b1` jumps straight into `b2`, which is also the `then` target of the
    // `jmpif`, so the merge point of the branches coincides with the `then`
    // destination. Flattening must detect this and panic with the ICE message
    // above instead of miscompiling.
    let src = "
        acir(inline) fn main f0 {
          b0(v0: u1):
            jmpif v0 then: b2, else: b1
          b2():
            return
          b1():
            jmp b2()
        }
        ";
    let ssa = Ssa::from_str(src).unwrap();
    let _ = ssa.flatten_cfg();
}
}
223 changes: 221 additions & 2 deletions noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
//! - A reference with 0 aliases means we were unable to find which reference this reference
//! refers to. If such a reference is stored to, we must conservatively invalidate every
//! reference in the current block.
//! - We also track the last load instruction to each address per block.
//!
//! From there, to figure out the value of each reference at the end of block, iterate each instruction:
//! - On `Instruction::Allocate`:
Expand All @@ -28,6 +29,13 @@
//! - Furthermore, if the result of the load is a reference, mark the result as an alias
//! of the reference it dereferences to (if known).
//! - If which reference it dereferences to is not known, this load result has no aliases.
//! - We also track the last instance of a load instruction to each address in a block.
//! If we see that the last load instruction was from the same address as the current load instruction,
//! we move to replace the result of the current load with the result of the previous load.
//! This removal requires a couple of conditions:
//! - No store occurs to that address before the next load,
//! - The address is not used as an argument to a call
//! This optimization helps us remove repeated loads for which there are not known values.
//! - On `Instruction::Store { address, value }`:
//! - If the address of the store is known:
//! - If the address has exactly 1 alias:
Expand All @@ -40,11 +48,13 @@
//! - Conservatively mark every alias in the block to `Unknown`.
//! - Additionally, if there were no Loads to any alias of the address between this Store and
//! the previous Store to the same address, the previous store can be removed.
//! - Remove the instance of the last load instruction to the address and its aliases
//! - On `Instruction::Call { arguments }`:
//! - If any argument of the call is a reference, set the value of each alias of that
//! reference to `Unknown`
//! - Any builtin functions that may return aliases if their input also contains a
//! reference should be tracked. Examples: `slice_push_back`, `slice_insert`, `slice_remove`, etc.
//! - Remove the instance of the last load instruction for any reference arguments and their aliases
//!
//! On a terminator instruction:
//! - If the terminator is a `Jmp`:
Expand Down Expand Up @@ -274,6 +284,9 @@ impl<'f> PerFunctionContext<'f> {
if let Some(first_predecessor) = predecessors.next() {
let mut first = self.blocks.get(&first_predecessor).cloned().unwrap_or_default();
first.last_stores.clear();
// Last loads are tracked per block. During unification we are creating a new block from the current one,
// so we must clear the last loads of the current block before we return the new block.
first.last_loads.clear();

// Note that we have to start folding with the first block as the accumulator.
// If we started with an empty block, an empty block union'd with any other block
Expand Down Expand Up @@ -410,6 +423,28 @@ impl<'f> PerFunctionContext<'f> {

self.last_loads.insert(address, (instruction, block_id));
}

// Check whether the block has a repeat load from the same address (w/ no calls or stores in between the loads).
// If we do have a repeat load, we can remove the current load and map its result to the previous load's result.
if let Some(last_load) = references.last_loads.get(&address) {
let Instruction::Load { address: previous_address } =
&self.inserter.function.dfg[*last_load]
else {
panic!("Expected a Load instruction here");
};
let result = self.inserter.function.dfg.instruction_results(instruction)[0];
let previous_result =
self.inserter.function.dfg.instruction_results(*last_load)[0];
if *previous_address == address {
self.inserter.map_value(result, previous_result);
self.instructions_to_remove.insert(instruction);
}
}
// We want to set the load for every load even if the address has a known value
// and the previous load instruction was removed.
// We are safe to still remove a repeat load in this case as we are mapping from the current load's
// result to the previous load, which if it was removed should already have a mapping to the known value.
references.set_last_load(address, instruction);
}
Instruction::Store { address, value } => {
let address = self.inserter.function.dfg.resolve(*address);
Expand Down Expand Up @@ -437,6 +472,8 @@ impl<'f> PerFunctionContext<'f> {
}

references.set_known_value(address, value);
// If we see a store to an address, the last load to that address needs to remain.
references.keep_last_load_for(address, self.inserter.function);
references.last_stores.insert(address, instruction);
}
Instruction::Allocate => {
Expand Down Expand Up @@ -544,6 +581,9 @@ impl<'f> PerFunctionContext<'f> {
let value = self.inserter.function.dfg.resolve(*value);
references.set_unknown(value);
references.mark_value_used(value, self.inserter.function);

// If a reference is an argument to a call, the last load to that address and its aliases needs to remain.
references.keep_last_load_for(value, self.inserter.function);
}
}
}
Expand Down Expand Up @@ -574,6 +614,12 @@ impl<'f> PerFunctionContext<'f> {
let destination_parameters = self.inserter.function.dfg[*destination].parameters();
assert_eq!(destination_parameters.len(), arguments.len());

// If we have multiple parameters that alias that same argument value,
// then those parameters also alias each other.
// We save parameters with repeat arguments to later mark those
// parameters as aliasing one another.
let mut arg_set: HashMap<ValueId, BTreeSet<ValueId>> = HashMap::default();

// Add an alias for each reference parameter
for (parameter, argument) in destination_parameters.iter().zip(arguments) {
if self.inserter.function.dfg.value_is_reference(*parameter) {
Expand All @@ -583,10 +629,27 @@ impl<'f> PerFunctionContext<'f> {
if let Some(aliases) = references.aliases.get_mut(expression) {
// The argument reference is possibly aliased by this block parameter
aliases.insert(*parameter);

// Check if we have seen the same argument
let seen_parameters = arg_set.entry(argument).or_default();
// Add the current parameter to the parameters we have seen for this argument.
// The previous parameters and the current one alias one another.
seen_parameters.insert(*parameter);
}
}
}
}

// Set the aliases of the parameters
for (_, aliased_params) in arg_set {
for param in aliased_params.iter() {
self.set_aliases(
references,
*param,
AliasSet::known_multiple(aliased_params.clone()),
);
}
}
}
TerminatorInstruction::Return { return_values, .. } => {
// Removing all `last_stores` for each returned reference is more important here
Expand Down Expand Up @@ -902,7 +965,7 @@ mod tests {
// v10 = eq v9, Field 2
// constrain v9 == Field 2
// v11 = load v2
// v12 = load v10
// v12 = load v11
// v13 = eq v12, Field 2
// constrain v11 == Field 2
// return
Expand Down Expand Up @@ -961,7 +1024,7 @@ mod tests {
let main = ssa.main();
assert_eq!(main.reachable_blocks().len(), 4);

// The store from the original SSA should remain
// The stores from the original SSA should remain
assert_eq!(count_stores(main.entry_block(), &main.dfg), 2);
assert_eq!(count_stores(b2, &main.dfg), 1);

Expand Down Expand Up @@ -1008,4 +1071,160 @@ mod tests {
let main = ssa.main();
assert_eq!(count_loads(main.entry_block(), &main.dfg), 1);
}

#[test]
fn remove_repeat_loads() {
    // The input program performs two consecutive loads from the same address
    // whose value is unknown — look for the two `load v2` instructions in
    // `b3`. The second (repeated) load should be eliminated by mem2reg.
    let src = "
        acir(inline) fn main f0 {
          b0():
            v0 = allocate -> &mut Field
            store Field 0 at v0
            v2 = allocate -> &mut &mut Field
            store v0 at v2
            jmp b1(Field 0)
          b1(v3: Field):
            v4 = eq v3, Field 0
            jmpif v4 then: b2, else: b3
          b2():
            v5 = load v2 -> &mut Field
            store Field 2 at v5
            v8 = add v3, Field 1
            jmp b1(v8)
          b3():
            v9 = load v0 -> Field
            v10 = eq v9, Field 2
            constrain v9 == Field 2
            v11 = load v2 -> &mut Field
            v12 = load v2 -> &mut Field
            v13 = load v12 -> Field
            v14 = eq v13, Field 2
            constrain v13 == Field 2
            return
        }
        ";

    // After the pass, the repeated load from `v2` is gone: the final block
    // holds three loads instead of the original four, while every store is
    // left untouched.
    let expected = "
        acir(inline) fn main f0 {
          b0():
            v1 = allocate -> &mut Field
            store Field 0 at v1
            v3 = allocate -> &mut &mut Field
            store v1 at v3
            jmp b1(Field 0)
          b1(v0: Field):
            v4 = eq v0, Field 0
            jmpif v4 then: b3, else: b2
          b3():
            v11 = load v3 -> &mut Field
            store Field 2 at v11
            v13 = add v0, Field 1
            jmp b1(v13)
          b2():
            v5 = load v1 -> Field
            v7 = eq v5, Field 2
            constrain v5 == Field 2
            v8 = load v3 -> &mut Field
            v9 = load v8 -> Field
            v10 = eq v9, Field 2
            constrain v9 == Field 2
            return
        }
        ";

    let optimized = Ssa::from_str(src).unwrap().mem2reg();
    assert_normalized_ssa_equals(optimized, expected);
}

#[test]
fn keep_repeat_loads_passed_to_a_call() {
    // Identical to `remove_repeat_loads` except that a call to `f1` sits
    // between the two loads of the same address. The callee may mutate the
    // reference, so the repeated load must NOT be removed.
    let src = "
        acir(inline) fn main f0 {
          b0():
            v1 = allocate -> &mut Field
            store Field 0 at v1
            v3 = allocate -> &mut &mut Field
            store v1 at v3
            jmp b1(Field 0)
          b1(v0: Field):
            v4 = eq v0, Field 0
            jmpif v4 then: b3, else: b2
          b3():
            v13 = load v3 -> &mut Field
            store Field 2 at v13
            v15 = add v0, Field 1
            jmp b1(v15)
          b2():
            v5 = load v1 -> Field
            v7 = eq v5, Field 2
            constrain v5 == Field 2
            v8 = load v3 -> &mut Field
            call f1(v3)
            v10 = load v3 -> &mut Field
            v11 = load v10 -> Field
            v12 = eq v11, Field 2
            constrain v11 == Field 2
            return
        }
        acir(inline) fn foo f1 {
          b0(v0: &mut Field):
            return
        }
        ";

    let optimized = Ssa::from_str(src).unwrap().mem2reg();
    // The pass must leave the program untouched.
    assert_normalized_ssa_equals(optimized, src);
}

#[test]
fn keep_repeat_loads_with_alias_store() {
    // `v1`, `v2`, and `v3` (the parameters of `b3`) all alias the same
    // allocation. Each repeated load from `v1` has a store to one of its
    // aliases in between, so none of the loads may be removed.
    let src = "
        acir(inline) fn main f0 {
          b0(v0: u1):
            jmpif v0 then: b2, else: b1
          b2():
            v6 = allocate -> &mut Field
            store Field 0 at v6
            jmp b3(v6, v6, v6)
          b3(v1: &mut Field, v2: &mut Field, v3: &mut Field):
            v8 = load v1 -> Field
            store Field 2 at v2
            v10 = load v1 -> Field
            store Field 1 at v3
            v11 = load v1 -> Field
            store Field 3 at v3
            v13 = load v1 -> Field
            constrain v8 == Field 0
            constrain v10 == Field 2
            constrain v11 == Field 1
            constrain v13 == Field 3
            return
          b1():
            v4 = allocate -> &mut Field
            store Field 1 at v4
            jmp b3(v4, v4, v4)
        }
        ";

    let optimized = Ssa::from_str(src).unwrap().mem2reg();
    // The pass must leave the program untouched.
    assert_normalized_ssa_equals(optimized, src);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,10 @@ impl AliasSet {
Self { aliases: Some(aliases) }
}

pub(super) fn known_multiple(values: BTreeSet<ValueId>) -> AliasSet {
Self { aliases: Some(values) }
}

/// In rare cases, such as when creating an empty array of references, the set of aliases for a
/// particular value will be known to be zero, which is distinct from being unknown and
/// possibly referring to any alias.
Expand Down
Loading

0 comments on commit 657adc4

Please sign in to comment.