Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
b9b6483
chore: don't optimize ArrayGet from previous ArraySet
asterite Feb 13, 2026
6441fa1
clippy
asterite Feb 13, 2026
c1c06da
Run a separate ArrayGet optimization as an early pass
asterite Feb 13, 2026
9810fea
Also do the optimization one step for ValueMerger
asterite Feb 13, 2026
fba1d9e
Recover some lost optimizations
asterite Feb 14, 2026
3cf0271
Run again after preprocessing functions
asterite Feb 14, 2026
114fd21
One more after mem2reg
asterite Feb 14, 2026
26d937e
Run after every mem2reg
asterite Feb 14, 2026
c8cbe56
Fix tests
asterite Feb 14, 2026
07e93c9
Reuse
asterite Feb 18, 2026
6155470
Doc comments
asterite Feb 18, 2026
54ea4d2
More docs
asterite Feb 18, 2026
abc1e6a
Fixes
asterite Feb 18, 2026
c2f089d
Another fix
asterite Feb 18, 2026
86e92dc
Reduce visibility of method
asterite Feb 18, 2026
462751a
Remove the ValueMerger mode
asterite Feb 18, 2026
e405e80
Snapshots
asterite Feb 18, 2026
f119c9e
Remove extra test
asterite Feb 18, 2026
1391771
No need for a mode, just take `Option`
asterite Feb 18, 2026
83dc45d
Rename
asterite Feb 18, 2026
3bedfaf
Fix typo
asterite Feb 18, 2026
adbc3da
Comment
asterite Feb 18, 2026
ff01c65
See what happens if we don't optimize in ValueMerger
asterite Feb 18, 2026
0acc2d1
Revert "See what happens if we don't optimize in ValueMerger"
asterite Feb 18, 2026
06ddd1d
Apply suggestions from code review
asterite Feb 20, 2026
e661f7b
Don't optimize get from param if we end up with the same array
asterite Feb 20, 2026
ae6ca10
Set side effects var to one when entering block
asterite Feb 20, 2026
6028306
Check that we didn't optimize to get from the same array
asterite Feb 20, 2026
9a5dd41
Assume that we know the predicate of every array_set before an array_get
asterite Feb 20, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 13 additions & 4 deletions compiler/noirc_evaluator/src/ssa/ir/dfg.rs
Original file line number Diff line number Diff line change
Expand Up @@ -812,17 +812,26 @@ impl DataFlowGraph {
/// Uses value information to determine whether an instruction is from
/// this function's DFG or the global space's DFG.
pub(crate) fn get_local_or_global_instruction(&self, value: ValueId) -> Option<&Instruction> {
self.get_local_or_global_instruction_with_id(value).map(|(instruction, _id)| instruction)
}

/// Uses value information to determine whether an instruction is from
/// this function's DFG or the global space's DFG, returning the instruction together with its ID.
pub(crate) fn get_local_or_global_instruction_with_id(
&self,
value: ValueId,
) -> Option<(&Instruction, InstructionId)> {
match &self[value] {
Value::Instruction { instruction, .. } => {
Value::Instruction { instruction: instruction_id, .. } => {
let instruction = if self.is_global(value) {
let instruction = &self.globals[*instruction];
let instruction = &self.globals[*instruction_id];
// We expect to only have MakeArray instructions in the global space
assert!(matches!(instruction, Instruction::MakeArray { .. }));
instruction
} else {
&self[*instruction]
&self[*instruction_id]
};
Some(instruction)
Some((instruction, *instruction_id))
}
_ => None,
}
Expand Down
113 changes: 42 additions & 71 deletions compiler/noirc_evaluator/src/ssa/ir/dfg/simplify.rs
Original file line number Diff line number Diff line change
@@ -1,12 +1,15 @@
use crate::ssa::ir::{
basic_block::BasicBlockId,
dfg::simplify::value_merger::ValueMerger,
instruction::{
Binary, BinaryOp, ConstrainError, Instruction,
binary::{truncate, truncate_field},
use crate::ssa::{
ir::{
basic_block::BasicBlockId,
dfg::simplify::value_merger::ValueMerger,
instruction::{
Binary, BinaryOp, ConstrainError, Instruction,
binary::{truncate, truncate_field},
},
types::{NumericType, Type},
value::{Value, ValueId},
},
types::{NumericType, Type},
value::{Value, ValueId},
opt::ArrayGetOptimizationResult,
};
use acvm::{
AcirField as _, FieldElement,
Expand Down Expand Up @@ -115,7 +118,7 @@ pub(crate) fn simplify(
Instruction::ConstrainNotEqual(..) => None,
Instruction::ArrayGet { array, index } => {
if let Some(index) = dfg.get_numeric_constant(*index) {
return try_optimize_array_get_from_previous_set(dfg, *array, index);
return try_optimize_array_get_from_previous_instructions(dfg, *array, index);
}

let array_or_vector_type = dfg.type_of_value(*array);
Expand Down Expand Up @@ -357,81 +360,44 @@ fn optimize_length_one_array_read(
);
dfg.insert_instruction_and_results(index_constraint, block, None, call_stack);

let result = try_optimize_array_get_from_previous_set(dfg, array, FieldElement::zero());
let result =
try_optimize_array_get_from_previous_instructions(dfg, array, FieldElement::zero());
if let SimplifyResult::None = result {
SimplifyResult::SimplifiedToInstruction(Instruction::ArrayGet { array, index: zero })
} else {
result
}
}

/// Given a chain of operations like:
/// v1 = array_set [10, 11, 12], index 1, value: 5
/// v2 = array_set v1, index 2, value: 6
/// v3 = array_set v2, index 2, value: 7
/// v4 = array_get v3, index 1
///
/// We want to optimize `v4` to `11`. To do this we need to follow the array value
/// through several array sets. For each array set:
/// - If the index is non-constant we fail the optimization since any index may be changed
/// - If the index is constant and is our target index, we conservatively fail the optimization
/// in case the array_set is disabled from a previous `enable_side_effects_if` and the array get
/// was not.
/// - Otherwise, we check the array value of the array set.
/// - If the array value is constant, we use that array.
/// - If the array value is from a previous array-set, we recur.
/// - If the array value is from an array parameter, we use that array.
///
/// That is, we have multiple `array_set` instructions setting various constant indexes
/// of the same array, returning a modified version. We want to go backwards until we
/// find the last `array_set` for the index we are interested in, and return the value set.
fn try_optimize_array_get_from_previous_set(
/// See [`crate::ssa::opt::try_optimize_array_get_from_previous_instructions`] for more information.
fn try_optimize_array_get_from_previous_instructions(
dfg: &mut DataFlowGraph,
mut array_id: ValueId,
array_id: ValueId,
target_index: FieldElement,
) -> SimplifyResult {
// The target index must be less than the maximum array length
let Some(target_index_u32) = target_index.try_to_u32() else {
return SimplifyResult::None;
};

// Arbitrary number of maximum tries just to prevent this optimization from taking too long.
let max_tries = 5;
for _ in 0..max_tries {
if let Some(instruction) = dfg.get_local_or_global_instruction(array_id) {
match instruction {
Instruction::ArraySet { array, index, value, .. } => {
if let Some(constant) = dfg.get_numeric_constant(*index) {
if constant == target_index {
return SimplifyResult::SimplifiedTo(*value);
}
let side_effects = None;
let result = crate::ssa::opt::try_optimize_array_get_from_previous_instructions(
array_id,
target_index,
dfg,
side_effects,
);
match result {
Some(ArrayGetOptimizationResult::Value(value)) => SimplifyResult::SimplifiedTo(value),
Some(ArrayGetOptimizationResult::ArrayGet(new_array_id)) => {
assert_ne!(
new_array_id, array_id,
"ArrayGetOptimizationResult::ArrayGet returned the same array_id"
);

array_id = *array; // recur
continue;
}
}
Instruction::MakeArray { elements: array, typ: _ } => {
let index = target_index_u32 as usize;
if index < array.len() {
return SimplifyResult::SimplifiedTo(array[index]);
}
}
_ => (),
}
} else if let Value::Param { typ: Type::Array(_, length), .. } = &dfg[array_id]
&& target_index_u32 < length.0
{
let index = dfg.make_constant(target_index, NumericType::length_type());
return SimplifyResult::SimplifiedToInstruction(Instruction::ArrayGet {
array: array_id,
SimplifyResult::SimplifiedToInstruction(Instruction::ArrayGet {
array: new_array_id,
index,
});
})
}

break;
None => SimplifyResult::None,
}

SimplifyResult::None
}

/// If we have an array set whose value is from an array get on the same array at the same index,
Expand Down Expand Up @@ -753,12 +719,17 @@ mod tests {
";
let ssa = Ssa::from_str_simplifying(src).unwrap();

// Only v3 was optimized because v2 reads from the same index
// as v1 and this basic optimization can't know if that array_set
// is under the same predicate as the array_get (there's a dedicated
// `array_get` optimization for that)
assert_ssa_snapshot!(ssa, @r"
acir(inline) predicate_pure fn main f0 {
b0(v0: [Field; 2]):
v3 = array_set mut v0, index u32 0, value Field 4
v5 = array_get v0, index u32 1 -> Field
return Field 4, v5
v4 = array_get v3, index u32 0 -> Field
v6 = array_get v0, index u32 1 -> Field
return v4, v6
}
");
}
Expand Down
9 changes: 8 additions & 1 deletion compiler/noirc_evaluator/src/ssa/ir/dfg/simplify/call.rs
Original file line number Diff line number Diff line change
Expand Up @@ -551,7 +551,14 @@ fn simplify_vector_push_back(
vector_sizes.insert(set_last_vector_value, vector_size / element_size);
vector_sizes.insert(new_vector, vector_size / element_size);

let mut value_merger = ValueMerger::new(dfg, block, &vector_sizes, call_stack);
let array_get_optimization_side_effects = None;
let mut value_merger = ValueMerger::new(
dfg,
block,
&vector_sizes,
call_stack,
array_get_optimization_side_effects,
);

let Ok(new_vector) = value_merger.merge_values(
len_not_equals_capacity,
Expand Down
95 changes: 72 additions & 23 deletions compiler/noirc_evaluator/src/ssa/ir/dfg/simplify/value_merger.rs
Original file line number Diff line number Diff line change
@@ -1,16 +1,25 @@
use acvm::acir::brillig::lengths::{ElementTypesLength, SemanticLength, SemiFlattenedLength};
use acvm::{
FieldElement,
acir::brillig::lengths::{ElementTypesLength, SemanticLength, SemiFlattenedLength},
};
use noirc_errors::{Location, call_stack::CallStackId};
use rustc_hash::FxHashMap as HashMap;

use crate::{
brillig::assert_u32,
errors::{RtResult, RuntimeError},
ssa::ir::{
basic_block::BasicBlockId,
dfg::DataFlowGraph,
instruction::{BinaryOp, Instruction},
types::{NumericType, Type},
value::ValueId,
ssa::{
ir::{
basic_block::BasicBlockId,
dfg::DataFlowGraph,
instruction::{BinaryOp, Instruction},
types::{NumericType, Type},
value::ValueId,
},
opt::{
ArrayGetOptimizationResult, ArrayGetOptimizationSideEffects,
try_optimize_array_get_from_previous_instructions,
},
},
};

Expand All @@ -23,6 +32,19 @@ pub(crate) struct ValueMerger<'a> {
vector_sizes: &'a HashMap<ValueId, SemanticLength>,

call_stack: CallStackId,

/// Optional information about the current side effects variable, and which
/// side effects variable is applied to each `array_set` instruction that happens
/// before the values being merged.
///
/// When arrays or vectors are merged as a result of an `if ... else` instruction,
/// a new array will be built that does `array_get` on the arrays on the "then" and
/// "else" branches, and combines those values. These newly inserted `array_get`s could
/// be optimized by reusing previously inserted instructions, such as an `array_set` at
/// the same index as the one in the `array_get`. However, this is only safe to do
/// if we know the side effects var of those two instructions is the same. Hence, that
/// information can optionally be specified here.
array_get_optimization_side_effects: Option<ArrayGetOptimizationSideEffects<'a>>,
}

impl<'a> ValueMerger<'a> {
Expand All @@ -31,8 +53,9 @@ impl<'a> ValueMerger<'a> {
block: BasicBlockId,
vector_sizes: &'a HashMap<ValueId, SemanticLength>,
call_stack: CallStackId,
array_get_optimization_side_effects: Option<ArrayGetOptimizationSideEffects<'a>>,
) -> Self {
ValueMerger { dfg, block, vector_sizes, call_stack }
ValueMerger { dfg, block, vector_sizes, call_stack, array_get_optimization_side_effects }
}

/// Choose a call stack to return with the [RuntimeError].
Expand Down Expand Up @@ -147,7 +170,7 @@ impl<'a> ValueMerger<'a> {
/// Given an if expression that returns an array: `if c { array1 } else { array2 }`,
/// this function will recursively merge array1 and array2 into a single resulting array
/// by creating a new array containing the result of `self.merge_values` for each element.
pub(crate) fn merge_array_values(
fn merge_array_values(
&mut self,
typ: Type,
then_condition: ValueId,
Expand All @@ -166,16 +189,13 @@ impl<'a> ValueMerger<'a> {

for i in 0..len.0 {
for (element_index, element_type) in element_types.iter().enumerate() {
let index = u128::from(i * element_count + element_index as u32).into();
let index = self.dfg.make_constant(index, NumericType::length_type());
let index_value = u128::from(i * element_count + element_index as u32).into();
let index = self.dfg.make_constant(index_value, NumericType::length_type());

let typevars = Some(vec![element_type.clone()]);

let mut get_element = |array, typevars| {
let get = Instruction::ArrayGet { array, index };
self.dfg
.insert_instruction_and_results(get, self.block, typevars, self.call_stack)
.first()
self.maybe_optimized_array_get(array, index, index_value, typevars)
};

let then_element = get_element(then_value, typevars.clone());
Expand Down Expand Up @@ -235,14 +255,8 @@ impl<'a> ValueMerger<'a> {

let mut get_element = |array, typevars, len: SemiFlattenedLength| {
assert!(index_u32 < len.0, "get_element invoked with an out of bounds index");
let get = Instruction::ArrayGet { array, index };
let results = self.dfg.insert_instruction_and_results(
get,
self.block,
typevars,
self.call_stack,
);
results.first()

self.maybe_optimized_array_get(array, index, index_value, typevars)
};

// If it's out of bounds for the "then" vector, a value in the "else" *must* exist.
Expand Down Expand Up @@ -280,4 +294,39 @@ impl<'a> ValueMerger<'a> {
self.dfg.insert_instruction_and_results(instruction, self.block, None, call_stack);
Ok(result.first())
}

fn maybe_optimized_array_get(
&mut self,
array: ValueId,
index: ValueId,
index_value: FieldElement,
typevars: Option<Vec<Type>>,
) -> ValueId {
let side_effects = self.array_get_optimization_side_effects.as_ref();
match try_optimize_array_get_from_previous_instructions(
array,
index_value,
self.dfg,
side_effects,
) {
Some(ArrayGetOptimizationResult::Value(value)) => value,
Some(ArrayGetOptimizationResult::ArrayGet(new_array)) => {
assert_ne!(
new_array, array,
"ArrayGetOptimizationResult::ArrayGet returned the same array_id"
);

let get = Instruction::ArrayGet { array: new_array, index };
self.dfg
.insert_instruction_and_results(get, self.block, typevars, self.call_stack)
.first()
}
None => {
let get = Instruction::ArrayGet { array, index };
self.dfg
.insert_instruction_and_results(get, self.block, typevars, self.call_stack)
.first()
}
}
}
}
Loading
Loading