diff --git a/compiler/noirc_evaluator/src/ssa.rs b/compiler/noirc_evaluator/src/ssa.rs index a257bf16eb2..fc29439043c 100644 --- a/compiler/noirc_evaluator/src/ssa.rs +++ b/compiler/noirc_evaluator/src/ssa.rs @@ -44,6 +44,7 @@ use crate::acir::GeneratedAcir; mod checks; pub mod function_builder; +pub mod interpreter; pub mod ir; pub(crate) mod opt; #[cfg(test)] diff --git a/compiler/noirc_evaluator/src/ssa/interpreter/intrinsics.rs b/compiler/noirc_evaluator/src/ssa/interpreter/intrinsics.rs new file mode 100644 index 00000000000..3eaa4e767a8 --- /dev/null +++ b/compiler/noirc_evaluator/src/ssa/interpreter/intrinsics.rs @@ -0,0 +1,262 @@ +use acvm::FieldElement; +use iter_extended::vecmap; + +use crate::ssa::{ + interpreter::NumericValue, + ir::{ + dfg, + instruction::{Endian, Intrinsic}, + types::{NumericType, Type}, + value::ValueId, + }, +}; + +use super::{IResults, Interpreter, Value}; + +impl Interpreter<'_> { + pub(super) fn call_intrinsic( + &mut self, + intrinsic: Intrinsic, + mut args: Vec, + results: &[ValueId], + ) -> IResults { + match intrinsic { + Intrinsic::ArrayLen => { + assert_eq!(args.len(), 1); + let length = args[0].as_array_or_slice().unwrap().elements.borrow().len(); + Ok(vec![Value::Numeric(NumericValue::U32(length as u32))]) + } + Intrinsic::ArrayAsStrUnchecked => { + assert_eq!(args.len(), 1); + Ok(args) + } + Intrinsic::AsSlice => { + assert_eq!(args.len(), 1); + let array = args[0].as_array_or_slice().unwrap(); + let length = array.elements.borrow().len(); + let length = Value::Numeric(NumericValue::U32(length as u32)); + + let elements = array.elements.borrow().to_vec(); + let slice = Value::slice(elements, array.element_types.clone()); + Ok(vec![length, slice]) + } + Intrinsic::AssertConstant => { + // Nothing we can do here unfortunately if we want to allow code with + // assert_constant to still pass pre-inlining and other optimizations. 
+ Ok(Vec::new()) + } + Intrinsic::StaticAssert => { + assert_eq!(args.len(), 2); + + let condition = args[0].as_bool().unwrap(); + if !condition { + let message = args[1].as_string().unwrap(); + panic!("static_assert failed: {message}"); + } + + Ok(Vec::new()) + } + Intrinsic::SlicePushBack => self.slice_push_back(args), + Intrinsic::SlicePushFront => self.slice_push_front(args), + Intrinsic::SlicePopBack => self.slice_pop_back(args), + Intrinsic::SlicePopFront => self.slice_pop_front(args), + Intrinsic::SliceInsert => self.slice_insert(args), + Intrinsic::SliceRemove => self.slice_remove(args), + Intrinsic::ApplyRangeConstraint => { + todo!("Intrinsic::ApplyRangeConstraint is currently unimplemented") + } + // Both of these are no-ops + Intrinsic::StrAsBytes | Intrinsic::AsWitness => { + assert_eq!(args.len(), 1); + let arg = args.pop().unwrap(); + Ok(vec![arg]) + } + Intrinsic::ToBits(endian) => { + assert_eq!(args.len(), 1); + assert_eq!(results.len(), 1); + let field = args[0].as_field().unwrap(); + self.to_radix(endian, field, 2, results[0]) + } + Intrinsic::ToRadix(endian) => { + assert_eq!(args.len(), 2); + assert_eq!(results.len(), 1); + let field = args[0].as_field().unwrap(); + let radix = args[1].as_u32().unwrap(); + self.to_radix(endian, field, radix, results[0]) + } + Intrinsic::BlackBox(black_box_func) => { + todo!("Intrinsic::BlackBox({black_box_func}) is currently unimplemented") + } + Intrinsic::Hint(_) => todo!("Intrinsic::Hint is currently unimplemented"), + Intrinsic::IsUnconstrained => { + assert_eq!(args.len(), 0); + Ok(vec![Value::bool(self.in_unconstrained_context())]) + } + Intrinsic::DerivePedersenGenerators => { + todo!("Intrinsic::DerivePedersenGenerators is currently unimplemented") + } + Intrinsic::FieldLessThan => { + assert!( + self.in_unconstrained_context(), + "FieldLessThan can only be called in unconstrained" + ); + assert_eq!(args.len(), 2); + let lhs = args[0].as_field().unwrap(); + let rhs = args[1].as_field().unwrap(); + 
Ok(vec![Value::bool(lhs < rhs)]) + } + Intrinsic::ArrayRefCount | Intrinsic::SliceRefCount => { + let array = args[0].as_array_or_slice().unwrap(); + let rc = *array.rc.borrow(); + Ok(vec![Value::from_constant(rc.into(), NumericType::unsigned(32))]) + } + } + } + + fn to_radix( + &self, + endian: Endian, + field: FieldElement, + radix: u32, + result: ValueId, + ) -> IResults { + let Type::Array(_, limb_count) = self.dfg().type_of_value(result) else { + unreachable!("Expected result of to_radix/to_bytes to be an array") + }; + + let Some(limbs) = dfg::simplify::constant_to_radix(endian, field, radix, limb_count) else { + panic!("Unable to convert `{field}` to radix `{radix}`") + }; + + let elements = vecmap(limbs, |limb| Value::from_constant(limb, NumericType::unsigned(8))); + Ok(vec![Value::array(elements, vec![Type::unsigned(8)])]) + } + + /// (length, slice, elem...) -> (length, slice) + fn slice_push_back(&self, args: Vec) -> IResults { + let length = args[0].as_u32().unwrap(); + let slice = args[1].as_array_or_slice().unwrap(); + + // The resulting slice should be cloned - should we check RC here to try mutating it? + // It'd need to be brillig-only if so since RC is always 1 in acir. + let mut new_elements = slice.elements.borrow().to_vec(); + let element_types = slice.element_types.clone(); + + new_elements.extend(args.into_iter().skip(2)); + + let new_length = Value::Numeric(NumericValue::U32(length + 1)); + let new_slice = Value::slice(new_elements, element_types); + Ok(vec![new_length, new_slice]) + } + + /// (length, slice, elem...) 
-> (length, slice) + fn slice_push_front(&self, args: Vec) -> IResults { + let length = args[0].as_u32().unwrap(); + let slice = args[1].as_array_or_slice().unwrap(); + let slice_elements = slice.elements.clone(); + let element_types = slice.element_types.clone(); + + let mut new_elements = args.into_iter().skip(2).collect::>(); + new_elements.extend_from_slice(&slice_elements.borrow()); + + let new_length = Value::Numeric(NumericValue::U32(length + 1)); + let new_slice = Value::slice(new_elements, element_types); + Ok(vec![new_length, new_slice]) + } + + /// (length, slice) -> (length, slice, elem...) + fn slice_pop_back(&self, args: Vec) -> IResults { + let length = args[0].as_u32().unwrap(); + let slice = args[1].as_array_or_slice().unwrap(); + + let mut slice_elements = slice.elements.borrow().to_vec(); + let element_types = slice.element_types.clone(); + + if slice_elements.is_empty() { + panic!("slice_pop_back: empty slice"); + } + + assert!(slice_elements.len() >= element_types.len()); + + let mut popped_elements = vecmap(0..element_types.len(), |_| slice_elements.pop().unwrap()); + popped_elements.reverse(); + + let new_length = Value::Numeric(NumericValue::U32(length - 1)); + let new_slice = Value::slice(slice_elements, element_types); + let mut results = vec![new_length, new_slice]; + results.extend(popped_elements); + Ok(results) + } + + /// (length, slice) -> (elem..., length, slice) + fn slice_pop_front(&self, args: Vec) -> IResults { + let length = args[0].as_u32().unwrap(); + let slice = args[1].as_array_or_slice().unwrap(); + + let mut slice_elements = slice.elements.borrow().to_vec(); + let element_types = slice.element_types.clone(); + + if slice_elements.is_empty() { + panic!("slice_pop_front: empty slice"); + } + + assert!(slice_elements.len() >= element_types.len()); + let mut results = slice_elements.drain(0..element_types.len()).collect::>(); + + let new_length = Value::Numeric(NumericValue::U32(length - 1)); + let new_slice = 
Value::slice(slice_elements, element_types); + results.push(new_length); + results.push(new_slice); + Ok(results) + } + + /// (length, slice, index:u32, elem...) -> (length, slice) + fn slice_insert(&self, args: Vec) -> IResults { + let length = args[0].as_u32().unwrap(); + let slice = args[1].as_array_or_slice().unwrap(); + let index = args[2].as_u32().unwrap(); + + let mut slice_elements = slice.elements.borrow().to_vec(); + let element_types = slice.element_types.clone(); + + let mut index = index as usize * element_types.len(); + for arg in args.into_iter().skip(3) { + slice_elements.insert(index, arg); + index += 1; + } + + let new_length = Value::Numeric(NumericValue::U32(length + 1)); + let new_slice = Value::slice(slice_elements, element_types); + Ok(vec![new_length, new_slice]) + } + + /// (length, slice, index:u32) -> (length, slice, elem...) + fn slice_remove(&self, args: Vec) -> IResults { + let length = args[0].as_u32().unwrap(); + let slice = args[1].as_array_or_slice().unwrap(); + let index = args[2].as_u32().unwrap(); + + let mut slice_elements = slice.elements.borrow().to_vec(); + let element_types = slice.element_types.clone(); + + if slice_elements.is_empty() { + panic!("slice_remove: empty slice"); + } + assert!(slice_elements.len() >= element_types.len()); + + let index = index as usize * element_types.len(); + let removed: Vec<_> = slice_elements.drain(index..index + element_types.len()).collect(); + + let new_length = Value::Numeric(NumericValue::U32(length - 1)); + let new_slice = Value::slice(slice_elements, element_types); + let mut results = vec![new_length, new_slice]; + results.extend(removed); + Ok(results) + } + + /// Print is not an intrinsic but it is treated like one. 
+ pub(super) fn call_print(&mut self, _args: Vec) -> IResults { + // Stub the call for now + Ok(Vec::new()) + } +} diff --git a/compiler/noirc_evaluator/src/ssa/interpreter/mod.rs b/compiler/noirc_evaluator/src/ssa/interpreter/mod.rs new file mode 100644 index 00000000000..95c79d05e94 --- /dev/null +++ b/compiler/noirc_evaluator/src/ssa/interpreter/mod.rs @@ -0,0 +1,932 @@ +use std::cmp::Ordering; + +use super::{ + Ssa, + ir::{ + dfg::DataFlowGraph, + function::{Function, FunctionId, RuntimeType}, + instruction::{Binary, BinaryOp, Instruction, TerminatorInstruction}, + types::Type, + value::ValueId, + }, +}; +use crate::{errors::RuntimeError, ssa::ir::instruction::binary::truncate_field}; +use acvm::{AcirField, FieldElement}; +use fxhash::FxHashMap as HashMap; +use iter_extended::vecmap; +use noirc_frontend::Shared; +use value::{ArrayValue, NumericValue}; + +mod intrinsics; +mod tests; +pub mod value; + +use value::Value; + +struct Interpreter<'ssa> { + /// Contains each function called with `main` (or the first called function if + /// the interpreter was manually invoked on a different function) at + /// the front of the Vec. + call_stack: Vec, + + ssa: &'ssa Ssa, + + /// This variable can be modified by `enable_side_effects_if` instructions and is + /// expected to have no effect if there are no such instructions or if the code + /// being executed is an unconstrained function. + side_effects_enabled: bool, +} + +struct CallContext { + /// The function that was called. This is `None` only for the top-level global + /// scope where global instructions are evaluated. + called_function: Option, + + /// Contains each value currently defined and visible to the current function. 
+ scope: HashMap, +} + +impl CallContext { + fn new(called_function: FunctionId) -> Self { + Self { called_function: Some(called_function), scope: Default::default() } + } + + fn global_context() -> Self { + Self { called_function: None, scope: Default::default() } + } +} + +type IResult = Result; +type IResults = IResult>; + +#[allow(unused)] +impl Ssa { + pub(crate) fn interpret(&self, args: Vec) -> IResults { + self.interpret_function(self.main_id, args) + } + + pub(crate) fn interpret_function(&self, function: FunctionId, args: Vec) -> IResults { + let mut interpreter = Interpreter::new(self); + interpreter.interpret_globals()?; + interpreter.call_function(function, args) + } +} + +impl<'ssa> Interpreter<'ssa> { + fn new(ssa: &'ssa Ssa) -> Self { + let call_stack = vec![CallContext::global_context()]; + Self { ssa, call_stack, side_effects_enabled: true } + } + + fn call_context(&self) -> &CallContext { + self.call_stack.last().expect("call_stack should always be non-empty") + } + + fn call_context_mut(&mut self) -> &mut CallContext { + self.call_stack.last_mut().expect("call_stack should always be non-empty") + } + + fn global_scope(&self) -> &HashMap { + &self.call_stack.first().expect("call_stack should always be non-empty").scope + } + + fn current_function(&self) -> &'ssa Function { + let current_function_id = self.call_context().called_function; + let current_function_id = current_function_id.expect( + "Tried calling `Interpreter::current_function` while evaluating global instructions", + ); + &self.ssa.functions[¤t_function_id] + } + + fn dfg(&self) -> &'ssa DataFlowGraph { + &self.current_function().dfg + } + + fn in_unconstrained_context(&self) -> bool { + self.current_function().runtime().is_brillig() + } + + /// Define or redefine a value. + /// Redefinitions are expected in the case of loops. 
+ fn define(&mut self, id: ValueId, value: Value) { + self.call_context_mut().scope.insert(id, value); + } + + fn interpret_globals(&mut self) -> IResult<()> { + let globals = &self.ssa.main().dfg.globals; + for (global_id, global) in globals.values_iter() { + let value = match global { + super::ir::value::Value::Instruction { instruction, .. } => { + let instruction = &globals[*instruction]; + self.interpret_instruction(instruction, &[global_id])?; + continue; + } + super::ir::value::Value::NumericConstant { constant, typ } => { + Value::from_constant(*constant, *typ) + } + super::ir::value::Value::Function(id) => Value::Function(*id), + super::ir::value::Value::Intrinsic(intrinsic) => Value::Intrinsic(*intrinsic), + super::ir::value::Value::ForeignFunction(name) => { + Value::ForeignFunction(name.clone()) + } + super::ir::value::Value::Global(_) | super::ir::value::Value::Param { .. } => { + unreachable!() + } + }; + self.define(global_id, value); + } + Ok(()) + } + + fn call_function(&mut self, function_id: FunctionId, mut arguments: Vec) -> IResults { + self.call_stack.push(CallContext::new(function_id)); + + let function = &self.ssa.functions[&function_id]; + let mut block_id = function.entry_block(); + let dfg = self.dfg(); + + // Loop over blocks & instructions inline here to avoid pushing more + // call frames (in rust). We only push call frames for function calls which + // should prevent stack overflows for all but excessively large call stacks + // that may overflow in the brillig vm as well.
+ let return_values = loop { + let block = &dfg[block_id]; + + if arguments.len() != block.parameters().len() { + panic!("Block argument count does not match the expected parameter count"); + } + + for (parameter, argument) in block.parameters().iter().zip(arguments) { + self.define(*parameter, argument); + } + + for instruction_id in block.instructions() { + let results = dfg.instruction_results(*instruction_id); + self.interpret_instruction(&dfg[*instruction_id], results)?; + } + + match block.terminator() { + None => panic!("No block terminator in block {block_id}"), + Some(TerminatorInstruction::Jmp { destination, arguments: jump_args, .. }) => { + block_id = *destination; + arguments = self.lookup_all(jump_args); + } + Some(TerminatorInstruction::JmpIf { + condition, + then_destination, + else_destination, + call_stack: _, + }) => { + block_id = if self.lookup(*condition).as_bool().unwrap() { + *then_destination + } else { + *else_destination + }; + arguments = Vec::new(); + } + Some(TerminatorInstruction::Return { return_values, call_stack: _ }) => { + break self.lookup_all(return_values); + } + } + }; + + self.call_stack.pop(); + Ok(return_values) + } + + fn lookup(&self, id: ValueId) -> Value { + if let Some(value) = self.call_context().scope.get(&id) { + return value.clone(); + } + + if let Some(value) = self.global_scope().get(&id) { + return value.clone(); + } + + match &self.dfg()[id] { + super::ir::value::Value::NumericConstant { constant, typ } => { + Value::from_constant(*constant, *typ) + } + super::ir::value::Value::Function(id) => Value::Function(*id), + super::ir::value::Value::Intrinsic(intrinsic) => Value::Intrinsic(*intrinsic), + super::ir::value::Value::ForeignFunction(name) => Value::ForeignFunction(name.clone()), + super::ir::value::Value::Instruction { .. } + | super::ir::value::Value::Param { .. 
} + | super::ir::value::Value::Global(_) => { + unreachable!("`{id}` should already be in scope") + } + } + } + + fn lookup_all(&self, ids: &[ValueId]) -> Vec { + vecmap(ids, |id| self.lookup(*id)) + } + + fn side_effects_enabled(&self) -> bool { + match self.current_function().runtime() { + RuntimeType::Acir(_) => self.side_effects_enabled, + RuntimeType::Brillig(_) => true, + } + } + + #[allow(unused)] + fn interpret_instruction( + &mut self, + instruction: &Instruction, + results: &[ValueId], + ) -> Result<(), RuntimeError> { + match instruction { + Instruction::Binary(binary) => { + let result = self.interpret_binary(binary)?; + self.define(results[0], result); + } + // Cast in SSA changes the type without altering the value + Instruction::Cast(value, numeric_type) => { + let field = self.lookup(*value).as_numeric().unwrap().convert_to_field(); + let result = Value::Numeric(NumericValue::from_constant(field, *numeric_type)); + self.define(results[0], result); + } + Instruction::Not(id) => self.interpret_not(*id, results[0]), + Instruction::Truncate { value, bit_size, max_bit_size } => { + self.interpret_truncate(*value, *bit_size, *max_bit_size, results[0]); + } + Instruction::Constrain(lhs, rhs, constrain_error) => { + let lhs = self.lookup(*lhs); + let rhs = self.lookup(*rhs); + if self.side_effects_enabled() && lhs != rhs { + panic!("Constrain {lhs} == {rhs} failed!"); + } + } + Instruction::ConstrainNotEqual(lhs, rhs, constrain_error) => { + let lhs = self.lookup(*lhs); + let rhs = self.lookup(*rhs); + if self.side_effects_enabled() && lhs == rhs { + panic!("Constrain {lhs} != {rhs} failed!"); + } + } + Instruction::RangeCheck { value, max_bit_size, assert_message } => { + self.interpret_range_check(*value, *max_bit_size, assert_message.as_ref()); + } + Instruction::Call { func, arguments } => { + self.interpret_call(*func, arguments, results)?; + } + Instruction::Allocate => self.interpret_allocate(results[0]), + Instruction::Load { address } => 
self.interpret_load(*address, results[0]), + Instruction::Store { address, value } => self.interpret_store(*address, *value), + Instruction::EnableSideEffectsIf { condition } => { + self.side_effects_enabled = self.lookup(*condition).as_bool().unwrap(); + } + Instruction::ArrayGet { array, index } => { + self.interpret_array_get(*array, *index, results[0]); + } + Instruction::ArraySet { array, index, value, mutable } => { + self.interpret_array_set(*array, *index, *value, *mutable, results[0]); + } + Instruction::IncrementRc { value } => self.interpret_inc_rc(*value), + Instruction::DecrementRc { value } => self.interpret_dec_rc(*value), + Instruction::IfElse { then_condition, then_value, else_condition, else_value } => self + .interpret_if_else( + *then_condition, + *then_value, + *else_condition, + *else_value, + results[0], + ), + Instruction::MakeArray { elements, typ } => { + self.interpret_make_array(elements, results[0], typ); + } + Instruction::Noop => (), + } + Ok(()) + } + + fn interpret_not(&mut self, id: ValueId, result: ValueId) { + let new_result = match self.lookup(id).as_numeric().unwrap() { + NumericValue::Field(field) => { + unreachable!("not: Expected integer value, found field {field}") + } + NumericValue::U1(value) => NumericValue::U1(!value), + NumericValue::U8(value) => NumericValue::U8(!value), + NumericValue::U16(value) => NumericValue::U16(!value), + NumericValue::U32(value) => NumericValue::U32(!value), + NumericValue::U64(value) => NumericValue::U64(!value), + NumericValue::U128(value) => NumericValue::U128(!value), + NumericValue::I8(value) => NumericValue::I8(!value), + NumericValue::I16(value) => NumericValue::I16(!value), + NumericValue::I32(value) => NumericValue::I32(!value), + NumericValue::I64(value) => NumericValue::I64(!value), + }; + self.define(result, Value::Numeric(new_result)); + } + + fn interpret_truncate( + &mut self, + value: ValueId, + bit_size: u32, + _max_bit_size: u32, + result: ValueId, + ) { + let value = 
self.lookup(value).as_numeric().unwrap(); + let bit_mask = (1u128 << bit_size) - 1; + assert_ne!(bit_mask, 0); + + let truncated = match value { + NumericValue::Field(value) => NumericValue::Field(truncate_field(value, bit_size)), + NumericValue::U1(value) => NumericValue::U1(value), + NumericValue::U8(value) => NumericValue::U8(truncate_unsigned(value, bit_size)), + NumericValue::U16(value) => NumericValue::U16(truncate_unsigned(value, bit_size)), + NumericValue::U32(value) => NumericValue::U32(truncate_unsigned(value, bit_size)), + NumericValue::U64(value) => NumericValue::U64(truncate_unsigned(value, bit_size)), + NumericValue::U128(value) => NumericValue::U128(truncate_unsigned(value, bit_size)), + NumericValue::I8(value) => NumericValue::I8(truncate_signed(value, bit_size)), + NumericValue::I16(value) => NumericValue::I16(truncate_signed(value, bit_size)), + NumericValue::I32(value) => NumericValue::I32(truncate_signed(value, bit_size)), + NumericValue::I64(value) => NumericValue::I64(truncate_signed(value, bit_size)), + }; + + self.define(result, Value::Numeric(truncated)); + } + + fn interpret_range_check( + &mut self, + value: ValueId, + max_bit_size: u32, + error_message: Option<&String>, + ) { + if !self.side_effects_enabled() { + return; + } + + let value = self.lookup(value).as_numeric().unwrap(); + assert_ne!(max_bit_size, 0); + + fn bit_count(x: impl Into) -> u32 { + let x = x.into(); + if x <= 0.0001 { 0 } else { x.log2() as u32 + 1 } + } + + let bit_count = match value { + NumericValue::Field(value) => value.num_bits(), + // max_bit_size > 0 so u1 should always pass these checks + NumericValue::U1(_) => return, + NumericValue::U8(value) => bit_count(value), + NumericValue::U16(value) => bit_count(value), + NumericValue::U32(value) => bit_count(value), + NumericValue::U64(value) => { + // u64, u128, and i64 don't impl Into + if value == 0 { 0 } else { value.ilog2() + 1 } + } + NumericValue::U128(value) => { + if value == 0 { + 0 + } else { + 
value.ilog2() + 1 + } + } + NumericValue::I8(value) => bit_count(value), + NumericValue::I16(value) => bit_count(value), + NumericValue::I32(value) => bit_count(value), + NumericValue::I64(value) => { + if value == 0 { + 0 + } else { + value.ilog2() + 1 + } + } + }; + + if bit_count > max_bit_size { + if let Some(message) = error_message { + panic!( + "bit count of {bit_count} exceeded max bit count of {max_bit_size}\n{message}" + ); + } else { + panic!("bit count of {bit_count} exceeded max bit count of {max_bit_size}"); + } + } + } + + fn interpret_call( + &mut self, + function: ValueId, + argument_ids: &[ValueId], + results: &[ValueId], + ) -> IResult<()> { + let function = self.lookup(function); + let mut arguments = vecmap(argument_ids, |argument| self.lookup(*argument)); + + let new_results = if self.side_effects_enabled() { + match function { + Value::Function(id) => { + // If we're crossing a constrained -> unconstrained boundary we have to wipe + // any shared mutable fields in our arguments since brillig should conceptually + // receive fresh array on each invocation. + if !self.in_unconstrained_context() + && self.ssa.functions[&id].runtime().is_brillig() + { + arguments.iter_mut().for_each(Self::reset_array_state); + } + self.call_function(id, arguments)? + } + Value::Intrinsic(intrinsic) => { + self.call_intrinsic(intrinsic, arguments, results)? 
+ } + Value::ForeignFunction(name) if name == "print" => self.call_print(arguments)?, + Value::ForeignFunction(name) => { + todo!("call: ForeignFunction({name}) is not yet implemented") + } + other => panic!("call: Expected function, found {other:?}"), + } + } else { + vecmap(results, |result| { + let typ = self.dfg().type_of_value(*result); + Value::uninitialized(&typ, *result) + }) + }; + + assert_eq!(new_results.len(), results.len()); + for (result, new_result) in results.iter().zip(new_results) { + self.define(*result, new_result); + } + Ok(()) + } + + /// Reset the value's `Shared` states in each array within. This is used to mimic each + /// invocation of the brillig vm receiving fresh values. No matter the history of this value + /// (e.g. even if they were previously returned from another brillig function) the reference + /// count should always be 1 and it shouldn't alias any other arrays. + fn reset_array_state(value: &mut Value) { + match value { + Value::Numeric(_) + | Value::Function(_) + | Value::Intrinsic(_) + | Value::ForeignFunction(_) => (), + + Value::Reference(_) => panic!( + "No reference values are allowed when crossing the constrained -> unconstrained boundary" + ), + + Value::ArrayOrSlice(array_value) => { + let mut elements = array_value.elements.borrow().to_vec(); + elements.iter_mut().for_each(Self::reset_array_state); + array_value.elements = Shared::new(elements); + array_value.rc = Shared::new(1); + } + } + } + + fn interpret_allocate(&mut self, result: ValueId) { + let result_type = self.dfg().type_of_value(result); + let element_type = match result_type { + Type::Reference(element_type) => element_type, + other => unreachable!( + "Result of allocate should always be a reference type, but found {other}" + ), + }; + self.define(result, Value::reference(result, element_type)); + } + + fn interpret_load(&mut self, address: ValueId, result: ValueId) { + let address = self.lookup(address); + let address = address.as_reference().unwrap(); + 
+ let element = address.element.borrow(); + let Some(value) = &*element else { + panic!( + "reference value {} is being loaded before it was stored to", + address.original_id + ); + }; + + self.define(result, value.clone()); + } + + fn interpret_store(&mut self, address: ValueId, value: ValueId) { + let address = self.lookup(address); + let address = address.as_reference().unwrap(); + let value = self.lookup(value); + *address.element.borrow_mut() = Some(value); + } + + fn interpret_array_get(&mut self, array: ValueId, index: ValueId, result: ValueId) { + let element = if self.side_effects_enabled() { + let array = self.lookup(array); + let array = array.as_array_or_slice().unwrap(); + let index = self.lookup(index).as_u32().unwrap(); + array.elements.borrow()[index as usize].clone() + } else { + let typ = self.dfg().type_of_value(result); + Value::uninitialized(&typ, result) + }; + self.define(result, element); + } + + fn interpret_array_set( + &mut self, + array: ValueId, + index: ValueId, + value: ValueId, + mutable: bool, + result: ValueId, + ) { + let result_array = if self.side_effects_enabled() { + let array = self.lookup(array); + let array = array.as_array_or_slice().unwrap(); + let index = self.lookup(index).as_u32().unwrap(); + let value = self.lookup(value); + + let should_mutate = + if self.in_unconstrained_context() { *array.rc.borrow() == 1 } else { mutable }; + + if should_mutate { + array.elements.borrow_mut()[index as usize] = value; + Value::ArrayOrSlice(array.clone()) + } else { + let mut elements = array.elements.borrow().to_vec(); + elements[index as usize] = value; + let elements = Shared::new(elements); + let rc = Shared::new(1); + let element_types = array.element_types.clone(); + let is_slice = array.is_slice; + Value::ArrayOrSlice(ArrayValue { elements, rc, element_types, is_slice }) + } + } else { + // Side effects are disabled, return the original array + self.lookup(array) + }; + self.define(result, result_array); + } + + fn 
interpret_inc_rc(&self, array: ValueId) { + if self.in_unconstrained_context() { + let array = self.lookup(array); + let array = array.as_array_or_slice().unwrap(); + let mut rc = array.rc.borrow_mut(); + + assert_ne!(*rc, 0, "inc_rc: increment from 0 back to 1 detected"); + *rc += 1; + } + } + + fn interpret_dec_rc(&self, array: ValueId) { + if self.in_unconstrained_context() { + let array = self.lookup(array); + let array = array.as_array_or_slice().unwrap(); + let mut rc = array.rc.borrow_mut(); + + assert_ne!(*rc, 0, "dec_rc: underflow detected"); + *rc -= 1; + } + } + + fn interpret_if_else( + &mut self, + then_condition: ValueId, + then_value: ValueId, + else_condition: ValueId, + else_value: ValueId, + result: ValueId, + ) { + let then_condition = self.lookup(then_condition).as_bool().unwrap(); + let else_condition = self.lookup(else_condition).as_bool().unwrap(); + let then_value = self.lookup(then_value); + let else_value = self.lookup(else_value); + + // Note that `then_condition = !else_condition` doesn't always hold! + // Notably if this is a nested if expression we could have something like: + // then_condition = outer_condition & a + // else_condition = outer_condition & !a + // If `outer_condition` is false, both will be false. + assert!(!then_condition || !else_condition); + + let new_result = if !then_condition && !else_condition { + // Returning uninitialized/zero if both conditions are false to match + // the decomposition of `cond * then_value + !cond * else_value` for numeric values. 
+ let typ = self.dfg().type_of_value(result); + Value::uninitialized(&typ, result) + } else if then_condition { + then_value + } else { + else_value + }; + + self.define(result, new_result); + } + + fn interpret_make_array( + &mut self, + elements: &im::Vector, + result: ValueId, + result_type: &Type, + ) { + let elements = vecmap(elements, |element| self.lookup(*element)); + let is_slice = matches!(&result_type, Type::Slice(..)); + + let array = Value::ArrayOrSlice(ArrayValue { + elements: Shared::new(elements), + rc: Shared::new(1), + element_types: result_type.clone().element_types(), + is_slice, + }); + self.define(result, array); + } +} + +macro_rules! apply_int_binop { + ($lhs:expr, $rhs:expr, $f:expr) => {{ + use value::NumericValue::*; + match ($lhs, $rhs) { + (Field(_), Field(_)) => panic!("Expected only integer values, found field values"), + (U1(_), U1(_)) => panic!("Expected only large integer values, found u1"), + (U8(lhs), U8(rhs)) => U8($f(&lhs, &rhs)), + (U16(lhs), U16(rhs)) => U16($f(&lhs, &rhs)), + (U32(lhs), U32(rhs)) => U32($f(&lhs, &rhs)), + (U64(lhs), U64(rhs)) => U64($f(&lhs, &rhs)), + (U128(lhs), U128(rhs)) => U128($f(&lhs, &rhs)), + (I8(lhs), I8(rhs)) => I8($f(&lhs, &rhs)), + (I16(lhs), I16(rhs)) => I16($f(&lhs, &rhs)), + (I32(lhs), I32(rhs)) => I32($f(&lhs, &rhs)), + (I64(lhs), I64(rhs)) => I64($f(&lhs, &rhs)), + (lhs, rhs) => panic!("Got mismatched types in binop: {lhs:?} and {rhs:?}"), + } + }}; +} + +macro_rules! 
apply_int_binop_opt { + ($lhs:expr, $rhs:expr, $f:expr) => {{ + use value::NumericValue::*; + // TODO: Error if None instead of unwrapping + match ($lhs, $rhs) { + (Field(_), Field(_)) => panic!("Expected only integer values, found field values"), + (U1(_), U1(_)) => panic!("Expected only large integer values, found u1"), + (U8(lhs), U8(rhs)) => U8($f(&lhs, &rhs).unwrap()), + (U16(lhs), U16(rhs)) => U16($f(&lhs, &rhs).unwrap()), + (U32(lhs), U32(rhs)) => U32($f(&lhs, &rhs).unwrap()), + (U64(lhs), U64(rhs)) => U64($f(&lhs, &rhs).unwrap()), + (U128(lhs), U128(rhs)) => U128($f(&lhs, &rhs).unwrap()), + (I8(lhs), I8(rhs)) => I8($f(&lhs, &rhs).unwrap()), + (I16(lhs), I16(rhs)) => I16($f(&lhs, &rhs).unwrap()), + (I32(lhs), I32(rhs)) => I32($f(&lhs, &rhs).unwrap()), + (I64(lhs), I64(rhs)) => I64($f(&lhs, &rhs).unwrap()), + (lhs, rhs) => panic!("Got mismatched types in binop: {lhs:?} and {rhs:?}"), + } + }}; +} + +macro_rules! apply_int_comparison_op { + ($lhs:expr, $rhs:expr, $f:expr) => {{ + use NumericValue::*; + match ($lhs, $rhs) { + (Field(_), Field(_)) => panic!("Expected only integer values, found field values"), + (U1(_), U1(_)) => panic!("Expected only large integer values, found u1"), + (U8(lhs), U8(rhs)) => U1($f(&lhs, &rhs)), + (U16(lhs), U16(rhs)) => U1($f(&lhs, &rhs)), + (U32(lhs), U32(rhs)) => U1($f(&lhs, &rhs)), + (U64(lhs), U64(rhs)) => U1($f(&lhs, &rhs)), + (U128(lhs), U128(rhs)) => U1($f(&lhs, &rhs)), + (I8(lhs), I8(rhs)) => U1($f(&lhs, &rhs)), + (I16(lhs), I16(rhs)) => U1($f(&lhs, &rhs)), + (I32(lhs), I32(rhs)) => U1($f(&lhs, &rhs)), + (I64(lhs), I64(rhs)) => U1($f(&lhs, &rhs)), + (lhs, rhs) => panic!("Got mismatched types in binop: {lhs:?} and {rhs:?}"), + } + }}; +} + +impl Interpreter<'_> { + fn interpret_binary(&mut self, binary: &Binary) -> IResult { + // TODO: Replace unwrap with real error + let lhs = self.lookup(binary.lhs).as_numeric().unwrap(); + let rhs = self.lookup(binary.rhs).as_numeric().unwrap(); + + if lhs.get_type() != rhs.get_type() + 
&& !matches!(binary.operator, BinaryOp::Shl | BinaryOp::Shr) + { + panic!( + "Type error in ({}: {}) {} ({}: {})", + binary.lhs, + lhs.get_type(), + binary.operator, + binary.rhs, + rhs.get_type() + ) + } + + // Disable this instruction if it is side-effectful and side effects are disabled. + if !self.side_effects_enabled() && binary.requires_acir_gen_predicate(self.dfg()) { + let zero = NumericValue::from_constant(FieldElement::zero(), lhs.get_type()); + return Ok(Value::Numeric(zero)); + } + + if let (Some(lhs), Some(rhs)) = (lhs.as_field(), rhs.as_field()) { + return self.interpret_field_binary_op(lhs, binary.operator, rhs); + } + + if let (Some(lhs), Some(rhs)) = (lhs.as_bool(), rhs.as_bool()) { + return self.interpret_u1_binary_op(lhs, binary.operator, rhs); + } + + let result = match binary.operator { + BinaryOp::Add { unchecked: false } => { + apply_int_binop_opt!(lhs, rhs, num_traits::CheckedAdd::checked_add) + } + BinaryOp::Add { unchecked: true } => { + apply_int_binop!(lhs, rhs, num_traits::WrappingAdd::wrapping_add) + } + BinaryOp::Sub { unchecked: false } => { + apply_int_binop_opt!(lhs, rhs, num_traits::CheckedSub::checked_sub) + } + BinaryOp::Sub { unchecked: true } => { + apply_int_binop!(lhs, rhs, num_traits::WrappingSub::wrapping_sub) + } + BinaryOp::Mul { unchecked: false } => { + apply_int_binop_opt!(lhs, rhs, num_traits::CheckedMul::checked_mul) + } + BinaryOp::Mul { unchecked: true } => { + apply_int_binop!(lhs, rhs, num_traits::WrappingMul::wrapping_mul) + } + BinaryOp::Div => { + apply_int_binop_opt!(lhs, rhs, num_traits::CheckedDiv::checked_div) + } + BinaryOp::Mod => { + apply_int_binop_opt!(lhs, rhs, num_traits::CheckedRem::checked_rem) + } + BinaryOp::Eq => apply_int_comparison_op!(lhs, rhs, |a, b| a == b), + BinaryOp::Lt => apply_int_comparison_op!(lhs, rhs, |a, b| a < b), + BinaryOp::And => { + apply_int_binop!(lhs, rhs, std::ops::BitAnd::bitand) + } + BinaryOp::Or => { + apply_int_binop!(lhs, rhs, std::ops::BitOr::bitor) + } + 
BinaryOp::Xor => { + apply_int_binop!(lhs, rhs, std::ops::BitXor::bitxor) + } + BinaryOp::Shl => { + let rhs = rhs.as_u32().expect("Expected rhs of shl to be a u32"); + let overflow_msg = "Overflow when evaluating `shl`, `rhs` is too large"; + use NumericValue::*; + match lhs { + Field(_) => unreachable!("<< is not implemented for Field"), + U1(_) => unreachable!("<< is not implemented for u1"), + U8(value) => U8(value.checked_shl(rhs).expect(overflow_msg)), + U16(value) => U16(value.checked_shl(rhs).expect(overflow_msg)), + U32(value) => U32(value.checked_shl(rhs).expect(overflow_msg)), + U64(value) => U64(value.checked_shl(rhs).expect(overflow_msg)), + U128(value) => U128(value.checked_shl(rhs).expect(overflow_msg)), + I8(value) => I8(value.checked_shl(rhs).expect(overflow_msg)), + I16(value) => I16(value.checked_shl(rhs).expect(overflow_msg)), + I32(value) => I32(value.checked_shl(rhs).expect(overflow_msg)), + I64(value) => I64(value.checked_shl(rhs).expect(overflow_msg)), + } + } + BinaryOp::Shr => { + let zero = || NumericValue::zero(lhs.get_type()); + let rhs = rhs.as_u32().expect("Expected rhs of shr to be a u32"); + + use NumericValue::*; + match lhs { + Field(_) => unreachable!(">> is not implemented for Field"), + U1(_) => unreachable!(">> is not implemented for u1"), + U8(value) => value.checked_shr(rhs).map(U8).unwrap_or_else(zero), + U16(value) => value.checked_shr(rhs).map(U16).unwrap_or_else(zero), + U32(value) => value.checked_shr(rhs).map(U32).unwrap_or_else(zero), + U64(value) => value.checked_shr(rhs).map(U64).unwrap_or_else(zero), + U128(value) => value.checked_shr(rhs).map(U128).unwrap_or_else(zero), + I8(value) => value.checked_shr(rhs).map(I8).unwrap_or_else(zero), + I16(value) => value.checked_shr(rhs).map(I16).unwrap_or_else(zero), + I32(value) => value.checked_shr(rhs).map(I32).unwrap_or_else(zero), + I64(value) => value.checked_shr(rhs).map(I64).unwrap_or_else(zero), + } + } + }; + Ok(Value::Numeric(result)) + } + + fn 
interpret_field_binary_op( + &mut self, + lhs: FieldElement, + operator: BinaryOp, + rhs: FieldElement, + ) -> IResult { + let result = match operator { + BinaryOp::Add { unchecked: _ } => NumericValue::Field(lhs + rhs), + BinaryOp::Sub { unchecked: _ } => NumericValue::Field(lhs - rhs), + BinaryOp::Mul { unchecked: _ } => NumericValue::Field(lhs * rhs), + BinaryOp::Div => { + // FieldElement::div returns a value with panicking on divide by zero + if rhs.is_zero() { + panic!("Field division by zero"); + } + NumericValue::Field(lhs / rhs) + } + BinaryOp::Mod => panic!("Unsupported operator `%` for Field"), + BinaryOp::Eq => NumericValue::U1(lhs == rhs), + BinaryOp::Lt => NumericValue::U1(lhs < rhs), + BinaryOp::And => panic!("Unsupported operator `&` for Field"), + BinaryOp::Or => panic!("Unsupported operator `|` for Field"), + BinaryOp::Xor => panic!("Unsupported operator `^` for Field"), + BinaryOp::Shl => panic!("Unsupported operator `<<` for Field"), + BinaryOp::Shr => panic!("Unsupported operator `>>` for Field"), + }; + Ok(Value::Numeric(result)) + } + + fn interpret_u1_binary_op( + &mut self, + lhs: bool, + operator: BinaryOp, + rhs: bool, + ) -> IResult { + let result = match operator { + BinaryOp::Add { unchecked: _ } => panic!("Unsupported operator `+` for u1"), + BinaryOp::Sub { unchecked: _ } => panic!("Unsupported operator `-` for u1"), + BinaryOp::Mul { unchecked: _ } => lhs & rhs, // (*) = (&) for u1 + BinaryOp::Div => panic!("Unsupported operator `/` for u1"), + BinaryOp::Mod => panic!("Unsupported operator `%` for u1"), + BinaryOp::Eq => lhs == rhs, + // clippy complains when you do `lhs < rhs` and recommends this instead + BinaryOp::Lt => !lhs & rhs, + BinaryOp::And => lhs & rhs, + BinaryOp::Or => lhs | rhs, + BinaryOp::Xor => lhs ^ rhs, + BinaryOp::Shl => panic!("Unsupported operator `<<` for u1"), + BinaryOp::Shr => panic!("Unsupported operator `>>` for u1"), + }; + Ok(Value::Numeric(NumericValue::U1(result))) + } +} + +fn 
truncate_unsigned(value: T, bit_size: u32) -> T +where + u128: From, + T: TryFrom, + >::Error: std::fmt::Debug, +{ + let value_u128 = u128::from(value); + let bit_mask = match bit_size.cmp(&128) { + Ordering::Less => (1u128 << bit_size) - 1, + Ordering::Equal => u128::MAX, + Ordering::Greater => panic!("truncate: Invalid bit size: {bit_size}"), + }; + + let result = value_u128 & bit_mask; + T::try_from(result).expect( + "The truncated result should always be smaller than or equal to the original `value`", + ) +} + +fn truncate_signed(value: T, bit_size: u32) -> T +where + i128: From, + T: TryFrom + num_traits::Bounded, + >::Error: std::fmt::Debug, +{ + let mut value_i128 = i128::from(value); + if value_i128 < 0 { + let max = 1i128 << (bit_size - 1); + value_i128 += max; + assert!(bit_size <= 64, "The maximum bit size for signed integers is 64"); + + let mask = (1i128 << bit_size) - 1; + let result = (value_i128 & mask) - max; + + T::try_from(result).expect( + "The truncated result should always be smaller than or equal to the original `value`", + ) + } else { + let result = truncate_unsigned::(value_i128 as u128, bit_size) as i128; + T::try_from(result).expect( + "The truncated result should always be smaller than or equal to the original `value`", + ) + } +} + +#[cfg(test)] +mod test { + #[test] + fn test_truncate_unsigned() { + assert_eq!(super::truncate_unsigned(57_u32, 8), 57); + assert_eq!(super::truncate_unsigned(257_u16, 8), 1); + assert_eq!(super::truncate_unsigned(130_u8, 7), 2); + assert_eq!(super::truncate_unsigned(u8::MAX, 8), u8::MAX); + assert_eq!(super::truncate_unsigned(u128::MAX, 128), u128::MAX); + } + + #[test] + fn test_truncate_signed() { + assert_eq!(super::truncate_signed(57_i32, 8), 57); + assert_eq!(super::truncate_signed(257_i16, 8), 1); + assert_eq!(super::truncate_signed(130_i64, 7), 2); + assert_eq!(super::truncate_signed(i16::MAX, 16), i16::MAX); + + assert_eq!(super::truncate_signed(-57_i32, 8), -57); + 
assert_eq!(super::truncate_signed(-1_i64, 3), -1_i64); + assert_eq!(super::truncate_signed(-258_i16, 8), -2); + assert_eq!(super::truncate_signed(-130_i16, 7), -2); + assert_eq!(super::truncate_signed(i8::MIN, 8), i8::MIN); + assert_eq!(super::truncate_signed(-8_i8, 4), -8); + assert_eq!(super::truncate_signed(-8_i8, 3), 0); + assert_eq!(super::truncate_signed(-129_i32, 8), 127); + } +} diff --git a/compiler/noirc_evaluator/src/ssa/interpreter/tests/instructions.rs b/compiler/noirc_evaluator/src/ssa/interpreter/tests/instructions.rs new file mode 100644 index 00000000000..2e33a7ff6f9 --- /dev/null +++ b/compiler/noirc_evaluator/src/ssa/interpreter/tests/instructions.rs @@ -0,0 +1,840 @@ +use std::sync::Arc; + +use iter_extended::vecmap; +use noirc_frontend::Shared; + +use crate::ssa::{ + interpreter::{ + NumericValue, Value, + tests::{expect_value, expect_values}, + value::ReferenceValue, + }, + ir::{ + types::{NumericType, Type}, + value::ValueId, + }, +}; + +use super::{executes_with_no_errors, expect_error}; + +#[test] +fn add() { + let value = expect_value( + " + acir(inline) fn main f0 { + b0(): + v0 = add i32 2, i32 100 + return v0 + } + ", + ); + assert_eq!(value, Value::Numeric(NumericValue::I32(102))); +} + +/// TODO: Replace panic with error +#[test] +#[should_panic(expected = "called `Option::unwrap()` on a `None` value")] +fn add_overflow() { + expect_error( + " + acir(inline) fn main f0 { + b0(): + v0 = add u8 200, u8 100 + return v0 + } + ", + ); +} + +#[test] +fn add_unchecked() { + executes_with_no_errors( + " + acir(inline) fn main f0 { + b0(): + v0 = unchecked_add u8 200, u8 100 + return v0 + } + ", + ); +} + +#[test] +fn sub() { + let value = expect_value( + " + acir(inline) fn main f0 { + b0(): + v0 = sub i32 10101, i32 101 + return v0 + } + ", + ); + assert_eq!(value, Value::Numeric(NumericValue::I32(10000))); +} + +/// TODO: Replace panic with error +#[test] +#[should_panic(expected = "called `Option::unwrap()` on a `None` value")] +fn 
sub_underflow() { + expect_error( + " + acir(inline) fn main f0 { + b0(): + v0 = sub i8 -120, i8 10 + return v0 + } + ", + ); +} + +#[test] +fn sub_unchecked() { + let value = expect_value( + " + acir(inline) fn main f0 { + b0(): + v0 = unchecked_sub i8 3, i8 10 + return v0 + } + ", + ); + assert_eq!(value, Value::Numeric(NumericValue::I8(-7))); +} + +#[test] +fn mul() { + let value = expect_value( + " + acir(inline) fn main f0 { + b0(): + v0 = mul u64 2, u64 100 + return v0 + } + ", + ); + assert_eq!(value, Value::Numeric(NumericValue::U64(200))); +} + +/// TODO: Replace panic with error +#[test] +#[should_panic(expected = "called `Option::unwrap()` on a `None` value")] +fn mul_overflow() { + expect_error( + " + acir(inline) fn main f0 { + b0(): + v0 = mul u8 128, u8 2 + return v0 + } + ", + ); +} + +#[test] +fn mul_unchecked() { + let value = expect_value( + " + acir(inline) fn main f0 { + b0(): + v0 = unchecked_mul u8 128, u8 2 + return v0 + } + ", + ); + assert_eq!(value, Value::Numeric(NumericValue::U8(0))); +} + +#[test] +fn div() { + let value = expect_value( + " + acir(inline) fn main f0 { + b0(): + v0 = div i16 128, i16 2 + return v0 + } + ", + ); + assert_eq!(value, Value::Numeric(NumericValue::I16(64))); +} + +/// TODO: Replace panic with error +#[test] +#[should_panic(expected = "Field division by zero")] +fn div_zero() { + expect_error( + " + acir(inline) fn main f0 { + b0(): + v0 = div Field 12, Field 0 + return v0 + } + ", + ); +} + +#[test] +fn r#mod() { + let value = expect_value( + " + acir(inline) fn main f0 { + b0(): + v0 = mod i64 5, i64 3 + return v0 + } + ", + ); + assert_eq!(value, Value::Numeric(NumericValue::I64(2))); +} + +/// TODO: Replace panic with error +#[test] +#[should_panic(expected = "called `Option::unwrap()` on a `None` value")] +fn mod_zero() { + expect_error( + " + acir(inline) fn main f0 { + b0(): + v0 = mod u8 12, u8 0 + return v0 + } + ", + ); +} + +#[test] +fn eq() { + let value = expect_value( + " + acir(inline) fn main 
f0 { + b0(): + v0 = eq u8 3, u8 4 + return v0 + } + ", + ); + assert_eq!(value, Value::bool(false)); +} + +#[test] +fn lt() { + let values = expect_values( + " + acir(inline) fn main f0 { + b0(): + v0 = lt u32 3, u32 7 + v1 = lt i32 3, i32 7 + v2 = lt i32 3, i32 -3 + + v3 = lt i32 -3, i32 -2 + v4 = lt i32 -3, i32 -3 + v5 = lt i32 -3, i32 -4 + return v0, v1, v2, v3, v4, v5 + } + ", + ); + assert_eq!(values[0], Value::bool(true)); + assert_eq!(values[1], Value::bool(true)); + assert_eq!(values[2], Value::bool(false)); + assert_eq!(values[3], Value::bool(true)); + assert_eq!(values[4], Value::bool(false)); + assert_eq!(values[5], Value::bool(false)); +} + +#[test] +fn and() { + let values = expect_values( + " + acir(inline) fn main f0 { + b0(): + v0 = and u1 1, u1 0 + v1 = and u8 3, u8 5 + return v0, v1 + } + ", + ); + assert_eq!(values[0], Value::bool(false)); + assert_eq!(values[1], Value::from_constant(1_u128.into(), NumericType::unsigned(8))); +} + +#[test] +fn or() { + let values = expect_values( + " + acir(inline) fn main f0 { + b0(): + v0 = or u1 1, u1 0 + v1 = or u8 3, u8 5 + return v0, v1 + } + ", + ); + assert_eq!(values[0], Value::bool(true)); + assert_eq!(values[1], Value::from_constant(7_u128.into(), NumericType::unsigned(8))); +} + +#[test] +fn xor() { + let values = expect_values( + " + acir(inline) fn main f0 { + b0(): + v0 = xor u1 1, u1 0 + v1 = xor u8 3, u8 5 + return v0, v1 + } + ", + ); + assert_eq!(values[0], Value::bool(true)); + assert_eq!(values[1], Value::from_constant(6_u128.into(), NumericType::unsigned(8))); +} + +#[test] +fn shl() { + let value = expect_value( + " + acir(inline) fn main f0 { + b0(): + v0 = shl u8 3, u32 2 + return v0 + } + ", + ); + assert_eq!(value, Value::from_constant(12_u128.into(), NumericType::unsigned(8))); +} + +#[test] +#[should_panic] +/// shl should overflow if the rhs is greater than the bit count +fn shl_overflow() { + expect_error( + " + acir(inline) fn main f0 { + b0(): + v0 = shl u8 3, u32 9 + return v0 + 
} + ", + ); +} + +#[test] +fn shr() { + let values = expect_values( + " + acir(inline) fn main f0 { + b0(): + v0 = shr u8 12, u32 2 + v1 = shr u8 5, u32 1 + v2 = shr u8 5, u32 4 + return v0, v1, v2 + } + ", + ); + assert_eq!(values[0], Value::from_constant(3_u128.into(), NumericType::unsigned(8))); + assert_eq!(values[1], Value::from_constant(2_u128.into(), NumericType::unsigned(8))); + assert_eq!(values[2], Value::from_constant(0_u128.into(), NumericType::unsigned(8))); +} + +#[test] +/// Unlike shl, shr does not error on overflow. It just returns 0. See https://github.com/noir-lang/noir/pull/7509. +fn shr_overflow() { + let value = expect_value( + " + acir(inline) fn main f0 { + b0(): + v0 = shr u8 3, u32 9 + return v0 + } + ", + ); + assert_eq!(value, Value::from_constant(0_u128.into(), NumericType::unsigned(8))); +} + +#[test] +fn cast() { + let values = expect_values( + " + acir(inline) fn main f0 { + b0(): + v0 = cast u32 2 as Field + v1 = cast u32 3 as u8 + v2 = cast i8 -1 as i32 + return v0, v1, v2 + } + ", + ); + assert_eq!(values[0], Value::from_constant(2_u128.into(), NumericType::NativeField)); + assert_eq!(values[1], Value::from_constant(3_u128.into(), NumericType::unsigned(8))); + assert_eq!(values[2], Value::from_constant((-1_i128).into(), NumericType::signed(32))); +} + +#[test] +fn not() { + let values = expect_values( + " + acir(inline) fn main f0 { + b0(): + v0 = not u1 0 + v1 = not u1 1 + v2 = not u8 136 + return v0, v1, v2 + } + ", + ); + assert_eq!(values[0], Value::bool(true)); + assert_eq!(values[1], Value::bool(false)); + + let not_constant = !136_u8 as u128; + assert_eq!(values[2], Value::from_constant(not_constant.into(), NumericType::unsigned(8))); +} + +#[test] +fn truncate() { + let value = expect_value( + " + acir(inline) fn main f0 { + b0(): + v0 = truncate u32 257 to 8 bits, max_bit_size: 9 + return v0 + } + ", + ); + let constant = 257_u16 as u8 as u128; + assert_eq!(value, Value::from_constant(constant.into(), 
NumericType::unsigned(32))); +} + +#[test] +fn constrain() { + executes_with_no_errors( + " + acir(inline) fn main f0 { + b0(): + v0 = eq u8 3, u8 4 + constrain v0 == v0 + constrain v0 == u1 0 + return + } + ", + ); +} + +#[test] +fn constrain_disabled_by_enable_side_effects() { + executes_with_no_errors( + " + acir(inline) fn main f0 { + b0(): + enable_side_effects u1 0 + constrain u1 1 == u1 0 + return + } + ", + ); +} + +// SSA Parser does not yet parse ConstrainNotEqual +// #[test] +// fn constrain_not_equal() { +// executes_with_no_errors( +// " +// acir(inline) fn main f0 { +// b0(): +// v0 = eq u8 3, u8 4 +// constrain v0 != u1 1 +// return +// } +// ", +// ); +// } +// +// #[test] +// fn constrain_not_equal_disabled_by_enable_side_effects() { +// executes_with_no_errors( +// " +// acir(inline) fn main f0 { +// b0(): +// enable_side_effects u1 0 +// constrain u1 1 != u1 1 +// return +// } +// ", +// ); +// } + +#[test] +fn range_check() { + executes_with_no_errors( + " + acir(inline) fn main f0 { + b0(): + range_check u32 1000 to 16 bits + return + } + ", + ); +} + +#[test] +#[should_panic] +fn range_check_fail() { + expect_error( + " + acir(inline) fn main f0 { + b0(): + range_check u32 256 to 8 bits + return + } + ", + ); +} + +#[test] +fn range_check_disabled_by_enable_side_effects() { + executes_with_no_errors( + " + acir(inline) fn main f0 { + b0(): + enable_side_effects u1 0 + range_check u32 256 to 8 bits + return + } + ", + ); +} + +#[test] +fn call() { + let value = expect_value( + " + acir(inline) fn main f0 { + b0(): + v0 = call f1(Field 4) -> Field + return v0 + } + + acir(inline) fn square f1 { + b0(v0: Field): + v1 = mul v0, v0 + return v1 + } + ", + ); + assert_eq!(value, Value::from_constant(16_u32.into(), NumericType::NativeField)); +} + +#[test] +fn allocate() { + let value = expect_value( + " + acir(inline) fn main f0 { + b0(): + v0 = allocate -> &mut Field + return v0 + } + ", + ); + let expected = Value::Reference(ReferenceValue { + 
original_id: ValueId::test_new(0), + element: Shared::new(None), + element_type: Arc::new(Type::field()), + }); + assert_eq!(value, expected); +} + +#[test] +fn load() { + let value = expect_value( + " + acir(inline) fn main f0 { + b0(): + v0 = allocate -> &mut u1 + store u1 1 at v0 + v1 = load v0 -> u1 + return v1 + } + ", + ); + assert_eq!(value, Value::bool(true)); +} + +#[test] +fn store() { + let value = expect_value( + " + acir(inline) fn main f0 { + b0(): + v0 = allocate -> &mut u1 + store u1 1 at v0 + return v0 + } + ", + ); + let expected = Value::Reference(ReferenceValue { + original_id: ValueId::test_new(0), + element: Shared::new(Some(Value::bool(true))), + element_type: Arc::new(Type::bool()), + }); + assert_eq!(value, expected); +} + +#[test] +fn enable_side_effects() { + let values = expect_values( + " + acir(inline) fn main f0 { + b0(): + enable_side_effects u1 0 + v1 = allocate -> &mut Field + store Field 0 at v1 + v2 = call f1(v1) -> Field + return v1, v2 + } + + acir(inline) fn foo f1 { + b0(v0: &mut Field): + store Field 2 at v0 + return Field 7 + } + ", + ); + let field_zero = Value::from_constant(0u128.into(), NumericType::NativeField); + let expected = Value::Reference(ReferenceValue { + original_id: ValueId::test_new(1), + element: Shared::new(Some(field_zero.clone())), + element_type: Arc::new(Type::field()), + }); + assert_eq!(values[0], expected); + assert_eq!(values[1], field_zero); +} + +#[test] +fn array_get() { + let value = expect_value( + r#" + acir(inline) fn main f0 { + b0(): + v0 = make_array [Field 1, Field 2] : [Field; 2] + v1 = array_get v0, index u32 1 -> Field + return v1 + } + "#, + ); + assert_eq!(value, Value::from_constant(2_u32.into(), NumericType::NativeField)); +} + +#[test] +fn array_get_disabled_by_enable_side_effects() { + let value = expect_value( + r#" + acir(inline) fn main f0 { + b0(): + enable_side_effects u1 0 + v0 = make_array [Field 1, Field 2] : [Field; 2] + v1 = array_get v0, index u32 1 -> Field + return 
v1 + } + "#, + ); + assert_eq!(value, Value::from_constant(0_u32.into(), NumericType::NativeField)); +} + +#[test] +fn array_set() { + let values = expect_values( + " + acir(inline) fn main f0 { + b0(): + v0 = make_array [Field 1, Field 2] : [Field; 2] + v1 = array_set v0, index u32 1, value Field 5 + v2 = array_set mut v0, index u32 0, value Field 4 + return v0, v1, v2 + } + ", + ); + + let v0 = values[0].as_array_or_slice().unwrap(); + let v1 = values[1].as_array_or_slice().unwrap(); + let v2 = values[2].as_array_or_slice().unwrap(); + + // acir function, so all rcs are 1 + assert_eq!(*v0.rc.borrow(), 1); + assert_eq!(*v1.rc.borrow(), 1); + assert_eq!(*v2.rc.borrow(), 1); + + let one = Value::from_constant(1u32.into(), NumericType::NativeField); + let two = Value::from_constant(2u32.into(), NumericType::NativeField); + let four = Value::from_constant(4u32.into(), NumericType::NativeField); + let five = Value::from_constant(5u32.into(), NumericType::NativeField); + + // v0 was forcibly mutated via the last `array_set mut` + assert_eq!(*v0.elements.borrow(), vec![four.clone(), two.clone()]); + + // v1 was not mutated when v2 was created since it is conceptually a different array + assert_eq!(*v1.elements.borrow(), vec![one, five]); + + assert_eq!(*v2.elements.borrow(), vec![four, two]); +} + +#[test] +fn array_set_disabled_by_enable_side_effects() { + let values = expect_values( + " + acir(inline) fn main f0 { + b0(): + enable_side_effects u1 0 + v0 = make_array [Field 1, Field 2] : [Field; 2] + v1 = array_set v0, index u32 1, value Field 5 + v2 = array_set mut v0, index u32 0, value Field 4 + return v0, v1, v2 + } + ", + ); + + let v0 = values[0].as_array_or_slice().unwrap(); + let v1 = values[1].as_array_or_slice().unwrap(); + let v2 = values[2].as_array_or_slice().unwrap(); + + // acir function, so all rcs are 1 + assert_eq!(*v0.rc.borrow(), 1); + assert_eq!(*v1.rc.borrow(), 1); + assert_eq!(*v2.rc.borrow(), 1); + + let one = Value::from_constant(1u32.into(), 
NumericType::NativeField); + let two = Value::from_constant(2u32.into(), NumericType::NativeField); + let expected = vec![one, two]; + + // No changes are made in case an index is out of bounds + assert_eq!(*v0.elements.borrow(), expected); + assert_eq!(*v1.elements.borrow(), expected); + assert_eq!(*v2.elements.borrow(), expected); +} + +#[test] +fn increment_rc() { + let value = expect_value( + " + brillig(inline) fn main f0 { + b0(): + v0 = make_array [Field 1, Field 2] : [Field; 2] + inc_rc v0 + inc_rc v0 + inc_rc v0 + return v0 + } + ", + ); + let array = value.as_array_or_slice().unwrap(); + assert_eq!(*array.rc.borrow(), 4); +} + +#[test] +fn increment_rc_disabled_in_acir() { + let value = expect_value( + " + acir(inline) fn main f0 { + b0(): + v0 = make_array [Field 1, Field 2] : [Field; 2] + inc_rc v0 + inc_rc v0 + inc_rc v0 + return v0 + } + ", + ); + let array = value.as_array_or_slice().unwrap(); + assert_eq!(*array.rc.borrow(), 1); +} + +#[test] +fn decrement_rc() { + let value = expect_value( + " + brillig(inline) fn main f0 { + b0(): + v0 = make_array [Field 1, Field 2] : [Field; 2] + dec_rc v0 + return v0 + } + ", + ); + let array = value.as_array_or_slice().unwrap(); + assert_eq!(*array.rc.borrow(), 0); +} + +#[test] +fn decrement_rc_disabled_in_acir() { + let value = expect_value( + " + acir(inline) fn main f0 { + b0(): + v0 = make_array [Field 1, Field 2] : [Field; 2] + dec_rc v0 + return v0 + } + ", + ); + let array = value.as_array_or_slice().unwrap(); + assert_eq!(*array.rc.borrow(), 1); +} + +#[test] +fn if_else() { + let values = expect_values( + " + acir(inline) fn main f0 { + b0(): + v0 = if u1 1 then u8 2 else (if u1 0) u8 3 + v1 = if u1 0 then u8 2 else (if u1 1) u8 3 + v2 = if u1 0 then u8 2 else (if u1 0) u8 3 + return v0, v1, v2 + } + ", + ); + assert_eq!(values[0], Value::from_constant(2_u32.into(), NumericType::unsigned(8))); + assert_eq!(values[1], Value::from_constant(3_u32.into(), NumericType::unsigned(8))); + 
assert_eq!(values[2], Value::from_constant(0_u32.into(), NumericType::unsigned(8))); +} + +#[test] +fn make_array() { + let values = expect_values( + r#" + acir(inline) fn main f0 { + b0(): + v0 = make_array [Field 1, Field 2] : [Field; 2] + v1 = make_array [Field 1, Field 2] : [Field] + v2 = make_array b"Hello" + v3 = make_array &b"Hello" + return v0, v1, v2, v3 + } + "#, + ); + let one_two = vec![ + Value::from_constant(1u128.into(), NumericType::NativeField), + Value::from_constant(2u128.into(), NumericType::NativeField), + ]; + assert_eq!(values[0], Value::array(one_two.clone(), vec![Type::field()])); + assert_eq!(values[1], Value::slice(one_two, Arc::new(vec![Type::field()]))); + + let hello = + vecmap(b"Hello", |char| Value::from_constant((*char as u32).into(), NumericType::char())); + assert_eq!(values[2], Value::array(hello.clone(), vec![Type::char()])); + assert_eq!(values[3], Value::slice(hello, Arc::new(vec![Type::char()]))); +} + +#[test] +fn nop() { + executes_with_no_errors( + " + acir(inline) fn main f0 { + b0(): + nop + nop + nop + return + } + ", + ); +} diff --git a/compiler/noirc_evaluator/src/ssa/interpreter/tests/mod.rs b/compiler/noirc_evaluator/src/ssa/interpreter/tests/mod.rs new file mode 100644 index 00000000000..7e5cc229d58 --- /dev/null +++ b/compiler/noirc_evaluator/src/ssa/interpreter/tests/mod.rs @@ -0,0 +1,1504 @@ +#![cfg(test)] + +use std::sync::Arc; + +use acvm::{AcirField, FieldElement}; + +use crate::{ + errors::RuntimeError, + ssa::{ + interpreter::value::NumericValue, + ir::types::{NumericType, Type}, + }, +}; + +use super::{Ssa, Value}; + +mod instructions; + +#[track_caller] +fn executes_with_no_errors(src: &str) { + let ssa = Ssa::from_str(src).unwrap(); + assert!(ssa.interpret(Vec::new()).is_ok()); +} + +#[track_caller] +fn expect_values(src: &str) -> Vec { + expect_values_with_args(src, Vec::new()) +} + +#[track_caller] +fn expect_value(src: &str) -> Value { + expect_value_with_args(src, Vec::new()) +} + +#[track_caller] 
+fn expect_error(src: &str) -> RuntimeError { + let ssa = Ssa::from_str(src).unwrap(); + ssa.interpret(Vec::new()).unwrap_err() +} + +#[track_caller] +fn expect_values_with_args(src: &str, args: Vec) -> Vec { + let ssa = Ssa::from_str(src).unwrap(); + ssa.interpret(args).unwrap() +} + +#[track_caller] +fn expect_value_with_args(src: &str, args: Vec) -> Value { + let mut results = expect_values_with_args(src, args); + assert_eq!(results.len(), 1); + results.pop().unwrap() +} + +#[test] +fn empty_program() { + let src = " + acir(inline) fn main f0 { + b0(): + return + } + "; + executes_with_no_errors(src); +} + +#[test] +fn return_all_numeric_constant_types() { + let src = " + acir(inline) fn main f0 { + b0(): + return Field 0, u1 1, u8 2, u16 3, u32 4, u64 5, u128 6, i8 -1, i16 -2, i32 -3, i64 -4 + } + "; + let returns = expect_values(src); + assert_eq!(returns.len(), 11); + + assert_eq!(returns[0], Value::Numeric(NumericValue::Field(FieldElement::zero()))); + assert_eq!(returns[1], Value::Numeric(NumericValue::U1(true))); + assert_eq!(returns[2], Value::Numeric(NumericValue::U8(2))); + assert_eq!(returns[3], Value::Numeric(NumericValue::U16(3))); + assert_eq!(returns[4], Value::Numeric(NumericValue::U32(4))); + assert_eq!(returns[5], Value::Numeric(NumericValue::U64(5))); + assert_eq!(returns[6], Value::Numeric(NumericValue::U128(6))); + assert_eq!(returns[7], Value::Numeric(NumericValue::I8(-1))); + assert_eq!(returns[8], Value::Numeric(NumericValue::I16(-2))); + assert_eq!(returns[9], Value::Numeric(NumericValue::I32(-3))); + assert_eq!(returns[10], Value::Numeric(NumericValue::I64(-4))); +} + +#[test] +fn call_function() { + let src = " + acir(inline) fn main f0 { + b0(): + v1 = call f1(u32 3) -> u32 + return v1 + } + + acir(inline) fn double f1 { + b0(v1: u32): + v2 = mul v1, u32 2 + return v2 + } + "; + let actual = expect_value(src); + assert_eq!(Value::Numeric(NumericValue::U32(6)), actual); +} + +#[test] +fn run_flattened_function() { + let src = " + 
acir(inline) pure fn main f0 { + b0(v0: u1, v1: [[u1; 2]; 3]): + v2 = not v0 + enable_side_effects v0 + v3 = not v0 + enable_side_effects v0 + v5 = array_get v1, index u32 0 -> [u1; 2] + v6 = not v0 + v7 = unchecked_mul v0, v6 + enable_side_effects v7 + v8 = array_get v1, index u32 1 -> [u1; 2] + enable_side_effects v0 + v9 = if v0 then v5 else (if v7) v8 + enable_side_effects v6 + v10 = array_get v1, index u32 2 -> [u1; 2] + enable_side_effects u1 1 + v12 = if v0 then v5 else (if v6) v10 + return v12 + }"; + + let v1_elements = vec![ + Value::array(vec![Value::bool(false), Value::bool(false)], vec![Type::unsigned(1)]), + Value::array(vec![Value::bool(true), Value::bool(true)], vec![Type::unsigned(1)]), + Value::array(vec![Value::bool(false), Value::bool(true)], vec![Type::unsigned(1)]), + ]; + + let v1_element_types = vec![Type::Array(Arc::new(vec![Type::unsigned(1)]), 2)]; + let v1 = Value::array(v1_elements, v1_element_types); + + let result = expect_value_with_args(src, vec![Value::bool(true), v1.clone()]); + assert_eq!(result.to_string(), "rc1 [u1 false, u1 false]"); + + let result = expect_value_with_args(src, vec![Value::bool(false), v1]); + assert_eq!(result.to_string(), "rc1 [u1 false, u1 true]"); +} + +#[test] +fn loads_passed_to_a_call() { + let src = " + acir(inline) fn main f0 { + b0(): + v1 = allocate -> &mut Field + store Field 0 at v1 + v3 = allocate -> &mut &mut Field + store v1 at v3 + jmp b1(Field 0) + b1(v0: Field): + v4 = eq v0, Field 0 + jmpif v4 then: b3, else: b2 + b2(): + v9 = load v1 -> Field + v10 = eq v9, Field 2 + constrain v9 == Field 2 + v11 = load v3 -> &mut Field + call f1(v3) + v13 = load v3 -> &mut Field + v14 = load v13 -> Field + v15 = eq v14, Field 2 + constrain v14 == Field 2 + return v14 + b3(): + v5 = load v3 -> &mut Field + store Field 2 at v5 + v8 = add v0, Field 1 + jmp b1(v8) + } + acir(inline) fn foo f1 { + b0(v0: &mut Field): + return + } + "; + + let value = expect_value(src); + assert_eq!(value, 
Value::from_constant(2_u128.into(), NumericType::NativeField)); +} + +#[test] +fn keep_repeat_loads_with_alias_store() { + let src = " + acir(inline) fn main f0 { + b0(v0: u1): + jmpif v0 then: b2, else: b1 + b1(): + v6 = allocate -> &mut Field + store Field 1 at v6 + jmp b3(v6, v6, v6) + b2(): + v4 = allocate -> &mut Field + store Field 0 at v4 + jmp b3(v4, v4, v4) + b3(v1: &mut Field, v2: &mut Field, v3: &mut Field): + v8 = load v1 -> Field + store Field 2 at v2 + v10 = load v1 -> Field + store Field 1 at v3 + v11 = load v1 -> Field + store Field 3 at v3 + v13 = load v1 -> Field + constrain v8 == Field 0 + constrain v10 == Field 2 + constrain v11 == Field 1 + constrain v13 == Field 3 + return v8, v11 + } + "; + + let values = expect_values_with_args(src, vec![Value::bool(true)]); + assert_eq!(values.len(), 2); + + assert_eq!(values[0], Value::from_constant(FieldElement::zero(), NumericType::NativeField)); + assert_eq!(values[1], Value::from_constant(FieldElement::one(), NumericType::NativeField)); +} + +#[test] +fn accepts_globals() { + let src = " + g0 = Field 1 + g1 = Field 2 + g2 = make_array [Field 1, Field 2] : [Field; 2] + + brillig(inline) predicate_pure fn main f0 { + b0(): + v0 = make_array [Field 1, Field 2] : [Field; 2] + constrain v0 == g2 + return + } + "; + executes_with_no_errors(src); +} + +#[test] +fn accepts_print() { + // fn main(x: Field) { + // print(x); + // println(x); + // } + let src = r#" + brillig(inline) impure fn main f0 { + b0(v0: Field): + v12 = make_array b"{\"kind\":\"field\"}" + call print(u1 0, v0, v12, u1 0) + inc_rc v12 + call print(u1 1, v0, v12, u1 0) + return + } + "#; + let values = expect_values_with_args( + src, + vec![Value::from_constant(5u128.into(), NumericType::NativeField)], + ); + assert_eq!(values.len(), 0); +} + +#[test] +fn calls_with_higher_order_function() { + let src = r#" + acir(inline) fn main f0 { + b0(v0: Field): + v4 = call f2(f1) -> function + v5 = call f3(v4) -> function + v6 = call v5(v0) -> Field + 
return v6 + } + + acir(inline) fn square f1 { + b0(v0: Field): + v1 = mul v0, v0 + return v1 + } + + acir(inline) fn id1 f2 { + b0(v0: function): + return v0 + } + + acir(inline) fn id2 f3 { + b0(v0: function): + return v0 + } + "#; + + // Program simplifies to `mul v0, v0` if inlined + let input = Value::from_constant(4u128.into(), NumericType::NativeField); + let output = Value::from_constant(16u128.into(), NumericType::NativeField); + let result = expect_value_with_args(src, vec![input]); + assert_eq!(result, output); +} + +#[test] +fn is_odd_is_even_recursive_calls() { + let src = r#" + brillig(inline) fn main f0 { + b0(v0: u32, v1: u1): + v3 = call f2(v0) -> u1 + v4 = eq v3, v1 + constrain v3 == v1 + return + } + brillig(inline) fn is_even f1 { + b0(v0: u32): + v3 = eq v0, u32 0 + jmpif v3 then: b2, else: b1 + b1(): + v5 = call f3(v0) -> u32 + v7 = call f2(v5) -> u1 + jmp b3(v7) + b2(): + jmp b3(u1 1) + b3(v1: u1): + return v1 + } + brillig(inline) fn is_odd f2 { + b0(v0: u32): + v3 = eq v0, u32 0 + jmpif v3 then: b2, else: b1 + b1(): + v5 = call f3(v0) -> u32 + v7 = call f1(v5) -> u1 + jmp b3(v7) + b2(): + jmp b3(u1 0) + b3(v1: u1): + return v1 + } + brillig(inline) fn decrement f3 { + b0(v0: u32): + v2 = sub v0, u32 1 + return v2 + } + "#; + let values = expect_values_with_args( + src, + vec![Value::from_constant(7_u128.into(), NumericType::unsigned(32)), Value::bool(true)], + ); + assert!(values.is_empty()); +} + +#[test] +fn store_with_aliases() { + let src = r#" + acir(inline) fn main f0 { + b0(): + v0 = allocate -> &mut Field + store Field 0 at v0 + v2 = allocate -> &mut &mut Field + store v0 at v2 + jmp b1(Field 0) + b1(v3: Field): + v4 = eq v3, Field 0 + jmpif v4 then: b2, else: b3 + b2(): + v5 = load v2 -> &mut Field + store Field 2 at v5 + v8 = add v3, Field 1 + jmp b1(v8) + b3(): + v9 = load v0 -> Field + v10 = eq v9, Field 2 + constrain v9 == Field 2 + v11 = load v2 -> &mut Field + v12 = load v11 -> Field + constrain v12 == Field 2 + return + } + 
"#; + executes_with_no_errors(src); +} + +#[test] +fn literally_just_the_slices_integration_test() { + let src = r#" +acir(inline) fn main f0 { + b0(v0: Field, v1: Field): + v4 = make_array [Field 0, Field 0] : [Field] + v5 = allocate -> &mut u32 + store u32 2 at v5 + v7 = allocate -> &mut [Field] + store v4 at v7 + v8 = load v5 -> u32 + v9 = load v7 -> [Field] + v11 = lt u32 0, v8 + constrain v11 == u1 1, "Index out of bounds" + v13 = array_get v9, index u32 0 -> Field + v14 = eq v13, Field 0 + constrain v13 == Field 0 + v15 = load v5 -> u32 + v16 = load v7 -> [Field] + v17 = lt u32 0, v15 + constrain v17 == u1 1, "Index out of bounds" + v18 = array_get v16, index u32 0 -> Field + v20 = eq v18, Field 1 + v21 = not v20 + constrain v20 == u1 0 + v23 = load v5 -> u32 + v24 = load v7 -> [Field] + v25 = lt u32 0, v23 + constrain v25 == u1 1, "Index out of bounds" + v26 = array_set v24, index u32 0, value v0 + store v23 at v5 + store v26 at v7 + v27 = load v5 -> u32 + v28 = load v7 -> [Field] + v29 = lt u32 0, v27 + constrain v29 == u1 1, "Index out of bounds" + v30 = array_get v28, index u32 0 -> Field + v31 = eq v30, v0 + constrain v30 == v0 + v32 = load v5 -> u32 + v33 = load v7 -> [Field] + v35, v36 = call slice_push_back(v32, v33, v1) -> (u32, [Field]) + v37 = lt u32 2, v35 + constrain v37 == u1 1, "Index out of bounds" + v38 = array_get v36, index u32 2 -> Field + v40 = eq v38, Field 10 + constrain v38 == Field 10 + v41 = lt u32 2, v35 + constrain v41 == u1 1, "Index out of bounds" + v42 = array_get v36, index u32 2 -> Field + v44 = eq v42, Field 8 + v45 = not v44 + constrain v44 == u1 0 + v47 = eq v35, u32 3 + constrain v35 == u32 3 + v48 = make_array [] : [u32] + v49 = allocate -> &mut u32 + store u32 0 at v49 + v50 = allocate -> &mut [u32] + store v48 at v50 + jmp b1(u32 0) + b1(v2: u32): + v52 = lt v2, u32 5 + jmpif v52 then: b2, else: b3 + b2(): + v167 = load v49 -> u32 + v168 = load v50 -> [u32] + v169, v170 = call slice_push_back(v167, v168, v2) -> (u32, 
[u32]) + store v169 at v49 + store v170 at v50 + v171 = unchecked_add v2, u32 1 + jmp b1(v171) + b3(): + v53 = load v49 -> u32 + v54 = load v50 -> [u32] + v55 = eq v53, u32 5 + constrain v53 == u32 5 + v56 = load v49 -> u32 + v57 = load v50 -> [u32] + v60, v61 = call slice_push_front(v56, v57, u32 20) -> (u32, [u32]) + store v60 at v49 + store v61 at v50 + v62 = load v49 -> u32 + v63 = load v50 -> [u32] + v64 = lt u32 0, v62 + constrain v64 == u1 1, "Index out of bounds" + v65 = array_get v63, index u32 0 -> u32 + v66 = eq v65, u32 20 + constrain v65 == u32 20 + v67 = load v49 -> u32 + v68 = load v50 -> [u32] + v70 = eq v67, u32 6 + constrain v67 == u32 6 + v71 = load v49 -> u32 + v72 = load v50 -> [u32] + v74, v75, v76 = call slice_pop_back(v71, v72) -> (u32, [u32], u32) + v78 = eq v76, u32 4 + constrain v76 == u32 4 + v79 = eq v74, u32 5 + constrain v74 == u32 5 + v81, v82, v83 = call slice_pop_front(v74, v75) -> (u32, u32, [u32]) + v84 = eq v81, u32 20 + constrain v81 == u32 20 + v85 = eq v82, u32 4 + constrain v82 == u32 4 + v87 = add v82, u32 1 + v88 = lt u32 2, v87 + constrain v88 == u1 1, "Index out of bounds" + v91, v92 = call slice_insert(v82, v83, u32 2, u32 100) -> (u32, [u32]) + store v91 at v49 + store v92 at v50 + v93 = load v49 -> u32 + v94 = load v50 -> [u32] + v95 = lt u32 2, v93 + constrain v95 == u1 1, "Index out of bounds" + v96 = array_get v94, index u32 2 -> u32 + v97 = eq v96, u32 100 + constrain v96 == u32 100 + v98 = load v49 -> u32 + v99 = load v50 -> [u32] + v100 = lt u32 4, v98 + constrain v100 == u1 1, "Index out of bounds" + v101 = array_get v99, index u32 4 -> u32 + v102 = eq v101, u32 3 + constrain v101 == u32 3 + v103 = load v49 -> u32 + v104 = load v50 -> [u32] + v105 = eq v103, u32 5 + constrain v103 == u32 5 + v106 = load v49 -> u32 + v107 = load v50 -> [u32] + v108 = lt u32 3, v106 + constrain v108 == u1 1, "Index out of bounds" + v110, v111, v112 = call slice_remove(v106, v107, u32 3) -> (u32, [u32], u32) + v113 = eq v112, u32 
2 + constrain v112 == u32 2 + v114 = lt u32 3, v110 + constrain v114 == u1 1, "Index out of bounds" + v115 = array_get v111, index u32 3 -> u32 + v116 = eq v115, u32 3 + constrain v115 == u32 3 + v117 = eq v110, u32 4 + constrain v110 == u32 4 + v119 = make_array [Field 1, Field 2] : [Field] + v123 = make_array [Field 3, Field 4, Field 5] : [Field] + v125, v126 = call f1(u32 2, v119, u32 3, v123) -> (u32, [Field]) + v127 = eq v125, u32 5 + constrain v125 == u32 5 + v128 = lt u32 0, v125 + constrain v128 == u1 1, "Index out of bounds" + v129 = array_get v126, index u32 0 -> Field + v130 = eq v129, Field 1 + constrain v129 == Field 1 + v131 = lt u32 4, v125 + constrain v131 == u1 1, "Index out of bounds" + v132 = array_get v126, index u32 4 -> Field + v133 = eq v132, Field 5 + constrain v132 == Field 5 + v134 = make_array [Field 1, Field 2] : [Field] + v137, v138 = call f2(u32 2, v134, f3) -> (u32, [Field]) + v139 = make_array [Field 2, Field 3] : [Field] + v141 = call f4(v137, v138, u32 2, v139) -> u1 + constrain v141 == u1 1 + v142 = make_array [Field 1, Field 2, Field 3] : [Field] + v145 = call f5(u32 3, v142, Field 0, f6) -> Field + v147 = eq v145, Field 6 + constrain v145 == Field 6 + v148 = make_array [Field 1, Field 2, Field 3] : [Field] + v151 = call f7(u32 3, v148, f8) -> Field + v152 = eq v151, Field 6 + constrain v151 == Field 6 + v153 = make_array [u32 2, u32 4, u32 6] : [u32] + v156 = call f9(u32 3, v153, f10) -> u1 + constrain v156 == u1 1 + v157 = make_array [u32 2, u32 4, u32 6] : [u32] + v160 = call f11(u32 3, v157, f12) -> u1 + constrain v160 == u1 1 + call f13() + call f14(v0, v1) + call f15() + call f16(v0) + call f17(v0, v1) + call f18() + return +} +acir(inline) fn append f1 { + b0(v0: u32, v1: [Field], v2: u32, v3: [Field]): + v5 = allocate -> &mut u32 + store v0 at v5 + v6 = allocate -> &mut [Field] + store v1 at v6 + jmp b1(u32 0) + b1(v4: u32): + v8 = lt v4, v2 + jmpif v8 then: b2, else: b3 + b2(): + v11 = lt v4, v2 + constrain v11 == u1 1, 
"Index out of bounds" + v13 = array_get v3, index v4 -> Field + v14 = load v5 -> u32 + v15 = load v6 -> [Field] + v17, v18 = call slice_push_back(v14, v15, v13) -> (u32, [Field]) + store v17 at v5 + store v18 at v6 + v20 = unchecked_add v4, u32 1 + jmp b1(v20) + b3(): + v9 = load v5 -> u32 + v10 = load v6 -> [Field] + return v9, v10 +} +acir(inline) fn map f2 { + b0(v0: u32, v1: [Field], v2: function): + v4 = make_array [] : [Field] + v5 = allocate -> &mut u32 + store u32 0 at v5 + v7 = allocate -> &mut [Field] + store v4 at v7 + jmp b1(u32 0) + b1(v3: u32): + v8 = lt v3, v0 + jmpif v8 then: b2, else: b3 + b2(): + v11 = lt v3, v0 + constrain v11 == u1 1, "Index out of bounds" + v13 = array_get v1, index v3 -> Field + v14 = load v5 -> u32 + v15 = load v7 -> [Field] + v16 = call v2(v13) -> Field + v18, v19 = call slice_push_back(v14, v15, v16) -> (u32, [Field]) + store v18 at v5 + store v19 at v7 + v21 = unchecked_add v3, u32 1 + jmp b1(v21) + b3(): + v9 = load v5 -> u32 + v10 = load v7 -> [Field] + return v9, v10 +} +acir(inline) fn lambda f3 { + b0(v0: Field): + v2 = add v0, Field 1 + return v2 +} +acir(inline) fn eq f4 { + b0(v0: u32, v1: [Field], v2: u32, v3: [Field]): + v5 = eq v0, v2 + v6 = allocate -> &mut u1 + store v5 at v6 + jmp b1(u32 0) + b1(v4: u32): + v8 = lt v4, v0 + jmpif v8 then: b2, else: b3 + b2(): + v10 = load v6 -> u1 + v11 = lt v4, v0 + constrain v11 == u1 1, "Index out of bounds" + v13 = array_get v1, index v4 -> Field + v14 = lt v4, v2 + constrain v14 == u1 1, "Index out of bounds" + v15 = array_get v3, index v4 -> Field + v17 = call f31(v13, v15) -> u1 + v18 = unchecked_mul v10, v17 + store v18 at v6 + v20 = unchecked_add v4, u32 1 + jmp b1(v20) + b3(): + v9 = load v6 -> u1 + return v9 +} +acir(inline) fn fold f5 { + b0(v0: u32, v1: [Field], v2: Field, v3: function): + v5 = allocate -> &mut Field + store v2 at v5 + jmp b1(u32 0) + b1(v4: u32): + v7 = lt v4, v0 + jmpif v7 then: b2, else: b3 + b2(): + v9 = lt v4, v0 + constrain v9 == u1 1, 
"Index out of bounds" + v11 = array_get v1, index v4 -> Field + v12 = load v5 -> Field + v13 = call v3(v12, v11) -> Field + store v13 at v5 + v15 = unchecked_add v4, u32 1 + jmp b1(v15) + b3(): + v8 = load v5 -> Field + return v8 +} +acir(inline) fn lambda f6 { + b0(v0: Field, v1: Field): + v2 = add v0, v1 + return v2 +} +acir(inline) fn reduce f7 { + b0(v0: u32, v1: [Field], v2: function): + v5 = lt u32 0, v0 + constrain v5 == u1 1, "Index out of bounds" + v7 = array_get v1, index u32 0 -> Field + v8 = allocate -> &mut Field + store v7 at v8 + jmp b1(u32 1) + b1(v3: u32): + v10 = lt v3, v0 + jmpif v10 then: b2, else: b3 + b2(): + v12 = load v8 -> Field + v13 = lt v3, v0 + constrain v13 == u1 1, "Index out of bounds" + v14 = array_get v1, index v3 -> Field + v15 = call v2(v12, v14) -> Field + store v15 at v8 + v16 = unchecked_add v3, u32 1 + jmp b1(v16) + b3(): + v11 = load v8 -> Field + return v11 +} +acir(inline) fn lambda f8 { + b0(v0: Field, v1: Field): + v2 = add v0, v1 + return v2 +} +acir(inline) fn all f9 { + b0(v0: u32, v1: [u32], v2: function): + v4 = allocate -> &mut u1 + store u1 1 at v4 + jmp b1(u32 0) + b1(v3: u32): + v7 = lt v3, v0 + jmpif v7 then: b2, else: b3 + b2(): + v9 = lt v3, v0 + constrain v9 == u1 1, "Index out of bounds" + v10 = array_get v1, index v3 -> u32 + v11 = load v4 -> u1 + v12 = call v2(v10) -> u1 + v13 = unchecked_mul v11, v12 + store v13 at v4 + v15 = unchecked_add v3, u32 1 + jmp b1(v15) + b3(): + v8 = load v4 -> u1 + return v8 +} +acir(inline) fn lambda f10 { + b0(v0: u32): + v2 = lt u32 0, v0 + return v2 +} +acir(inline) fn any f11 { + b0(v0: u32, v1: [u32], v2: function): + v4 = allocate -> &mut u1 + store u1 0 at v4 + jmp b1(u32 0) + b1(v3: u32): + v7 = lt v3, v0 + jmpif v7 then: b2, else: b3 + b2(): + v9 = lt v3, v0 + constrain v9 == u1 1, "Index out of bounds" + v11 = array_get v1, index v3 -> u32 + v12 = load v4 -> u1 + v13 = call v2(v11) -> u1 + v14 = or v12, v13 + store v14 at v4 + v16 = unchecked_add v3, u32 1 + jmp 
b1(v16) + b3(): + v8 = load v4 -> u1 + return v8 +} +acir(inline) fn lambda f12 { + b0(v0: u32): + v2 = lt u32 5, v0 + return v2 +} +acir(inline) fn regression_2083 f13 { + b0(): + v2 = make_array [Field 1, Field 2] : [(Field, Field)] + v5 = make_array [Field 1, Field 2, Field 3, Field 4] : [(Field, Field)] + v8 = make_array [Field 1, Field 2, Field 3, Field 4, Field 5, Field 6] : [(Field, Field)] + v11 = make_array [Field 10, Field 11, Field 1, Field 2, Field 3, Field 4, Field 5, Field 6] : [(Field, Field)] + v14 = make_array [Field 12, Field 13, Field 10, Field 11, Field 1, Field 2, Field 3, Field 4, Field 5, Field 6] : [(Field, Field)] + v17 = make_array [Field 12, Field 13, Field 55, Field 56, Field 10, Field 11, Field 1, Field 2, Field 3, Field 4, Field 5, Field 6] : [(Field, Field)] + v18 = make_array [Field 12, Field 13, Field 55, Field 56, Field 1, Field 2, Field 3, Field 4, Field 5, Field 6] : [(Field, Field)] + v19 = make_array [Field 55, Field 56, Field 1, Field 2, Field 3, Field 4, Field 5, Field 6] : [(Field, Field)] + return +} +acir(inline) fn regression_merge_slices f14 { + b0(v0: Field, v1: Field): + call f22(v0, v1) + call f23(v0) + return +} +acir(inline) fn regression_2370 f15 { + b0(): + v0 = make_array [] : [Field] + v1 = allocate -> &mut u32 + store u32 0 at v1 + v3 = allocate -> &mut [Field] + store v0 at v3 + v7 = make_array [Field 1, Field 2, Field 3] : [Field] + store u32 3 at v1 + store v7 at v3 + return +} +acir(inline) fn regression_4418 f16 { + b0(v0: Field): + v2 = call f20(v0) -> [u8; 32] + v3 = allocate -> &mut [u8; 32] + store v2 at v3 + v5 = eq v0, Field 0 + v6 = not v5 + jmpif v6 then: b1, else: b2 + b1(): + v7 = load v3 -> [u8; 32] + v10 = array_set v7, index u32 0, value u8 10 + store v10 at v3 + jmp b2() + b2(): + return +} +acir(inline) fn regression_slice_call_result f17 { + b0(v0: Field, v1: Field): + v3, v4 = call f19(v0, v1) -> (u32, [Field]) + v5 = allocate -> &mut u32 + store v3 at v5 + v6 = allocate -> &mut [Field] + 
store v4 at v6 + v8 = eq v0, Field 0 + v9 = not v8 + jmpif v9 then: b1, else: b2 + b1(): + v16 = load v5 -> u32 + v17 = load v6 -> [Field] + v18, v19 = call slice_push_back(v16, v17, Field 5) -> (u32, [Field]) + store v18 at v5 + store v19 at v6 + v20 = load v5 -> u32 + v21 = load v6 -> [Field] + v23, v24 = call slice_push_back(v20, v21, Field 10) -> (u32, [Field]) + store v23 at v5 + store v24 at v6 + jmp b3() + b2(): + v10 = load v5 -> u32 + v11 = load v6 -> [Field] + v14, v15 = call slice_push_back(v10, v11, Field 5) -> (u32, [Field]) + store v14 at v5 + store v15 at v6 + jmp b3() + b3(): + v25 = load v5 -> u32 + v26 = load v6 -> [Field] + v28 = eq v25, u32 5 + constrain v25 == u32 5 + v29 = load v5 -> u32 + v30 = load v6 -> [Field] + v32 = lt u32 0, v29 + constrain v32 == u1 1, "Index out of bounds" + v34 = array_get v30, index u32 0 -> Field + v35 = eq v34, Field 0 + constrain v34 == Field 0 + v36 = load v5 -> u32 + v37 = load v6 -> [Field] + v39 = lt u32 1, v36 + constrain v39 == u1 1, "Index out of bounds" + v40 = array_get v37, index u32 1 -> Field + v41 = eq v40, Field 0 + constrain v40 == Field 0 + v42 = load v5 -> u32 + v43 = load v6 -> [Field] + v45 = lt u32 2, v42 + constrain v45 == u1 1, "Index out of bounds" + v46 = array_get v43, index u32 2 -> Field + v47 = eq v46, Field 10 + constrain v46 == Field 10 + v48 = load v5 -> u32 + v49 = load v6 -> [Field] + v51 = lt u32 3, v48 + constrain v51 == u1 1, "Index out of bounds" + v52 = array_get v49, index u32 3 -> Field + v53 = eq v52, Field 5 + constrain v52 == Field 5 + v54 = load v5 -> u32 + v55 = load v6 -> [Field] + v57 = lt u32 4, v54 + constrain v57 == u1 1, "Index out of bounds" + v58 = array_get v55, index u32 4 -> Field + v59 = eq v58, Field 10 + constrain v58 == Field 10 + return +} +acir(inline) fn regression_4506 f18 { + b0(): + v3 = make_array [Field 1, Field 2, Field 3] : [Field] + v6 = call f4(u32 3, v3, u32 3, v3) -> u1 + constrain v6 == u1 1 + return +} +acir(inline) fn merge_slices_return 
f19 { + b0(v0: Field, v1: Field): + v7 = make_array [Field 0, Field 0] : [Field] + v8 = eq v0, v1 + v9 = not v8 + jmpif v9 then: b1, else: b2 + b1(): + v12 = eq v0, Field 20 + v13 = not v12 + jmpif v13 then: b3, else: b4 + b2(): + jmp b6(u32 2, v7) + b3(): + v14 = make_array [Field 0, Field 0, v1] : [Field] + v15 = make_array [Field 0, Field 0, v1] : [Field] + v16 = make_array [Field 0, Field 0, v1] : [Field] + jmp b5(u32 3, v16) + b4(): + jmp b5(u32 2, v7) + b5(v2: u32, v3: [Field]): + jmp b6(v2, v3) + b6(v4: u32, v5: [Field]): + return v4, v5 +} +acir(inline) fn to_be_bytes f20 { + b0(v0: Field): + v31 = make_array [u8 1, u8 0, u8 0, u8 240, u8 147, u8 245, u8 225, u8 67, u8 145, u8 112, u8 185, u8 121, u8 72, u8 232, u8 51, u8 40, u8 93, u8 88, u8 129, u8 129, u8 182, u8 69, u8 80, u8 184, u8 41, u8 160, u8 49, u8 225, u8 114, u8 78, u8 100, u8 48] : [u8] + v47 = make_array b"N must be less than or equal to modulus_le_bytes().len()" + v50 = call f21(v0, u32 256) -> [u8; 32] + v51 = make_array [u8 48, u8 100, u8 78, u8 114, u8 225, u8 49, u8 160, u8 41, u8 184, u8 80, u8 69, u8 182, u8 129, u8 129, u8 88, u8 93, u8 40, u8 51, u8 232, u8 72, u8 121, u8 185, u8 112, u8 145, u8 67, u8 225, u8 245, u8 147, u8 240, u8 0, u8 0, u8 1] : [u8] + v52 = allocate -> &mut u1 + store u1 0 at v52 + jmp b1(u32 0) + b1(v1: u32): + v56 = lt v1, u32 32 + jmpif v56 then: b2, else: b3 + b2(): + v59 = load v52 -> u1 + v60 = not v59 + jmpif v60 then: b4, else: b5 + b3(): + v57 = load v52 -> u1 + constrain v57 == u1 1 + return v50 + b4(): + v61 = lt v1, u32 32 + constrain v61 == u1 1, "Index out of bounds" + v62 = array_get v50, index v1 -> u8 + v63 = lt v1, u32 32 + constrain v63 == u1 1, "Index out of bounds" + v64 = array_get v51, index v1 -> u8 + v65 = eq v62, v64 + v66 = not v65 + jmpif v66 then: b6, else: b7 + b5(): + v73 = unchecked_add v1, u32 1 + jmp b1(v73) + b6(): + v67 = lt v1, u32 32 + constrain v67 == u1 1, "Index out of bounds" + v68 = array_get v50, index v1 -> u8 + v69 
= lt v1, u32 32 + constrain v69 == u1 1, "Index out of bounds" + v70 = array_get v51, index v1 -> u8 + v71 = lt v68, v70 + constrain v71 == u1 1 + store u1 1 at v52 + jmp b7() + b7(): + jmp b5() +} +acir(inline) fn to_be_radix f21 { + b0(v0: Field, v1: u32): + call assert_constant(v1) + v4 = call to_be_radix(v0, v1) -> [u8; 32] + return v4 +} +acir(inline) fn merge_slices_if f22 { + b0(v0: Field, v1: Field): + v3, v4 = call f19(v0, v1) -> (u32, [Field]) + v6 = eq v3, u32 3 + constrain v3 == u32 3 + v8 = lt u32 2, v3 + constrain v8 == u1 1, "Index out of bounds" + v10 = array_get v4, index u32 2 -> Field + v12 = eq v10, Field 10 + constrain v10 == Field 10 + v14, v15 = call f24(v0, v1) -> (u32, [Field]) + v17 = eq v14, u32 4 + constrain v14 == u32 4 + v18 = lt u32 3, v14 + constrain v18 == u1 1, "Index out of bounds" + v19 = array_get v15, index u32 3 -> Field + v21 = eq v19, Field 5 + constrain v19 == Field 5 + v23, v24 = call f25(v0, v1) -> (u32, [Field]) + v26 = eq v23, u32 7 + constrain v23 == u32 7 + v28 = lt u32 6, v23 + constrain v28 == u1 1, "Index out of bounds" + v29 = array_get v24, index u32 6 -> Field + v31 = eq v29, Field 4 + constrain v29 == Field 4 + v33, v34 = call f26(v0, v1) -> (u32, [Field]) + v35 = eq v33, u32 6 + constrain v33 == u32 6 + v36 = lt u32 3, v33 + constrain v36 == u1 1, "Index out of bounds" + v37 = array_get v34, index u32 3 -> Field + v38 = eq v37, Field 5 + constrain v37 == Field 5 + v39 = lt u32 4, v33 + constrain v39 == u1 1, "Index out of bounds" + v40 = array_get v34, index u32 4 -> Field + v42 = eq v40, Field 15 + constrain v40 == Field 15 + v44 = lt u32 5, v33 + constrain v44 == u1 1, "Index out of bounds" + v45 = array_get v34, index u32 5 -> Field + v47 = eq v45, Field 30 + constrain v45 == Field 30 + v49, v50 = call f27(v0, v1) -> (u32, [Field]) + v52 = eq v49, u32 8 + constrain v49 == u32 8 + v53 = lt u32 3, v49 + constrain v53 == u1 1, "Index out of bounds" + v54 = array_get v50, index u32 3 -> Field + v55 = eq v54, 
Field 5 + constrain v54 == Field 5 + v56 = lt u32 4, v49 + constrain v56 == u1 1, "Index out of bounds" + v57 = array_get v50, index u32 4 -> Field + v58 = eq v57, Field 30 + constrain v57 == Field 30 + v59 = lt u32 5, v49 + constrain v59 == u1 1, "Index out of bounds" + v60 = array_get v50, index u32 5 -> Field + v61 = eq v60, Field 15 + constrain v60 == Field 15 + v62 = lt u32 6, v49 + constrain v62 == u1 1, "Index out of bounds" + v63 = array_get v50, index u32 6 -> Field + v65 = eq v63, Field 50 + constrain v63 == Field 50 + v66 = lt u32 7, v49 + constrain v66 == u1 1, "Index out of bounds" + v67 = array_get v50, index u32 7 -> Field + v69 = eq v67, Field 60 + constrain v67 == Field 60 + call f28(v0, v1) + v72, v73 = call f29(v0, v1) -> (u32, [Field]) + v74 = eq v72, u32 7 + constrain v72 == u32 7 + v76 = lt u32 1, v72 + constrain v76 == u1 1, "Index out of bounds" + v77 = array_get v73, index u32 1 -> Field + v78 = eq v77, Field 50 + constrain v77 == Field 50 + v79 = lt u32 2, v72 + constrain v79 == u1 1, "Index out of bounds" + v80 = array_get v73, index u32 2 -> Field + v82 = eq v80, Field 0 + constrain v80 == Field 0 + v83 = lt u32 5, v72 + constrain v83 == u1 1, "Index out of bounds" + v84 = array_get v73, index u32 5 -> Field + v85 = eq v84, Field 30 + constrain v84 == Field 30 + v86 = lt u32 6, v72 + constrain v86 == u1 1, "Index out of bounds" + v87 = array_get v73, index u32 6 -> Field + v89 = eq v87, Field 100 + constrain v87 == Field 100 + v91, v92 = call f30(v0, v1) -> (u32, [Field]) + v93 = eq v91, u32 5 + constrain v91 == u32 5 + return +} +acir(inline) fn merge_slices_else f23 { + b0(v0: Field): + v3, v4 = call f19(v0, Field 5) -> (u32, [Field]) + v6 = lt u32 0, v3 + constrain v6 == u1 1, "Index out of bounds" + v8 = array_get v4, index u32 0 -> Field + v10 = eq v8, Field 0 + constrain v8 == Field 0 + v12 = lt u32 1, v3 + constrain v12 == u1 1, "Index out of bounds" + v13 = array_get v4, index u32 1 -> Field + v14 = eq v13, Field 0 + constrain 
v13 == Field 0 + v16 = eq v3, u32 2 + constrain v3 == u32 2 + v18, v19 = call f24(v0, Field 5) -> (u32, [Field]) + v20 = lt u32 2, v18 + constrain v20 == u1 1, "Index out of bounds" + v21 = array_get v19, index u32 2 -> Field + v22 = eq v21, Field 5 + constrain v21 == Field 5 + v24 = eq v18, u32 3 + constrain v18 == u32 3 + v26, v27 = call f25(v0, Field 5) -> (u32, [Field]) + v28 = lt u32 2, v26 + constrain v28 == u1 1, "Index out of bounds" + v29 = array_get v27, index u32 2 -> Field + v30 = eq v29, Field 5 + constrain v29 == Field 5 + v31 = eq v26, u32 3 + constrain v26 == u32 3 + return +} +acir(inline) fn merge_slices_mutate f24 { + b0(v0: Field, v1: Field): + v3 = make_array [Field 0, Field 0] : [Field] + v4 = allocate -> &mut u32 + store u32 2 at v4 + v6 = allocate -> &mut [Field] + store v3 at v6 + v7 = eq v0, v1 + v8 = not v7 + jmpif v8 then: b1, else: b2 + b1(): + v14 = load v4 -> u32 + v15 = load v6 -> [Field] + v16, v17 = call slice_push_back(v14, v15, v1) -> (u32, [Field]) + store v16 at v4 + store v17 at v6 + v18 = load v4 -> u32 + v19 = load v6 -> [Field] + v20, v21 = call slice_push_back(v18, v19, v0) -> (u32, [Field]) + store v20 at v4 + store v21 at v6 + jmp b3() + b2(): + v9 = load v4 -> u32 + v10 = load v6 -> [Field] + v12, v13 = call slice_push_back(v9, v10, v0) -> (u32, [Field]) + store v12 at v4 + store v13 at v6 + jmp b3() + b3(): + v22 = load v4 -> u32 + v23 = load v6 -> [Field] + return v22, v23 +} +acir(inline) fn merge_slices_mutate_in_loop f25 { + b0(v0: Field, v1: Field): + v4 = make_array [Field 0, Field 0] : [Field] + v5 = allocate -> &mut u32 + store u32 2 at v5 + v7 = allocate -> &mut [Field] + store v4 at v7 + v8 = eq v0, v1 + v9 = not v8 + jmpif v9 then: b1, else: b2 + b1(): + jmp b3(u32 0) + b2(): + v10 = load v5 -> u32 + v11 = load v7 -> [Field] + v13, v14 = call slice_push_back(v10, v11, v0) -> (u32, [Field]) + store v13 at v5 + store v14 at v7 + jmp b6() + b3(v2: u32): + v17 = lt v2, u32 5 + jmpif v17 then: b4, else: b5 + 
b4(): + v20 = load v5 -> u32 + v21 = load v7 -> [Field] + v22 = cast v2 as Field + v23, v24 = call slice_push_back(v20, v21, v22) -> (u32, [Field]) + store v23 at v5 + store v24 at v7 + v26 = unchecked_add v2, u32 1 + jmp b3(v26) + b5(): + jmp b6() + b6(): + v18 = load v5 -> u32 + v19 = load v7 -> [Field] + return v18, v19 +} +acir(inline) fn merge_slices_mutate_two_ifs f26 { + b0(v0: Field, v1: Field): + v3 = make_array [Field 0, Field 0] : [Field] + v4 = allocate -> &mut u32 + store u32 2 at v4 + v6 = allocate -> &mut [Field] + store v3 at v6 + v7 = eq v0, v1 + v8 = not v7 + jmpif v8 then: b1, else: b2 + b1(): + v14 = load v4 -> u32 + v15 = load v6 -> [Field] + v16, v17 = call slice_push_back(v14, v15, v1) -> (u32, [Field]) + store v16 at v4 + store v17 at v6 + v18 = load v4 -> u32 + v19 = load v6 -> [Field] + v20, v21 = call slice_push_back(v18, v19, v0) -> (u32, [Field]) + store v20 at v4 + store v21 at v6 + jmp b3() + b2(): + v9 = load v4 -> u32 + v10 = load v6 -> [Field] + v12, v13 = call slice_push_back(v9, v10, v0) -> (u32, [Field]) + store v12 at v4 + store v13 at v6 + jmp b3() + b3(): + v23 = eq v0, Field 20 + jmpif v23 then: b4, else: b5 + b4(): + v24 = load v4 -> u32 + v25 = load v6 -> [Field] + v26, v27 = call slice_push_back(v24, v25, Field 20) -> (u32, [Field]) + store v26 at v4 + store v27 at v6 + jmp b5() + b5(): + v28 = load v4 -> u32 + v29 = load v6 -> [Field] + v31, v32 = call slice_push_back(v28, v29, Field 15) -> (u32, [Field]) + store v31 at v4 + store v32 at v6 + v33 = load v4 -> u32 + v34 = load v6 -> [Field] + v36, v37 = call slice_push_back(v33, v34, Field 30) -> (u32, [Field]) + store v36 at v4 + store v37 at v6 + v38 = load v4 -> u32 + v39 = load v6 -> [Field] + return v38, v39 +} +acir(inline) fn merge_slices_mutate_between_ifs f27 { + b0(v0: Field, v1: Field): + v3 = make_array [Field 0, Field 0] : [Field] + v4 = allocate -> &mut u32 + store u32 2 at v4 + v6 = allocate -> &mut [Field] + store v3 at v6 + v7 = eq v0, v1 + v8 = not v7 + 
jmpif v8 then: b1, else: b2 + b1(): + v14 = load v4 -> u32 + v15 = load v6 -> [Field] + v16, v17 = call slice_push_back(v14, v15, v1) -> (u32, [Field]) + store v16 at v4 + store v17 at v6 + v18 = load v4 -> u32 + v19 = load v6 -> [Field] + v20, v21 = call slice_push_back(v18, v19, v0) -> (u32, [Field]) + store v20 at v4 + store v21 at v6 + jmp b3() + b2(): + v9 = load v4 -> u32 + v10 = load v6 -> [Field] + v12, v13 = call slice_push_back(v9, v10, v0) -> (u32, [Field]) + store v12 at v4 + store v13 at v6 + jmp b3() + b3(): + v22 = load v4 -> u32 + v23 = load v6 -> [Field] + v25, v26 = call slice_push_back(v22, v23, Field 30) -> (u32, [Field]) + store v25 at v4 + store v26 at v6 + v28 = eq v0, Field 20 + jmpif v28 then: b4, else: b5 + b4(): + v29 = load v4 -> u32 + v30 = load v6 -> [Field] + v31, v32 = call slice_push_back(v29, v30, Field 20) -> (u32, [Field]) + store v31 at v4 + store v32 at v6 + jmp b5() + b5(): + v33 = load v4 -> u32 + v34 = load v6 -> [Field] + v36, v37 = call slice_push_back(v33, v34, Field 15) -> (u32, [Field]) + store v36 at v4 + store v37 at v6 + v38 = eq v0, Field 20 + v39 = not v38 + jmpif v39 then: b6, else: b7 + b6(): + v40 = load v4 -> u32 + v41 = load v6 -> [Field] + v43, v44 = call slice_push_back(v40, v41, Field 50) -> (u32, [Field]) + store v43 at v4 + store v44 at v6 + jmp b7() + b7(): + v45 = load v4 -> u32 + v46 = load v6 -> [Field] + v48, v49 = call slice_push_back(v45, v46, Field 60) -> (u32, [Field]) + store v48 at v4 + store v49 at v6 + v50 = load v4 -> u32 + v51 = load v6 -> [Field] + return v50, v51 +} +acir(inline) fn merge_slices_push_then_pop f28 { + b0(v0: Field, v1: Field): + v3 = make_array [Field 0, Field 0] : [Field] + v4 = allocate -> &mut u32 + store u32 2 at v4 + v6 = allocate -> &mut [Field] + store v3 at v6 + v7 = eq v0, v1 + v8 = not v7 + jmpif v8 then: b1, else: b2 + b1(): + v14 = load v4 -> u32 + v15 = load v6 -> [Field] + v16, v17 = call slice_push_back(v14, v15, v1) -> (u32, [Field]) + store v16 at v4 + 
store v17 at v6 + v18 = load v4 -> u32 + v19 = load v6 -> [Field] + v20, v21 = call slice_push_back(v18, v19, v0) -> (u32, [Field]) + store v20 at v4 + store v21 at v6 + jmp b3() + b2(): + v9 = load v4 -> u32 + v10 = load v6 -> [Field] + v12, v13 = call slice_push_back(v9, v10, v0) -> (u32, [Field]) + store v12 at v4 + store v13 at v6 + jmp b3() + b3(): + v22 = load v4 -> u32 + v23 = load v6 -> [Field] + v25, v26 = call slice_push_back(v22, v23, Field 30) -> (u32, [Field]) + store v25 at v4 + store v26 at v6 + v28 = eq v0, Field 20 + jmpif v28 then: b4, else: b5 + b4(): + v29 = load v4 -> u32 + v30 = load v6 -> [Field] + v31, v32 = call slice_push_back(v29, v30, Field 20) -> (u32, [Field]) + store v31 at v4 + store v32 at v6 + jmp b5() + b5(): + v33 = load v4 -> u32 + v34 = load v6 -> [Field] + v36, v37, v38 = call slice_pop_back(v33, v34) -> (u32, [Field], Field) + v40 = eq v36, u32 4 + constrain v36 == u32 4 + v41 = eq v38, Field 30 + constrain v38 == Field 30 + v42, v43, v44 = call slice_pop_back(v36, v37) -> (u32, [Field], Field) + v46 = eq v42, u32 3 + constrain v42 == u32 3 + v47 = eq v44, v0 + constrain v44 == v0 + return +} +acir(inline) fn merge_slices_push_then_insert f29 { + b0(v0: Field, v1: Field): + v3 = make_array [Field 0, Field 0] : [Field] + v4 = allocate -> &mut u32 + store u32 2 at v4 + v6 = allocate -> &mut [Field] + store v3 at v6 + v7 = eq v0, v1 + v8 = not v7 + jmpif v8 then: b1, else: b2 + b1(): + v14 = load v4 -> u32 + v15 = load v6 -> [Field] + v16, v17 = call slice_push_back(v14, v15, v1) -> (u32, [Field]) + store v16 at v4 + store v17 at v6 + v18 = load v4 -> u32 + v19 = load v6 -> [Field] + v20, v21 = call slice_push_back(v18, v19, v0) -> (u32, [Field]) + store v20 at v4 + store v21 at v6 + jmp b3() + b2(): + v9 = load v4 -> u32 + v10 = load v6 -> [Field] + v12, v13 = call slice_push_back(v9, v10, v0) -> (u32, [Field]) + store v12 at v4 + store v13 at v6 + jmp b3() + b3(): + v22 = load v4 -> u32 + v23 = load v6 -> [Field] + v25, v26 = 
call slice_push_back(v22, v23, Field 30) -> (u32, [Field]) + store v25 at v4 + store v26 at v6 + v28 = eq v0, Field 20 + jmpif v28 then: b4, else: b5 + b4(): + v29 = load v4 -> u32 + v30 = load v6 -> [Field] + v31, v32 = call slice_push_back(v29, v30, Field 20) -> (u32, [Field]) + store v31 at v4 + store v32 at v6 + v33 = load v4 -> u32 + v34 = load v6 -> [Field] + v36, v37 = call slice_push_back(v33, v34, Field 15) -> (u32, [Field]) + store v36 at v4 + store v37 at v6 + jmp b5() + b5(): + v38 = load v4 -> u32 + v39 = load v6 -> [Field] + v41 = add v38, u32 1 + v42 = lt u32 1, v41 + constrain v42 == u1 1, "Index out of bounds" + v46, v47 = call slice_insert(v38, v39, u32 1, Field 50) -> (u32, [Field]) + store v46 at v4 + store v47 at v6 + v48 = load v4 -> u32 + v49 = load v6 -> [Field] + v50 = add v48, u32 1 + v52 = lt u32 6, v50 + constrain v52 == u1 1, "Index out of bounds" + v54, v55 = call slice_insert(v48, v49, u32 6, Field 100) -> (u32, [Field]) + store v54 at v4 + store v55 at v6 + v56 = load v4 -> u32 + v57 = load v6 -> [Field] + return v56, v57 +} +acir(inline) fn merge_slices_remove_between_ifs f30 { + b0(v0: Field, v1: Field): + v3 = make_array [Field 0, Field 0] : [Field] + v4 = allocate -> &mut u32 + store u32 2 at v4 + v6 = allocate -> &mut [Field] + store v3 at v6 + v7 = eq v0, v1 + v8 = not v7 + jmpif v8 then: b1, else: b2 + b1(): + v14 = load v4 -> u32 + v15 = load v6 -> [Field] + v16, v17 = call slice_push_back(v14, v15, v1) -> (u32, [Field]) + store v16 at v4 + store v17 at v6 + v18 = load v4 -> u32 + v19 = load v6 -> [Field] + v20, v21 = call slice_push_back(v18, v19, v0) -> (u32, [Field]) + store v20 at v4 + store v21 at v6 + jmp b3() + b2(): + v9 = load v4 -> u32 + v10 = load v6 -> [Field] + v12, v13 = call slice_push_back(v9, v10, v0) -> (u32, [Field]) + store v12 at v4 + store v13 at v6 + jmp b3() + b3(): + v22 = load v4 -> u32 + v23 = load v6 -> [Field] + v24 = lt u32 2, v22 + constrain v24 == u1 1, "Index out of bounds" + v27, v28, v29 = 
call slice_remove(v22, v23, u32 2) -> (u32, [Field], Field) + v30 = allocate -> &mut u32 + store v27 at v30 + v31 = allocate -> &mut [Field] + store v28 at v31 + v32 = eq v29, v1 + constrain v29 == v1 + v34 = eq v0, Field 20 + jmpif v34 then: b4, else: b5 + b4(): + v35 = load v30 -> u32 + v36 = load v31 -> [Field] + v37, v38 = call slice_push_back(v35, v36, Field 20) -> (u32, [Field]) + store v37 at v30 + store v38 at v31 + jmp b5() + b5(): + v39 = load v30 -> u32 + v40 = load v31 -> [Field] + v42, v43 = call slice_push_back(v39, v40, Field 15) -> (u32, [Field]) + store v42 at v30 + store v43 at v31 + v44 = eq v0, Field 20 + v45 = not v44 + jmpif v45 then: b6, else: b7 + b6(): + v46 = load v30 -> u32 + v47 = load v31 -> [Field] + v49, v50 = call slice_push_back(v46, v47, Field 50) -> (u32, [Field]) + store v49 at v30 + store v50 at v31 + jmp b7() + b7(): + v51 = load v30 -> u32 + v52 = load v31 -> [Field] + return v51, v52 +} +acir(inline) fn eq f31 { + b0(v0: Field, v1: Field): + v2 = eq v0, v1 + return v2 +} + "#; + + let values = expect_values_with_args( + src, + vec![ + Value::from_constant(5_u128.into(), NumericType::NativeField), + Value::from_constant(10_u128.into(), NumericType::NativeField), + ], + ); + assert!(values.is_empty()); +} diff --git a/compiler/noirc_evaluator/src/ssa/interpreter/value.rs b/compiler/noirc_evaluator/src/ssa/interpreter/value.rs new file mode 100644 index 00000000000..10ad7a86c2c --- /dev/null +++ b/compiler/noirc_evaluator/src/ssa/interpreter/value.rs @@ -0,0 +1,332 @@ +use std::sync::Arc; + +use acvm::{AcirField, FieldElement}; +use iter_extended::vecmap; +use noirc_frontend::Shared; + +use crate::ssa::ir::{ + function::FunctionId, + instruction::Intrinsic, + types::{CompositeType, NumericType, Type}, + value::ValueId, +}; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) enum Value { + Numeric(NumericValue), + Reference(ReferenceValue), + ArrayOrSlice(ArrayValue), + Function(FunctionId), + Intrinsic(Intrinsic), + 
+    ForeignFunction(String),
+}
+
+/// A single numeric value, tagged with its numeric SSA type.
+/// There is one variant per numeric type the SSA supports; accessors below
+/// match exactly one variant each and perform no cross-type coercion.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub(crate) enum NumericValue {
+    Field(FieldElement),
+
+    // Unsigned integer widths
+    U1(bool),
+    U8(u8),
+    U16(u16),
+    U32(u32),
+    U64(u64),
+    U128(u128),
+
+    // Signed integer widths
+    I8(i8),
+    I16(i16),
+    I32(i32),
+    I64(i64),
+}
+
+/// The runtime representation of a `&mut T` allocation.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub(crate) struct ReferenceValue {
+    /// This is included mostly for debugging to distinguish different
+    /// ReferenceValues which store the same element.
+    pub original_id: ValueId,
+
+    /// A value of `None` here means this allocation is currently uninitialized
+    pub element: Shared<Option<Value>>,
+
+    pub element_type: Arc<Type>,
+}
+
+/// The runtime representation of both arrays and slices; `is_slice`
+/// distinguishes the two (see `Value::get_type`).
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub(crate) struct ArrayValue {
+    pub elements: Shared<Vec<Value>>,
+
+    /// The `Shared` type contains its own reference count but we need to track
+    /// the reference count separate to ensure it is only changed by IncrementRc and
+    /// DecrementRc instructions.
+    pub rc: Shared<u32>,
+
+    pub element_types: Arc<CompositeType>,
+    pub is_slice: bool,
+}
+
+impl Value {
+    /// Reconstructs the SSA `Type` of this runtime value.
+    /// Array lengths are recovered from the current element count divided
+    /// across `element_types`; every function-like variant maps to
+    /// `Type::Function`.
+    #[allow(unused)]
+    pub(crate) fn get_type(&self) -> Type {
+        match self {
+            Value::Numeric(numeric_value) => Type::Numeric(numeric_value.get_type()),
+            Value::Reference(reference) => Type::Reference(reference.element_type.clone()),
+            Value::ArrayOrSlice(array) if array.is_slice => {
+                Type::Slice(array.element_types.clone())
+            }
+            Value::ArrayOrSlice(array) => {
+                Type::Array(array.element_types.clone(), array.elements.borrow().len() as u32)
+            }
+            Value::Function(_) | Value::Intrinsic(_) | Value::ForeignFunction(_) => Type::Function,
+        }
+    }
+
+    /// Creates a fresh, uninitialized reference (its element starts as `None`).
+    pub(crate) fn reference(original_id: ValueId, element_type: Arc<Type>) -> Self {
+        Value::Reference(ReferenceValue { original_id, element_type, element: Shared::new(None) })
+    }
+
+    /// Returns the inner bool only for a `u1` value; `None` for anything else.
+    pub(crate) fn as_bool(&self) -> Option<bool> {
+        match self {
+            Value::Numeric(NumericValue::U1(value)) => Some(*value),
+            _ => None,
+        }
+    }
+
+    /// Returns the inner byte only for a `u8` value; `None` for anything else.
+    pub(crate) fn as_u8(&self) -> Option<u8> {
+        match self {
+            Value::Numeric(NumericValue::U8(value)) => Some(*value),
+            _ => None,
+        }
+    }
+
+    /// Returns the inner integer only for a `u32` value; `None` for anything else.
+    pub(crate) fn as_u32(&self) -> Option<u32> {
+        match self {
+            Value::Numeric(NumericValue::U32(value)) => Some(*value),
+            _ => None,
+        }
+    }
+
+    /// Returns the numeric payload of any numeric value (copied; `NumericValue` is `Copy`).
+    pub(crate) fn as_numeric(&self) -> Option<NumericValue> {
+        match self {
+            Value::Numeric(value) => Some(*value),
+            _ => None,
+        }
+    }
+
+    /// Borrows the reference payload, if this is a `Reference` value.
+    pub(crate) fn as_reference(&self) -> Option<&ReferenceValue> {
+        match self {
+            Value::Reference(value) => Some(value),
+            _ => None,
+        }
+    }
+
+    /// Borrows the array/slice payload, if this is an `ArrayOrSlice` value.
+    pub(crate) fn as_array_or_slice(&self) -> Option<&ArrayValue> {
+        match self {
+            Value::ArrayOrSlice(value) => Some(value),
+            _ => None,
+        }
+    }
+
+    /// Builds a numeric value of type `typ` from a field-element constant.
+    pub(crate) fn from_constant(constant: FieldElement, typ: NumericType) -> Self {
+        Self::Numeric(NumericValue::from_constant(constant, typ))
+    }
+
+    // This is used in tests but shouldn't be cfg(test) only
+    #[allow(unused)]
+    pub(crate) fn bool(value: bool) -> Self {
+        Self::Numeric(NumericValue::U1(value))
+    }
+
+    /// Builds an array value with a fresh reference count of 1.
+    pub(crate) fn array(elements: Vec<Value>, element_types: Vec<Type>) -> Self {
+        Self::ArrayOrSlice(ArrayValue {
+            elements: Shared::new(elements),
+            rc: Shared::new(1),
+            element_types: Arc::new(element_types),
+            is_slice: false,
+        })
+    }
+
+    /// Builds a slice value with a fresh reference count of 1.
+    pub(crate) fn slice(elements: Vec<Value>, element_types: Arc<Vec<Type>>) -> Self {
+        Self::ArrayOrSlice(ArrayValue {
+            elements: Shared::new(elements),
+            rc: Shared::new(1),
+            element_types,
+            is_slice: true,
+        })
+    }
+
+    /// Return an uninitialized value of the given type. This is usually a zeroed
+    /// value but we make no guarantee that it is. This is often used as the default
+    /// value to return for side-effectful functions like `call` or `array_get` when
+    /// side-effects are disabled.
+    pub(crate) fn uninitialized(typ: &Type, id: ValueId) -> Value {
+        match typ {
+            Type::Numeric(typ) => Value::Numeric(NumericValue::zero(*typ)),
+            Type::Reference(element_type) => Self::reference(id, element_type.clone()),
+            Type::Array(element_types, length) => {
+                // One uninitialized value per element type, repeated `length` times.
+                let first_elements =
+                    vecmap(element_types.iter(), |typ| Self::uninitialized(typ, id));
+                let elements = std::iter::repeat_n(first_elements, *length as usize);
+                let elements = elements.flatten().collect();
+                Self::array(elements, element_types.to_vec())
+            }
+            Type::Slice(element_types) => Self::slice(Vec::new(), element_types.clone()),
+            // There is no meaningful uninitialized function; a sentinel foreign
+            // function name is used instead.
+            Type::Function => Value::ForeignFunction("uninitialized!".to_string()),
+        }
+    }
+
+    /// Interprets this value as a byte array/slice and converts it to a string.
+    /// Returns `None` if this is not an array/slice or any element is not a u8;
+    /// invalid UTF-8 sequences are replaced lossily.
+    pub(crate) fn as_string(&self) -> Option<String> {
+        let array = self.as_array_or_slice()?;
+        let elements = array.elements.borrow();
+        let bytes = elements.iter().map(|element| element.as_u8()).collect::<Option<Vec<_>>>()?;
+        Some(String::from_utf8_lossy(&bytes).into_owned())
+    }
+
+    /// Returns the inner field element only for a `Field` value; no integer
+    /// widening is performed.
+    pub(crate) fn as_field(&self) -> Option<FieldElement> {
+        self.as_numeric()?.as_field()
+    }
+}
+
+impl NumericValue {
+    /// The `NumericType` corresponding to this variant.
+    pub(crate) fn get_type(&self) -> NumericType {
+        match self {
+            NumericValue::Field(_) => NumericType::NativeField,
+            NumericValue::U1(_) => NumericType::unsigned(1),
+            NumericValue::U8(_) => NumericType::unsigned(8),
+            NumericValue::U16(_) => NumericType::unsigned(16),
+            NumericValue::U32(_) => NumericType::unsigned(32),
+            NumericValue::U64(_) => NumericType::unsigned(64),
+            NumericValue::U128(_) => NumericType::unsigned(128),
+            NumericValue::I8(_) => NumericType::signed(8),
+            NumericValue::I16(_) => NumericType::signed(16),
+            NumericValue::I32(_) => NumericType::signed(32),
+            NumericValue::I64(_) => NumericType::signed(64),
+        }
+    }
+
+    /// The zero value of the given numeric type.
+    pub(crate) fn zero(typ: NumericType) -> Self {
+        Self::from_constant(FieldElement::zero(), typ)
+    }
+
+    /// Returns the inner field element only for the `Field` variant.
+    pub(crate) fn as_field(&self) -> Option<FieldElement> {
+        match self {
+            NumericValue::Field(value) => Some(*value),
+            _ => None,
+        }
+    }
+
+    pub(crate) fn as_bool(&self) -> Option<bool> {
+        match self {
NumericValue::U1(value) => Some(*value), + _ => None, + } + } + + pub(crate) fn as_u32(&self) -> Option { + match self { + NumericValue::U32(value) => Some(*value), + _ => None, + } + } + + pub(crate) fn from_constant(constant: FieldElement, typ: NumericType) -> NumericValue { + match typ { + NumericType::NativeField => Self::Field(constant), + NumericType::Unsigned { bit_size: 1 } => Self::U1(constant.is_one()), + NumericType::Unsigned { bit_size: 8 } => { + Self::U8(constant.try_into_u128().unwrap().try_into().unwrap()) + } + NumericType::Unsigned { bit_size: 16 } => { + Self::U16(constant.try_into_u128().unwrap().try_into().unwrap()) + } + NumericType::Unsigned { bit_size: 32 } => { + Self::U32(constant.try_into_u128().unwrap().try_into().unwrap()) + } + NumericType::Unsigned { bit_size: 64 } => { + Self::U64(constant.try_into_u128().unwrap().try_into().unwrap()) + } + NumericType::Unsigned { bit_size: 128 } => { + Self::U128(constant.try_into_u128().unwrap()) + } + NumericType::Signed { bit_size: 8 } => { + Self::I8(constant.try_into_i128().unwrap().try_into().unwrap()) + } + NumericType::Signed { bit_size: 16 } => { + Self::I16(constant.try_into_i128().unwrap().try_into().unwrap()) + } + NumericType::Signed { bit_size: 32 } => { + Self::I32(constant.try_into_i128().unwrap().try_into().unwrap()) + } + NumericType::Signed { bit_size: 64 } => { + Self::I64(constant.try_into_i128().unwrap().try_into().unwrap()) + } + other => panic!("Unsupported numeric type: {other}"), + } + } + + pub(crate) fn convert_to_field(&self) -> FieldElement { + match self { + NumericValue::Field(field) => *field, + NumericValue::U1(boolean) if *boolean => FieldElement::one(), + NumericValue::U1(_) => FieldElement::zero(), + NumericValue::U8(value) => FieldElement::from(*value as u32), + NumericValue::U16(value) => FieldElement::from(*value as u32), + NumericValue::U32(value) => FieldElement::from(*value), + NumericValue::U64(value) => FieldElement::from(*value), + 
NumericValue::U128(value) => FieldElement::from(*value), + NumericValue::I8(value) => FieldElement::from(*value as i128), + NumericValue::I16(value) => FieldElement::from(*value as i128), + NumericValue::I32(value) => FieldElement::from(*value as i128), + NumericValue::I64(value) => FieldElement::from(*value as i128), + } + } +} + +impl std::fmt::Display for Value { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Value::Numeric(numeric_value) => write!(f, "{numeric_value}"), + Value::Reference(reference_value) => write!(f, "{reference_value}"), + Value::ArrayOrSlice(array_value) => write!(f, "{array_value}"), + Value::Function(id) => write!(f, "{id}"), + Value::Intrinsic(intrinsic) => write!(f, "{intrinsic}"), + Value::ForeignFunction(name) => write!(f, "ForeignFunction(\"{name}\")"), + } + } +} + +impl std::fmt::Display for NumericValue { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + NumericValue::Field(value) => write!(f, "Field {value}"), + NumericValue::U1(value) => write!(f, "u1 {value}"), + NumericValue::U8(value) => write!(f, "u8 {value}"), + NumericValue::U16(value) => write!(f, "u16 {value}"), + NumericValue::U32(value) => write!(f, "u32 {value}"), + NumericValue::U64(value) => write!(f, "u64 {value}"), + NumericValue::U128(value) => write!(f, "u128 {value}"), + NumericValue::I8(value) => write!(f, "i8 {value}"), + NumericValue::I16(value) => write!(f, "i16 {value}"), + NumericValue::I32(value) => write!(f, "i32 {value}"), + NumericValue::I64(value) => write!(f, "i64 {value}"), + } + } +} + +impl std::fmt::Display for ReferenceValue { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let element = self.element.borrow(); + match &*element { + Some(element) => write!(f, "*{} = {}", self.original_id, element), + None => write!(f, "*{} = None", self.original_id), + } + } +} + +impl std::fmt::Display for ArrayValue { + fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result { + let elements = self.elements.borrow(); + let elements = vecmap(elements.iter(), ToString::to_string).join(", "); + + let is_slice = if self.is_slice { "&" } else { "" }; + write!(f, "rc{} {is_slice}[{elements}]", self.rc.borrow()) + } +} diff --git a/compiler/noirc_evaluator/src/ssa/ir/dfg/simplify.rs b/compiler/noirc_evaluator/src/ssa/ir/dfg/simplify.rs index 325130f0dc8..d8aed40f702 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/dfg/simplify.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/dfg/simplify.rs @@ -24,6 +24,8 @@ mod call; mod cast; mod constrain; +pub(crate) use call::constant_to_radix; + /// Contains the result to Instruction::simplify, specifying how the instruction /// should be simplified. pub(crate) enum SimplifyResult { diff --git a/compiler/noirc_evaluator/src/ssa/ir/dfg/simplify/call.rs b/compiler/noirc_evaluator/src/ssa/ir/dfg/simplify/call.rs index 7cb0eaab734..5c1dd5c6252 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/dfg/simplify/call.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/dfg/simplify/call.rs @@ -57,7 +57,7 @@ pub(super) fn simplify_call( } else { unreachable!("ICE: Intrinsic::ToRadix return type must be array") }; - constant_to_radix(endian, field, 2, limb_count, |values| { + simplify_constant_to_radix(endian, field, 2, limb_count, |values| { make_constant_array( dfg, values.into_iter(), @@ -80,7 +80,7 @@ pub(super) fn simplify_call( } else { unreachable!("ICE: Intrinsic::ToRadix return type must be array") }; - constant_to_radix(endian, field, radix, limb_count, |values| { + simplify_constant_to_radix(endian, field, radix, limb_count, |values| { make_constant_array( dfg, values.into_iter(), @@ -365,20 +365,32 @@ pub(super) fn simplify_call( } /// Returns a slice (represented by a tuple (len, slice)) of constants corresponding to the limbs of the radix decomposition. 
/* NOTE(review): flattened git unified diff, continued. Three hunks: (1) call.rs — constant_to_radix is split into a pure core returning Option (generic argument stripped by extraction; presumably Option<Vec<FieldElement>>) plus a thin simplify_constant_to_radix wrapper that keeps the old make_array/SimplifyResult behavior, so the interpreter can reuse the core; the limb-decomposition logic itself is unchanged. (2) instruction.rs — the Binary match arm is moved verbatim into Binary::requires_acir_gen_predicate. (3) mem2reg.rs test tweak. */ -fn constant_to_radix( +fn simplify_constant_to_radix( endian: Endian, field: FieldElement, radix: u32, limb_count: u32, mut make_array: impl FnMut(Vec) -> ValueId, ) -> SimplifyResult { + match constant_to_radix(endian, field, radix, limb_count) { + Some(result) => SimplifyResult::SimplifiedTo(make_array(result)), + None => SimplifyResult::None, + } +} + +pub(crate) fn constant_to_radix( + endian: Endian, + field: FieldElement, + radix: u32, + limb_count: u32, +) -> Option> { let bit_size = u32::BITS - (radix - 1).leading_zeros(); /* NOTE(review): for a power-of-two radix this computes log2(radix); the next check rejects everything else */ let radix_big = BigUint::from(radix); let radix_range = BigUint::from(2u128)..=BigUint::from(256u128); if !radix_range.contains(&radix_big) || BigUint::from(2u128).pow(bit_size) != radix_big { // NOTE: expect an error to be thrown later in // acir::generated_acir::radix_le_decompose - return SimplifyResult::None; + return None; } let big_integer = BigUint::from_bytes_be(&field.to_be_bytes()); @@ -387,7 +399,7 @@ fn constant_to_radix( if limb_count < decomposed_integer.len() as u32 { // `field` cannot be represented as `limb_count` bits. // defer error to acir_gen. - SimplifyResult::None + None } else { let mut limbs = vecmap(0..limb_count, |i| match decomposed_integer.get(i as usize) { Some(digit) => FieldElement::from_be_bytes_reduce(&[*digit]), @@ -396,8 +408,7 @@ fn constant_to_radix( if endian == Endian::Big { limbs.reverse(); } - let result_array = make_array(limbs); - SimplifyResult::SimplifiedTo(result_array) + Some(limbs) } } diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs index 33bafc2fbb6..799091e5033 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs @@ -374,28 +374,7 @@ impl Instruction { /// If true the instruction will depend on `enable_side_effects` context during acir-gen. 
pub(crate) fn requires_acir_gen_predicate(&self, dfg: &DataFlowGraph) -> bool { match self { - Instruction::Binary(binary) => { - match binary.operator { - BinaryOp::Add { unchecked: false } - | BinaryOp::Sub { unchecked: false } - | BinaryOp::Mul { unchecked: false } => { - // Some binary math can overflow or underflow, but this is only the case - // for unsigned types (here we assume the type of binary.lhs is the same) - dfg.type_of_value(binary.rhs).is_unsigned() - } - BinaryOp::Div | BinaryOp::Mod => true, - BinaryOp::Add { unchecked: true } - | BinaryOp::Sub { unchecked: true } - | BinaryOp::Mul { unchecked: true } - | BinaryOp::Eq - | BinaryOp::Lt - | BinaryOp::And - | BinaryOp::Or - | BinaryOp::Xor - | BinaryOp::Shl - | BinaryOp::Shr => false, - } - } + Instruction::Binary(binary) => binary.requires_acir_gen_predicate(dfg), /* NOTE(review): behavior-preserving extraction — the deleted arm above reappears verbatim as Binary::requires_acir_gen_predicate below, with `binary.rhs` becoming `self.rhs` */ Instruction::ArrayGet { array, index } => { // `ArrayGet`s which read from "known good" indices from an array should not need a predicate. @@ -666,6 +645,31 @@ impl Instruction { } } +impl Binary { + pub(crate) fn requires_acir_gen_predicate(&self, dfg: &DataFlowGraph) -> bool { + match self.operator { + BinaryOp::Add { unchecked: false } + | BinaryOp::Sub { unchecked: false } + | BinaryOp::Mul { unchecked: false } => { + // Some binary math can overflow or underflow, but this is only the case + // for unsigned types (here we assume the type of binary.lhs is the same) + dfg.type_of_value(self.rhs).is_unsigned() + } + BinaryOp::Div | BinaryOp::Mod => true, + BinaryOp::Add { unchecked: true } + | BinaryOp::Sub { unchecked: true } + | BinaryOp::Mul { unchecked: true } + | BinaryOp::Eq + | BinaryOp::Lt + | BinaryOp::And + | BinaryOp::Or + | BinaryOp::Xor + | BinaryOp::Shl + | BinaryOp::Shr => false, + } + } +} + #[derive(Debug, PartialEq, Eq, Hash, Clone)] pub enum ErrorType { String(String), diff --git a/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs b/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs index 957ae2787fd..4324977ab88 100644 --- 
a/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs @@ -1017,9 +1017,8 @@ mod tests { builder.insert_constrain(v9, two, None); let v11 = builder.insert_load(v2, v2_type); let v12 = builder.insert_load(v11, Type::field()); - let _ = builder.insert_binary(v12, BinaryOp::Eq, two); - builder.insert_constrain(v11, two, None); + builder.insert_constrain(v12, two, None); builder.terminate_with_return(vec![]); let ssa = builder.finish(); // NOTE(review): test now constrains the loaded value v12 directly instead of v11, and drops the unused Eq binary whose result was discarded (`let _`)