From b8bc4064fd6a0e8b74f5911eb3d52f7ae56b2d8d Mon Sep 17 00:00:00 2001 From: ptitSeb Date: Wed, 22 Jun 2022 10:04:18 +0200 Subject: [PATCH] Converted all machine and emitter of singlepass to use Result instead of panic (but assert are still present) --- lib/compiler-singlepass/src/codegen.rs | 1055 +++-- lib/compiler-singlepass/src/compiler.rs | 10 +- lib/compiler-singlepass/src/emitter_arm64.rs | 1462 ++++-- lib/compiler-singlepass/src/emitter_x64.rs | 1146 +++-- lib/compiler-singlepass/src/machine.rs | 569 +-- lib/compiler-singlepass/src/machine_arm64.rs | 3064 +++++++----- lib/compiler-singlepass/src/machine_x64.rs | 4391 +++++++++++------- 7 files changed, 7229 insertions(+), 4468 deletions(-) diff --git a/lib/compiler-singlepass/src/codegen.rs b/lib/compiler-singlepass/src/codegen.rs index 371729da74b..dd0a536e4f8 100644 --- a/lib/compiler-singlepass/src/codegen.rs +++ b/lib/compiler-singlepass/src/codegen.rs @@ -296,11 +296,11 @@ impl<'a, M: Machine> FuncGen<'a, M> { let delta_stack_offset = self.machine.round_stack_adjust(delta_stack_offset); if delta_stack_offset != 0 { - self.machine.adjust_stack(delta_stack_offset as u32); + self.machine.adjust_stack(delta_stack_offset as u32)?; } if zeroed { for i in 0..tys.len() { - self.machine.zero_location(Size::S64, ret[i]); + self.machine.zero_location(Size::S64, ret[i])?; } } Ok(ret) @@ -353,7 +353,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { } let delta_stack_offset = self.machine.round_stack_adjust(delta_stack_offset); if delta_stack_offset != 0 { - self.machine.restore_stack(delta_stack_offset as u32); + self.machine.restore_stack(delta_stack_offset as u32)?; } Ok(()) } @@ -403,7 +403,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { let delta_stack_offset = self.machine.round_stack_adjust(delta_stack_offset); if delta_stack_offset != 0 { - self.machine.adjust_stack(delta_stack_offset as u32); + self.machine.adjust_stack(delta_stack_offset as u32)?; } Ok(()) } @@ -459,7 +459,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { 
let delta_stack_offset = self.machine.round_stack_adjust(delta_stack_offset); if delta_stack_offset != 0 { - self.machine.pop_stack_locals(delta_stack_offset as u32); + self.machine.pop_stack_locals(delta_stack_offset as u32)?; } Ok(()) } @@ -498,17 +498,18 @@ impl<'a, M: Machine> FuncGen<'a, M> { let delta_stack_offset = self.machine.round_stack_adjust(delta_stack_offset); if delta_stack_offset != 0 { - self.machine.pop_stack_locals(delta_stack_offset as u32); + self.machine.pop_stack_locals(delta_stack_offset as u32)?; } Ok(()) } + #[allow(clippy::type_complexity)] fn init_locals( &mut self, n: usize, sig: FunctionType, calling_convention: CallingConvention, - ) -> Vec> { + ) -> Result>, CodegenError> { // How many machine stack slots will all the locals use? let num_mem_slots = (0..n) .filter(|&x| self.machine.is_local_on_stack(x)) @@ -555,16 +556,16 @@ impl<'a, M: Machine> FuncGen<'a, M> { .step_by(NATIVE_PAGE_SIZE / 8) .skip(1) { - self.machine.zero_location(Size::S64, locations[i]); + self.machine.zero_location(Size::S64, locations[i])?; } - self.machine.adjust_stack(static_area_size as _); + self.machine.adjust_stack(static_area_size as _)?; // Save callee-saved registers. 
for loc in locations.iter() { if let Location::GPR(x) = *loc { self.stack_offset.0 += 8; - self.machine.move_local(self.stack_offset.0 as i32, *loc); + self.machine.move_local(self.stack_offset.0 as i32, *loc)?; self.state.stack_values.push(MachineValue::PreserveRegister( self.machine.index_from_gpr(x), )); @@ -576,7 +577,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.machine.move_local( self.stack_offset.0 as i32, Location::GPR(self.machine.get_vmctx_reg()), - ); + )?; self.state.stack_values.push(MachineValue::PreserveRegister( self.machine.index_from_gpr(self.machine.get_vmctx_reg()), )); @@ -585,7 +586,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { let regs_to_save = self.machine.list_to_save(calling_convention); for loc in regs_to_save.iter() { self.stack_offset.0 += 8; - self.machine.move_local(self.stack_offset.0 as i32, *loc); + self.machine.move_local(self.stack_offset.0 as i32, *loc)?; } // Save the offset of register save area. @@ -601,7 +602,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Location::Memory(_, _) => { self.state.stack_values.push(MachineValue::WasmLocal(i)); } - _ => unreachable!(), + _ => codegen_error!("singlpass init_local unreachable"), } } @@ -614,7 +615,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Type::I32 | Type::F32 => Size::S32, Type::I64 | Type::F64 => Size::S64, Type::ExternRef | Type::FuncRef => Size::S64, - _ => unimplemented!(), + _ => codegen_error!("singlepass init_local unimplemented"), }; let loc = self.machine.get_call_param_location( i + 1, @@ -623,7 +624,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { calling_convention, ); self.machine - .move_location_extend(sz, false, loc, Size::S64, locations[i]); + .move_location_extend(sz, false, loc, Size::S64, locations[i])?; } // Load vmctx into it's GPR. @@ -632,7 +633,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.machine .get_simple_param_location(0, calling_convention), Location::GPR(self.machine.get_vmctx_reg()), - ); + )?; // Initialize all normal locals to zero. 
let mut init_stack_loc_cnt = 0; @@ -644,42 +645,46 @@ impl<'a, M: Machine> FuncGen<'a, M> { last_stack_loc = cmp::min(last_stack_loc, *location); } Location::GPR(_) => { - self.machine.zero_location(Size::S64, *location); + self.machine.zero_location(Size::S64, *location)?; } - _ => unreachable!(), + _ => codegen_error!("singlepass init_local unreachable"), } } if init_stack_loc_cnt > 0 { self.machine - .init_stack_loc(init_stack_loc_cnt, last_stack_loc); + .init_stack_loc(init_stack_loc_cnt, last_stack_loc)?; } // Add the size of all locals allocated to stack. self.stack_offset.0 += static_area_size - callee_saved_regs_size; - locations + Ok(locations) } - fn finalize_locals(&mut self, calling_convention: CallingConvention) { + fn finalize_locals( + &mut self, + calling_convention: CallingConvention, + ) -> Result<(), CodegenError> { // Unwind stack to the "save area". self.machine - .restore_saved_area(self.save_area_offset.as_ref().unwrap().0 as i32); + .restore_saved_area(self.save_area_offset.as_ref().unwrap().0 as i32)?; let regs_to_save = self.machine.list_to_save(calling_convention); for loc in regs_to_save.iter().rev() { - self.machine.pop_location(*loc); + self.machine.pop_location(*loc)?; } // Restore register used by vmctx. self.machine - .pop_location(Location::GPR(self.machine.get_vmctx_reg())); + .pop_location(Location::GPR(self.machine.get_vmctx_reg()))?; // Restore callee-saved registers. for loc in self.locals.iter().rev() { if let Location::GPR(_) = *loc { - self.machine.pop_location(*loc); + self.machine.pop_location(*loc)?; } } + Ok(()) } /// Set the source location of the Wasm to the given offset. @@ -753,7 +758,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { fn emit_call_native< I: Iterator>, J: Iterator, - F: FnOnce(&mut Self), + F: FnOnce(&mut Self) -> Result<(), CodegenError>, >( &mut self, cb: F, @@ -774,7 +779,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { // Save used GPRs. 
Preserve correct stack alignment let used_gprs = self.machine.get_used_gprs(); - let mut used_stack = self.machine.push_used_gpr(&used_gprs); + let mut used_stack = self.machine.push_used_gpr(&used_gprs)?; for r in used_gprs.iter() { let content = self.state.register_values[self.machine.index_from_gpr(*r).0].clone(); if content == MachineValue::Undefined { @@ -788,7 +793,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { // Save used SIMD registers. let used_simds = self.machine.get_used_simd(); if !used_simds.is_empty() { - used_stack += self.machine.push_used_simd(&used_simds); + used_stack += self.machine.push_used_simd(&used_simds)?; for r in used_simds.iter().rev() { let content = @@ -832,7 +837,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { if stack_unaligned != 0 { stack_offset += 16 - stack_unaligned; } - self.machine.adjust_stack(stack_offset as u32); + self.machine.adjust_stack(stack_offset as u32)?; #[allow(clippy::type_complexity)] let mut call_movs: Vec<(Location, M::GPR)> = vec![]; @@ -880,7 +885,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { } } self.machine - .move_location_for_native(params_size[i], *param, loc); + .move_location_for_native(params_size[i], *param, loc)?; } _ => { return Err(CodegenError { @@ -897,7 +902,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { for (loc, gpr) in call_movs { if loc != Location::GPR(gpr) { self.machine - .move_location(Size::S64, loc, Location::GPR(gpr)); + .move_location(Size::S64, loc, Location::GPR(gpr))?; } } @@ -907,14 +912,14 @@ impl<'a, M: Machine> FuncGen<'a, M> { Location::GPR(self.machine.get_vmctx_reg()), self.machine .get_simple_param_location(0, calling_convention), - ); // vmctx + )?; // vmctx if stack_padding > 0 { - self.machine.adjust_stack(stack_padding as u32); + self.machine.adjust_stack(stack_padding as u32)?; } // release the GPR used for call self.machine.release_gpr(self.machine.get_grp_for_call()); - cb(self); + cb(self)?; // Offset needs to be after the 'call' instruction. 
// TODO: Now the state information is also inserted for internal calls (e.g. MemoryGrow). Is this expected? @@ -939,32 +944,41 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.machine.restore_stack( self.machine .round_stack_adjust(stack_offset + stack_padding) as u32, - ); + )?; if (stack_offset % 8) != 0 { return Err(CodegenError { message: "emit_call_native: Bad restoring stack alignement".to_string(), }); } for _ in 0..pushed_args { - self.state.stack_values.pop().unwrap(); + self.state.stack_values.pop().ok_or(CodegenError { + message: "Pop an empty value stack".to_string(), + })?; } } // Restore SIMDs. if !used_simds.is_empty() { - self.machine.pop_used_simd(&used_simds); + self.machine.pop_used_simd(&used_simds)?; for _ in 0..used_simds.len() { - self.state.stack_values.pop().unwrap(); + self.state.stack_values.pop().ok_or(CodegenError { + message: "Pop an empty value stack".to_string(), + })?; } } // Restore GPRs. - self.machine.pop_used_gpr(&used_gprs); + self.machine.pop_used_gpr(&used_gprs)?; for _ in used_gprs.iter().rev() { - self.state.stack_values.pop().unwrap(); + self.state.stack_values.pop().ok_or(CodegenError { + message: "Pop an empty value stack".to_string(), + })?; } - if self.state.stack_values.pop().unwrap() != MachineValue::ExplicitShadow { + if self.state.stack_values.pop().ok_or(CodegenError { + message: "Pop an empty value stack".to_string(), + })? != MachineValue::ExplicitShadow + { return Err(CodegenError { message: "emit_call_native: Popped value is not ExplicitShadow".to_string(), }); @@ -991,7 +1005,10 @@ impl<'a, M: Machine> FuncGen<'a, M> { } /// Emits a memory operation. - fn op_memory(&mut self, cb: F) { + fn op_memory Result<(), CodegenError>>( + &mut self, + cb: F, + ) -> Result<(), CodegenError> { let need_check = match self.memory_styles[MemoryIndex::new(0)] { MemoryStyle::Static { .. } => false, MemoryStyle::Dynamic { .. 
} => true, @@ -1010,7 +1027,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.module.num_imported_memories != 0, offset as i32, self.special_labels.heap_access_oob, - ); + ) } pub fn get_state_diff(&mut self) -> usize { @@ -1028,14 +1045,14 @@ impl<'a, M: Machine> FuncGen<'a, M> { } fn emit_head(&mut self) -> Result<(), CodegenError> { - self.machine.emit_function_prolog(); + self.machine.emit_function_prolog()?; // Initialize locals. self.locals = self.init_locals( self.local_types.len(), self.signature.clone(), self.calling_convention, - ); + )?; // Mark vmctx register. The actual loading of the vmctx value is handled by init_local. self.state.register_values[self.machine.index_from_gpr(self.machine.get_vmctx_reg()).0] = @@ -1047,7 +1064,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.fsm.diffs.push(diff); // simulate "red zone" if not supported by the platform - self.machine.adjust_stack(32); + self.machine.adjust_stack(32)?; self.control_stack.push(ControlFrame { label: self.machine.get_label(), @@ -1213,7 +1230,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S64, Location::Memory(self.machine.get_vmctx_reg(), offset as i32), Location::GPR(tmp), - ); + )?; Location::Memory(tmp, 0) } else { // Imported globals require one level of indirection. @@ -1224,11 +1241,11 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S64, Location::Memory(self.machine.get_vmctx_reg(), offset as i32), Location::GPR(tmp), - ); + )?; Location::Memory(tmp, 0) }; - self.machine.emit_relaxed_mov(Size::S64, src, loc); + self.machine.emit_relaxed_mov(Size::S64, src, loc)?; self.machine.release_gpr(tmp); } @@ -1243,7 +1260,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S64, Location::Memory(self.machine.get_vmctx_reg(), offset as i32), Location::GPR(tmp), - ); + )?; Location::Memory(tmp, 0) } else { // Imported globals require one level of indirection. 
@@ -1254,7 +1271,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S64, Location::Memory(self.machine.get_vmctx_reg(), offset as i32), Location::GPR(tmp), - ); + )?; Location::Memory(tmp, 0) }; let ty = type_to_wp_type(self.module.globals[global_index].ty); @@ -1269,16 +1286,16 @@ impl<'a, M: Machine> FuncGen<'a, M> { match ty { WpType::F32 => Size::S32, WpType::F64 => Size::S64, - _ => unreachable!(), + _ => codegen_error!("singlepass Operator::GlobalSet unreachable"), }, loc, dst, - ); + )?; } else { - self.machine.emit_relaxed_mov(Size::S64, loc, dst); + self.machine.emit_relaxed_mov(Size::S64, loc, dst)?; } } else { - self.machine.emit_relaxed_mov(Size::S64, loc, dst); + self.machine.emit_relaxed_mov(Size::S64, loc, dst)?; } self.machine.release_gpr(tmp); } @@ -1289,7 +1306,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.machine - .emit_relaxed_mov(Size::S64, self.locals[local_index], ret); + .emit_relaxed_mov(Size::S64, self.locals[local_index], ret)?; self.value_stack.push(ret); if self.local_types[local_index].is_float() { self.fp_stack @@ -1310,19 +1327,19 @@ impl<'a, M: Machine> FuncGen<'a, M> { match self.local_types[local_index] { WpType::F32 => Size::S32, WpType::F64 => Size::S64, - _ => unreachable!(), + _ => codegen_error!("singlepass Operator::LocalSet unreachable"), }, loc, self.locals[local_index], - ); + ) } else { self.machine - .emit_relaxed_mov(Size::S64, loc, self.locals[local_index]); + .emit_relaxed_mov(Size::S64, loc, self.locals[local_index]) } } else { self.machine - .emit_relaxed_mov(Size::S64, loc, self.locals[local_index]); - } + .emit_relaxed_mov(Size::S64, loc, self.locals[local_index]) + }?; } Operator::LocalTee { local_index } => { let local_index = local_index as usize; @@ -1338,19 +1355,19 @@ impl<'a, M: Machine> FuncGen<'a, M> { match self.local_types[local_index] { WpType::F32 => Size::S32, WpType::F64 => Size::S64, - _ => unreachable!(), + _ => codegen_error!("singlepass Operator::LocalTee unreachable"), }, loc, 
self.locals[local_index], - ); + ) } else { self.machine - .emit_relaxed_mov(Size::S64, loc, self.locals[local_index]); + .emit_relaxed_mov(Size::S64, loc, self.locals[local_index]) } } else { self.machine - .emit_relaxed_mov(Size::S64, loc, self.locals[local_index]); - } + .emit_relaxed_mov(Size::S64, loc, self.locals[local_index]) + }?; } Operator::I32Const { value } => { self.value_stack.push(Location::Imm32(value as u32)); @@ -1360,15 +1377,15 @@ impl<'a, M: Machine> FuncGen<'a, M> { } Operator::I32Add => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.emit_binop_add32(loc_a, loc_b, ret); + self.machine.emit_binop_add32(loc_a, loc_b, ret)?; } Operator::I32Sub => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.emit_binop_sub32(loc_a, loc_b, ret); + self.machine.emit_binop_sub32(loc_a, loc_b, ret)?; } Operator::I32Mul => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.emit_binop_mul32(loc_a, loc_b, ret); + self.machine.emit_binop_mul32(loc_a, loc_b, ret)?; } Operator::I32DivU => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; @@ -1378,7 +1395,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { ret, self.special_labels.integer_division_by_zero, self.special_labels.integer_overflow, - ); + )?; self.mark_offset_trappable(offset); } Operator::I32DivS => { @@ -1389,7 +1406,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { ret, self.special_labels.integer_division_by_zero, self.special_labels.integer_overflow, - ); + )?; self.mark_offset_trappable(offset); } Operator::I32RemU => { @@ -1400,7 +1417,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { ret, self.special_labels.integer_division_by_zero, self.special_labels.integer_overflow, - ); + )?; self.mark_offset_trappable(offset); } Operator::I32RemS => { @@ -1411,28 +1428,28 @@ impl<'a, M: Machine> FuncGen<'a, M> { ret, self.special_labels.integer_division_by_zero, self.special_labels.integer_overflow, - ); + )?; 
self.mark_offset_trappable(offset); } Operator::I32And => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.emit_binop_and32(loc_a, loc_b, ret); + self.machine.emit_binop_and32(loc_a, loc_b, ret)?; } Operator::I32Or => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.emit_binop_or32(loc_a, loc_b, ret); + self.machine.emit_binop_or32(loc_a, loc_b, ret)?; } Operator::I32Xor => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.emit_binop_xor32(loc_a, loc_b, ret); + self.machine.emit_binop_xor32(loc_a, loc_b, ret)?; } Operator::I32Eq => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.i32_cmp_eq(loc_a, loc_b, ret); + self.machine.i32_cmp_eq(loc_a, loc_b, ret)?; } Operator::I32Ne => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.i32_cmp_ne(loc_a, loc_b, ret); + self.machine.i32_cmp_ne(loc_a, loc_b, ret)?; } Operator::I32Eqz => { let loc_a = self.pop_value_released()?; @@ -1440,7 +1457,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { &[(WpType::I32, MachineValue::WasmStack(self.value_stack.len()))], false, )?[0]; - self.machine.i32_cmp_eq(loc_a, Location::Imm32(0), ret); + self.machine.i32_cmp_eq(loc_a, Location::Imm32(0), ret)?; self.value_stack.push(ret); } Operator::I32Clz => { @@ -1450,7 +1467,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.i32_clz(loc, ret); + self.machine.i32_clz(loc, ret)?; } Operator::I32Ctz => { let loc = self.pop_value_released()?; @@ -1459,7 +1476,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.i32_ctz(loc, ret); + self.machine.i32_ctz(loc, ret)?; } Operator::I32Popcnt => { let loc = self.pop_value_released()?; @@ -1468,59 +1485,59 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.i32_popcnt(loc, ret); + self.machine.i32_popcnt(loc, 
ret)?; } Operator::I32Shl => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.i32_shl(loc_a, loc_b, ret); + self.machine.i32_shl(loc_a, loc_b, ret)?; } Operator::I32ShrU => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.i32_shr(loc_a, loc_b, ret); + self.machine.i32_shr(loc_a, loc_b, ret)?; } Operator::I32ShrS => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.i32_sar(loc_a, loc_b, ret); + self.machine.i32_sar(loc_a, loc_b, ret)?; } Operator::I32Rotl => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.i32_rol(loc_a, loc_b, ret); + self.machine.i32_rol(loc_a, loc_b, ret)?; } Operator::I32Rotr => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.i32_ror(loc_a, loc_b, ret); + self.machine.i32_ror(loc_a, loc_b, ret)?; } Operator::I32LtU => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.i32_cmp_lt_u(loc_a, loc_b, ret); + self.machine.i32_cmp_lt_u(loc_a, loc_b, ret)?; } Operator::I32LeU => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.i32_cmp_le_u(loc_a, loc_b, ret); + self.machine.i32_cmp_le_u(loc_a, loc_b, ret)?; } Operator::I32GtU => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.i32_cmp_gt_u(loc_a, loc_b, ret); + self.machine.i32_cmp_gt_u(loc_a, loc_b, ret)?; } Operator::I32GeU => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.i32_cmp_ge_u(loc_a, loc_b, ret); + self.machine.i32_cmp_ge_u(loc_a, loc_b, ret)?; } Operator::I32LtS => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.i32_cmp_lt_s(loc_a, loc_b, ret); + self.machine.i32_cmp_lt_s(loc_a, loc_b, ret)?; } Operator::I32LeS => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.i32_cmp_le_s(loc_a, loc_b, ret); + 
self.machine.i32_cmp_le_s(loc_a, loc_b, ret)?; } Operator::I32GtS => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.i32_cmp_gt_s(loc_a, loc_b, ret); + self.machine.i32_cmp_gt_s(loc_a, loc_b, ret)?; } Operator::I32GeS => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.i32_cmp_ge_s(loc_a, loc_b, ret); + self.machine.i32_cmp_ge_s(loc_a, loc_b, ret)?; } Operator::I64Const { value } => { let value = value as u64; @@ -1529,15 +1546,15 @@ impl<'a, M: Machine> FuncGen<'a, M> { } Operator::I64Add => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.emit_binop_add64(loc_a, loc_b, ret); + self.machine.emit_binop_add64(loc_a, loc_b, ret)?; } Operator::I64Sub => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.emit_binop_sub64(loc_a, loc_b, ret); + self.machine.emit_binop_sub64(loc_a, loc_b, ret)?; } Operator::I64Mul => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.emit_binop_mul64(loc_a, loc_b, ret); + self.machine.emit_binop_mul64(loc_a, loc_b, ret)?; } Operator::I64DivU => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; @@ -1547,7 +1564,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { ret, self.special_labels.integer_division_by_zero, self.special_labels.integer_overflow, - ); + )?; self.mark_offset_trappable(offset); } Operator::I64DivS => { @@ -1558,7 +1575,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { ret, self.special_labels.integer_division_by_zero, self.special_labels.integer_overflow, - ); + )?; self.mark_offset_trappable(offset); } Operator::I64RemU => { @@ -1569,7 +1586,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { ret, self.special_labels.integer_division_by_zero, self.special_labels.integer_overflow, - ); + )?; self.mark_offset_trappable(offset); } Operator::I64RemS => { @@ -1580,28 +1597,28 @@ impl<'a, M: Machine> FuncGen<'a, M> { ret, self.special_labels.integer_division_by_zero, 
self.special_labels.integer_overflow, - ); + )?; self.mark_offset_trappable(offset); } Operator::I64And => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.emit_binop_and64(loc_a, loc_b, ret); + self.machine.emit_binop_and64(loc_a, loc_b, ret)?; } Operator::I64Or => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.emit_binop_or64(loc_a, loc_b, ret); + self.machine.emit_binop_or64(loc_a, loc_b, ret)?; } Operator::I64Xor => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.emit_binop_xor64(loc_a, loc_b, ret); + self.machine.emit_binop_xor64(loc_a, loc_b, ret)?; } Operator::I64Eq => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.i64_cmp_eq(loc_a, loc_b, ret); + self.machine.i64_cmp_eq(loc_a, loc_b, ret)?; } Operator::I64Ne => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.i64_cmp_ne(loc_a, loc_b, ret); + self.machine.i64_cmp_ne(loc_a, loc_b, ret)?; } Operator::I64Eqz => { let loc_a = self.pop_value_released()?; @@ -1609,7 +1626,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { &[(WpType::I64, MachineValue::WasmStack(self.value_stack.len()))], false, )?[0]; - self.machine.i64_cmp_eq(loc_a, Location::Imm64(0), ret); + self.machine.i64_cmp_eq(loc_a, Location::Imm64(0), ret)?; self.value_stack.push(ret); } Operator::I64Clz => { @@ -1619,7 +1636,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.i64_clz(loc, ret); + self.machine.i64_clz(loc, ret)?; } Operator::I64Ctz => { let loc = self.pop_value_released()?; @@ -1628,7 +1645,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.i64_ctz(loc, ret); + self.machine.i64_ctz(loc, ret)?; } Operator::I64Popcnt => { let loc = self.pop_value_released()?; @@ -1637,59 +1654,59 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - 
self.machine.i64_popcnt(loc, ret); + self.machine.i64_popcnt(loc, ret)?; } Operator::I64Shl => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.i64_shl(loc_a, loc_b, ret); + self.machine.i64_shl(loc_a, loc_b, ret)?; } Operator::I64ShrU => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.i64_shr(loc_a, loc_b, ret); + self.machine.i64_shr(loc_a, loc_b, ret)?; } Operator::I64ShrS => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.i64_sar(loc_a, loc_b, ret); + self.machine.i64_sar(loc_a, loc_b, ret)?; } Operator::I64Rotl => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.i64_rol(loc_a, loc_b, ret); + self.machine.i64_rol(loc_a, loc_b, ret)?; } Operator::I64Rotr => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.i64_ror(loc_a, loc_b, ret); + self.machine.i64_ror(loc_a, loc_b, ret)?; } Operator::I64LtU => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.i64_cmp_lt_u(loc_a, loc_b, ret); + self.machine.i64_cmp_lt_u(loc_a, loc_b, ret)?; } Operator::I64LeU => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.i64_cmp_le_u(loc_a, loc_b, ret); + self.machine.i64_cmp_le_u(loc_a, loc_b, ret)?; } Operator::I64GtU => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.i64_cmp_gt_u(loc_a, loc_b, ret); + self.machine.i64_cmp_gt_u(loc_a, loc_b, ret)?; } Operator::I64GeU => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.i64_cmp_ge_u(loc_a, loc_b, ret); + self.machine.i64_cmp_ge_u(loc_a, loc_b, ret)?; } Operator::I64LtS => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.i64_cmp_lt_s(loc_a, loc_b, ret); + self.machine.i64_cmp_lt_s(loc_a, loc_b, ret)?; } Operator::I64LeS => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - 
self.machine.i64_cmp_le_s(loc_a, loc_b, ret); + self.machine.i64_cmp_le_s(loc_a, loc_b, ret)?; } Operator::I64GtS => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.i64_cmp_gt_s(loc_a, loc_b, ret); + self.machine.i64_cmp_gt_s(loc_a, loc_b, ret)?; } Operator::I64GeS => { let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I64)?; - self.machine.i64_cmp_ge_s(loc_a, loc_b, ret); + self.machine.i64_cmp_ge_s(loc_a, loc_b, ret)?; } Operator::I64ExtendI32U => { let loc = self.pop_value_released()?; @@ -1698,7 +1715,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.emit_relaxed_mov(Size::S32, loc, ret); + self.machine.emit_relaxed_mov(Size::S32, loc, ret)?; // A 32-bit memory write does not automatically clear the upper 32 bits of a 64-bit word. // So, we need to explicitly write zero to the upper half here. @@ -1707,7 +1724,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S32, Location::Imm32(0), Location::Memory(base, off + 4), - ); + )?; } } Operator::I64ExtendI32S => { @@ -1718,7 +1735,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { )?[0]; self.value_stack.push(ret); self.machine - .emit_relaxed_sign_extension(Size::S32, loc, Size::S64, ret); + .emit_relaxed_sign_extension(Size::S32, loc, Size::S64, ret)?; } Operator::I32Extend8S => { let loc = self.pop_value_released()?; @@ -1729,7 +1746,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.machine - .emit_relaxed_sign_extension(Size::S8, loc, Size::S32, ret); + .emit_relaxed_sign_extension(Size::S8, loc, Size::S32, ret)?; } Operator::I32Extend16S => { let loc = self.pop_value_released()?; @@ -1740,7 +1757,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.machine - .emit_relaxed_sign_extension(Size::S16, loc, Size::S32, ret); + .emit_relaxed_sign_extension(Size::S16, loc, Size::S32, ret)?; } Operator::I64Extend8S => { let loc = self.pop_value_released()?; @@ -1751,7 +1768,7 @@ impl<'a, 
M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.machine - .emit_relaxed_sign_extension(Size::S8, loc, Size::S64, ret); + .emit_relaxed_sign_extension(Size::S8, loc, Size::S64, ret)?; } Operator::I64Extend16S => { let loc = self.pop_value_released()?; @@ -1762,7 +1779,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.machine - .emit_relaxed_sign_extension(Size::S16, loc, Size::S64, ret); + .emit_relaxed_sign_extension(Size::S16, loc, Size::S64, ret)?; } Operator::I64Extend32S => { let loc = self.pop_value_released()?; @@ -1773,7 +1790,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.machine - .emit_relaxed_sign_extension(Size::S32, loc, Size::S64, ret); + .emit_relaxed_sign_extension(Size::S32, loc, Size::S64, ret)?; } Operator::I32WrapI64 => { let loc = self.pop_value_released()?; @@ -1782,7 +1799,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.emit_relaxed_mov(Size::S32, loc, ret); + self.machine.emit_relaxed_mov(Size::S32, loc, ret)?; } Operator::F32Const { value } => { @@ -1799,7 +1816,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { .push(FloatValue::cncl_f32(self.value_stack.len() - 2)); let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::F64)?; - self.machine.f32_add(loc_a, loc_b, ret); + self.machine.f32_add(loc_a, loc_b, ret)?; } Operator::F32Sub => { self.fp_stack.pop2()?; @@ -1807,7 +1824,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { .push(FloatValue::cncl_f32(self.value_stack.len() - 2)); let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::F64)?; - self.machine.f32_sub(loc_a, loc_b, ret); + self.machine.f32_sub(loc_a, loc_b, ret)?; } Operator::F32Mul => { self.fp_stack.pop2()?; @@ -1815,7 +1832,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { .push(FloatValue::cncl_f32(self.value_stack.len() - 2)); let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::F64)?; - self.machine.f32_mul(loc_a, loc_b, ret); + self.machine.f32_mul(loc_a, 
loc_b, ret)?; } Operator::F32Div => { self.fp_stack.pop2()?; @@ -1823,51 +1840,51 @@ impl<'a, M: Machine> FuncGen<'a, M> { .push(FloatValue::cncl_f32(self.value_stack.len() - 2)); let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::F64)?; - self.machine.f32_div(loc_a, loc_b, ret); + self.machine.f32_div(loc_a, loc_b, ret)?; } Operator::F32Max => { self.fp_stack.pop2()?; self.fp_stack .push(FloatValue::new(self.value_stack.len() - 2)); let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::F64)?; - self.machine.f32_max(loc_a, loc_b, ret); + self.machine.f32_max(loc_a, loc_b, ret)?; } Operator::F32Min => { self.fp_stack.pop2()?; self.fp_stack .push(FloatValue::new(self.value_stack.len() - 2)); let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::F64)?; - self.machine.f32_min(loc_a, loc_b, ret); + self.machine.f32_min(loc_a, loc_b, ret)?; } Operator::F32Eq => { self.fp_stack.pop2()?; let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.f32_cmp_eq(loc_a, loc_b, ret); + self.machine.f32_cmp_eq(loc_a, loc_b, ret)?; } Operator::F32Ne => { self.fp_stack.pop2()?; let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.f32_cmp_ne(loc_a, loc_b, ret); + self.machine.f32_cmp_ne(loc_a, loc_b, ret)?; } Operator::F32Lt => { self.fp_stack.pop2()?; let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.f32_cmp_lt(loc_a, loc_b, ret); + self.machine.f32_cmp_lt(loc_a, loc_b, ret)?; } Operator::F32Le => { self.fp_stack.pop2()?; let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.f32_cmp_le(loc_a, loc_b, ret); + self.machine.f32_cmp_le(loc_a, loc_b, ret)?; } Operator::F32Gt => { self.fp_stack.pop2()?; let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.f32_cmp_gt(loc_a, loc_b, ret); + self.machine.f32_cmp_gt(loc_a, loc_b, ret)?; } Operator::F32Ge => { self.fp_stack.pop2()?; let I2O1 { loc_a, loc_b, ret } = 
self.i2o1_prepare(WpType::I32)?; - self.machine.f32_cmp_ge(loc_a, loc_b, ret); + self.machine.f32_cmp_ge(loc_a, loc_b, ret)?; } Operator::F32Nearest => { self.fp_stack.pop1()?; @@ -1879,7 +1896,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.f32_nearest(loc, ret); + self.machine.f32_nearest(loc, ret)?; } Operator::F32Floor => { self.fp_stack.pop1()?; @@ -1891,7 +1908,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.f32_floor(loc, ret); + self.machine.f32_floor(loc, ret)?; } Operator::F32Ceil => { self.fp_stack.pop1()?; @@ -1903,7 +1920,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.f32_ceil(loc, ret); + self.machine.f32_ceil(loc, ret)?; } Operator::F32Trunc => { self.fp_stack.pop1()?; @@ -1915,7 +1932,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.f32_trunc(loc, ret); + self.machine.f32_trunc(loc, ret)?; } Operator::F32Sqrt => { self.fp_stack.pop1()?; @@ -1927,7 +1944,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.f32_sqrt(loc, ret); + self.machine.f32_sqrt(loc, ret)?; } Operator::F32Copysign => { @@ -1947,23 +1964,23 @@ impl<'a, M: Machine> FuncGen<'a, M> { match fp.canonicalization { Some(_) => { self.machine - .canonicalize_nan(Size::S32, *loc, Location::GPR(*tmp)); + .canonicalize_nan(Size::S32, *loc, Location::GPR(*tmp)) } None => { self.machine - .move_location(Size::S32, *loc, Location::GPR(*tmp)); + .move_location(Size::S32, *loc, Location::GPR(*tmp)) } - } + }?; } } else { self.machine - .move_location(Size::S32, loc_a, Location::GPR(tmp1)); + .move_location(Size::S32, loc_a, Location::GPR(tmp1))?; self.machine - .move_location(Size::S32, loc_b, Location::GPR(tmp2)); + .move_location(Size::S32, loc_b, Location::GPR(tmp2))?; } - self.machine.emit_i32_copysign(tmp1, tmp2); + 
self.machine.emit_i32_copysign(tmp1, tmp2)?; self.machine - .move_location(Size::S32, Location::GPR(tmp1), ret); + .move_location(Size::S32, Location::GPR(tmp1), ret)?; self.machine.release_gpr(tmp2); self.machine.release_gpr(tmp1); } @@ -1978,7 +1995,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { )?[0]; self.value_stack.push(ret); - self.machine.f32_abs(loc, ret); + self.machine.f32_abs(loc, ret)?; } Operator::F32Neg => { @@ -1991,7 +2008,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { )?[0]; self.value_stack.push(ret); - self.machine.f32_neg(loc, ret); + self.machine.f32_neg(loc, ret)?; } Operator::F64Const { value } => { @@ -2008,7 +2025,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { .push(FloatValue::cncl_f64(self.value_stack.len() - 2)); let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::F64)?; - self.machine.f64_add(loc_a, loc_b, ret); + self.machine.f64_add(loc_a, loc_b, ret)?; } Operator::F64Sub => { self.fp_stack.pop2()?; @@ -2016,7 +2033,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { .push(FloatValue::cncl_f64(self.value_stack.len() - 2)); let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::F64)?; - self.machine.f64_sub(loc_a, loc_b, ret); + self.machine.f64_sub(loc_a, loc_b, ret)?; } Operator::F64Mul => { self.fp_stack.pop2()?; @@ -2024,7 +2041,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { .push(FloatValue::cncl_f64(self.value_stack.len() - 2)); let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::F64)?; - self.machine.f64_mul(loc_a, loc_b, ret); + self.machine.f64_mul(loc_a, loc_b, ret)?; } Operator::F64Div => { self.fp_stack.pop2()?; @@ -2032,51 +2049,51 @@ impl<'a, M: Machine> FuncGen<'a, M> { .push(FloatValue::cncl_f64(self.value_stack.len() - 2)); let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::F64)?; - self.machine.f64_div(loc_a, loc_b, ret); + self.machine.f64_div(loc_a, loc_b, ret)?; } Operator::F64Max => { self.fp_stack.pop2()?; self.fp_stack .push(FloatValue::new(self.value_stack.len() - 2)); let I2O1 { loc_a, loc_b, ret } = 
self.i2o1_prepare(WpType::F64)?; - self.machine.f64_max(loc_a, loc_b, ret); + self.machine.f64_max(loc_a, loc_b, ret)?; } Operator::F64Min => { self.fp_stack.pop2()?; self.fp_stack .push(FloatValue::new(self.value_stack.len() - 2)); let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::F64)?; - self.machine.f64_min(loc_a, loc_b, ret); + self.machine.f64_min(loc_a, loc_b, ret)?; } Operator::F64Eq => { self.fp_stack.pop2()?; let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.f64_cmp_eq(loc_a, loc_b, ret); + self.machine.f64_cmp_eq(loc_a, loc_b, ret)?; } Operator::F64Ne => { self.fp_stack.pop2()?; let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.f64_cmp_ne(loc_a, loc_b, ret); + self.machine.f64_cmp_ne(loc_a, loc_b, ret)?; } Operator::F64Lt => { self.fp_stack.pop2()?; let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.f64_cmp_lt(loc_a, loc_b, ret); + self.machine.f64_cmp_lt(loc_a, loc_b, ret)?; } Operator::F64Le => { self.fp_stack.pop2()?; let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.f64_cmp_le(loc_a, loc_b, ret); + self.machine.f64_cmp_le(loc_a, loc_b, ret)?; } Operator::F64Gt => { self.fp_stack.pop2()?; let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.f64_cmp_gt(loc_a, loc_b, ret); + self.machine.f64_cmp_gt(loc_a, loc_b, ret)?; } Operator::F64Ge => { self.fp_stack.pop2()?; let I2O1 { loc_a, loc_b, ret } = self.i2o1_prepare(WpType::I32)?; - self.machine.f64_cmp_ge(loc_a, loc_b, ret); + self.machine.f64_cmp_ge(loc_a, loc_b, ret)?; } Operator::F64Nearest => { self.fp_stack.pop1()?; @@ -2088,7 +2105,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.f64_nearest(loc, ret); + self.machine.f64_nearest(loc, ret)?; } Operator::F64Floor => { self.fp_stack.pop1()?; @@ -2100,7 +2117,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - 
self.machine.f64_floor(loc, ret); + self.machine.f64_floor(loc, ret)?; } Operator::F64Ceil => { self.fp_stack.pop1()?; @@ -2112,7 +2129,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.f64_ceil(loc, ret); + self.machine.f64_ceil(loc, ret)?; } Operator::F64Trunc => { self.fp_stack.pop1()?; @@ -2124,7 +2141,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.f64_trunc(loc, ret); + self.machine.f64_trunc(loc, ret)?; } Operator::F64Sqrt => { self.fp_stack.pop1()?; @@ -2136,7 +2153,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.f64_sqrt(loc, ret); + self.machine.f64_sqrt(loc, ret)?; } Operator::F64Copysign => { @@ -2156,23 +2173,23 @@ impl<'a, M: Machine> FuncGen<'a, M> { match fp.canonicalization { Some(_) => { self.machine - .canonicalize_nan(Size::S64, *loc, Location::GPR(*tmp)); + .canonicalize_nan(Size::S64, *loc, Location::GPR(*tmp)) } None => { self.machine - .move_location(Size::S64, *loc, Location::GPR(*tmp)); + .move_location(Size::S64, *loc, Location::GPR(*tmp)) } - } + }?; } } else { self.machine - .move_location(Size::S64, loc_a, Location::GPR(tmp1)); + .move_location(Size::S64, loc_a, Location::GPR(tmp1))?; self.machine - .move_location(Size::S64, loc_b, Location::GPR(tmp2)); + .move_location(Size::S64, loc_b, Location::GPR(tmp2))?; } - self.machine.emit_i64_copysign(tmp1, tmp2); + self.machine.emit_i64_copysign(tmp1, tmp2)?; self.machine - .move_location(Size::S64, Location::GPR(tmp1), ret); + .move_location(Size::S64, Location::GPR(tmp1), ret)?; self.machine.release_gpr(tmp2); self.machine.release_gpr(tmp1); @@ -2188,7 +2205,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { )?[0]; self.value_stack.push(ret); - self.machine.f64_abs(loc, ret); + self.machine.f64_abs(loc, ret)?; } Operator::F64Neg => { @@ -2201,7 +2218,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { )?[0]; self.value_stack.push(ret); - 
self.machine.f64_neg(loc, ret); + self.machine.f64_neg(loc, ret)?; } Operator::F64PromoteF32 => { @@ -2213,7 +2230,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.convert_f64_f32(loc, ret); + self.machine.convert_f64_f32(loc, ret)?; } Operator::F32DemoteF64 => { let fp = self.fp_stack.pop1()?; @@ -2224,7 +2241,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false, )?[0]; self.value_stack.push(ret); - self.machine.convert_f32_f64(loc, ret); + self.machine.convert_f32_f64(loc, ret)?; } Operator::I32ReinterpretF32 => { @@ -2241,10 +2258,10 @@ impl<'a, M: Machine> FuncGen<'a, M> { || fp.canonicalization.is_none() { if loc != ret { - self.machine.emit_relaxed_mov(Size::S32, loc, ret); + self.machine.emit_relaxed_mov(Size::S32, loc, ret)?; } } else { - self.machine.canonicalize_nan(Size::S32, loc, ret); + self.machine.canonicalize_nan(Size::S32, loc, ret)?; } } Operator::F32ReinterpretI32 => { @@ -2258,7 +2275,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { .push(FloatValue::new(self.value_stack.len() - 1)); if loc != ret { - self.machine.emit_relaxed_mov(Size::S32, loc, ret); + self.machine.emit_relaxed_mov(Size::S32, loc, ret)?; } } @@ -2276,10 +2293,10 @@ impl<'a, M: Machine> FuncGen<'a, M> { || fp.canonicalization.is_none() { if loc != ret { - self.machine.emit_relaxed_mov(Size::S64, loc, ret); + self.machine.emit_relaxed_mov(Size::S64, loc, ret)?; } } else { - self.machine.canonicalize_nan(Size::S64, loc, ret); + self.machine.canonicalize_nan(Size::S64, loc, ret)?; } } Operator::F64ReinterpretI64 => { @@ -2293,7 +2310,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { .push(FloatValue::new(self.value_stack.len() - 1)); if loc != ret { - self.machine.emit_relaxed_mov(Size::S64, loc, ret); + self.machine.emit_relaxed_mov(Size::S64, loc, ret)?; } } @@ -2306,7 +2323,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.fp_stack.pop1()?; - self.machine.convert_i32_f32(loc, ret, false, false); + 
self.machine.convert_i32_f32(loc, ret, false, false)?; } Operator::I32TruncSatF32U => { @@ -2318,7 +2335,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.fp_stack.pop1()?; - self.machine.convert_i32_f32(loc, ret, false, true); + self.machine.convert_i32_f32(loc, ret, false, true)?; } Operator::I32TruncF32S => { @@ -2330,7 +2347,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.fp_stack.pop1()?; - self.machine.convert_i32_f32(loc, ret, true, false); + self.machine.convert_i32_f32(loc, ret, true, false)?; } Operator::I32TruncSatF32S => { let loc = self.pop_value_released()?; @@ -2341,7 +2358,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.fp_stack.pop1()?; - self.machine.convert_i32_f32(loc, ret, true, true); + self.machine.convert_i32_f32(loc, ret, true, true)?; } Operator::I64TruncF32S => { @@ -2353,7 +2370,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.fp_stack.pop1()?; - self.machine.convert_i64_f32(loc, ret, true, false); + self.machine.convert_i64_f32(loc, ret, true, false)?; } Operator::I64TruncSatF32S => { @@ -2365,7 +2382,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.fp_stack.pop1()?; - self.machine.convert_i64_f32(loc, ret, true, true); + self.machine.convert_i64_f32(loc, ret, true, true)?; } Operator::I64TruncF32U => { @@ -2377,7 +2394,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.fp_stack.pop1()?; - self.machine.convert_i64_f32(loc, ret, false, false); + self.machine.convert_i64_f32(loc, ret, false, false)?; } Operator::I64TruncSatF32U => { let loc = self.pop_value_released()?; @@ -2388,7 +2405,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.fp_stack.pop1()?; - self.machine.convert_i64_f32(loc, ret, false, true); + self.machine.convert_i64_f32(loc, ret, false, true)?; } Operator::I32TruncF64U => { @@ -2400,7 +2417,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { 
self.value_stack.push(ret); self.fp_stack.pop1()?; - self.machine.convert_i32_f64(loc, ret, false, false); + self.machine.convert_i32_f64(loc, ret, false, false)?; } Operator::I32TruncSatF64U => { @@ -2412,7 +2429,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.fp_stack.pop1()?; - self.machine.convert_i32_f64(loc, ret, false, true); + self.machine.convert_i32_f64(loc, ret, false, true)?; } Operator::I32TruncF64S => { @@ -2424,7 +2441,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.fp_stack.pop1()?; - self.machine.convert_i32_f64(loc, ret, true, false); + self.machine.convert_i32_f64(loc, ret, true, false)?; } Operator::I32TruncSatF64S => { @@ -2436,7 +2453,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.fp_stack.pop1()?; - self.machine.convert_i32_f64(loc, ret, true, true); + self.machine.convert_i32_f64(loc, ret, true, true)?; } Operator::I64TruncF64S => { @@ -2448,7 +2465,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.fp_stack.pop1()?; - self.machine.convert_i64_f64(loc, ret, true, false); + self.machine.convert_i64_f64(loc, ret, true, false)?; } Operator::I64TruncSatF64S => { @@ -2460,7 +2477,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.fp_stack.pop1()?; - self.machine.convert_i64_f64(loc, ret, true, true); + self.machine.convert_i64_f64(loc, ret, true, true)?; } Operator::I64TruncF64U => { @@ -2472,7 +2489,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.fp_stack.pop1()?; - self.machine.convert_i64_f64(loc, ret, false, false); + self.machine.convert_i64_f64(loc, ret, false, false)?; } Operator::I64TruncSatF64U => { @@ -2484,7 +2501,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.value_stack.push(ret); self.fp_stack.pop1()?; - self.machine.convert_i64_f64(loc, ret, false, true); + self.machine.convert_i64_f64(loc, ret, false, true)?; } Operator::F32ConvertI32S => { @@ -2497,7 +2514,7 @@ 
impl<'a, M: Machine> FuncGen<'a, M> { self.fp_stack .push(FloatValue::new(self.value_stack.len() - 1)); // Converting i32 to f32 never results in NaN. - self.machine.convert_f32_i32(loc, true, ret); + self.machine.convert_f32_i32(loc, true, ret)?; } Operator::F32ConvertI32U => { let loc = self.pop_value_released()?; @@ -2509,7 +2526,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.fp_stack .push(FloatValue::new(self.value_stack.len() - 1)); // Converting i32 to f32 never results in NaN. - self.machine.convert_f32_i32(loc, false, ret); + self.machine.convert_f32_i32(loc, false, ret)?; } Operator::F32ConvertI64S => { let loc = self.pop_value_released()?; @@ -2521,7 +2538,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.fp_stack .push(FloatValue::new(self.value_stack.len() - 1)); // Converting i64 to f32 never results in NaN. - self.machine.convert_f32_i64(loc, true, ret); + self.machine.convert_f32_i64(loc, true, ret)?; } Operator::F32ConvertI64U => { let loc = self.pop_value_released()?; @@ -2533,7 +2550,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.fp_stack .push(FloatValue::new(self.value_stack.len() - 1)); // Converting i64 to f32 never results in NaN. - self.machine.convert_f32_i64(loc, false, ret); + self.machine.convert_f32_i64(loc, false, ret)?; } Operator::F64ConvertI32S => { @@ -2546,7 +2563,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.fp_stack .push(FloatValue::new(self.value_stack.len() - 1)); // Converting i32 to f64 never results in NaN. - self.machine.convert_f64_i32(loc, true, ret); + self.machine.convert_f64_i32(loc, true, ret)?; } Operator::F64ConvertI32U => { let loc = self.pop_value_released()?; @@ -2558,7 +2575,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.fp_stack .push(FloatValue::new(self.value_stack.len() - 1)); // Converting i32 to f64 never results in NaN. 
- self.machine.convert_f64_i32(loc, false, ret); + self.machine.convert_f64_i32(loc, false, ret)?; } Operator::F64ConvertI64S => { let loc = self.pop_value_released()?; @@ -2570,7 +2587,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.fp_stack .push(FloatValue::new(self.value_stack.len() - 1)); // Converting i64 to f64 never results in NaN. - self.machine.convert_f64_i64(loc, true, ret); + self.machine.convert_f64_i64(loc, true, ret)?; } Operator::F64ConvertI64U => { let loc = self.pop_value_released()?; @@ -2582,7 +2599,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.fp_stack .push(FloatValue::new(self.value_stack.len() - 1)); // Converting i64 to f64 never results in NaN. - self.machine.convert_f64_i64(loc, false, ret); + self.machine.convert_f64_i64(loc, false, ret)?; } Operator::Call { function_index } => { @@ -2620,7 +2637,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { { let size = fp.canonicalization.unwrap().to_size(); self.machine - .canonicalize_nan(size, params[index], params[index]); + .canonicalize_nan(size, params[index], params[index])?; } self.fp_stack.pop().unwrap(); } else { @@ -2645,9 +2662,10 @@ impl<'a, M: Machine> FuncGen<'a, M> { .mark_instruction_with_trap_code(TrapCode::StackOverflow); let mut relocations = this .machine - .emit_call_with_reloc(calling_convention, reloc_target); + .emit_call_with_reloc(calling_convention, reloc_target)?; this.machine.mark_instruction_address_end(offset); this.relocations.append(&mut relocations); + Ok(()) }, params.iter().copied(), param_types.iter().copied(), @@ -2669,7 +2687,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S64, Location::SIMD(self.machine.get_simd_for_ret()), ret, - ); + )?; self.fp_stack .push(FloatValue::new(self.value_stack.len() - 1)); } else { @@ -2677,7 +2695,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S64, Location::GPR(self.machine.get_gpr_for_ret()), ret, - ); + )?; } } } @@ -2713,7 +2731,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { { let size = 
fp.canonicalization.unwrap().to_size(); self.machine - .canonicalize_nan(size, params[index], params[index]); + .canonicalize_nan(size, params[index], params[index])?; } self.fp_stack.pop().unwrap(); } else { @@ -2735,12 +2753,12 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S64, Location::Memory(self.machine.get_vmctx_reg(), vmctx_offset_base as i32), Location::GPR(table_base), - ); + )?; self.machine.move_location( Size::S32, Location::Memory(self.machine.get_vmctx_reg(), vmctx_offset_len as i32), Location::GPR(table_count), - ); + )?; } else { // Do an indirection. let import_offset = self.vmoffsets.vmctx_vmtable_import(table_index); @@ -2748,7 +2766,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S64, Location::Memory(self.machine.get_vmctx_reg(), import_offset as i32), Location::GPR(table_base), - ); + )?; // Load len. self.machine.move_location( @@ -2758,48 +2776,48 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.vmoffsets.vmtable_definition_current_elements() as _, ), Location::GPR(table_count), - ); + )?; // Load base. 
self.machine.move_location( Size::S64, Location::Memory(table_base, self.vmoffsets.vmtable_definition_base() as _), Location::GPR(table_base), - ); + )?; } self.machine - .location_cmp(Size::S32, func_index, Location::GPR(table_count)); + .location_cmp(Size::S32, func_index, Location::GPR(table_count))?; self.machine - .jmp_on_belowequal(self.special_labels.table_access_oob); + .jmp_on_belowequal(self.special_labels.table_access_oob)?; self.machine - .move_location(Size::S32, func_index, Location::GPR(table_count)); + .move_location(Size::S32, func_index, Location::GPR(table_count))?; self.machine.emit_imul_imm32( Size::S64, self.vmoffsets.size_of_vm_funcref() as u32, table_count, - ); + )?; self.machine.location_add( Size::S64, Location::GPR(table_base), Location::GPR(table_count), false, - ); + )?; // deref the table to get a VMFuncRef self.machine.move_location( Size::S64, Location::Memory(table_count, self.vmoffsets.vm_funcref_anyfunc_ptr() as i32), Location::GPR(table_count), - ); + )?; // Trap if the FuncRef is null self.machine.location_cmp( Size::S64, Location::Imm32(0), Location::GPR(table_count), - ); + )?; self.machine - .jmp_on_equal(self.special_labels.indirect_call_null); + .jmp_on_equal(self.special_labels.indirect_call_null)?; self.machine.move_location( Size::S64, Location::Memory( @@ -2807,7 +2825,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.vmoffsets.vmctx_vmshared_signature_id(index) as i32, ), Location::GPR(sigidx), - ); + )?; // Trap if signature mismatches. 
self.machine.location_cmp( @@ -2817,9 +2835,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { table_count, (self.vmoffsets.vmcaller_checked_anyfunc_type_index() as usize) as i32, ), - ); + )?; self.machine - .jmp_on_different(self.special_labels.bad_signature); + .jmp_on_different(self.special_labels.bad_signature)?; self.machine.release_gpr(sigidx); self.machine.release_gpr(table_count); @@ -2831,7 +2849,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S64, Location::GPR(table_count), Location::GPR(gpr_for_call), - ); + )?; } self.release_locations_only_osr_state(params.len())?; @@ -2849,7 +2867,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { .arch_emit_indirect_call_with_trampoline(Location::Memory( gpr_for_call, vmcaller_checked_anyfunc_func_ptr as i32, - )); + )) } else { let offset = this .machine @@ -2864,13 +2882,14 @@ impl<'a, M: Machine> FuncGen<'a, M> { ), this.machine .get_simple_param_location(0, calling_convention), - ); + )?; this.machine.emit_call_location(Location::Memory( gpr_for_call, vmcaller_checked_anyfunc_func_ptr as i32, - )); + ))?; this.machine.mark_instruction_address_end(offset); + Ok(()) } }, params.iter().copied(), @@ -2893,7 +2912,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S64, Location::SIMD(self.machine.get_simd_for_ret()), ret, - ); + )?; self.fp_stack .push(FloatValue::new(self.value_stack.len() - 1)); } else { @@ -2901,7 +2920,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S64, Location::GPR(self.machine.get_gpr_for_ret()), ret, - ); + )?; } } } @@ -2931,8 +2950,8 @@ impl<'a, M: Machine> FuncGen<'a, M> { }; self.control_stack.push(frame); self.machine - .emit_relaxed_cmp(Size::S32, Location::Imm32(0), cond); - self.machine.jmp_on_equal(label_else); + .emit_relaxed_cmp(Size::S32, Location::Imm32(0), cond)?; + self.machine.jmp_on_equal(label_else)?; } Operator::Else => { let frame = self.control_stack.last_mut().unwrap(); @@ -2949,7 +2968,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false }; self.machine - 
.emit_function_return_value(first_return, canonicalize, loc); + .emit_function_return_value(first_return, canonicalize, loc)?; } let frame = &self.control_stack.last_mut().unwrap(); @@ -2962,8 +2981,8 @@ impl<'a, M: Machine> FuncGen<'a, M> { match frame.if_else { IfElseState::If(label) => { - self.machine.jmp_unconditionnal(frame.label); - self.machine.emit_label(label); + self.machine.jmp_unconditionnal(frame.label)?; + self.machine.emit_label(label)?; frame.if_else = IfElseState::Else; } _ => { @@ -3001,37 +3020,37 @@ impl<'a, M: Machine> FuncGen<'a, M> { let zero_label = self.machine.get_label(); self.machine - .emit_relaxed_cmp(Size::S32, Location::Imm32(0), cond); - self.machine.jmp_on_equal(zero_label); + .emit_relaxed_cmp(Size::S32, Location::Imm32(0), cond)?; + self.machine.jmp_on_equal(zero_label)?; match cncl { Some((Some(fp), _)) if self.machine.arch_supports_canonicalize_nan() && self.config.enable_nan_canonicalization => { - self.machine.canonicalize_nan(fp.to_size(), v_a, ret); + self.machine.canonicalize_nan(fp.to_size(), v_a, ret)?; } _ => { if v_a != ret { - self.machine.emit_relaxed_mov(Size::S64, v_a, ret); + self.machine.emit_relaxed_mov(Size::S64, v_a, ret)?; } } } - self.machine.jmp_unconditionnal(end_label); - self.machine.emit_label(zero_label); + self.machine.jmp_unconditionnal(end_label)?; + self.machine.emit_label(zero_label)?; match cncl { Some((_, Some(fp))) if self.machine.arch_supports_canonicalize_nan() && self.config.enable_nan_canonicalization => { - self.machine.canonicalize_nan(fp.to_size(), v_b, ret); + self.machine.canonicalize_nan(fp.to_size(), v_b, ret)?; } _ => { if v_b != ret { - self.machine.emit_relaxed_mov(Size::S64, v_b, ret); + self.machine.emit_relaxed_mov(Size::S64, v_b, ret)?; } } } - self.machine.emit_label(end_label); + self.machine.emit_label(end_label)?; } Operator::Block { ty } => { let frame = ControlFrame { @@ -3056,7 +3075,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.control_stack.push(frame); } 
Operator::Loop { ty } => { - self.machine.align_for_loop(); + self.machine.align_for_loop()?; let label = self.machine.get_label(); let state_diff_id = self.get_state_diff(); let _activate_offset = self.machine.assembler_get_offset().0; @@ -3080,7 +3099,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { state: self.state.clone(), state_diff_id, }); - self.machine.emit_label(label); + self.machine.emit_label(label)?; // TODO: Re-enable interrupt signal check without branching } @@ -3100,11 +3119,11 @@ impl<'a, M: Machine> FuncGen<'a, M> { ) as i32, ), Location::GPR(self.machine.get_grp_for_call()), - ); + )?; self.emit_call_native( |this| { this.machine - .emit_call_register(this.machine.get_grp_for_call()); + .emit_call_register(this.machine.get_grp_for_call()) }, // [vmctx, memory_index] iter::once(Location::Imm32(memory_index.index() as u32)), @@ -3119,7 +3138,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S64, Location::GPR(self.machine.get_gpr_for_ret()), ret, - ); + )?; } Operator::MemoryInit { segment, mem } => { let len = self.value_stack.pop().unwrap(); @@ -3136,7 +3155,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { as i32, ), Location::GPR(self.machine.get_grp_for_call()), - ); + )?; // TODO: should this be 3? 
self.release_locations_only_osr_state(1)?; @@ -3144,7 +3163,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.emit_call_native( |this| { this.machine - .emit_call_register(this.machine.get_grp_for_call()); + .emit_call_register(this.machine.get_grp_for_call()) }, // [vmctx, memory_index, segment_index, dst, src, len] [ @@ -3178,12 +3197,12 @@ impl<'a, M: Machine> FuncGen<'a, M> { as i32, ), Location::GPR(self.machine.get_grp_for_call()), - ); + )?; self.emit_call_native( |this| { this.machine - .emit_call_register(this.machine.get_grp_for_call()); + .emit_call_register(this.machine.get_grp_for_call()) }, // [vmctx, segment_index] iter::once(Location::Imm32(segment)), @@ -3219,7 +3238,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.vmoffsets.vmctx_builtin_function(memory_copy_index) as i32, ), Location::GPR(self.machine.get_grp_for_call()), - ); + )?; // TODO: should this be 3? self.release_locations_only_osr_state(1)?; @@ -3227,7 +3246,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.emit_call_native( |this| { this.machine - .emit_call_register(this.machine.get_grp_for_call()); + .emit_call_register(this.machine.get_grp_for_call()) }, // [vmctx, memory_index, dst, src, len] [ @@ -3271,7 +3290,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.vmoffsets.vmctx_builtin_function(memory_fill_index) as i32, ), Location::GPR(self.machine.get_grp_for_call()), - ); + )?; // TODO: should this be 3? 
self.release_locations_only_osr_state(1)?; @@ -3279,7 +3298,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.emit_call_native( |this| { this.machine - .emit_call_register(this.machine.get_grp_for_call()); + .emit_call_register(this.machine.get_grp_for_call()) }, // [vmctx, memory_index, dst, src, len] [Location::Imm32(memory_index.index() as u32), dst, val, len] @@ -3310,14 +3329,14 @@ impl<'a, M: Machine> FuncGen<'a, M> { ) as i32, ), Location::GPR(self.machine.get_grp_for_call()), - ); + )?; self.release_locations_only_osr_state(1)?; self.emit_call_native( |this| { this.machine - .emit_call_register(this.machine.get_grp_for_call()); + .emit_call_register(this.machine.get_grp_for_call()) }, // [vmctx, val, memory_index] iter::once(param_pages) @@ -3336,7 +3355,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S64, Location::GPR(self.machine.get_gpr_for_ret()), ret, - ); + )?; } Operator::I32Load { ref memarg } => { let target = self.pop_value_released()?; @@ -3355,9 +3374,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::F32Load { ref memarg } => { let target = self.pop_value_released()?; @@ -3378,9 +3397,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32Load8U { ref memarg } => { let target = self.pop_value_released()?; @@ -3399,9 +3418,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32Load8S { ref memarg } => { let target = self.pop_value_released()?; @@ -3420,9 +3439,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32Load16U { ref memarg } => { let target = self.pop_value_released()?; @@ -3441,9 +3460,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32Load16S { ref memarg } => { let target = 
self.pop_value_released()?; @@ -3462,9 +3481,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32Store { ref memarg } => { let target_value = self.pop_value_released()?; @@ -3479,9 +3498,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::F32Store { ref memarg } => { let target_value = self.pop_value_released()?; @@ -3499,9 +3518,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32Store8 { ref memarg } => { let target_value = self.pop_value_released()?; @@ -3516,9 +3535,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32Store16 { ref memarg } => { let target_value = self.pop_value_released()?; @@ -3533,9 +3552,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64Load { ref memarg } => { let target = self.pop_value_released()?; @@ -3554,9 +3573,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::F64Load { ref memarg } => { let target = self.pop_value_released()?; @@ -3577,9 +3596,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64Load8U { ref memarg } => { let target = self.pop_value_released()?; @@ -3598,9 +3617,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64Load8S { ref memarg } => { let target = self.pop_value_released()?; @@ -3619,9 +3638,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64Load16U { ref memarg } => { let target = self.pop_value_released()?; @@ -3640,9 +3659,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { 
imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64Load16S { ref memarg } => { let target = self.pop_value_released()?; @@ -3661,9 +3680,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64Load32U { ref memarg } => { let target = self.pop_value_released()?; @@ -3682,9 +3701,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64Load32S { ref memarg } => { let target = self.pop_value_released()?; @@ -3703,9 +3722,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64Store { ref memarg } => { let target_value = self.pop_value_released()?; @@ -3721,9 +3740,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::F64Store { ref memarg } => { let target_value = self.pop_value_released()?; @@ -3741,9 +3760,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64Store8 { ref memarg } => { let target_value = self.pop_value_released()?; @@ -3758,9 +3777,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64Store16 { ref memarg } => { let target_value = self.pop_value_released()?; @@ -3775,9 +3794,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64Store32 { ref memarg } => { let target_value = self.pop_value_released()?; @@ -3792,14 +3811,14 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::Unreachable => { self.mark_trappable(); self.machine - .emit_illegal_op(TrapCode::UnreachableCodeReached); + .emit_illegal_op(TrapCode::UnreachableCodeReached)?; self.unreachable_depth = 1; } 
Operator::Return => { @@ -3821,13 +3840,13 @@ impl<'a, M: Machine> FuncGen<'a, M> { false }; self.machine - .emit_function_return_value(first_return, canonicalize, loc); + .emit_function_return_value(first_return, canonicalize, loc)?; } let frame = &self.control_stack[0]; let frame_depth = frame.value_stack_depth; let label = frame.label; self.release_locations_keep_state(frame_depth)?; - self.machine.jmp_unconditionnal(label); + self.machine.jmp_unconditionnal(label)?; self.unreachable_depth = 1; } Operator::Br { relative_depth } => { @@ -3850,7 +3869,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { false }; self.machine - .emit_function_return_value(first_return, canonicalize, loc); + .emit_function_return_value(first_return, canonicalize, loc)?; } let stack_len = self.control_stack.len(); let frame = &mut self.control_stack[stack_len - 1 - (relative_depth as usize)]; @@ -3858,15 +3877,15 @@ impl<'a, M: Machine> FuncGen<'a, M> { let label = frame.label; self.release_locations_keep_state(frame_depth)?; - self.machine.jmp_unconditionnal(label); + self.machine.jmp_unconditionnal(label)?; self.unreachable_depth = 1; } Operator::BrIf { relative_depth } => { let after = self.machine.get_label(); let cond = self.pop_value_released()?; self.machine - .emit_relaxed_cmp(Size::S32, Location::Imm32(0), cond); - self.machine.jmp_on_equal(after); + .emit_relaxed_cmp(Size::S32, Location::Imm32(0), cond)?; + self.machine.jmp_on_equal(after)?; let frame = &self.control_stack[self.control_stack.len() - 1 - (relative_depth as usize)]; @@ -3888,16 +3907,16 @@ impl<'a, M: Machine> FuncGen<'a, M> { false }; self.machine - .emit_function_return_value(first_return, canonicalize, loc); + .emit_function_return_value(first_return, canonicalize, loc)?; } let stack_len = self.control_stack.len(); let frame = &mut self.control_stack[stack_len - 1 - (relative_depth as usize)]; let stack_depth = frame.value_stack_depth; let label = frame.label; self.release_locations_keep_state(stack_depth)?; - 
self.machine.jmp_unconditionnal(label); + self.machine.jmp_unconditionnal(label)?; - self.machine.emit_label(after); + self.machine.emit_label(after)?; } Operator::BrTable { ref table } => { let targets = table @@ -3915,14 +3934,14 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S32, Location::Imm32(targets.len() as u32), cond, - ); - self.machine.jmp_on_aboveequal(default_br); + )?; + self.machine.jmp_on_aboveequal(default_br)?; - self.machine.emit_jmp_to_jumptable(table_label, cond); + self.machine.emit_jmp_to_jumptable(table_label, cond)?; for target in targets.iter() { let label = self.machine.get_label(); - self.machine.emit_label(label); + self.machine.emit_label(label)?; table.push(label); let frame = &self.control_stack[self.control_stack.len() - 1 - (*target as usize)]; @@ -3947,16 +3966,16 @@ impl<'a, M: Machine> FuncGen<'a, M> { false }; self.machine - .emit_function_return_value(first_return, canonicalize, loc); + .emit_function_return_value(first_return, canonicalize, loc)?; } let frame = &self.control_stack[self.control_stack.len() - 1 - (*target as usize)]; let stack_depth = frame.value_stack_depth; let label = frame.label; self.release_locations_keep_state(stack_depth)?; - self.machine.jmp_unconditionnal(label); + self.machine.jmp_unconditionnal(label)?; } - self.machine.emit_label(default_br); + self.machine.emit_label(default_br)?; { let frame = &self.control_stack @@ -3979,19 +3998,19 @@ impl<'a, M: Machine> FuncGen<'a, M> { false }; self.machine - .emit_function_return_value(first_return, canonicalize, loc); + .emit_function_return_value(first_return, canonicalize, loc)?; } let frame = &self.control_stack [self.control_stack.len() - 1 - (default_target as usize)]; let stack_depth = frame.value_stack_depth; let label = frame.label; self.release_locations_keep_state(stack_depth)?; - self.machine.jmp_unconditionnal(label); + self.machine.jmp_unconditionnal(label)?; } - self.machine.emit_label(table_label); + self.machine.emit_label(table_label)?; 
for x in table { - self.machine.jmp_unconditionnal(x); + self.machine.jmp_unconditionnal(x)?; } self.unreachable_depth = 1; } @@ -4017,22 +4036,22 @@ impl<'a, M: Machine> FuncGen<'a, M> { false }; self.machine - .emit_function_return_value(frame.returns[0], canonicalize, loc); + .emit_function_return_value(frame.returns[0], canonicalize, loc)?; } if self.control_stack.is_empty() { - self.machine.emit_label(frame.label); - self.finalize_locals(self.calling_convention); - self.machine.emit_function_epilog(); + self.machine.emit_label(frame.label)?; + self.finalize_locals(self.calling_convention)?; + self.machine.emit_function_epilog()?; // Make a copy of the return value in XMM0, as required by the SysV CC. match self.signature.results() { [x] if *x == Type::F32 || *x == Type::F64 => { - self.machine.emit_function_return_float(); + self.machine.emit_function_return_float()?; } _ => {} } - self.machine.emit_ret(); + self.machine.emit_ret()?; } else { let released = &self.value_stack.clone()[frame.value_stack_depth..]; self.release_locations(released)?; @@ -4040,11 +4059,11 @@ impl<'a, M: Machine> FuncGen<'a, M> { self.fp_stack.truncate(frame.fp_stack_depth); if !frame.loop_like { - self.machine.emit_label(frame.label); + self.machine.emit_label(frame.label)?; } if let IfElseState::If(label) = frame.if_else { - self.machine.emit_label(label); + self.machine.emit_label(label)?; } if !frame.returns.is_empty() { @@ -4064,7 +4083,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S64, Location::GPR(self.machine.get_gpr_for_ret()), loc, - ); + )?; self.value_stack.push(loc); if frame.returns[0].is_float() { self.fp_stack @@ -4082,7 +4101,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { // model, and if we hadn't recorded what fences used to be there, // it would lead to data races that weren't present in the // original source language. 
- self.machine.emit_memory_fence(); + self.machine.emit_memory_fence()?; } Operator::I32AtomicLoad { ref memarg } => { let target = self.pop_value_released()?; @@ -4101,9 +4120,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicLoad8U { ref memarg } => { let target = self.pop_value_released()?; @@ -4122,9 +4141,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicLoad16U { ref memarg } => { let target = self.pop_value_released()?; @@ -4143,9 +4162,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicStore { ref memarg } => { let target_value = self.pop_value_released()?; @@ -4160,9 +4179,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicStore8 { ref memarg } => { let target_value = self.pop_value_released()?; @@ -4177,9 +4196,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicStore16 { ref memarg } => { let target_value = self.pop_value_released()?; @@ -4194,9 +4213,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicLoad { ref memarg } => { let target = self.pop_value_released()?; @@ -4215,9 +4234,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicLoad8U { ref memarg } => { let target = self.pop_value_released()?; @@ -4236,9 +4255,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicLoad16U { ref memarg } => { let target = self.pop_value_released()?; @@ -4257,9 +4276,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, 
heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicLoad32U { ref memarg } => { let target = self.pop_value_released()?; @@ -4278,9 +4297,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicStore { ref memarg } => { let target_value = self.pop_value_released()?; @@ -4295,9 +4314,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicStore8 { ref memarg } => { let target_value = self.pop_value_released()?; @@ -4312,9 +4331,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicStore16 { ref memarg } => { let target_value = self.pop_value_released()?; @@ -4329,9 +4348,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicStore32 { ref memarg } => { let target_value = self.pop_value_released()?; @@ -4346,9 +4365,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmwAdd { ref memarg } => { let loc = self.pop_value_released()?; @@ -4369,9 +4388,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmwAdd { ref memarg } => { let loc = self.pop_value_released()?; @@ -4392,9 +4411,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmw8AddU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4415,9 +4434,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmw16AddU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4438,9 +4457,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + 
)?; } Operator::I64AtomicRmw8AddU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4461,9 +4480,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw16AddU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4484,9 +4503,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw32AddU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4507,9 +4526,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmwSub { ref memarg } => { let loc = self.pop_value_released()?; @@ -4530,9 +4549,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmwSub { ref memarg } => { let loc = self.pop_value_released()?; @@ -4553,9 +4572,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmw8SubU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4576,9 +4595,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmw16SubU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4599,9 +4618,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw8SubU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4622,9 +4641,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw16SubU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4645,9 +4664,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw32SubU { ref memarg } => { let loc = 
self.pop_value_released()?; @@ -4668,9 +4687,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmwAnd { ref memarg } => { let loc = self.pop_value_released()?; @@ -4691,9 +4710,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmwAnd { ref memarg } => { let loc = self.pop_value_released()?; @@ -4714,9 +4733,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmw8AndU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4737,9 +4756,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmw16AndU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4760,9 +4779,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw8AndU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4783,9 +4802,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw16AndU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4806,9 +4825,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw32AndU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4829,9 +4848,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmwOr { ref memarg } => { let loc = self.pop_value_released()?; @@ -4852,9 +4871,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmwOr { ref memarg } => { let loc = self.pop_value_released()?; @@ -4875,9 +4894,9 @@ impl<'a, M: Machine> 
FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmw8OrU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4898,9 +4917,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmw16OrU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4921,9 +4940,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw8OrU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4944,9 +4963,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw16OrU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4967,9 +4986,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw32OrU { ref memarg } => { let loc = self.pop_value_released()?; @@ -4990,9 +5009,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmwXor { ref memarg } => { let loc = self.pop_value_released()?; @@ -5013,9 +5032,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmwXor { ref memarg } => { let loc = self.pop_value_released()?; @@ -5036,9 +5055,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmw8XorU { ref memarg } => { let loc = self.pop_value_released()?; @@ -5059,9 +5078,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmw16XorU { ref memarg } => { let loc = self.pop_value_released()?; @@ -5082,9 +5101,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) 
}, - ); + )?; } Operator::I64AtomicRmw8XorU { ref memarg } => { let loc = self.pop_value_released()?; @@ -5105,9 +5124,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw16XorU { ref memarg } => { let loc = self.pop_value_released()?; @@ -5128,9 +5147,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw32XorU { ref memarg } => { let loc = self.pop_value_released()?; @@ -5151,9 +5170,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmwXchg { ref memarg } => { let loc = self.pop_value_released()?; @@ -5174,9 +5193,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmwXchg { ref memarg } => { let loc = self.pop_value_released()?; @@ -5197,9 +5216,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmw8XchgU { ref memarg } => { let loc = self.pop_value_released()?; @@ -5220,9 +5239,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmw16XchgU { ref memarg } => { let loc = self.pop_value_released()?; @@ -5243,9 +5262,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw8XchgU { ref memarg } => { let loc = self.pop_value_released()?; @@ -5266,9 +5285,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw16XchgU { ref memarg } => { let loc = self.pop_value_released()?; @@ -5289,9 +5308,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw32XchgU { ref memarg 
} => { let loc = self.pop_value_released()?; @@ -5312,9 +5331,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmwCmpxchg { ref memarg } => { let new = self.pop_value_released()?; @@ -5337,9 +5356,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmwCmpxchg { ref memarg } => { let new = self.pop_value_released()?; @@ -5362,9 +5381,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmw8CmpxchgU { ref memarg } => { let new = self.pop_value_released()?; @@ -5387,9 +5406,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I32AtomicRmw16CmpxchgU { ref memarg } => { let new = self.pop_value_released()?; @@ -5412,9 +5431,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw8CmpxchgU { ref memarg } => { let new = self.pop_value_released()?; @@ -5437,9 +5456,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw16CmpxchgU { ref memarg } => { let new = self.pop_value_released()?; @@ -5462,9 +5481,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::I64AtomicRmw32CmpxchgU { ref memarg } => { let new = self.pop_value_released()?; @@ -5487,9 +5506,9 @@ impl<'a, M: Machine> FuncGen<'a, M> { imported_memories, offset, heap_access_oob, - ); + ) }, - ); + )?; } Operator::RefNull { .. } => { @@ -5506,14 +5525,14 @@ impl<'a, M: Machine> FuncGen<'a, M> { as i32, ), Location::GPR(self.machine.get_grp_for_call()), - ); + )?; // TODO: unclear if we need this? 
check other new insts with no stack ops //.machine.release_locations_only_osr_state(1); self.emit_call_native( |this| { this.machine - .emit_call_register(this.machine.get_grp_for_call()); + .emit_call_register(this.machine.get_grp_for_call()) }, // [vmctx, func_index] -> funcref iter::once(Location::Imm32(function_index as u32)), @@ -5532,7 +5551,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S64, Location::GPR(self.machine.get_gpr_for_ret()), ret, - ); + )?; } Operator::RefIsNull => { let loc_a = self.pop_value_released()?; @@ -5540,7 +5559,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { &[(WpType::I32, MachineValue::WasmStack(self.value_stack.len()))], false, )?[0]; - self.machine.i64_cmp_eq(loc_a, Location::Imm64(0), ret); + self.machine.i64_cmp_eq(loc_a, Location::Imm64(0), ret)?; self.value_stack.push(ret); } Operator::TableSet { table: index } => { @@ -5563,14 +5582,14 @@ impl<'a, M: Machine> FuncGen<'a, M> { ) as i32, ), Location::GPR(self.machine.get_grp_for_call()), - ); + )?; // TODO: should this be 2? 
self.release_locations_only_osr_state(1)?; self.emit_call_native( |this| { this.machine - .emit_call_register(this.machine.get_grp_for_call()); + .emit_call_register(this.machine.get_grp_for_call()) }, // [vmctx, table_index, elem_index, reftype] [Location::Imm32(table_index.index() as u32), index, value] @@ -5599,13 +5618,13 @@ impl<'a, M: Machine> FuncGen<'a, M> { ) as i32, ), Location::GPR(self.machine.get_grp_for_call()), - ); + )?; self.release_locations_only_osr_state(1)?; self.emit_call_native( |this| { this.machine - .emit_call_register(this.machine.get_grp_for_call()); + .emit_call_register(this.machine.get_grp_for_call()) }, // [vmctx, table_index, elem_index] -> reftype [Location::Imm32(table_index.index() as u32), index] @@ -5628,7 +5647,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S64, Location::GPR(self.machine.get_gpr_for_ret()), ret, - ); + )?; } Operator::TableSize { table: index } => { let table_index = TableIndex::new(index as _); @@ -5646,12 +5665,12 @@ impl<'a, M: Machine> FuncGen<'a, M> { ) as i32, ), Location::GPR(self.machine.get_grp_for_call()), - ); + )?; self.emit_call_native( |this| { this.machine - .emit_call_register(this.machine.get_grp_for_call()); + .emit_call_register(this.machine.get_grp_for_call()) }, // [vmctx, table_index] -> i32 iter::once(Location::Imm32(table_index.index() as u32)), @@ -5667,7 +5686,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S32, Location::GPR(self.machine.get_gpr_for_ret()), ret, - ); + )?; } Operator::TableGrow { table: index } => { let table_index = TableIndex::new(index as _); @@ -5688,14 +5707,14 @@ impl<'a, M: Machine> FuncGen<'a, M> { ) as i32, ), Location::GPR(self.machine.get_grp_for_call()), - ); + )?; // TODO: should this be 2? 
self.release_locations_only_osr_state(1)?; self.emit_call_native( |this| { this.machine - .emit_call_register(this.machine.get_grp_for_call()); + .emit_call_register(this.machine.get_grp_for_call()) }, // [vmctx, init_value, delta, table_index] -> u32 [ @@ -5719,7 +5738,7 @@ impl<'a, M: Machine> FuncGen<'a, M> { Size::S32, Location::GPR(self.machine.get_gpr_for_ret()), ret, - ); + )?; } Operator::TableCopy { dst_table, @@ -5739,14 +5758,14 @@ impl<'a, M: Machine> FuncGen<'a, M> { as i32, ), Location::GPR(self.machine.get_grp_for_call()), - ); + )?; // TODO: should this be 3? self.release_locations_only_osr_state(1)?; self.emit_call_native( |this| { this.machine - .emit_call_register(this.machine.get_grp_for_call()); + .emit_call_register(this.machine.get_grp_for_call()) }, // [vmctx, dst_table_index, src_table_index, dst, src, len] [ @@ -5787,14 +5806,14 @@ impl<'a, M: Machine> FuncGen<'a, M> { as i32, ), Location::GPR(self.machine.get_grp_for_call()), - ); + )?; // TODO: should this be 3? self.release_locations_only_osr_state(1)?; self.emit_call_native( |this| { this.machine - .emit_call_register(this.machine.get_grp_for_call()); + .emit_call_register(this.machine.get_grp_for_call()) }, // [vmctx, table_index, start_idx, item, len] [Location::Imm32(table), dest, val, len].iter().cloned(), @@ -5820,14 +5839,14 @@ impl<'a, M: Machine> FuncGen<'a, M> { as i32, ), Location::GPR(self.machine.get_grp_for_call()), - ); + )?; // TODO: should this be 3? self.release_locations_only_osr_state(1)?; self.emit_call_native( |this| { this.machine - .emit_call_register(this.machine.get_grp_for_call()); + .emit_call_register(this.machine.get_grp_for_call()) }, // [vmctx, table_index, elem_index, dst, src, len] [ @@ -5862,14 +5881,14 @@ impl<'a, M: Machine> FuncGen<'a, M> { as i32, ), Location::GPR(self.machine.get_grp_for_call()), - ); + )?; // TODO: do we need this? 
//.machine.release_locations_only_osr_state(1); self.emit_call_native( |this| { this.machine - .emit_call_register(this.machine.get_grp_for_call()); + .emit_call_register(this.machine.get_grp_for_call()) }, // [vmctx, elem_index] [Location::Imm32(segment)].iter().cloned(), @@ -5886,35 +5905,39 @@ impl<'a, M: Machine> FuncGen<'a, M> { Ok(()) } - pub fn finalize(mut self, data: &FunctionBodyData) -> (CompiledFunction, Option) { + pub fn finalize( + mut self, + data: &FunctionBodyData, + ) -> Result<(CompiledFunction, Option), CodegenError> { // Generate actual code for special labels. self.machine - .emit_label(self.special_labels.integer_division_by_zero); + .emit_label(self.special_labels.integer_division_by_zero)?; self.machine - .emit_illegal_op(TrapCode::IntegerDivisionByZero); + .emit_illegal_op(TrapCode::IntegerDivisionByZero)?; self.machine - .emit_label(self.special_labels.integer_overflow); - self.machine.emit_illegal_op(TrapCode::IntegerOverflow); + .emit_label(self.special_labels.integer_overflow)?; + self.machine.emit_illegal_op(TrapCode::IntegerOverflow)?; - self.machine.emit_label(self.special_labels.heap_access_oob); self.machine - .emit_illegal_op(TrapCode::HeapAccessOutOfBounds); + .emit_label(self.special_labels.heap_access_oob)?; + self.machine + .emit_illegal_op(TrapCode::HeapAccessOutOfBounds)?; self.machine - .emit_label(self.special_labels.table_access_oob); + .emit_label(self.special_labels.table_access_oob)?; self.machine - .emit_illegal_op(TrapCode::TableAccessOutOfBounds); + .emit_illegal_op(TrapCode::TableAccessOutOfBounds)?; self.machine - .emit_label(self.special_labels.indirect_call_null); - self.machine.emit_illegal_op(TrapCode::IndirectCallToNull); + .emit_label(self.special_labels.indirect_call_null)?; + self.machine.emit_illegal_op(TrapCode::IndirectCallToNull)?; - self.machine.emit_label(self.special_labels.bad_signature); - self.machine.emit_illegal_op(TrapCode::BadSignature); + 
self.machine.emit_label(self.special_labels.bad_signature)?; + self.machine.emit_illegal_op(TrapCode::BadSignature)?; // Notify the assembler backend to generate necessary code at end of function. - self.machine.finalize_function(); + self.machine.finalize_function()?; let body_len = self.machine.assembler_get_offset().0; @@ -5946,14 +5969,14 @@ impl<'a, M: Machine> FuncGen<'a, M> { let traps = self.machine.collect_trap_information(); let body = self.machine.assembler_finalize(); - ( + Ok(( CompiledFunction { body: FunctionBody { body, unwind_info }, relocations: self.relocations.clone(), frame_info: CompiledFunctionFrameInfo { traps, address_map }, }, fde, - ) + )) } // FIXME: This implementation seems to be not enough to resolve all kinds of register dependencies // at call place. diff --git a/lib/compiler-singlepass/src/compiler.rs b/lib/compiler-singlepass/src/compiler.rs index 5f2d0df41e0..b484fd4ce22 100644 --- a/lib/compiler-singlepass/src/compiler.rs +++ b/lib/compiler-singlepass/src/compiler.rs @@ -33,6 +33,12 @@ use wasmer_types::{ SectionIndex, TableIndex, TrapCode, VMOffsets, }; +impl From for CompileError { + fn from(err: CodegenError) -> Self { + Self::Codegen(err.message) + } +} + /// A compiler that compiles a WebAssembly module with Singlepass. 
/// It does the compilation in one pass pub struct SinglepassCompiler { @@ -188,7 +194,7 @@ impl Compiler for SinglepassCompiler { generator.feed_operator(op).map_err(to_compile_error)?; } - Ok(generator.finalize(input)) + generator.finalize(input).map_err(to_compile_error) } Architecture::Aarch64(_) => { let machine = MachineARM64::new(); @@ -210,7 +216,7 @@ impl Compiler for SinglepassCompiler { generator.feed_operator(op).map_err(to_compile_error)?; } - Ok(generator.finalize(input)) + generator.finalize(input).map_err(to_compile_error) } _ => unimplemented!(), } diff --git a/lib/compiler-singlepass/src/emitter_arm64.rs b/lib/compiler-singlepass/src/emitter_arm64.rs index 10edbaaeb8d..c5aad8a0fe8 100644 --- a/lib/compiler-singlepass/src/emitter_arm64.rs +++ b/lib/compiler-singlepass/src/emitter_arm64.rs @@ -1,7 +1,9 @@ pub use crate::arm64_decl::{ARM64Register, ArgumentRegisterAllocator, GPR, NEON}; +use crate::codegen_error; use crate::common_decl::Size; use crate::location::Location as AbstractLocation; pub use crate::location::{Multiplier, Reg}; +use crate::machine::CodegenError; pub use crate::machine::{Label, Offset}; use dynasm::dynasm; pub use dynasmrt::aarch64::{encode_logical_immediate_32bit, encode_logical_immediate_64bit}; @@ -89,117 +91,360 @@ pub trait EmitterARM64 { fn finalize_function(&mut self); - fn emit_str(&mut self, sz: Size, reg: Location, addr: Location); - fn emit_ldr(&mut self, sz: Size, reg: Location, addr: Location); - fn emit_stur(&mut self, sz: Size, reg: Location, addr: GPR, offset: i32); - fn emit_ldur(&mut self, sz: Size, reg: Location, addr: GPR, offset: i32); - fn emit_strdb(&mut self, sz: Size, reg: Location, addr: GPR, offset: u32); - fn emit_stria(&mut self, sz: Size, reg: Location, addr: GPR, offset: u32); - fn emit_ldria(&mut self, sz: Size, reg: Location, addr: GPR, offset: u32); - fn emit_stpdb(&mut self, sz: Size, reg1: Location, reg2: Location, addr: GPR, offset: u32); - fn emit_ldpia(&mut self, sz: Size, reg1: Location, 
reg2: Location, addr: GPR, offset: u32); - - fn emit_ldrb(&mut self, sz: Size, reg: Location, dst: Location); - fn emit_ldrh(&mut self, sz: Size, reg: Location, dst: Location); - fn emit_ldrsb(&mut self, sz: Size, reg: Location, dst: Location); - fn emit_ldrsh(&mut self, sz: Size, reg: Location, dst: Location); - fn emit_ldrsw(&mut self, sz: Size, reg: Location, dst: Location); - fn emit_strb(&mut self, sz: Size, reg: Location, dst: Location); - fn emit_strh(&mut self, sz: Size, reg: Location, dst: Location); - - fn emit_mov(&mut self, sz: Size, src: Location, dst: Location); - - fn emit_movn(&mut self, sz: Size, reg: Location, val: u32); - fn emit_movz(&mut self, reg: Location, val: u32); - fn emit_movk(&mut self, reg: Location, val: u32, shift: u32); - - fn emit_mov_imm(&mut self, dst: Location, val: u64); - - fn emit_add(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - fn emit_sub(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - fn emit_mul(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - fn emit_adds(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - fn emit_subs(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - - fn emit_add_lsl(&mut self, sz: Size, src1: Location, src2: Location, lsl: u32, dst: Location); - - fn emit_cmp(&mut self, sz: Size, src: Location, dst: Location); - fn emit_tst(&mut self, sz: Size, src: Location, dst: Location); - - fn emit_lsl(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - fn emit_lsr(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - fn emit_asr(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - fn emit_ror(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - - fn emit_or(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - fn emit_and(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - fn emit_eor(&mut self, 
sz: Size, src1: Location, src2: Location, dst: Location); - - fn emit_bfc(&mut self, se: Size, lsb: u32, width: u32, dst: Location); - fn emit_bfi(&mut self, se: Size, src: Location, lsb: u32, width: u32, dst: Location); - - fn emit_udiv(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - fn emit_sdiv(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); + fn emit_str(&mut self, sz: Size, reg: Location, addr: Location) -> Result<(), CodegenError>; + fn emit_ldr(&mut self, sz: Size, reg: Location, addr: Location) -> Result<(), CodegenError>; + fn emit_stur( + &mut self, + sz: Size, + reg: Location, + addr: GPR, + offset: i32, + ) -> Result<(), CodegenError>; + fn emit_ldur( + &mut self, + sz: Size, + reg: Location, + addr: GPR, + offset: i32, + ) -> Result<(), CodegenError>; + fn emit_strdb( + &mut self, + sz: Size, + reg: Location, + addr: GPR, + offset: u32, + ) -> Result<(), CodegenError>; + fn emit_stria( + &mut self, + sz: Size, + reg: Location, + addr: GPR, + offset: u32, + ) -> Result<(), CodegenError>; + fn emit_ldria( + &mut self, + sz: Size, + reg: Location, + addr: GPR, + offset: u32, + ) -> Result<(), CodegenError>; + fn emit_stpdb( + &mut self, + sz: Size, + reg1: Location, + reg2: Location, + addr: GPR, + offset: u32, + ) -> Result<(), CodegenError>; + fn emit_ldpia( + &mut self, + sz: Size, + reg1: Location, + reg2: Location, + addr: GPR, + offset: u32, + ) -> Result<(), CodegenError>; + + fn emit_ldrb(&mut self, sz: Size, reg: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_ldrh(&mut self, sz: Size, reg: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_ldrsb(&mut self, sz: Size, reg: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_ldrsh(&mut self, sz: Size, reg: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_ldrsw(&mut self, sz: Size, reg: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_strb(&mut self, sz: Size, reg: Location, dst: 
Location) -> Result<(), CodegenError>; + fn emit_strh(&mut self, sz: Size, reg: Location, dst: Location) -> Result<(), CodegenError>; + + fn emit_mov(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + + fn emit_movn(&mut self, sz: Size, reg: Location, val: u32) -> Result<(), CodegenError>; + fn emit_movz(&mut self, reg: Location, val: u32) -> Result<(), CodegenError>; + fn emit_movk(&mut self, reg: Location, val: u32, shift: u32) -> Result<(), CodegenError>; + + fn emit_mov_imm(&mut self, dst: Location, val: u64) -> Result<(), CodegenError>; + + fn emit_add( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_sub( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_mul( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_adds( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_subs( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + + fn emit_add_lsl( + &mut self, + sz: Size, + src1: Location, + src2: Location, + lsl: u32, + dst: Location, + ) -> Result<(), CodegenError>; + + fn emit_cmp(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_tst(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + + fn emit_lsl( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_lsr( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_asr( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_ror( + &mut self, + sz: Size, + src1: Location, + src2: 
Location, + dst: Location, + ) -> Result<(), CodegenError>; + + fn emit_or( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_and( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_eor( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + + fn emit_bfc( + &mut self, + se: Size, + lsb: u32, + width: u32, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_bfi( + &mut self, + se: Size, + src: Location, + lsb: u32, + width: u32, + dst: Location, + ) -> Result<(), CodegenError>; + + fn emit_udiv( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_sdiv( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; /// msub : c - a*b -> dst - fn emit_msub(&mut self, sz: Size, a: Location, b: Location, c: Location, dst: Location); - - fn emit_sxtb(&mut self, sz: Size, src: Location, dst: Location); - fn emit_sxth(&mut self, sz: Size, src: Location, dst: Location); - fn emit_sxtw(&mut self, sz: Size, src: Location, dst: Location); - fn emit_uxtb(&mut self, sz: Size, src: Location, dst: Location); - fn emit_uxth(&mut self, sz: Size, src: Location, dst: Location); - - fn emit_cset(&mut self, sz: Size, dst: Location, cond: Condition); - fn emit_csetm(&mut self, sz: Size, dst: Location, cond: Condition); - fn emit_cinc(&mut self, sz: Size, src: Location, dst: Location, cond: Condition); - fn emit_clz(&mut self, sz: Size, src: Location, dst: Location); - fn emit_rbit(&mut self, sz: Size, src: Location, dst: Location); - - fn emit_label(&mut self, label: Label); - fn emit_load_label(&mut self, reg: GPR, label: Label); - fn emit_b_label(&mut self, label: Label); - fn emit_cbz_label(&mut self, sz: Size, reg: Location, label: Label); - fn emit_cbnz_label(&mut 
self, sz: Size, reg: Location, label: Label); - fn emit_tbz_label(&mut self, sz: Size, reg: Location, n: u32, label: Label); - fn emit_tbnz_label(&mut self, sz: Size, reg: Location, n: u32, label: Label); - fn emit_bcond_label(&mut self, condition: Condition, label: Label); - fn emit_bcond_label_far(&mut self, condition: Condition, label: Label); - fn emit_b_register(&mut self, reg: GPR); - fn emit_call_label(&mut self, label: Label); - fn emit_call_register(&mut self, reg: GPR); - fn emit_ret(&mut self); - - fn emit_udf(&mut self, payload: u16); - fn emit_dmb(&mut self); - fn emit_brk(&mut self); - - fn emit_fcmp(&mut self, sz: Size, src1: Location, src2: Location); - fn emit_fneg(&mut self, sz: Size, src: Location, dst: Location); - fn emit_fsqrt(&mut self, sz: Size, src: Location, dst: Location); - - fn emit_fadd(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - fn emit_fsub(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - fn emit_fmul(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - fn emit_fdiv(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - - fn emit_fmin(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - fn emit_fmax(&mut self, sz: Size, src1: Location, src2: Location, dst: Location); - - fn emit_frintz(&mut self, sz: Size, src: Location, dst: Location); - fn emit_frintn(&mut self, sz: Size, src: Location, dst: Location); - fn emit_frintm(&mut self, sz: Size, src: Location, dst: Location); - fn emit_frintp(&mut self, sz: Size, src: Location, dst: Location); - - fn emit_scvtf(&mut self, sz_in: Size, src: Location, sz_out: Size, dst: Location); - fn emit_ucvtf(&mut self, sz_in: Size, src: Location, sz_out: Size, dst: Location); - fn emit_fcvt(&mut self, sz_in: Size, src: Location, dst: Location); - fn emit_fcvtzs(&mut self, sz_in: Size, src: Location, sz_out: Size, dst: Location); - fn emit_fcvtzu(&mut self, sz_in: Size, src: Location, sz_out: Size, dst: 
Location); - - fn emit_read_fpcr(&mut self, reg: GPR); - fn emit_write_fpcr(&mut self, reg: GPR); - fn emit_read_fpsr(&mut self, reg: GPR); - fn emit_write_fpsr(&mut self, reg: GPR); + fn emit_msub( + &mut self, + sz: Size, + a: Location, + b: Location, + c: Location, + dst: Location, + ) -> Result<(), CodegenError>; + + fn emit_sxtb(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_sxth(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_sxtw(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_uxtb(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_uxth(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + + fn emit_cset(&mut self, sz: Size, dst: Location, cond: Condition) -> Result<(), CodegenError>; + fn emit_csetm(&mut self, sz: Size, dst: Location, cond: Condition) -> Result<(), CodegenError>; + fn emit_cinc( + &mut self, + sz: Size, + src: Location, + dst: Location, + cond: Condition, + ) -> Result<(), CodegenError>; + fn emit_clz(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_rbit(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + + fn emit_label(&mut self, label: Label) -> Result<(), CodegenError>; + fn emit_load_label(&mut self, reg: GPR, label: Label) -> Result<(), CodegenError>; + fn emit_b_label(&mut self, label: Label) -> Result<(), CodegenError>; + fn emit_cbz_label(&mut self, sz: Size, reg: Location, label: Label) + -> Result<(), CodegenError>; + fn emit_cbnz_label( + &mut self, + sz: Size, + reg: Location, + label: Label, + ) -> Result<(), CodegenError>; + fn emit_tbz_label( + &mut self, + sz: Size, + reg: Location, + n: u32, + label: Label, + ) -> Result<(), CodegenError>; + fn emit_tbnz_label( + &mut self, + sz: Size, + reg: Location, + n: u32, + label: Label, + ) -> Result<(), CodegenError>; + 
fn emit_bcond_label(&mut self, condition: Condition, label: Label) -> Result<(), CodegenError>; + fn emit_bcond_label_far( + &mut self, + condition: Condition, + label: Label, + ) -> Result<(), CodegenError>; + fn emit_b_register(&mut self, reg: GPR) -> Result<(), CodegenError>; + fn emit_call_label(&mut self, label: Label) -> Result<(), CodegenError>; + fn emit_call_register(&mut self, reg: GPR) -> Result<(), CodegenError>; + fn emit_ret(&mut self) -> Result<(), CodegenError>; + + fn emit_udf(&mut self, payload: u16) -> Result<(), CodegenError>; + fn emit_dmb(&mut self) -> Result<(), CodegenError>; + fn emit_brk(&mut self) -> Result<(), CodegenError>; + + fn emit_fcmp(&mut self, sz: Size, src1: Location, src2: Location) -> Result<(), CodegenError>; + fn emit_fneg(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_fsqrt(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + + fn emit_fadd( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_fsub( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_fmul( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_fdiv( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + + fn emit_fmin( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_fmax( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError>; + + fn emit_frintz(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_frintn(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_frintm(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), 
CodegenError>; + fn emit_frintp(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + + fn emit_scvtf( + &mut self, + sz_in: Size, + src: Location, + sz_out: Size, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_ucvtf( + &mut self, + sz_in: Size, + src: Location, + sz_out: Size, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_fcvt(&mut self, sz_in: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_fcvtzs( + &mut self, + sz_in: Size, + src: Location, + sz_out: Size, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_fcvtzu( + &mut self, + sz_in: Size, + src: Location, + sz_out: Size, + dst: Location, + ) -> Result<(), CodegenError>; + + fn emit_read_fpcr(&mut self, reg: GPR) -> Result<(), CodegenError>; + fn emit_write_fpcr(&mut self, reg: GPR) -> Result<(), CodegenError>; + fn emit_read_fpsr(&mut self, reg: GPR) -> Result<(), CodegenError>; + fn emit_write_fpsr(&mut self, reg: GPR) -> Result<(), CodegenError>; fn arch_supports_canonicalize_nan(&self) -> bool { true @@ -209,8 +454,11 @@ pub trait EmitterARM64 { false } - fn arch_emit_indirect_call_with_trampoline(&mut self, _loc: Location) { - unimplemented!() + fn arch_emit_indirect_call_with_trampoline( + &mut self, + _loc: Location, + ) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_indirect_call_with_trampoline unimplemented") } } @@ -239,7 +487,7 @@ impl EmitterARM64 for Assembler { ); } - fn emit_str(&mut self, sz: Size, reg: Location, addr: Location) { + fn emit_str(&mut self, sz: Size, reg: Location, addr: Location) -> Result<(), CodegenError> { match (sz, reg, addr) { (Size::S64, Location::GPR(reg), Location::Memory(addr, disp)) => { let reg = reg.into_index() as u32; @@ -307,10 +555,11 @@ impl EmitterARM64 for Assembler { _ => dynasm!(self ; str W(reg), [X(addr), X(r2), LSL mult]), }; } - _ => panic!("singlepass can't emit STR {:?}, {:?}, {:?}", sz, reg, addr), + _ => codegen_error!("singlepass can't 
emit STR {:?}, {:?}, {:?}", sz, reg, addr), } + Ok(()) } - fn emit_ldr(&mut self, sz: Size, reg: Location, addr: Location) { + fn emit_ldr(&mut self, sz: Size, reg: Location, addr: Location) -> Result<(), CodegenError> { match (sz, reg, addr) { (Size::S64, Location::GPR(reg), Location::Memory(addr, disp)) => { let reg = reg.into_index() as u32; @@ -402,10 +651,17 @@ impl EmitterARM64 for Assembler { _ => dynasm!(self ; ldr S(reg), [X(addr), X(r2), LSL mult]), }; } - _ => panic!("singlepass can't emit LDR {:?}, {:?}, {:?}", sz, reg, addr), + _ => codegen_error!("singlepass can't emit LDR {:?}, {:?}, {:?}", sz, reg, addr), } + Ok(()) } - fn emit_stur(&mut self, sz: Size, reg: Location, addr: GPR, offset: i32) { + fn emit_stur( + &mut self, + sz: Size, + reg: Location, + addr: GPR, + offset: i32, + ) -> Result<(), CodegenError> { assert!((offset >= -255) && (offset <= 255)); match (sz, reg) { (Size::S64, Location::GPR(reg)) => { @@ -428,13 +684,23 @@ impl EmitterARM64 for Assembler { let addr = addr.into_index() as u32; dynasm!(self ; stur S(reg), [X(addr), offset]); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit STUR {:?}, {:?}, {:?}, {:?}", - sz, reg, addr, offset + sz, + reg, + addr, + offset ), } + Ok(()) } - fn emit_ldur(&mut self, sz: Size, reg: Location, addr: GPR, offset: i32) { + fn emit_ldur( + &mut self, + sz: Size, + reg: Location, + addr: GPR, + offset: i32, + ) -> Result<(), CodegenError> { assert!((offset >= -255) && (offset <= 255)); match (sz, reg) { (Size::S64, Location::GPR(reg)) => { @@ -457,14 +723,24 @@ impl EmitterARM64 for Assembler { let addr = addr.into_index() as u32; dynasm!(self ; ldur S(reg), [X(addr), offset]); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit LDUR {:?}, {:?}, {:?}, {:?}", - sz, reg, addr, offset + sz, + reg, + addr, + offset ), } + Ok(()) } - fn emit_strdb(&mut self, sz: Size, reg: Location, addr: GPR, offset: u32) { + fn emit_strdb( + &mut self, + sz: Size, + reg: Location, + addr: GPR, + 
offset: u32, + ) -> Result<(), CodegenError> { assert!(offset <= 255); match (sz, reg) { (Size::S64, Location::GPR(reg)) => { @@ -477,10 +753,17 @@ impl EmitterARM64 for Assembler { let addr = addr.into_index() as u32; dynasm!(self ; str D(reg), [X(addr), -(offset as i32)]!); } - _ => unreachable!(), + _ => codegen_error!("singlepass can't emit STRDB"), } + Ok(()) } - fn emit_stria(&mut self, sz: Size, reg: Location, addr: GPR, offset: u32) { + fn emit_stria( + &mut self, + sz: Size, + reg: Location, + addr: GPR, + offset: u32, + ) -> Result<(), CodegenError> { assert!(offset <= 255); match (sz, reg) { (Size::S64, Location::GPR(reg)) => { @@ -493,10 +776,17 @@ impl EmitterARM64 for Assembler { let addr = addr.into_index() as u32; dynasm!(self ; str D(reg), [X(addr)], (offset as i32)); } - _ => unreachable!(), + _ => codegen_error!("singlepass can't emit STRIA"), } + Ok(()) } - fn emit_ldria(&mut self, sz: Size, reg: Location, addr: GPR, offset: u32) { + fn emit_ldria( + &mut self, + sz: Size, + reg: Location, + addr: GPR, + offset: u32, + ) -> Result<(), CodegenError> { assert!(offset <= 255); match (sz, reg) { (Size::S64, Location::GPR(reg)) => { @@ -509,11 +799,19 @@ impl EmitterARM64 for Assembler { let addr = addr.into_index() as u32; dynasm!(self ; ldr D(reg), [X(addr)], offset); } - _ => unreachable!(), + _ => codegen_error!("singlepass can't emit LDRIA"), } + Ok(()) } - fn emit_stpdb(&mut self, sz: Size, reg1: Location, reg2: Location, addr: GPR, offset: u32) { + fn emit_stpdb( + &mut self, + sz: Size, + reg1: Location, + reg2: Location, + addr: GPR, + offset: u32, + ) -> Result<(), CodegenError> { assert!(offset <= 255); match (sz, reg1, reg2) { (Size::S64, Location::GPR(reg1), Location::GPR(reg2)) => { @@ -522,10 +820,18 @@ impl EmitterARM64 for Assembler { let addr = addr.into_index() as u32; dynasm!(self ; stp X(reg1), X(reg2), [X(addr), -(offset as i32)]!); } - _ => unreachable!(), + _ => codegen_error!("singlepass can't emit STPDB"), } + Ok(()) } - fn 
emit_ldpia(&mut self, sz: Size, reg1: Location, reg2: Location, addr: GPR, offset: u32) { + fn emit_ldpia( + &mut self, + sz: Size, + reg1: Location, + reg2: Location, + addr: GPR, + offset: u32, + ) -> Result<(), CodegenError> { assert!(offset <= 255); match (sz, reg1, reg2) { (Size::S64, Location::GPR(reg1), Location::GPR(reg2)) => { @@ -534,11 +840,12 @@ impl EmitterARM64 for Assembler { let addr = addr.into_index() as u32; dynasm!(self ; ldp X(reg1), X(reg2), [X(addr)], offset); } - _ => unreachable!(), + _ => codegen_error!("singlepass can't emit LDPIA"), } + Ok(()) } - fn emit_ldrb(&mut self, _sz: Size, reg: Location, dst: Location) { + fn emit_ldrb(&mut self, _sz: Size, reg: Location, dst: Location) -> Result<(), CodegenError> { match (reg, dst) { (Location::GPR(reg), Location::Memory(addr, offset)) => { let reg = reg.into_index() as u32; @@ -559,10 +866,11 @@ impl EmitterARM64 for Assembler { _ => dynasm!(self ; ldrb W(reg), [X(addr), X(r2), LSL mult]), }; } - _ => panic!("singlepass can't emit LDRB {:?}, {:?}", reg, dst), + _ => codegen_error!("singlepass can't emit LDRB {:?}, {:?}", reg, dst), } + Ok(()) } - fn emit_ldrh(&mut self, _sz: Size, reg: Location, dst: Location) { + fn emit_ldrh(&mut self, _sz: Size, reg: Location, dst: Location) -> Result<(), CodegenError> { match (reg, dst) { (Location::GPR(reg), Location::Memory(addr, offset)) => { let reg = reg.into_index() as u32; @@ -583,10 +891,11 @@ impl EmitterARM64 for Assembler { _ => dynasm!(self ; ldrh W(reg), [X(addr), X(r2), LSL mult]), }; } - _ => panic!("singlepass can't emit LDRH {:?}, {:?}", reg, dst), + _ => codegen_error!("singlepass can't emit LDRH {:?}, {:?}", reg, dst), } + Ok(()) } - fn emit_ldrsb(&mut self, sz: Size, reg: Location, dst: Location) { + fn emit_ldrsb(&mut self, sz: Size, reg: Location, dst: Location) -> Result<(), CodegenError> { match (sz, reg, dst) { (Size::S64, Location::GPR(reg), Location::Memory(addr, offset)) => { let reg = reg.into_index() as u32; @@ -626,10 +935,11 
@@ impl EmitterARM64 for Assembler { _ => dynasm!(self ; ldrsb W(reg), [X(addr), X(r2), LSL mult]), }; } - _ => panic!("singlepass can't emit LDRSB {:?}, {:?}, {:?}", sz, reg, dst), + _ => codegen_error!("singlepass can't emit LDRSB {:?}, {:?}, {:?}", sz, reg, dst), } + Ok(()) } - fn emit_ldrsh(&mut self, sz: Size, reg: Location, dst: Location) { + fn emit_ldrsh(&mut self, sz: Size, reg: Location, dst: Location) -> Result<(), CodegenError> { match (sz, reg, dst) { (Size::S64, Location::GPR(reg), Location::Memory(addr, offset)) => { let reg = reg.into_index() as u32; @@ -669,10 +979,11 @@ impl EmitterARM64 for Assembler { _ => dynasm!(self ; ldrsh W(reg), [X(addr), X(r2), LSL mult]), }; } - _ => panic!("singlepass can't emit LDRSH {:?}, {:?}, {:?}", sz, reg, dst), + _ => codegen_error!("singlepass can't emit LDRSH {:?}, {:?}, {:?}", sz, reg, dst), } + Ok(()) } - fn emit_ldrsw(&mut self, sz: Size, reg: Location, dst: Location) { + fn emit_ldrsw(&mut self, sz: Size, reg: Location, dst: Location) -> Result<(), CodegenError> { match (sz, reg, dst) { (Size::S64, Location::GPR(reg), Location::Memory(addr, offset)) => { let reg = reg.into_index() as u32; @@ -693,10 +1004,11 @@ impl EmitterARM64 for Assembler { _ => dynasm!(self ; ldrsw X(reg), [X(addr), X(r2), LSL mult]), }; } - _ => panic!("singlepass can't emit LDRSW {:?}, {:?}, {:?}", sz, reg, dst), + _ => codegen_error!("singlepass can't emit LDRSW {:?}, {:?}, {:?}", sz, reg, dst), } + Ok(()) } - fn emit_strb(&mut self, _sz: Size, reg: Location, dst: Location) { + fn emit_strb(&mut self, _sz: Size, reg: Location, dst: Location) -> Result<(), CodegenError> { match (reg, dst) { (Location::GPR(reg), Location::Memory(addr, offset)) => { let reg = reg.into_index() as u32; @@ -717,10 +1029,11 @@ impl EmitterARM64 for Assembler { _ => dynasm!(self ; strb W(reg), [X(addr), X(r2), LSL mult]), }; } - _ => panic!("singlepass can't emit STRB {:?}, {:?}", reg, dst), + _ => codegen_error!("singlepass can't emit STRB {:?}, {:?}", 
 reg, dst), } + Ok(()) } - fn emit_strh(&mut self, _sz: Size, reg: Location, dst: Location) { + fn emit_strh(&mut self, _sz: Size, reg: Location, dst: Location) -> Result<(), CodegenError> { match (reg, dst) { (Location::GPR(reg), Location::Memory(addr, offset)) => { let reg = reg.into_index() as u32; @@ -741,11 +1054,12 @@ impl EmitterARM64 for Assembler { _ => dynasm!(self ; strh W(reg), [X(addr), X(r2), LSL mult]), }; } - _ => panic!("singlepass can't emit STRH {:?}, {:?}", reg, dst), + _ => codegen_error!("singlepass can't emit STRH {:?}, {:?}", reg, dst), } + Ok(()) } - fn emit_mov(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_mov(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S64, Location::GPR(src), Location::GPR(dst)) => { let src = src.into_index() as u32; @@ -794,7 +1108,7 @@ impl EmitterARM64 for Assembler { } else if encode_logical_immediate_32bit(val as _).is_some() { dynasm!(self ; orr W(dst), wzr, val); } else { - unreachable!(); + codegen_error!("singlepass can't emit MOV S32 {}, {:?}", val, dst); } } (Size::S64, Location::Imm32(val), Location::GPR(dst)) => { @@ -804,7 +1118,7 @@ impl EmitterARM64 for Assembler { } else if encode_logical_immediate_64bit(val as _).is_some() { dynasm!(self ; orr X(dst), xzr, val as u64); } else { - unreachable!(); + codegen_error!("singlepass can't emit MOV S64 {}, {:?}", val, dst); } } (Size::S64, Location::Imm64(val), Location::GPR(dst)) => { @@ -814,14 +1128,15 @@ impl EmitterARM64 for Assembler { } else if encode_logical_immediate_64bit(val as _).is_some() { dynasm!(self ; orr X(dst), xzr, val as u64); } else { - unreachable!(); + codegen_error!("singlepass can't emit MOV S64 {}, {:?}", val, dst); } } - _ => panic!("singlepass can't emit MOV {:?}, {:?}, {:?}", sz, src, dst), + _ => codegen_error!("singlepass can't emit MOV {:?}, {:?}, {:?}", sz, src, dst), } + Ok(()) } - fn emit_movn(&mut self, sz: Size, reg: Location, val: u32) { + 
fn emit_movn(&mut self, sz: Size, reg: Location, val: u32) -> Result<(), CodegenError> { match (sz, reg) { (Size::S32, Location::GPR(reg)) => { let reg = reg.into_index() as u32; @@ -831,29 +1146,32 @@ impl EmitterARM64 for Assembler { let reg = reg.into_index() as u32; dynasm!(self ; movn X(reg), val); } - _ => unreachable!(), + _ => codegen_error!("singlepass can't emit MOVN"), } + Ok(()) } - fn emit_movz(&mut self, reg: Location, val: u32) { + fn emit_movz(&mut self, reg: Location, val: u32) -> Result<(), CodegenError> { match reg { Location::GPR(reg) => { let reg = reg.into_index() as u32; dynasm!(self ; movz W(reg), val); } - _ => unreachable!(), + _ => codegen_error!("singlepass can't emit MOVZ"), } + Ok(()) } - fn emit_movk(&mut self, reg: Location, val: u32, shift: u32) { + fn emit_movk(&mut self, reg: Location, val: u32, shift: u32) -> Result<(), CodegenError> { match reg { Location::GPR(reg) => { let reg = reg.into_index() as u32; dynasm!(self ; movk X(reg), val, LSL shift); } - _ => unreachable!(), + _ => codegen_error!("singlepass can't emit MOVK"), } + Ok(()) } - fn emit_mov_imm(&mut self, dst: Location, val: u64) { + fn emit_mov_imm(&mut self, dst: Location, val: u64) -> Result<(), CodegenError> { match dst { Location::GPR(dst) => { let dst = dst.into_index() as u32; @@ -877,11 +1195,18 @@ impl EmitterARM64 for Assembler { } } } - _ => panic!("singlepass can't emit MOVW {:?}", dst), + _ => codegen_error!("singlepass can't emit MOVW {:?}", dst), } + Ok(()) } - fn emit_add(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_add( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S64, Location::GPR(src1), Location::GPR(src2), Location::GPR(dst)) => { let src1 = src1.into_index() as u32; @@ -935,13 +1260,23 @@ impl EmitterARM64 for Assembler { } dynasm!(self ; add W(dst), W(src1), imm); } - _ => panic!( + _ => codegen_error!( 
"singlepass can't emit ADD {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_sub(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_sub( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S64, Location::GPR(src1), Location::GPR(src2), Location::GPR(dst)) => { let src1 = src1.into_index() as u32; @@ -989,13 +1324,23 @@ impl EmitterARM64 for Assembler { } dynasm!(self ; sub X(dst), X(src1), imm as u32); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit SUB {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_mul(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_mul( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S64, Location::GPR(src1), Location::GPR(src2), Location::GPR(dst)) => { let src1 = src1.into_index() as u32; @@ -1009,13 +1354,23 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; mul W(dst), W(src1), W(src2)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit MUL {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_adds(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_adds( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S64, Location::GPR(src1), Location::GPR(src2), Location::GPR(dst)) => { let src1 = src1.into_index() as u32; @@ -1040,7 +1395,7 @@ impl EmitterARM64 for Assembler { let src1 = src1.into_index() as u32; let dst = dst.into_index() as u32; if imm >= 0x1000 { - unreachable!(); + codegen_error!("singlepass ADD.S with imm too large {}", imm); } dynasm!(self ; adds 
X(dst), X(src1), imm); } @@ -1055,17 +1410,27 @@ impl EmitterARM64 for Assembler { let src1 = src1.into_index() as u32; let dst = dst.into_index() as u32; if imm >= 0x1000 { - unreachable!(); + codegen_error!("singlepass ADD.S with imm too large {}", imm); } dynasm!(self ; adds W(dst), W(src1), imm); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit ADD.S {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_subs(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_subs( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S64, Location::GPR(src1), Location::GPR(src2), Location::GPR(dst)) => { let src1 = src1.into_index() as u32; @@ -1089,13 +1454,24 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; subs W(dst), W(src1), imm as u32); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit SUB.S {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_add_lsl(&mut self, sz: Size, src1: Location, src2: Location, lsl: u32, dst: Location) { + fn emit_add_lsl( + &mut self, + sz: Size, + src1: Location, + src2: Location, + lsl: u32, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S64, Location::GPR(src1), Location::GPR(src2), Location::GPR(dst)) => { let src1 = src1.into_index() as u32; @@ -1103,14 +1479,19 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; add X(dst), X(src1), X(src2), LSL lsl); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit LSL {:?} {:?} {:?} {:?} LSL {:?}", - sz, src1, src2, dst, lsl + sz, + src1, + src2, + dst, + lsl ), } + Ok(()) } - fn emit_cmp(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_cmp(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match 
(sz, src, dst) { (Size::S64, Location::GPR(src), Location::GPR(dst)) => { let src = src.into_index() as u32; @@ -1129,14 +1510,14 @@ impl EmitterARM64 for Assembler { (Size::S64, Location::Imm32(imm), Location::GPR(dst)) => { let dst = dst.into_index() as u32; if imm >= 0x1000 { - unreachable!(); + codegen_error!("singlepass CMP with imm too large {}", imm); } dynasm!(self ; cmp X(dst), imm as u32); } (Size::S64, Location::Imm64(imm), Location::GPR(dst)) => { let dst = dst.into_index() as u32; if imm >= 0x1000 { - unreachable!(); + codegen_error!("singlepass CMP with imm too large {}", imm); } dynasm!(self ; cmp X(dst), imm as u32); } @@ -1147,15 +1528,16 @@ impl EmitterARM64 for Assembler { (Size::S32, Location::Imm32(imm), Location::GPR(dst)) => { let dst = dst.into_index() as u32; if imm >= 0x1000 { - unreachable!(); + codegen_error!("singlepass CMP with imm too large {}", imm); } dynasm!(self ; cmp W(dst), imm as u32); } - _ => panic!("singlepass can't emit CMP {:?} {:?} {:?}", sz, src, dst), + _ => codegen_error!("singlepass can't emit CMP {:?} {:?} {:?}", sz, src, dst), } + Ok(()) } - fn emit_tst(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_tst(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S64, Location::GPR(src), Location::GPR(dst)) => { let src = src.into_index() as u32; @@ -1165,14 +1547,14 @@ impl EmitterARM64 for Assembler { (Size::S64, Location::Imm32(imm), Location::GPR(dst)) => { let dst = dst.into_index() as u32; if encode_logical_immediate_64bit(imm as u64).is_none() { - unreachable!(); + codegen_error!("singlepass TST with incompatible imm {}", imm); } dynasm!(self ; tst X(dst), imm as u64); } (Size::S64, Location::Imm64(imm), Location::GPR(dst)) => { let dst = dst.into_index() as u32; if encode_logical_immediate_64bit(imm as u64).is_none() { - unreachable!(); + codegen_error!("singlepass TST with incompatible imm {}", imm); } dynasm!(self ; tst X(dst), imm as u64); 
} @@ -1184,15 +1566,22 @@ impl EmitterARM64 for Assembler { (Size::S32, Location::Imm32(imm), Location::GPR(dst)) => { let dst = dst.into_index() as u32; if encode_logical_immediate_64bit(imm as u64).is_none() { - unreachable!(); + codegen_error!("singlepass TST with incompatible imm {}", imm); } dynasm!(self ; tst W(dst), imm); } - _ => unreachable!(), + _ => codegen_error!("singlepass can't emit TST"), } + Ok(()) } - fn emit_lsl(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_lsl( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S64, Location::GPR(src1), Location::GPR(src2), Location::GPR(dst)) => { let src1 = src1.into_index() as u32; @@ -1203,7 +1592,7 @@ impl EmitterARM64 for Assembler { (Size::S64, Location::GPR(src1), Location::Imm32(imm), Location::GPR(dst)) => { let src1 = src1.into_index() as u32; if imm > 63 { - unreachable!(); + codegen_error!("singlepass LSL with incompatible imm {}", imm); } let imm = imm as u32; let dst = dst.into_index() as u32; @@ -1220,7 +1609,7 @@ impl EmitterARM64 for Assembler { let src1 = src1.into_index() as u32; let dst = dst.into_index() as u32; if imm > 63 { - unreachable!(); + codegen_error!("singlepass LSL with incompatible imm {}", imm); } dynasm!(self ; lsl X(dst), X(src1), imm as u32); } @@ -1229,7 +1618,7 @@ impl EmitterARM64 for Assembler { let src1 = src1.into_index() as u32; let dst = dst.into_index() as u32; if imm > 63 { - unreachable!(); + codegen_error!("singlepass LSL with incompatible imm {}", imm); } dynasm!(self ; lsl X(dst), X(src1), imm as u32); } @@ -1238,7 +1627,7 @@ impl EmitterARM64 for Assembler { let src1 = src1.into_index() as u32; let dst = dst.into_index() as u32; if imm > 31 { - unreachable!(); + codegen_error!("singlepass LSL with incompatible imm {}", imm); } dynasm!(self ; lsl W(dst), W(src1), imm as u32); } @@ -1247,17 +1636,27 @@ impl EmitterARM64 for 
Assembler { let src1 = src1.into_index() as u32; let dst = dst.into_index() as u32; if imm > 31 { - unreachable!(); + codegen_error!("singlepass LSL with incompatible imm {}", imm); } dynasm!(self ; lsl W(dst), W(src1), imm as u32); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit LSL {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_asr(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_asr( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S64, Location::GPR(src1), Location::GPR(src2), Location::GPR(dst)) => { let src1 = src1.into_index() as u32; @@ -1270,7 +1669,7 @@ impl EmitterARM64 for Assembler { let imm = imm as u32; let dst = dst.into_index() as u32; if imm == 0 || imm > 63 { - unreachable!(); + codegen_error!("singlepass ASR with incompatible imm {}", imm); } dynasm!(self ; asr X(dst), X(src1), imm); } @@ -1285,7 +1684,7 @@ impl EmitterARM64 for Assembler { let src1 = src1.into_index() as u32; let dst = dst.into_index() as u32; if imm == 0 || imm > 63 { - unreachable!(); + codegen_error!("singlepass ASR with incompatible imm {}", imm); } dynasm!(self ; asr X(dst), X(src1), imm as u32); } @@ -1294,7 +1693,7 @@ impl EmitterARM64 for Assembler { let src1 = src1.into_index() as u32; let dst = dst.into_index() as u32; if imm == 0 || imm > 63 { - unreachable!(); + codegen_error!("singlepass ASR with incompatible imm {}", imm); } dynasm!(self ; asr X(dst), X(src1), imm as u32); } @@ -1303,7 +1702,7 @@ impl EmitterARM64 for Assembler { let src1 = src1.into_index() as u32; let dst = dst.into_index() as u32; if imm == 0 || imm > 31 { - unreachable!(); + codegen_error!("singlepass ASR with incompatible imm {}", imm); } dynasm!(self ; asr W(dst), W(src1), imm as u32); } @@ -1312,17 +1711,27 @@ impl EmitterARM64 for Assembler { let src1 = src1.into_index() as u32; let dst = 
dst.into_index() as u32; if imm == 0 || imm > 31 { - unreachable!(); + codegen_error!("singlepass ASR with incompatible imm {}", imm); } dynasm!(self ; asr W(dst), W(src1), imm as u32); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit ASR {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_lsr(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_lsr( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S64, Location::GPR(src1), Location::GPR(src2), Location::GPR(dst)) => { let src1 = src1.into_index() as u32; @@ -1335,7 +1744,7 @@ impl EmitterARM64 for Assembler { let imm = imm as u32; let dst = dst.into_index() as u32; if imm == 0 || imm > 63 { - unreachable!(); + codegen_error!("singlepass LSR with incompatible imm {}", imm); } dynasm!(self ; lsr X(dst), X(src1), imm); } @@ -1350,7 +1759,7 @@ impl EmitterARM64 for Assembler { let src1 = src1.into_index() as u32; let dst = dst.into_index() as u32; if imm == 0 || imm > 63 { - unreachable!(); + codegen_error!("singlepass LSR with incompatible imm {}", imm); } dynasm!(self ; lsr X(dst), X(src1), imm as u32); } @@ -1359,7 +1768,7 @@ impl EmitterARM64 for Assembler { let src1 = src1.into_index() as u32; let dst = dst.into_index() as u32; if imm == 0 || imm > 63 { - unreachable!(); + codegen_error!("singlepass LSR with incompatible imm {}", imm); } dynasm!(self ; lsr X(dst), X(src1), imm as u32); } @@ -1368,7 +1777,7 @@ impl EmitterARM64 for Assembler { let src1 = src1.into_index() as u32; let dst = dst.into_index() as u32; if imm == 0 || imm > 31 { - unreachable!(); + codegen_error!("singlepass LSR with incompatible imm {}", imm); } dynasm!(self ; lsr W(dst), W(src1), imm as u32); } @@ -1377,17 +1786,27 @@ impl EmitterARM64 for Assembler { let src1 = src1.into_index() as u32; let dst = dst.into_index() as u32; if imm == 0 || imm > 31 { - 
unreachable!(); + codegen_error!("singlepass LSR with incompatible imm {}", imm); } dynasm!(self ; lsr W(dst), W(src1), imm as u32); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit LSR {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_ror(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_ror( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S64, Location::GPR(src1), Location::GPR(src2), Location::GPR(dst)) => { let src1 = src1.into_index() as u32; @@ -1401,7 +1820,7 @@ impl EmitterARM64 for Assembler { let imm = imm as u32; let dst = dst.into_index() as u32; if imm == 0 || imm > 63 { - unreachable!(); + codegen_error!("singlepass ROR with incompatible imm {}", imm); } dynasm!(self ; ror X(dst), X(src1), imm); } @@ -1410,7 +1829,7 @@ impl EmitterARM64 for Assembler { let src1 = src1.into_index() as u32; let dst = dst.into_index() as u32; if imm == 0 || imm > 31 { - unreachable!(); + codegen_error!("singlepass ROR with incompatible imm {}", imm); } dynasm!(self ; ror W(dst), W(src1), imm as u32); } @@ -1425,7 +1844,7 @@ impl EmitterARM64 for Assembler { let src1 = src1.into_index() as u32; let dst = dst.into_index() as u32; if imm == 0 || imm > 63 { - unreachable!(); + codegen_error!("singlepass ROR with incompatible imm {}", imm); } dynasm!(self ; ror X(dst), X(src1), imm as u32); } @@ -1434,18 +1853,28 @@ impl EmitterARM64 for Assembler { let src1 = src1.into_index() as u32; let dst = dst.into_index() as u32; if imm == 0 || imm > 31 { - unreachable!(); + codegen_error!("singlepass ROR with incompatible imm {}", imm); } dynasm!(self ; ror W(dst), W(src1), imm as u32); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit ROR {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_or(&mut self, sz: Size, src1: Location, src2: 
Location, dst: Location) { + fn emit_or( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S64, Location::GPR(src1), Location::GPR(src2), Location::GPR(dst)) => { let src1 = src1.into_index() as u32; @@ -1458,7 +1887,7 @@ impl EmitterARM64 for Assembler { let src2 = src2 as u64; let dst = dst.into_index() as u32; if encode_logical_immediate_64bit(src2 as u64).is_none() { - unreachable!(); + codegen_error!("singlepass OR with incompatible imm {}", src2); } dynasm!(self ; orr X(dst), X(src1), src2); } @@ -1467,7 +1896,7 @@ impl EmitterARM64 for Assembler { let src2 = src2 as u32; let dst = dst.into_index() as u32; if encode_logical_immediate_32bit(src2).is_none() { - unreachable!(); + codegen_error!("singlepass OR with incompatible imm {}", src2); } dynasm!(self ; orr W(dst), W(src1), src2); } @@ -1477,13 +1906,23 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; orr W(dst), W(src1), W(src2)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit OR {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_and(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_and( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S64, Location::GPR(src1), Location::GPR(src2), Location::GPR(dst)) => { let src1 = src1.into_index() as u32; @@ -1496,7 +1935,7 @@ impl EmitterARM64 for Assembler { let src2 = src2 as u64; let dst = dst.into_index() as u32; if encode_logical_immediate_64bit(src2 as u64).is_none() { - unreachable!(); + codegen_error!("singlepass AND with incompatible imm {}", src2); } dynasm!(self ; and X(dst), X(src1), src2); } @@ -1505,7 +1944,7 @@ impl EmitterARM64 for Assembler { let src2 = src2 as u32; let dst = dst.into_index() as u32; if 
encode_logical_immediate_32bit(src2).is_none() { - unreachable!(); + codegen_error!("singlepass AND with incompatible imm {}", src2); } dynasm!(self ; and W(dst), W(src1), src2); } @@ -1515,13 +1954,23 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; and W(dst), W(src1), W(src2)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit AND {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_eor(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_eor( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S64, Location::GPR(src1), Location::GPR(src2), Location::GPR(dst)) => { let src1 = src1.into_index() as u32; @@ -1534,7 +1983,7 @@ impl EmitterARM64 for Assembler { let src2 = src2 as u64; let dst = dst.into_index() as u32; if encode_logical_immediate_64bit(src2 as u64).is_none() { - unreachable!(); + codegen_error!("singlepass EOR with incompatible imm {}", src2); } dynasm!(self ; eor X(dst), X(src1), src2); } @@ -1543,7 +1992,7 @@ impl EmitterARM64 for Assembler { let src2 = src2 as u32; let dst = dst.into_index() as u32; if encode_logical_immediate_32bit(src2).is_none() { - unreachable!(); + codegen_error!("singlepass EOR with incompatible imm {}", src2); } dynasm!(self ; eor W(dst), W(src1), src2); } @@ -1553,14 +2002,24 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; eor W(dst), W(src1), W(src2)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit EOR {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_bfc(&mut self, sz: Size, lsb: u32, width: u32, dst: Location) { + fn emit_bfc( + &mut self, + sz: Size, + lsb: u32, + width: u32, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, dst) { (Size::S32, Location::GPR(dst)) => { dynasm!(self ; bfc 
W(dst as u32), lsb, width); @@ -1568,10 +2027,18 @@ impl EmitterARM64 for Assembler { (Size::S64, Location::GPR(dst)) => { dynasm!(self ; bfc X(dst as u32), lsb, width); } - _ => unimplemented!(), + _ => codegen_error!("singlepass can't emit BFC"), } + Ok(()) } - fn emit_bfi(&mut self, sz: Size, src: Location, lsb: u32, width: u32, dst: Location) { + fn emit_bfi( + &mut self, + sz: Size, + src: Location, + lsb: u32, + width: u32, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S32, Location::GPR(src), Location::GPR(dst)) => { dynasm!(self ; bfi W(dst as u32), W(src as u32), lsb, width); @@ -1579,11 +2046,18 @@ impl EmitterARM64 for Assembler { (Size::S64, Location::GPR(src), Location::GPR(dst)) => { dynasm!(self ; bfi X(dst as u32), X(src as u32), lsb, width); } - _ => unimplemented!(), + _ => codegen_error!("singlepass can't emit BFI"), } + Ok(()) } - fn emit_udiv(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_udiv( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S32, Location::GPR(src1), Location::GPR(src2), Location::GPR(dst)) => { let src1 = src1.into_index() as u32; @@ -1597,13 +2071,23 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; udiv X(dst), X(src1), X(src2)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit UDIV {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_sdiv(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_sdiv( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S32, Location::GPR(src1), Location::GPR(src2), Location::GPR(dst)) => { let src1 = src1.into_index() as u32; @@ -1617,15 +2101,26 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as 
u32; dynasm!(self ; sdiv X(dst), X(src1), X(src2)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit UDIV {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } /// msub : c - a*b -> dst - fn emit_msub(&mut self, sz: Size, a: Location, b: Location, c: Location, dst: Location) { + fn emit_msub( + &mut self, + sz: Size, + a: Location, + b: Location, + c: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, a, b, c, dst) { ( Size::S32, @@ -1653,14 +2148,19 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; msub X(dst), X(a), X(b), X(c)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit msub {:?} {:?} {:?} {:?} {:?}", - sz, a, b, c, dst + sz, + a, + b, + c, + dst ), } + Ok(()) } - fn emit_sxtb(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_sxtb(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S32, Location::GPR(src), Location::GPR(dst)) => { let src = src.into_index() as u32; @@ -1672,10 +2172,11 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; sxtb X(dst), W(src)); } - _ => panic!("singlepass can't emit SXTB {:?} {:?} {:?}", sz, src, dst), + _ => codegen_error!("singlepass can't emit SXTB {:?} {:?} {:?}", sz, src, dst), } + Ok(()) } - fn emit_sxth(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_sxth(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S32, Location::GPR(src), Location::GPR(dst)) => { let src = src.into_index() as u32; @@ -1687,41 +2188,45 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; sxth X(dst), W(src)); } - _ => panic!("singlepass can't emit SXTH {:?} {:?} {:?}", sz, src, dst), + _ => codegen_error!("singlepass can't emit SXTH {:?} {:?} {:?}", sz, src, dst), } + Ok(()) } - fn emit_sxtw(&mut self, _sz: Size, 
src: Location, dst: Location) { + fn emit_sxtw(&mut self, _sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (src, dst) { (Location::GPR(src), Location::GPR(dst)) => { let src = src.into_index() as u32; let dst = dst.into_index() as u32; dynasm!(self ; sxtw X(dst), W(src)); } - _ => panic!("singlepass can't emit SXTW {:?} {:?}", src, dst), + _ => codegen_error!("singlepass can't emit SXTW {:?} {:?}", src, dst), } + Ok(()) } - fn emit_uxtb(&mut self, _sz: Size, src: Location, dst: Location) { + fn emit_uxtb(&mut self, _sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (src, dst) { (Location::GPR(src), Location::GPR(dst)) => { let src = src.into_index() as u32; let dst = dst.into_index() as u32; dynasm!(self ; uxtb W(dst), W(src)); } - _ => panic!("singlepass can't emit UXTB {:?} {:?}", src, dst), + _ => codegen_error!("singlepass can't emit UXTB {:?} {:?}", src, dst), } + Ok(()) } - fn emit_uxth(&mut self, _sz: Size, src: Location, dst: Location) { + fn emit_uxth(&mut self, _sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (src, dst) { (Location::GPR(src), Location::GPR(dst)) => { let src = src.into_index() as u32; let dst = dst.into_index() as u32; dynasm!(self ; uxth W(dst), W(src)); } - _ => panic!("singlepass can't emit UXTH {:?} {:?}", src, dst), + _ => codegen_error!("singlepass can't emit UXTH {:?} {:?}", src, dst), } + Ok(()) } - fn emit_cset(&mut self, sz: Size, dst: Location, cond: Condition) { + fn emit_cset(&mut self, sz: Size, dst: Location, cond: Condition) -> Result<(), CodegenError> { match (sz, dst) { (Size::S32, Location::GPR(reg)) => { let reg = reg as u32; @@ -1763,10 +2268,11 @@ impl EmitterARM64 for Assembler { Condition::Al => dynasm!(self ; cset X(reg), al), } } - _ => panic!("singlepass can't emit CSET {:?} {:?} {:?}", sz, dst, cond), + _ => codegen_error!("singlepass can't emit CSET {:?} {:?} {:?}", sz, dst, cond), } + Ok(()) } - fn emit_csetm(&mut self, sz: 
Size, dst: Location, cond: Condition) { + fn emit_csetm(&mut self, sz: Size, dst: Location, cond: Condition) -> Result<(), CodegenError> { match (sz, dst) { (Size::S32, Location::GPR(reg)) => { let reg = reg as u32; @@ -1808,10 +2314,17 @@ impl EmitterARM64 for Assembler { Condition::Al => dynasm!(self ; csetm X(reg), al), } } - _ => panic!("singlepass can't emit CSETM {:?} {:?} {:?}", sz, dst, cond), + _ => codegen_error!("singlepass can't emit CSETM {:?} {:?} {:?}", sz, dst, cond), } + Ok(()) } - fn emit_cinc(&mut self, sz: Size, src: Location, dst: Location, cond: Condition) { + fn emit_cinc( + &mut self, + sz: Size, + src: Location, + dst: Location, + cond: Condition, + ) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S32, Location::GPR(src), Location::GPR(dst)) => { let src = src.into_index() as u32; @@ -1855,11 +2368,12 @@ impl EmitterARM64 for Assembler { Condition::Al => dynasm!(self ; cinc X(src), X(dst), al), }; } - _ => unreachable!(), + _ => codegen_error!("singlepass can't emit CINC"), } + Ok(()) } - fn emit_clz(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_clz(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S64, Location::GPR(src), Location::GPR(dst)) => { let src = src.into_index() as u32; @@ -1871,10 +2385,11 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; clz W(dst), W(src)); } - _ => panic!("singlepass can't emit CLS {:?} {:?} {:?}", sz, src, dst), + _ => codegen_error!("singlepass can't emit CLS {:?} {:?} {:?}", sz, src, dst), } + Ok(()) } - fn emit_rbit(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_rbit(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S64, Location::GPR(src), Location::GPR(dst)) => { let src = src.into_index() as u32; @@ -1886,22 +2401,31 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self 
; rbit W(dst), W(src)); } - _ => panic!("singlepass can't emit CLS {:?} {:?} {:?}", sz, src, dst), + _ => codegen_error!("singlepass can't emit CLS {:?} {:?} {:?}", sz, src, dst), } + Ok(()) } - fn emit_label(&mut self, label: Label) { + fn emit_label(&mut self, label: Label) -> Result<(), CodegenError> { dynasm!(self ; => label); + Ok(()) } - fn emit_load_label(&mut self, reg: GPR, label: Label) { + fn emit_load_label(&mut self, reg: GPR, label: Label) -> Result<(), CodegenError> { let reg = reg.into_index() as u32; dynasm!(self ; adr X(reg), =>label); + Ok(()) } - fn emit_b_label(&mut self, label: Label) { + fn emit_b_label(&mut self, label: Label) -> Result<(), CodegenError> { dynasm!(self ; b =>label); - } - fn emit_cbz_label(&mut self, sz: Size, reg: Location, label: Label) { + Ok(()) + } + fn emit_cbz_label( + &mut self, + sz: Size, + reg: Location, + label: Label, + ) -> Result<(), CodegenError> { match (sz, reg) { (Size::S32, Location::GPR(reg)) => { let reg = reg.into_index() as u32; @@ -1911,10 +2435,16 @@ impl EmitterARM64 for Assembler { let reg = reg.into_index() as u32; dynasm!(self ; cbz X(reg), =>label); } - _ => panic!("singlepass can't emit CBZ {:?} {:?} {:?}", sz, reg, label), + _ => codegen_error!("singlepass can't emit CBZ {:?} {:?} {:?}", sz, reg, label), } + Ok(()) } - fn emit_cbnz_label(&mut self, sz: Size, reg: Location, label: Label) { + fn emit_cbnz_label( + &mut self, + sz: Size, + reg: Location, + label: Label, + ) -> Result<(), CodegenError> { match (sz, reg) { (Size::S32, Location::GPR(reg)) => { let reg = reg.into_index() as u32; @@ -1924,10 +2454,17 @@ impl EmitterARM64 for Assembler { let reg = reg.into_index() as u32; dynasm!(self ; cbnz X(reg), =>label); } - _ => panic!("singlepass can't emit CBNZ {:?} {:?} {:?}", sz, reg, label), + _ => codegen_error!("singlepass can't emit CBNZ {:?} {:?} {:?}", sz, reg, label), } + Ok(()) } - fn emit_tbz_label(&mut self, sz: Size, reg: Location, n: u32, label: Label) { + fn emit_tbz_label( + 
&mut self, + sz: Size, + reg: Location, + n: u32, + label: Label, + ) -> Result<(), CodegenError> { match (sz, reg) { (Size::S32, Location::GPR(reg)) => { let reg = reg.into_index() as u32; @@ -1937,13 +2474,23 @@ impl EmitterARM64 for Assembler { let reg = reg.into_index() as u32; dynasm!(self ; tbz X(reg), n, =>label); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit TBZ {:?} {:?} {:?} {:?}", - sz, reg, n, label + sz, + reg, + n, + label ), } + Ok(()) } - fn emit_tbnz_label(&mut self, sz: Size, reg: Location, n: u32, label: Label) { + fn emit_tbnz_label( + &mut self, + sz: Size, + reg: Location, + n: u32, + label: Label, + ) -> Result<(), CodegenError> { match (sz, reg) { (Size::S32, Location::GPR(reg)) => { let reg = reg.into_index() as u32; @@ -1953,13 +2500,17 @@ impl EmitterARM64 for Assembler { let reg = reg.into_index() as u32; dynasm!(self ; tbnz X(reg), n, =>label); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit TBNZ {:?} {:?} {:?} {:?}", - sz, reg, n, label + sz, + reg, + n, + label ), } + Ok(()) } - fn emit_bcond_label(&mut self, condition: Condition, label: Label) { + fn emit_bcond_label(&mut self, condition: Condition, label: Label) -> Result<(), CodegenError> { match condition { Condition::Eq => dynasm!(self ; b.eq => label), Condition::Ne => dynasm!(self ; b.ne => label), @@ -1977,8 +2528,13 @@ impl EmitterARM64 for Assembler { Condition::Le => dynasm!(self ; b.le => label), Condition::Al => dynasm!(self ; b => label), } + Ok(()) } - fn emit_bcond_label_far(&mut self, condition: Condition, label: Label) { + fn emit_bcond_label_far( + &mut self, + condition: Condition, + label: Label, + ) -> Result<(), CodegenError> { let cont: Label = self.get_label(); match condition { // if not condition than continue @@ -1999,32 +2555,40 @@ impl EmitterARM64 for Assembler { Condition::Al => { /*nothing*/ } } dynasm!(self ; b => label); - self.emit_label(cont); + self.emit_label(cont)?; + Ok(()) } - fn emit_b_register(&mut self, reg: 
GPR) { + fn emit_b_register(&mut self, reg: GPR) -> Result<(), CodegenError> { dynasm!(self ; br X(reg.into_index() as u32)); + Ok(()) } - fn emit_call_label(&mut self, label: Label) { + fn emit_call_label(&mut self, label: Label) -> Result<(), CodegenError> { dynasm!(self ; bl =>label); + Ok(()) } - fn emit_call_register(&mut self, reg: GPR) { + fn emit_call_register(&mut self, reg: GPR) -> Result<(), CodegenError> { dynasm!(self ; blr X(reg.into_index() as u32)); + Ok(()) } - fn emit_ret(&mut self) { + fn emit_ret(&mut self) -> Result<(), CodegenError> { dynasm!(self ; ret); + Ok(()) } - fn emit_udf(&mut self, payload: u16) { + fn emit_udf(&mut self, payload: u16) -> Result<(), CodegenError> { dynasm!(self ; udf (payload as u32)); + Ok(()) } - fn emit_dmb(&mut self) { + fn emit_dmb(&mut self) -> Result<(), CodegenError> { dynasm!(self ; dmb ish); + Ok(()) } - fn emit_brk(&mut self) { + fn emit_brk(&mut self) -> Result<(), CodegenError> { dynasm!(self ; brk 0); + Ok(()) } - fn emit_fcmp(&mut self, sz: Size, src1: Location, src2: Location) { + fn emit_fcmp(&mut self, sz: Size, src1: Location, src2: Location) -> Result<(), CodegenError> { match (sz, src1, src2) { (Size::S32, Location::SIMD(src1), Location::SIMD(src2)) => { let src1 = src1.into_index() as u32; @@ -2036,11 +2600,12 @@ impl EmitterARM64 for Assembler { let src2 = src2.into_index() as u32; dynasm!(self ; fcmp D(src1), D(src2)); } - _ => panic!("singlepass can't emit FCMP {:?} {:?} {:?}", sz, src1, src2), + _ => codegen_error!("singlepass can't emit FCMP {:?} {:?} {:?}", sz, src1, src2), } + Ok(()) } - fn emit_fneg(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_fneg(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S32, Location::SIMD(src), Location::SIMD(dst)) => { let src = src.into_index() as u32; @@ -2052,10 +2617,11 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; fneg D(dst), D(src)); 
} - _ => panic!("singlepass can't emit FNEG {:?} {:?} {:?}", sz, src, dst), + _ => codegen_error!("singlepass can't emit FNEG {:?} {:?} {:?}", sz, src, dst), } + Ok(()) } - fn emit_fsqrt(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_fsqrt(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S32, Location::SIMD(src), Location::SIMD(dst)) => { let src = src.into_index() as u32; @@ -2067,11 +2633,18 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; fsqrt D(dst), D(src)); } - _ => panic!("singlepass can't emit FSQRT {:?} {:?} {:?}", sz, src, dst), + _ => codegen_error!("singlepass can't emit FSQRT {:?} {:?} {:?}", sz, src, dst), } + Ok(()) } - fn emit_fadd(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_fadd( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S32, Location::SIMD(src1), Location::SIMD(src2), Location::SIMD(dst)) => { let src1 = src1.into_index() as u32; @@ -2085,13 +2658,23 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; fadd D(dst), D(src1), D(src2)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit FADD {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_fsub(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_fsub( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S32, Location::SIMD(src1), Location::SIMD(src2), Location::SIMD(dst)) => { let src1 = src1.into_index() as u32; @@ -2105,13 +2688,23 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; fsub D(dst), D(src1), D(src2)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit FSUB {:?} {:?} 
{:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_fmul(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_fmul( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S32, Location::SIMD(src1), Location::SIMD(src2), Location::SIMD(dst)) => { let src1 = src1.into_index() as u32; @@ -2125,13 +2718,23 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; fmul D(dst), D(src1), D(src2)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit FMUL {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_fdiv(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_fdiv( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S32, Location::SIMD(src1), Location::SIMD(src2), Location::SIMD(dst)) => { let src1 = src1.into_index() as u32; @@ -2145,14 +2748,24 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; fdiv D(dst), D(src1), D(src2)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit FDIV {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_fmin(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_fmin( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S32, Location::SIMD(src1), Location::SIMD(src2), Location::SIMD(dst)) => { let src1 = src1.into_index() as u32; @@ -2166,13 +2779,23 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; fmin D(dst), D(src1), D(src2)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit FMIN {:?} {:?} {:?} {:?}", - sz, src1, 
src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_fmax(&mut self, sz: Size, src1: Location, src2: Location, dst: Location) { + fn emit_fmax( + &mut self, + sz: Size, + src1: Location, + src2: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src1, src2, dst) { (Size::S32, Location::SIMD(src1), Location::SIMD(src2), Location::SIMD(dst)) => { let src1 = src1.into_index() as u32; @@ -2186,14 +2809,18 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; fmax D(dst), D(src1), D(src2)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit FMAX {:?} {:?} {:?} {:?}", - sz, src1, src2, dst + sz, + src1, + src2, + dst ), } + Ok(()) } - fn emit_frintz(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_frintz(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S32, Location::SIMD(src), Location::SIMD(dst)) => { let src = src.into_index() as u32; @@ -2205,10 +2832,11 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; frintz D(dst), D(src)); } - _ => panic!("singlepass can't emit FRINTZ {:?} {:?} {:?}", sz, src, dst), + _ => codegen_error!("singlepass can't emit FRINTZ {:?} {:?} {:?}", sz, src, dst), } + Ok(()) } - fn emit_frintn(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_frintn(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S32, Location::SIMD(src), Location::SIMD(dst)) => { let src = src.into_index() as u32; @@ -2220,10 +2848,11 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; frintn D(dst), D(src)); } - _ => panic!("singlepass can't emit FRINTN {:?} {:?} {:?}", sz, src, dst), + _ => codegen_error!("singlepass can't emit FRINTN {:?} {:?} {:?}", sz, src, dst), } + Ok(()) } - fn emit_frintm(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_frintm(&mut self, sz: 
Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S32, Location::SIMD(src), Location::SIMD(dst)) => { let src = src.into_index() as u32; @@ -2235,10 +2864,11 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; frintm D(dst), D(src)); } - _ => panic!("singlepass can't emit FRINTM {:?} {:?} {:?}", sz, src, dst), + _ => codegen_error!("singlepass can't emit FRINTM {:?} {:?} {:?}", sz, src, dst), } + Ok(()) } - fn emit_frintp(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_frintp(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S32, Location::SIMD(src), Location::SIMD(dst)) => { let src = src.into_index() as u32; @@ -2250,11 +2880,18 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; frintp D(dst), D(src)); } - _ => panic!("singlepass can't emit FRINTP {:?} {:?} {:?}", sz, src, dst), + _ => codegen_error!("singlepass can't emit FRINTP {:?} {:?} {:?}", sz, src, dst), } + Ok(()) } - fn emit_scvtf(&mut self, sz_in: Size, src: Location, sz_out: Size, dst: Location) { + fn emit_scvtf( + &mut self, + sz_in: Size, + src: Location, + sz_out: Size, + dst: Location, + ) -> Result<(), CodegenError> { match (sz_in, src, sz_out, dst) { (Size::S32, Location::GPR(src), Size::S32, Location::SIMD(dst)) => { let src = src.into_index() as u32; @@ -2276,13 +2913,23 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; scvtf D(dst), X(src)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit SCVTF {:?} {:?} {:?} {:?}", - sz_in, src, sz_out, dst + sz_in, + src, + sz_out, + dst ), } + Ok(()) } - fn emit_ucvtf(&mut self, sz_in: Size, src: Location, sz_out: Size, dst: Location) { + fn emit_ucvtf( + &mut self, + sz_in: Size, + src: Location, + sz_out: Size, + dst: Location, + ) -> Result<(), CodegenError> { match (sz_in, src, sz_out, dst) { (Size::S32, 
Location::GPR(src), Size::S32, Location::SIMD(dst)) => { let src = src.into_index() as u32; @@ -2304,13 +2951,17 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; ucvtf D(dst), X(src)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit UCVTF {:?} {:?} {:?} {:?}", - sz_in, src, sz_out, dst + sz_in, + src, + sz_out, + dst ), } + Ok(()) } - fn emit_fcvt(&mut self, sz_in: Size, src: Location, dst: Location) { + fn emit_fcvt(&mut self, sz_in: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (sz_in, src, dst) { (Size::S32, Location::SIMD(src), Location::SIMD(dst)) => { let src = src.into_index() as u32; @@ -2322,13 +2973,22 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; fcvt S(dst), D(src)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit UCVTF {:?} {:?} {:?}", - sz_in, src, dst + sz_in, + src, + dst ), } + Ok(()) } - fn emit_fcvtzs(&mut self, sz_in: Size, src: Location, sz_out: Size, dst: Location) { + fn emit_fcvtzs( + &mut self, + sz_in: Size, + src: Location, + sz_out: Size, + dst: Location, + ) -> Result<(), CodegenError> { match (sz_in, src, sz_out, dst) { (Size::S32, Location::SIMD(src), Size::S32, Location::GPR(dst)) => { let src = src.into_index() as u32; @@ -2350,13 +3010,23 @@ impl EmitterARM64 for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; fcvtzs X(dst), D(src)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit FCVTZS {:?} {:?} {:?} {:?}", - sz_in, src, sz_out, dst + sz_in, + src, + sz_out, + dst ), } + Ok(()) } - fn emit_fcvtzu(&mut self, sz_in: Size, src: Location, sz_out: Size, dst: Location) { + fn emit_fcvtzu( + &mut self, + sz_in: Size, + src: Location, + sz_out: Size, + dst: Location, + ) -> Result<(), CodegenError> { match (sz_in, src, sz_out, dst) { (Size::S32, Location::SIMD(src), Size::S32, Location::GPR(dst)) => { let src = src.into_index() as u32; @@ -2378,33 +3048,41 @@ impl EmitterARM64 
for Assembler { let dst = dst.into_index() as u32; dynasm!(self ; fcvtzu X(dst), D(src)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit FCVTZU {:?} {:?} {:?} {:?}", - sz_in, src, sz_out, dst + sz_in, + src, + sz_out, + dst ), } + Ok(()) } // 1 011 0100 0100 000 => fpcr - fn emit_read_fpcr(&mut self, reg: GPR) { + fn emit_read_fpcr(&mut self, reg: GPR) -> Result<(), CodegenError> { dynasm!(self ; mrs X(reg as u32), 0b1_011_0100_0100_000); + Ok(()) } - fn emit_write_fpcr(&mut self, reg: GPR) { + fn emit_write_fpcr(&mut self, reg: GPR) -> Result<(), CodegenError> { dynasm!(self ; msr 0b1_011_0100_0100_000, X(reg as u32)); + Ok(()) } // 1 011 0100 0100 001 => fpsr - fn emit_read_fpsr(&mut self, reg: GPR) { + fn emit_read_fpsr(&mut self, reg: GPR) -> Result<(), CodegenError> { dynasm!(self ; mrs X(reg as u32), 0b1_011_0100_0100_001); + Ok(()) } - fn emit_write_fpsr(&mut self, reg: GPR) { + fn emit_write_fpsr(&mut self, reg: GPR) -> Result<(), CodegenError> { dynasm!(self ; msr 0b1_011_0100_0100_001, X(reg as u32)); + Ok(()) } } pub fn gen_std_trampoline_arm64( sig: &FunctionType, calling_convention: CallingConvention, -) -> FunctionBody { +) -> Result { let mut a = Assembler::new(0); let fptr = GPR::X27; @@ -2438,7 +3116,7 @@ pub fn gen_std_trampoline_arm64( Type::I64 | Type::F64 => Size::S64, Type::ExternRef => Size::S64, Type::FuncRef => Size::S64, - _ => panic!( + _ => codegen_error!( "singlepass unsupported param type for trampoline {:?}", *param ), @@ -2449,7 +3127,7 @@ pub fn gen_std_trampoline_arm64( sz, Location::GPR(GPR::from_index(i + 1).unwrap()), Location::Memory(args, (i * 16) as i32), - ); + )?; } _ => { #[allow(clippy::single_match)] @@ -2481,12 +3159,12 @@ pub fn gen_std_trampoline_arm64( sz, Location::GPR(GPR::X16), Location::Memory(args, (i * 16) as i32), - ); + )?; a.emit_str( sz, Location::GPR(GPR::X16), Location::Memory(GPR::XzrSp, caller_stack_offset), - ); + )?; match calling_convention { CallingConvention::AppleAarch64 => { 
caller_stack_offset += match sz { @@ -2508,7 +3186,7 @@ pub fn gen_std_trampoline_arm64( // Write return value. if !sig.results().is_empty() { - a.emit_str(Size::S64, Location::GPR(GPR::X0), Location::Memory(args, 0)); + a.emit_str(Size::S64, Location::GPR(GPR::X0), Location::Memory(args, 0))?; } // Restore stack. @@ -2519,17 +3197,17 @@ pub fn gen_std_trampoline_arm64( ; ret ); - FunctionBody { + Ok(FunctionBody { body: a.finalize().unwrap().to_vec(), unwind_info: None, - } + }) } // Generates dynamic import function call trampoline for a function type. pub fn gen_std_dynamic_import_trampoline_arm64( vmoffsets: &VMOffsets, sig: &FunctionType, calling_convention: CallingConvention, -) -> FunctionBody { +) -> Result { let mut a = Assembler::new(0); // Allocate argument array. let stack_offset: usize = 16 * std::cmp::max(sig.params().len(), sig.results().len()); @@ -2540,7 +3218,7 @@ pub fn gen_std_dynamic_import_trampoline_arm64( Location::GPR(GPR::X26), GPR::XzrSp, 16, - ); + )?; if stack_offset != 0 { if stack_offset < 0x1000 { @@ -2549,15 +3227,15 @@ pub fn gen_std_dynamic_import_trampoline_arm64( Location::GPR(GPR::XzrSp), Location::Imm32(stack_offset as _), Location::GPR(GPR::XzrSp), - ); + )?; } else { - a.emit_mov_imm(Location::GPR(GPR::X26), stack_offset as u64); + a.emit_mov_imm(Location::GPR(GPR::X26), stack_offset as u64)?; a.emit_sub( Size::S64, Location::GPR(GPR::XzrSp), Location::GPR(GPR::X26), Location::GPR(GPR::XzrSp), - ); + )?; } } @@ -2589,11 +3267,13 @@ pub fn gen_std_dynamic_import_trampoline_arm64( sz, Location::GPR(GPR::X26), Location::Memory(GPR::XzrSp, (stack_offset + 16 + stack_param_count) as _), - ); + )?; stack_param_count += match sz { Size::S32 => 4, Size::S64 => 8, - _ => unreachable!(), + _ => codegen_error!( + "singlepass unreachable in gen_std_dynamic_import_trampoline_arm64" + ), }; Location::GPR(GPR::X26) } @@ -2602,14 +3282,14 @@ pub fn gen_std_dynamic_import_trampoline_arm64( Size::S64, source_loc, Location::Memory(GPR::XzrSp, 
(i * 16) as _), - ); + )?; // Zero upper 64 bits. a.emit_str( Size::S64, Location::GPR(GPR::XzrSp), // XZR here Location::Memory(GPR::XzrSp, (i * 16 + 8) as _), // XSP here - ); + )?; } } @@ -2618,19 +3298,19 @@ pub fn gen_std_dynamic_import_trampoline_arm64( _ => { // Load target address. let offset = vmoffsets.vmdynamicfunction_import_context_address(); - a.emit_ldur(Size::S64, Location::GPR(GPR::X26), GPR::X0, offset as i32); + a.emit_ldur(Size::S64, Location::GPR(GPR::X26), GPR::X0, offset as i32)?; // Load values array. a.emit_add( Size::S64, Location::GPR(GPR::XzrSp), Location::Imm8(0), Location::GPR(GPR::X1), - ); + )?; } }; // Call target. - a.emit_call_register(GPR::X26); + a.emit_call_register(GPR::X26)?; // Fetch return value. if !sig.results().is_empty() { @@ -2639,7 +3319,7 @@ pub fn gen_std_dynamic_import_trampoline_arm64( Size::S64, Location::GPR(GPR::X0), Location::Memory(GPR::XzrSp, 0), - ); + )?; } // Release values array. @@ -2650,15 +3330,15 @@ pub fn gen_std_dynamic_import_trampoline_arm64( Location::GPR(GPR::XzrSp), Location::Imm32(stack_offset as _), Location::GPR(GPR::XzrSp), - ); + )?; } else { - a.emit_mov_imm(Location::GPR(GPR::X26), stack_offset as u64); + a.emit_mov_imm(Location::GPR(GPR::X26), stack_offset as u64)?; a.emit_add( Size::S64, Location::GPR(GPR::XzrSp), Location::GPR(GPR::X26), Location::GPR(GPR::XzrSp), - ); + )?; } } a.emit_ldpia( @@ -2667,15 +3347,15 @@ pub fn gen_std_dynamic_import_trampoline_arm64( Location::GPR(GPR::X26), GPR::XzrSp, 16, - ); + )?; // Return. - a.emit_ret(); + a.emit_ret()?; - FunctionBody { + Ok(FunctionBody { body: a.finalize().unwrap().to_vec(), unwind_info: None, - } + }) } // Singlepass calls import functions through a trampoline. 
pub fn gen_import_call_trampoline_arm64( @@ -2683,7 +3363,7 @@ pub fn gen_import_call_trampoline_arm64( index: FunctionIndex, sig: &FunctionType, calling_convention: CallingConvention, -) -> CustomSection { +) -> Result { let mut a = Assembler::new(0); // Singlepass internally treats all arguments as integers @@ -2716,15 +3396,15 @@ pub fn gen_import_call_trampoline_arm64( Location::GPR(GPR::XzrSp), Location::Imm32(stack_offset as u32), Location::GPR(GPR::XzrSp), - ); + )?; } else { - a.emit_mov_imm(Location::GPR(GPR::X16), stack_offset as u64); + a.emit_mov_imm(Location::GPR(GPR::X16), stack_offset as u64)?; a.emit_sub( Size::S64, Location::GPR(GPR::XzrSp), Location::GPR(GPR::X16), Location::GPR(GPR::XzrSp), - ); + )?; } } @@ -2745,7 +3425,7 @@ pub fn gen_import_call_trampoline_arm64( let loc = match i { 0..=6 => { let loc = Location::Memory(GPR::XzrSp, (i * 8) as i32); - a.emit_str(Size::S64, Location::GPR(PARAM_REGS[i]), loc); + a.emit_str(Size::S64, Location::GPR(PARAM_REGS[i]), loc)?; loc } _ => Location::Memory(GPR::XzrSp, stack_offset + ((i - 7) * 8) as i32), @@ -2764,17 +3444,17 @@ pub fn gen_import_call_trampoline_arm64( Some(ARM64Register::NEON(neon)) => Location::SIMD(neon), None => { // No register can be allocated. Put this argument on the stack. - a.emit_ldr(Size::S64, Location::GPR(GPR::X16), prev_loc); + a.emit_ldr(Size::S64, Location::GPR(GPR::X16), prev_loc)?; a.emit_str( Size::S64, Location::GPR(GPR::X16), Location::Memory(GPR::XzrSp, stack_offset + caller_stack_offset), - ); + )?; caller_stack_offset += 8; continue; } }; - a.emit_ldr(Size::S64, targ, prev_loc); + a.emit_ldr(Size::S64, targ, prev_loc)?; } // Restore stack pointer. 
@@ -2785,15 +3465,15 @@ pub fn gen_import_call_trampoline_arm64( Location::GPR(GPR::XzrSp), Location::Imm32(stack_offset as u32), Location::GPR(GPR::XzrSp), - ); + )?; } else { - a.emit_mov_imm(Location::GPR(GPR::X16), stack_offset as u64); + a.emit_mov_imm(Location::GPR(GPR::X16), stack_offset as u64)?; a.emit_add( Size::S64, Location::GPR(GPR::XzrSp), Location::GPR(GPR::X16), Location::GPR(GPR::XzrSp), - ); + )?; } } } @@ -2810,13 +3490,13 @@ pub fn gen_import_call_trampoline_arm64( if (offset > 0 && offset < 0xF8) || (offset > 0 && offset < 0x7FF8 && (offset & 7) == 0) { offset } else { - a.emit_mov_imm(Location::GPR(GPR::X16), (offset as i64) as u64); + a.emit_mov_imm(Location::GPR(GPR::X16), (offset as i64) as u64)?; a.emit_add( Size::S64, Location::GPR(GPR::X0), Location::GPR(GPR::X16), Location::GPR(GPR::X0), - ); + )?; 0 }; #[allow(clippy::match_single_binding)] @@ -2827,35 +3507,35 @@ pub fn gen_import_call_trampoline_arm64( Size::S64, Location::GPR(GPR::X16), Location::Memory(GPR::X0, offset as i32), // function pointer - ); + )?; a.emit_ldr( Size::S64, Location::GPR(GPR::X0), Location::Memory(GPR::X0, offset as i32 + 8), // target vmctx - ); + )?; } else { a.emit_ldur( Size::S64, Location::GPR(GPR::X16), GPR::X0, offset as i32, // function pointer - ); + )?; a.emit_ldur( Size::S64, Location::GPR(GPR::X0), GPR::X0, offset as i32 + 8, // target vmctx - ); + )?; } } } - a.emit_b_register(GPR::X16); + a.emit_b_register(GPR::X16)?; let section_body = SectionBody::new_with_vec(a.finalize().unwrap().to_vec()); - CustomSection { + Ok(CustomSection { protection: CustomSectionProtection::ReadExecute, bytes: section_body, relocations: vec![], - } + }) } diff --git a/lib/compiler-singlepass/src/emitter_x64.rs b/lib/compiler-singlepass/src/emitter_x64.rs index 3ce8b7e160b..f859b4c8fbc 100644 --- a/lib/compiler-singlepass/src/emitter_x64.rs +++ b/lib/compiler-singlepass/src/emitter_x64.rs @@ -1,6 +1,8 @@ +use crate::codegen_error; use crate::common_decl::Size; use 
crate::location::Location as AbstractLocation; pub use crate::location::Multiplier; +use crate::machine::CodegenError; pub use crate::machine::{Label, Offset}; use crate::machine_x64::AssemblerX64; pub use crate::x64_decl::{GPR, XMM}; @@ -66,223 +68,379 @@ pub trait EmitterX64 { fn get_offset(&self) -> Offset; fn get_jmp_instr_size(&self) -> u8; - fn finalize_function(&mut self) {} + fn finalize_function(&mut self) -> Result<(), CodegenError> { + Ok(()) + } - fn emit_u64(&mut self, x: u64); - fn emit_bytes(&mut self, bytes: &[u8]); + fn emit_u64(&mut self, x: u64) -> Result<(), CodegenError>; + fn emit_bytes(&mut self, bytes: &[u8]) -> Result<(), CodegenError>; - fn emit_label(&mut self, label: Label); + fn emit_label(&mut self, label: Label) -> Result<(), CodegenError>; - fn emit_nop(&mut self); + fn emit_nop(&mut self) -> Result<(), CodegenError>; /// A high-level assembler method. Emits an instruction sequence of length `n` that is functionally /// equivalent to a `nop` instruction, without guarantee about the underlying implementation. 
- fn emit_nop_n(&mut self, n: usize); - - fn emit_mov(&mut self, sz: Size, src: Location, dst: Location); - fn emit_lea(&mut self, sz: Size, src: Location, dst: Location); - fn emit_lea_label(&mut self, label: Label, dst: Location); - fn emit_cdq(&mut self); - fn emit_cqo(&mut self); - fn emit_xor(&mut self, sz: Size, src: Location, dst: Location); - fn emit_jmp(&mut self, condition: Condition, label: Label); - fn emit_jmp_location(&mut self, loc: Location); - fn emit_set(&mut self, condition: Condition, dst: GPR); - fn emit_push(&mut self, sz: Size, src: Location); - fn emit_pop(&mut self, sz: Size, dst: Location); - fn emit_cmp(&mut self, sz: Size, left: Location, right: Location); - fn emit_add(&mut self, sz: Size, src: Location, dst: Location); - fn emit_sub(&mut self, sz: Size, src: Location, dst: Location); - fn emit_neg(&mut self, sz: Size, value: Location); - fn emit_imul(&mut self, sz: Size, src: Location, dst: Location); - fn emit_imul_imm32_gpr64(&mut self, src: u32, dst: GPR); - fn emit_div(&mut self, sz: Size, divisor: Location); - fn emit_idiv(&mut self, sz: Size, divisor: Location); - fn emit_shl(&mut self, sz: Size, src: Location, dst: Location); - fn emit_shr(&mut self, sz: Size, src: Location, dst: Location); - fn emit_sar(&mut self, sz: Size, src: Location, dst: Location); - fn emit_rol(&mut self, sz: Size, src: Location, dst: Location); - fn emit_ror(&mut self, sz: Size, src: Location, dst: Location); - fn emit_and(&mut self, sz: Size, src: Location, dst: Location); - fn emit_test(&mut self, sz: Size, src: Location, dst: Location); - fn emit_or(&mut self, sz: Size, src: Location, dst: Location); - fn emit_bsr(&mut self, sz: Size, src: Location, dst: Location); - fn emit_bsf(&mut self, sz: Size, src: Location, dst: Location); - fn emit_popcnt(&mut self, sz: Size, src: Location, dst: Location); - fn emit_movzx(&mut self, sz_src: Size, src: Location, sz_dst: Size, dst: Location); - fn emit_movsx(&mut self, sz_src: Size, src: Location, sz_dst: Size, 
dst: Location); - fn emit_xchg(&mut self, sz: Size, src: Location, dst: Location); - fn emit_lock_xadd(&mut self, sz: Size, src: Location, dst: Location); - fn emit_lock_cmpxchg(&mut self, sz: Size, src: Location, dst: Location); - fn emit_rep_stosq(&mut self); - - fn emit_btc_gpr_imm8_32(&mut self, src: u8, dst: GPR); - fn emit_btc_gpr_imm8_64(&mut self, src: u8, dst: GPR); - - fn emit_cmovae_gpr_32(&mut self, src: GPR, dst: GPR); - fn emit_cmovae_gpr_64(&mut self, src: GPR, dst: GPR); - - fn emit_vmovaps(&mut self, src: XMMOrMemory, dst: XMMOrMemory); - fn emit_vmovapd(&mut self, src: XMMOrMemory, dst: XMMOrMemory); - fn emit_vxorps(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vxorpd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - - fn emit_vaddss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vaddsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vsubss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vsubsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vmulss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vmulsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vdivss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vdivsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vmaxss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vmaxsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vminss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vminsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - - fn emit_vcmpeqss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vcmpeqsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - - fn emit_vcmpneqss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vcmpneqsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - - fn emit_vcmpltss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vcmpltsd(&mut self, src1: XMM, 
src2: XMMOrMemory, dst: XMM); - - fn emit_vcmpless(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vcmplesd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - - fn emit_vcmpgtss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vcmpgtsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - - fn emit_vcmpgess(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vcmpgesd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - - fn emit_vcmpunordss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vcmpunordsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - - fn emit_vcmpordss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vcmpordsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - - fn emit_vsqrtss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vsqrtsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - - fn emit_vroundss_nearest(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vroundss_floor(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vroundss_ceil(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vroundss_trunc(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vroundsd_nearest(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vroundsd_floor(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vroundsd_ceil(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vroundsd_trunc(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - - fn emit_vcvtss2sd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - fn emit_vcvtsd2ss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM); - - fn emit_ucomiss(&mut self, src: XMMOrMemory, dst: XMM); - fn emit_ucomisd(&mut self, src: XMMOrMemory, dst: XMM); - - fn emit_cvttss2si_32(&mut self, src: XMMOrMemory, dst: GPR); - fn emit_cvttss2si_64(&mut self, src: XMMOrMemory, dst: GPR); - fn emit_cvttsd2si_32(&mut self, src: XMMOrMemory, dst: GPR); - fn 
emit_cvttsd2si_64(&mut self, src: XMMOrMemory, dst: GPR); - - fn emit_vcvtsi2ss_32(&mut self, src1: XMM, src2: GPROrMemory, dst: XMM); - fn emit_vcvtsi2ss_64(&mut self, src1: XMM, src2: GPROrMemory, dst: XMM); - fn emit_vcvtsi2sd_32(&mut self, src1: XMM, src2: GPROrMemory, dst: XMM); - fn emit_vcvtsi2sd_64(&mut self, src1: XMM, src2: GPROrMemory, dst: XMM); - - fn emit_vblendvps(&mut self, src1: XMM, src2: XMMOrMemory, mask: XMM, dst: XMM); - fn emit_vblendvpd(&mut self, src1: XMM, src2: XMMOrMemory, mask: XMM, dst: XMM); - - fn emit_test_gpr_64(&mut self, reg: GPR); - - fn emit_ud2(&mut self); - fn emit_ud1_payload(&mut self, payload: u8); - fn emit_ret(&mut self); - fn emit_call_label(&mut self, label: Label); - fn emit_call_location(&mut self, loc: Location); - - fn emit_call_register(&mut self, reg: GPR); - - fn emit_bkpt(&mut self); - - fn emit_host_redirection(&mut self, target: GPR); + fn emit_nop_n(&mut self, n: usize) -> Result<(), CodegenError>; + + fn emit_mov(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_lea(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_lea_label(&mut self, label: Label, dst: Location) -> Result<(), CodegenError>; + fn emit_cdq(&mut self) -> Result<(), CodegenError>; + fn emit_cqo(&mut self) -> Result<(), CodegenError>; + fn emit_xor(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_jmp(&mut self, condition: Condition, label: Label) -> Result<(), CodegenError>; + fn emit_jmp_location(&mut self, loc: Location) -> Result<(), CodegenError>; + fn emit_set(&mut self, condition: Condition, dst: GPR) -> Result<(), CodegenError>; + fn emit_push(&mut self, sz: Size, src: Location) -> Result<(), CodegenError>; + fn emit_pop(&mut self, sz: Size, dst: Location) -> Result<(), CodegenError>; + fn emit_cmp(&mut self, sz: Size, left: Location, right: Location) -> Result<(), CodegenError>; + fn emit_add(&mut self, sz: Size, 
src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_sub(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_neg(&mut self, sz: Size, value: Location) -> Result<(), CodegenError>; + fn emit_imul(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_imul_imm32_gpr64(&mut self, src: u32, dst: GPR) -> Result<(), CodegenError>; + fn emit_div(&mut self, sz: Size, divisor: Location) -> Result<(), CodegenError>; + fn emit_idiv(&mut self, sz: Size, divisor: Location) -> Result<(), CodegenError>; + fn emit_shl(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_shr(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_sar(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_rol(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_ror(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_and(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_test(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_or(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_bsr(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_bsf(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_popcnt(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_movzx( + &mut self, + sz_src: Size, + src: Location, + sz_dst: Size, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_movsx( + &mut self, + sz_src: Size, + src: Location, + sz_dst: Size, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_xchg(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError>; + fn emit_lock_xadd( + 
&mut self, + sz: Size, + src: Location, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_lock_cmpxchg( + &mut self, + sz: Size, + src: Location, + dst: Location, + ) -> Result<(), CodegenError>; + fn emit_rep_stosq(&mut self) -> Result<(), CodegenError>; + + fn emit_btc_gpr_imm8_32(&mut self, src: u8, dst: GPR) -> Result<(), CodegenError>; + fn emit_btc_gpr_imm8_64(&mut self, src: u8, dst: GPR) -> Result<(), CodegenError>; + + fn emit_cmovae_gpr_32(&mut self, src: GPR, dst: GPR) -> Result<(), CodegenError>; + fn emit_cmovae_gpr_64(&mut self, src: GPR, dst: GPR) -> Result<(), CodegenError>; + + fn emit_vmovaps(&mut self, src: XMMOrMemory, dst: XMMOrMemory) -> Result<(), CodegenError>; + fn emit_vmovapd(&mut self, src: XMMOrMemory, dst: XMMOrMemory) -> Result<(), CodegenError>; + fn emit_vxorps(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + fn emit_vxorpd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + + fn emit_vaddss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + fn emit_vaddsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + fn emit_vsubss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + fn emit_vsubsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + fn emit_vmulss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + fn emit_vmulsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + fn emit_vdivss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + fn emit_vdivsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + fn emit_vmaxss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + fn emit_vmaxsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + fn emit_vminss(&mut self, src1: 
XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + fn emit_vminsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + + fn emit_vcmpeqss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) + -> Result<(), CodegenError>; + fn emit_vcmpeqsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) + -> Result<(), CodegenError>; + + fn emit_vcmpneqss( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + fn emit_vcmpneqsd( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + + fn emit_vcmpltss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) + -> Result<(), CodegenError>; + fn emit_vcmpltsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) + -> Result<(), CodegenError>; + + fn emit_vcmpless(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) + -> Result<(), CodegenError>; + fn emit_vcmplesd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) + -> Result<(), CodegenError>; + + fn emit_vcmpgtss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) + -> Result<(), CodegenError>; + fn emit_vcmpgtsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) + -> Result<(), CodegenError>; + + fn emit_vcmpgess(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) + -> Result<(), CodegenError>; + fn emit_vcmpgesd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) + -> Result<(), CodegenError>; + + fn emit_vcmpunordss( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + fn emit_vcmpunordsd( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + + fn emit_vcmpordss( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + fn emit_vcmpordsd( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + + fn emit_vsqrtss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + fn emit_vsqrtsd(&mut self, 
src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + + fn emit_vroundss_nearest( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + fn emit_vroundss_floor( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + fn emit_vroundss_ceil( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + fn emit_vroundss_trunc( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + fn emit_vroundsd_nearest( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + fn emit_vroundsd_floor( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + fn emit_vroundsd_ceil( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + fn emit_vroundsd_trunc( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + + fn emit_vcvtss2sd( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + fn emit_vcvtsd2ss( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + + fn emit_ucomiss(&mut self, src: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + fn emit_ucomisd(&mut self, src: XMMOrMemory, dst: XMM) -> Result<(), CodegenError>; + + fn emit_cvttss2si_32(&mut self, src: XMMOrMemory, dst: GPR) -> Result<(), CodegenError>; + fn emit_cvttss2si_64(&mut self, src: XMMOrMemory, dst: GPR) -> Result<(), CodegenError>; + fn emit_cvttsd2si_32(&mut self, src: XMMOrMemory, dst: GPR) -> Result<(), CodegenError>; + fn emit_cvttsd2si_64(&mut self, src: XMMOrMemory, dst: GPR) -> Result<(), CodegenError>; + + fn emit_vcvtsi2ss_32( + &mut self, + src1: XMM, + src2: GPROrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + fn emit_vcvtsi2ss_64( + &mut self, + src1: XMM, + src2: GPROrMemory, + dst: XMM, + ) -> 
Result<(), CodegenError>; + fn emit_vcvtsi2sd_32( + &mut self, + src1: XMM, + src2: GPROrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + fn emit_vcvtsi2sd_64( + &mut self, + src1: XMM, + src2: GPROrMemory, + dst: XMM, + ) -> Result<(), CodegenError>; + + fn emit_vblendvps( + &mut self, + src1: XMM, + src2: XMMOrMemory, + mask: XMM, + dst: XMM, + ) -> Result<(), CodegenError>; + fn emit_vblendvpd( + &mut self, + src1: XMM, + src2: XMMOrMemory, + mask: XMM, + dst: XMM, + ) -> Result<(), CodegenError>; + + fn emit_test_gpr_64(&mut self, reg: GPR) -> Result<(), CodegenError>; + + fn emit_ud2(&mut self) -> Result<(), CodegenError>; + fn emit_ud1_payload(&mut self, payload: u8) -> Result<(), CodegenError>; + fn emit_ret(&mut self) -> Result<(), CodegenError>; + fn emit_call_label(&mut self, label: Label) -> Result<(), CodegenError>; + fn emit_call_location(&mut self, loc: Location) -> Result<(), CodegenError>; + + fn emit_call_register(&mut self, reg: GPR) -> Result<(), CodegenError>; + + fn emit_bkpt(&mut self) -> Result<(), CodegenError>; + + fn emit_host_redirection(&mut self, target: GPR) -> Result<(), CodegenError>; fn arch_has_itruncf(&self) -> bool { false } - fn arch_emit_i32_trunc_sf32(&mut self, _src: XMM, _dst: GPR) { - unimplemented!() + fn arch_emit_i32_trunc_sf32(&mut self, _src: XMM, _dst: GPR) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_i32_trunc_sf32 unimplemented") } - fn arch_emit_i32_trunc_sf64(&mut self, _src: XMM, _dst: GPR) { - unimplemented!() + fn arch_emit_i32_trunc_sf64(&mut self, _src: XMM, _dst: GPR) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_i32_trunc_sf64 unimplemented") } - fn arch_emit_i32_trunc_uf32(&mut self, _src: XMM, _dst: GPR) { - unimplemented!() + fn arch_emit_i32_trunc_uf32(&mut self, _src: XMM, _dst: GPR) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_i32_trunc_uf32 unimplemented") } - fn arch_emit_i32_trunc_uf64(&mut self, _src: XMM, _dst: GPR) {
- unimplemented!() + fn arch_emit_i32_trunc_uf64(&mut self, _src: XMM, _dst: GPR) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_i32_trunc_uf64 unimplemented") } - fn arch_emit_i64_trunc_sf32(&mut self, _src: XMM, _dst: GPR) { - unimplemented!() + fn arch_emit_i64_trunc_sf32(&mut self, _src: XMM, _dst: GPR) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_i64_trunc_sf32 unimplemented") } - fn arch_emit_i64_trunc_sf64(&mut self, _src: XMM, _dst: GPR) { - unimplemented!() + fn arch_emit_i64_trunc_sf64(&mut self, _src: XMM, _dst: GPR) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_i64_trunc_sf64 unimplemented") } - fn arch_emit_i64_trunc_uf32(&mut self, _src: XMM, _dst: GPR) { - unimplemented!() + fn arch_emit_i64_trunc_uf32(&mut self, _src: XMM, _dst: GPR) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_i64_trunc_uf32 unimplemented") } - fn arch_emit_i64_trunc_uf64(&mut self, _src: XMM, _dst: GPR) { - unimplemented!() + fn arch_emit_i64_trunc_uf64(&mut self, _src: XMM, _dst: GPR) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_i64_trunc_uf64 unimplemented") } fn arch_has_fconverti(&self) -> bool { false } - fn arch_emit_f32_convert_si32(&mut self, _src: GPR, _dst: XMM) { - unimplemented!() + fn arch_emit_f32_convert_si32(&mut self, _src: GPR, _dst: XMM) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_f32_convert_si32 unimplemented") } - fn arch_emit_f32_convert_si64(&mut self, _src: GPR, _dst: XMM) { - unimplemented!() + fn arch_emit_f32_convert_si64(&mut self, _src: GPR, _dst: XMM) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_f32_convert_si64 unimplemented") } - fn arch_emit_f32_convert_ui32(&mut self, _src: GPR, _dst: XMM) { - unimplemented!() + fn arch_emit_f32_convert_ui32(&mut self, _src: GPR, _dst: XMM) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_f32_convert_ui32 unimplemented") } -
fn arch_emit_f32_convert_ui64(&mut self, _src: GPR, _dst: XMM) { - unimplemented!() + fn arch_emit_f32_convert_ui64(&mut self, _src: GPR, _dst: XMM) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_f32_convert_ui64 unimplemented") } - fn arch_emit_f64_convert_si32(&mut self, _src: GPR, _dst: XMM) { - unimplemented!() + fn arch_emit_f64_convert_si32(&mut self, _src: GPR, _dst: XMM) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_f64_convert_si32 unimplemented") } - fn arch_emit_f64_convert_si64(&mut self, _src: GPR, _dst: XMM) { - unimplemented!() + fn arch_emit_f64_convert_si64(&mut self, _src: GPR, _dst: XMM) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_f64_convert_si64 unimplemented") } - fn arch_emit_f64_convert_ui32(&mut self, _src: GPR, _dst: XMM) { - unimplemented!() + fn arch_emit_f64_convert_ui32(&mut self, _src: GPR, _dst: XMM) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_f64_convert_ui32 unimplemented") } - fn arch_emit_f64_convert_ui64(&mut self, _src: GPR, _dst: XMM) { - unimplemented!() + fn arch_emit_f64_convert_ui64(&mut self, _src: GPR, _dst: XMM) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_f64_convert_ui64 unimplemented") } fn arch_has_fneg(&self) -> bool { false } - fn arch_emit_f32_neg(&mut self, _src: XMM, _dst: XMM) { - unimplemented!() + fn arch_emit_f32_neg(&mut self, _src: XMM, _dst: XMM) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_f32_neg unimplemented") } - fn arch_emit_f64_neg(&mut self, _src: XMM, _dst: XMM) { - unimplemented!() + fn arch_emit_f64_neg(&mut self, _src: XMM, _dst: XMM) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_f64_neg unimplemented") } fn arch_has_xzcnt(&self) -> bool { false } - fn arch_emit_lzcnt(&mut self, _sz: Size, _src: Location, _dst: Location) { - unimplemented!() + fn arch_emit_lzcnt( + &mut self, + _sz: Size, + _src: Location, + _dst: Location, + ) 
-> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_lzcnt unimplemented") } - fn arch_emit_tzcnt(&mut self, _sz: Size, _src: Location, _dst: Location) { - unimplemented!() + fn arch_emit_tzcnt( + &mut self, + _sz: Size, + _src: Location, + _dst: Location, + ) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_tzcnt unimplemented") } fn arch_supports_canonicalize_nan(&self) -> bool { @@ -293,17 +451,22 @@ pub trait EmitterX64 { false } - fn arch_emit_indirect_call_with_trampoline(&mut self, _loc: Location) { - unimplemented!() + fn arch_emit_indirect_call_with_trampoline( + &mut self, + _loc: Location, + ) -> Result<(), CodegenError> { + codegen_error!("singlepass arch_emit_indirect_call_with_trampoline unimplemented") } // Emits entry trampoline just before the real function. - fn arch_emit_entry_trampoline(&mut self) {} + fn arch_emit_entry_trampoline(&mut self) -> Result<(), CodegenError> { + Ok(()) + } // Byte offset from the beginning of a `mov Imm64, GPR` instruction to the imm64 value. // Required to support emulation on Aarch64. 
- fn arch_mov64_imm_offset(&self) -> usize { - unimplemented!() + fn arch_mov64_imm_offset(&self) -> Result { + codegen_error!("singlepass arch_mov64_imm_offset unimplemented") } } @@ -771,7 +934,7 @@ impl EmitterX64 for AssemblerX64 { 5 } - fn finalize_function(&mut self) { + fn finalize_function(&mut self) -> Result<(), CodegenError> { dynasm!( self ; const_neg_one_32: @@ -781,27 +944,32 @@ impl EmitterX64 for AssemblerX64 { ; const_pos_one_32: ; .dword 1 ); + Ok(()) } - fn emit_u64(&mut self, x: u64) { + fn emit_u64(&mut self, x: u64) -> Result<(), CodegenError> { self.push_u64(x); + Ok(()) } - fn emit_bytes(&mut self, bytes: &[u8]) { + fn emit_bytes(&mut self, bytes: &[u8]) -> Result<(), CodegenError> { for &b in bytes { self.push(b); } + Ok(()) } - fn emit_label(&mut self, label: Label) { + fn emit_label(&mut self, label: Label) -> Result<(), CodegenError> { dynasm!(self ; => label); + Ok(()) } - fn emit_nop(&mut self) { + fn emit_nop(&mut self) -> Result<(), CodegenError> { dynasm!(self ; nop); + Ok(()) } - fn emit_nop_n(&mut self, mut n: usize) { + fn emit_nop_n(&mut self, mut n: usize) -> Result<(), CodegenError> { /* 1 90H NOP 2 66 90H 66 NOP @@ -815,7 +983,7 @@ impl EmitterX64 for AssemblerX64 { */ while n >= 9 { n -= 9; - self.emit_bytes(&[0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00]); + self.emit_bytes(&[0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00])?; // 9-byte nop } let seq: &[u8] = match n { @@ -828,16 +996,16 @@ impl EmitterX64 for AssemblerX64 { 6 => &[0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00], 7 => &[0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00], 8 => &[0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00], - _ => unreachable!(), + _ => codegen_error!("singlepass emit_nop_n unreachable"), }; - self.emit_bytes(seq); + self.emit_bytes(seq) } - fn emit_mov(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_mov(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { // fast path if let (Location::Imm32(0), 
Location::GPR(x)) = (src, dst) { dynasm!(self ; xor Rd(x as u8), Rd(x as u8)); - return; + return Ok(()); } binop_all_nofp!(mov, self, sz, src, dst, { @@ -914,12 +1082,13 @@ impl EmitterX64 for AssemblerX64 { dynasm!(self ; movq Rx(dst as u8), Rx(src as u8)); } - _ => panic!("singlepass can't emit MOV {:?} {:?} {:?}", sz, src, dst), + _ => codegen_error!("singlepass can't emit MOV {:?} {:?} {:?}", sz, src, dst), } }) }); + Ok(()) } - fn emit_lea(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_lea(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S32, Location::Memory(src, disp), Location::GPR(dst)) => { dynasm!(self ; lea Rd(dst as u8), [Rq(src as u8) + disp]); @@ -961,29 +1130,34 @@ impl EmitterX64 for AssemblerX64 { } }; } - _ => panic!("singlepass can't emit LEA {:?} {:?} {:?}", sz, src, dst), + _ => codegen_error!("singlepass can't emit LEA {:?} {:?} {:?}", sz, src, dst), } + Ok(()) } - fn emit_lea_label(&mut self, label: Label, dst: Location) { + fn emit_lea_label(&mut self, label: Label, dst: Location) -> Result<(), CodegenError> { match dst { Location::GPR(x) => { dynasm!(self ; lea Rq(x as u8), [=>label]); } - _ => panic!("singlepass can't emit LEA label={:?} {:?}", label, dst), + _ => codegen_error!("singlepass can't emit LEA label={:?} {:?}", label, dst), } + Ok(()) } - fn emit_cdq(&mut self) { + fn emit_cdq(&mut self) -> Result<(), CodegenError> { dynasm!(self ; cdq); + Ok(()) } - fn emit_cqo(&mut self) { + fn emit_cqo(&mut self) -> Result<(), CodegenError> { dynasm!(self ; cqo); + Ok(()) } - fn emit_xor(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_xor(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { binop_all_nofp!(xor, self, sz, src, dst, { - panic!("singlepass can't emit XOR {:?} {:?} {:?}", sz, src, dst) + codegen_error!("singlepass can't emit XOR {:?} {:?} {:?}", sz, src, dst) }); + Ok(()) } - fn emit_jmp(&mut self, 
condition: Condition, label: Label) { + fn emit_jmp(&mut self, condition: Condition, label: Label) -> Result<(), CodegenError> { match condition { Condition::None => jmp_op!(jmp, self, label), Condition::Above => jmp_op!(ja, self, label), @@ -999,15 +1173,17 @@ impl EmitterX64 for AssemblerX64 { Condition::Signed => jmp_op!(js, self, label), Condition::Carry => jmp_op!(jc, self, label), } + Ok(()) } - fn emit_jmp_location(&mut self, loc: Location) { + fn emit_jmp_location(&mut self, loc: Location) -> Result<(), CodegenError> { match loc { Location::GPR(x) => dynasm!(self ; jmp Rq(x as u8)), Location::Memory(base, disp) => dynasm!(self ; jmp QWORD [Rq(base as u8) + disp]), - _ => panic!("singlepass can't emit JMP {:?}", loc), + _ => codegen_error!("singlepass can't emit JMP {:?}", loc), } + Ok(()) } - fn emit_set(&mut self, condition: Condition, dst: GPR) { + fn emit_set(&mut self, condition: Condition, dst: GPR) -> Result<(), CodegenError> { match condition { Condition::Above => dynasm!(self ; seta Rb(dst as u8)), Condition::AboveEqual => dynasm!(self ; setae Rb(dst as u8)), @@ -1021,29 +1197,32 @@ impl EmitterX64 for AssemblerX64 { Condition::NotEqual => dynasm!(self ; setne Rb(dst as u8)), Condition::Signed => dynasm!(self ; sets Rb(dst as u8)), Condition::Carry => dynasm!(self ; setc Rb(dst as u8)), - _ => panic!("singlepass can't emit SET {:?} {:?}", condition, dst), + _ => codegen_error!("singlepass can't emit SET {:?} {:?}", condition, dst), } + Ok(()) } - fn emit_push(&mut self, sz: Size, src: Location) { + fn emit_push(&mut self, sz: Size, src: Location) -> Result<(), CodegenError> { match (sz, src) { (Size::S64, Location::Imm32(src)) => dynasm!(self ; push src as i32), (Size::S64, Location::GPR(src)) => dynasm!(self ; push Rq(src as u8)), (Size::S64, Location::Memory(src, disp)) => { dynasm!(self ; push QWORD [Rq(src as u8) + disp]) } - _ => panic!("singlepass can't emit PUSH {:?} {:?}", sz, src), + _ => codegen_error!("singlepass can't emit PUSH {:?} 
{:?}", sz, src), } + Ok(()) } - fn emit_pop(&mut self, sz: Size, dst: Location) { + fn emit_pop(&mut self, sz: Size, dst: Location) -> Result<(), CodegenError> { match (sz, dst) { (Size::S64, Location::GPR(dst)) => dynasm!(self ; pop Rq(dst as u8)), (Size::S64, Location::Memory(dst, disp)) => { dynasm!(self ; pop QWORD [Rq(dst as u8) + disp]) } - _ => panic!("singlepass can't emit POP {:?} {:?}", sz, dst), + _ => codegen_error!("singlepass can't emit POP {:?} {:?}", sz, dst), } + Ok(()) } - fn emit_cmp(&mut self, sz: Size, left: Location, right: Location) { + fn emit_cmp(&mut self, sz: Size, left: Location, right: Location) -> Result<(), CodegenError> { // Constant elimination for comparison between consts. // // Only needed for `emit_cmp`, since other binary operators actually write to `right` and `right` must @@ -1063,29 +1242,32 @@ impl EmitterX64 for AssemblerX64 { Ordering::Greater => dynasm!(self ; cmp DWORD [>const_pos_one_32], 0), }, None => binop_all_nofp!(cmp, self, sz, left, right, { - panic!("singlepass can't emit CMP {:?} {:?} {:?}", sz, left, right); + codegen_error!("singlepass can't emit CMP {:?} {:?} {:?}", sz, left, right); }), } + Ok(()) } - fn emit_add(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_add(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { // Fast path if let Location::Imm32(0) = src { - return; + return Ok(()); } binop_all_nofp!(add, self, sz, src, dst, { - panic!("singlepass can't emit ADD {:?} {:?} {:?}", sz, src, dst) + codegen_error!("singlepass can't emit ADD {:?} {:?} {:?}", sz, src, dst) }); + Ok(()) } - fn emit_sub(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_sub(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { // Fast path if let Location::Imm32(0) = src { - return; + return Ok(()); } binop_all_nofp!(sub, self, sz, src, dst, { - panic!("singlepass can't emit SUB {:?} {:?} {:?}", sz, src, dst) + codegen_error!("singlepass can't 
emit SUB {:?} {:?} {:?}", sz, src, dst) }); + Ok(()) } - fn emit_neg(&mut self, sz: Size, value: Location) { + fn emit_neg(&mut self, sz: Size, value: Location) -> Result<(), CodegenError> { match (sz, value) { (Size::S8, Location::GPR(value)) => dynasm!(self ; neg Rb(value as u8)), (Size::S8, Location::Memory(value, disp)) => { @@ -1103,91 +1285,113 @@ impl EmitterX64 for AssemblerX64 { (Size::S64, Location::Memory(value, disp)) => { dynasm!(self ; neg [Rq(value as u8) + disp]) } - _ => panic!("singlepass can't emit NEG {:?} {:?}", sz, value), + _ => codegen_error!("singlepass can't emit NEG {:?} {:?}", sz, value), } + Ok(()) } - fn emit_imul(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_imul(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { binop_gpr_gpr!(imul, self, sz, src, dst, { binop_mem_gpr!(imul, self, sz, src, dst, { - panic!("singlepass can't emit IMUL {:?} {:?} {:?}", sz, src, dst) + codegen_error!("singlepass can't emit IMUL {:?} {:?} {:?}", sz, src, dst) }) }); + Ok(()) } - fn emit_imul_imm32_gpr64(&mut self, src: u32, dst: GPR) { + fn emit_imul_imm32_gpr64(&mut self, src: u32, dst: GPR) -> Result<(), CodegenError> { dynasm!(self ; imul Rq(dst as u8), Rq(dst as u8), src as i32); + Ok(()) } - fn emit_div(&mut self, sz: Size, divisor: Location) { + fn emit_div(&mut self, sz: Size, divisor: Location) -> Result<(), CodegenError> { unop_gpr_or_mem!(div, self, sz, divisor, { - panic!("singlepass can't emit DIV {:?} {:?}", sz, divisor) + codegen_error!("singlepass can't emit DIV {:?} {:?}", sz, divisor) }); + Ok(()) } - fn emit_idiv(&mut self, sz: Size, divisor: Location) { + fn emit_idiv(&mut self, sz: Size, divisor: Location) -> Result<(), CodegenError> { unop_gpr_or_mem!(idiv, self, sz, divisor, { - panic!("singlepass can't emit IDIV {:?} {:?}", sz, divisor) + codegen_error!("singlepass can't emit IDIV {:?} {:?}", sz, divisor) }); + Ok(()) } - fn emit_shl(&mut self, sz: Size, src: Location, dst: Location) { 
+ fn emit_shl(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { binop_shift!(shl, self, sz, src, dst, { - panic!("singlepass can't emit SHL {:?} {:?} {:?}", sz, src, dst) + codegen_error!("singlepass can't emit SHL {:?} {:?} {:?}", sz, src, dst) }); + Ok(()) } - fn emit_shr(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_shr(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { binop_shift!(shr, self, sz, src, dst, { - panic!("singlepass can't emit SHR {:?} {:?} {:?}", sz, src, dst) + codegen_error!("singlepass can't emit SHR {:?} {:?} {:?}", sz, src, dst) }); + Ok(()) } - fn emit_sar(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_sar(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { binop_shift!(sar, self, sz, src, dst, { - panic!("singlepass can't emit SAR {:?} {:?} {:?}", sz, src, dst) + codegen_error!("singlepass can't emit SAR {:?} {:?} {:?}", sz, src, dst) }); + Ok(()) } - fn emit_rol(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_rol(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { binop_shift!(rol, self, sz, src, dst, { - panic!("singlepass can't emit ROL {:?} {:?} {:?}", sz, src, dst) + codegen_error!("singlepass can't emit ROL {:?} {:?} {:?}", sz, src, dst) }); + Ok(()) } - fn emit_ror(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_ror(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { binop_shift!(ror, self, sz, src, dst, { - panic!("singlepass can't emit ROR {:?} {:?} {:?}", sz, src, dst) + codegen_error!("singlepass can't emit ROR {:?} {:?} {:?}", sz, src, dst) }); + Ok(()) } - fn emit_and(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_and(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { binop_all_nofp!(and, self, sz, src, dst, { - panic!("singlepass can't emit AND {:?} {:?} {:?}", sz, src, dst) 
+ codegen_error!("singlepass can't emit AND {:?} {:?} {:?}", sz, src, dst) }); + Ok(()) } - fn emit_test(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_test(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { binop_all_nofp!(test, self, sz, src, dst, { - panic!("singlepass can't emit TEST {:?} {:?} {:?}", sz, src, dst) + codegen_error!("singlepass can't emit TEST {:?} {:?} {:?}", sz, src, dst) }); + Ok(()) } - fn emit_or(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_or(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { binop_all_nofp!(or, self, sz, src, dst, { - panic!("singlepass can't emit OR {:?} {:?} {:?}", sz, src, dst) + codegen_error!("singlepass can't emit OR {:?} {:?} {:?}", sz, src, dst) }); + Ok(()) } - fn emit_bsr(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_bsr(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { binop_gpr_gpr!(bsr, self, sz, src, dst, { binop_mem_gpr!(bsr, self, sz, src, dst, { - panic!("singlepass can't emit BSR {:?} {:?} {:?}", sz, src, dst) + codegen_error!("singlepass can't emit BSR {:?} {:?} {:?}", sz, src, dst) }) }); + Ok(()) } - fn emit_bsf(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_bsf(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { binop_gpr_gpr!(bsf, self, sz, src, dst, { binop_mem_gpr!(bsf, self, sz, src, dst, { - panic!("singlepass can't emit BSF {:?} {:?} {:?}", sz, src, dst) + codegen_error!("singlepass can't emit BSF {:?} {:?} {:?}", sz, src, dst) }) }); + Ok(()) } - fn emit_popcnt(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_popcnt(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { binop_gpr_gpr!(popcnt, self, sz, src, dst, { binop_mem_gpr!(popcnt, self, sz, src, dst, { - panic!("singlepass can't emit POPCNT {:?} {:?} {:?}", sz, src, dst) + codegen_error!("singlepass can't emit 
POPCNT {:?} {:?} {:?}", sz, src, dst) }) }); - } - fn emit_movzx(&mut self, sz_src: Size, src: Location, sz_dst: Size, dst: Location) { + Ok(()) + } + fn emit_movzx( + &mut self, + sz_src: Size, + src: Location, + sz_dst: Size, + dst: Location, + ) -> Result<(), CodegenError> { match (sz_src, src, sz_dst, dst) { (Size::S8, Location::GPR(src), Size::S32, Location::GPR(dst)) => { dynasm!(self ; movzx Rd(dst as u8), Rb(src as u8)); @@ -1214,14 +1418,24 @@ impl EmitterX64 for AssemblerX64 { dynasm!(self ; movzx Rq(dst as u8), WORD [Rq(src as u8) + disp]); } _ => { - panic!( + codegen_error!( "singlepass can't emit MOVZX {:?} {:?} {:?} {:?}", - sz_src, src, sz_dst, dst + sz_src, + src, + sz_dst, + dst ) } } + Ok(()) } - fn emit_movsx(&mut self, sz_src: Size, src: Location, sz_dst: Size, dst: Location) { + fn emit_movsx( + &mut self, + sz_src: Size, + src: Location, + sz_dst: Size, + dst: Location, + ) -> Result<(), CodegenError> { match (sz_src, src, sz_dst, dst) { (Size::S8, Location::GPR(src), Size::S32, Location::GPR(dst)) => { dynasm!(self ; movsx Rd(dst as u8), Rb(src as u8)); @@ -1254,15 +1468,19 @@ impl EmitterX64 for AssemblerX64 { dynasm!(self ; movsx Rq(dst as u8), DWORD [Rq(src as u8) + disp]); } _ => { - panic!( + codegen_error!( "singlepass can't emit MOVSX {:?} {:?} {:?} {:?}", - sz_src, src, sz_dst, dst + sz_src, + src, + sz_dst, + dst ) } } + Ok(()) } - fn emit_xchg(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_xchg(&mut self, sz: Size, src: Location, dst: Location) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S8, Location::GPR(src), Location::GPR(dst)) => { dynasm!(self ; xchg Rb(dst as u8), Rb(src as u8)); @@ -1300,11 +1518,17 @@ impl EmitterX64 for AssemblerX64 { (Size::S64, Location::GPR(src), Location::Memory(dst, disp)) => { dynasm!(self ; xchg [Rq(dst as u8) + disp], Rq(src as u8)); } - _ => panic!("singlepass can't emit XCHG {:?} {:?} {:?}", sz, src, dst), + _ => codegen_error!("singlepass can't emit XCHG {:?} {:?} 
{:?}", sz, src, dst), } + Ok(()) } - fn emit_lock_xadd(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_lock_xadd( + &mut self, + sz: Size, + src: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S8, Location::GPR(src), Location::Memory(dst, disp)) => { dynasm!(self ; lock xadd [Rq(dst as u8) + disp], Rb(src as u8)); @@ -1318,14 +1542,22 @@ impl EmitterX64 for AssemblerX64 { (Size::S64, Location::GPR(src), Location::Memory(dst, disp)) => { dynasm!(self ; lock xadd [Rq(dst as u8) + disp], Rq(src as u8)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit LOCK XADD {:?} {:?} {:?}", - sz, src, dst + sz, + src, + dst ), } + Ok(()) } - fn emit_lock_cmpxchg(&mut self, sz: Size, src: Location, dst: Location) { + fn emit_lock_cmpxchg( + &mut self, + sz: Size, + src: Location, + dst: Location, + ) -> Result<(), CodegenError> { match (sz, src, dst) { (Size::S8, Location::GPR(src), Location::Memory(dst, disp)) => { dynasm!(self ; lock cmpxchg [Rq(dst as u8) + disp], Rb(src as u8)); @@ -1339,33 +1571,41 @@ impl EmitterX64 for AssemblerX64 { (Size::S64, Location::GPR(src), Location::Memory(dst, disp)) => { dynasm!(self ; lock cmpxchg [Rq(dst as u8) + disp], Rq(src as u8)); } - _ => panic!( + _ => codegen_error!( "singlepass can't emit LOCK CMPXCHG {:?} {:?} {:?}", - sz, src, dst + sz, + src, + dst ), } + Ok(()) } - fn emit_rep_stosq(&mut self) { + fn emit_rep_stosq(&mut self) -> Result<(), CodegenError> { dynasm!(self ; rep stosq); + Ok(()) } - fn emit_btc_gpr_imm8_32(&mut self, src: u8, dst: GPR) { + fn emit_btc_gpr_imm8_32(&mut self, src: u8, dst: GPR) -> Result<(), CodegenError> { dynasm!(self ; btc Rd(dst as u8), BYTE src as i8); + Ok(()) } - fn emit_btc_gpr_imm8_64(&mut self, src: u8, dst: GPR) { + fn emit_btc_gpr_imm8_64(&mut self, src: u8, dst: GPR) -> Result<(), CodegenError> { dynasm!(self ; btc Rq(dst as u8), BYTE src as i8); + Ok(()) } - fn emit_cmovae_gpr_32(&mut self, src: GPR, dst: GPR) { + fn 
emit_cmovae_gpr_32(&mut self, src: GPR, dst: GPR) -> Result<(), CodegenError> { dynasm!(self ; cmovae Rd(dst as u8), Rd(src as u8)); + Ok(()) } - fn emit_cmovae_gpr_64(&mut self, src: GPR, dst: GPR) { + fn emit_cmovae_gpr_64(&mut self, src: GPR, dst: GPR) -> Result<(), CodegenError> { dynasm!(self ; cmovae Rq(dst as u8), Rq(src as u8)); + Ok(()) } - fn emit_vmovaps(&mut self, src: XMMOrMemory, dst: XMMOrMemory) { + fn emit_vmovaps(&mut self, src: XMMOrMemory, dst: XMMOrMemory) -> Result<(), CodegenError> { match (src, dst) { (XMMOrMemory::XMM(src), XMMOrMemory::XMM(dst)) => { dynasm!(self ; movaps Rx(dst as u8), Rx(src as u8)) @@ -1376,11 +1616,12 @@ impl EmitterX64 for AssemblerX64 { (XMMOrMemory::XMM(src), XMMOrMemory::Memory(base, disp)) => { dynasm!(self ; movaps [Rq(base as u8) + disp], Rx(src as u8)) } - _ => panic!("singlepass can't emit VMOVAPS {:?} {:?}", src, dst), + _ => codegen_error!("singlepass can't emit VMOVAPS {:?} {:?}", src, dst), }; + Ok(()) } - fn emit_vmovapd(&mut self, src: XMMOrMemory, dst: XMMOrMemory) { + fn emit_vmovapd(&mut self, src: XMMOrMemory, dst: XMMOrMemory) -> Result<(), CodegenError> { match (src, dst) { (XMMOrMemory::XMM(src), XMMOrMemory::XMM(dst)) => { dynasm!(self ; movapd Rx(dst as u8), Rx(src as u8)) @@ -1391,248 +1632,378 @@ impl EmitterX64 for AssemblerX64 { (XMMOrMemory::XMM(src), XMMOrMemory::Memory(base, disp)) => { dynasm!(self ; movapd [Rq(base as u8) + disp], Rx(src as u8)) } - _ => panic!("singlepass can't emit VMOVAPD {:?} {:?}", src, dst), + _ => codegen_error!("singlepass can't emit VMOVAPD {:?} {:?}", src, dst), }; + Ok(()) } - fn emit_vxorps(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vxorps(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vxorps)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(xorps)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vxorpd(&mut 
self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vxorpd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vxorpd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(xorpd)(self, Precision::Double, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vaddss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vaddss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vaddss)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(addss)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vaddsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vaddsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vaddsd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(addsd)(self, Precision::Double, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vsubss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vsubss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vsubss)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(subss)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vsubsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vsubsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vsubsd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(subsd)(self, Precision::Double, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vmulss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vmulss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match 
self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vmulss)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(mulss)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vmulsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vmulsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vmulsd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(mulsd)(self, Precision::Double, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vdivss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vdivss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vdivss)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(divss)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vdivsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vdivsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vdivsd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(divsd)(self, Precision::Double, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vmaxss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vmaxss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vmaxss)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(maxss)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vmaxsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vmaxsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vmaxsd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(maxsd)(self, Precision::Double, 
src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vminss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vminss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vminss)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(minss)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vminsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vminsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vminsd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(minsd)(self, Precision::Double, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcmpeqss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcmpeqss( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcmpeqss)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cmpss, 0)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcmpeqsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcmpeqsd( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcmpeqsd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cmpsd, 0)(self, Precision::Double, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcmpneqss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcmpneqss( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcmpneqss)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cmpss, 4)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcmpneqsd(&mut self, src1: XMM, src2: 
XMMOrMemory, dst: XMM) { + fn emit_vcmpneqsd( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcmpneqsd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cmpsd, 4)(self, Precision::Double, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcmpltss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcmpltss( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcmpltss)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cmpss, 1)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcmpltsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcmpltsd( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcmpltsd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cmpsd, 1)(self, Precision::Double, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcmpless(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcmpless( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcmpless)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cmpss, 2)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcmplesd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcmplesd( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcmplesd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cmpsd, 2)(self, Precision::Double, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcmpgtss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcmpgtss( + 
&mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcmpgtss)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cmpss, 6)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcmpgtsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcmpgtsd( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcmpgtsd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cmpsd, 6)(self, Precision::Double, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcmpgess(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcmpgess( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcmpgess)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cmpss, 5)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcmpgesd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcmpgesd( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcmpgesd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cmpsd, 5)(self, Precision::Double, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcmpunordss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcmpunordss( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcmpunordss)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cmpss, 3)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcmpunordsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcmpunordsd( + &mut self, + src1: XMM, + src2: 
XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcmpunordsd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cmpsd, 3)(self, Precision::Double, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcmpordss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcmpordss( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcmpordss)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cmpss, 7)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcmpordsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcmpordsd( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcmpordsd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cmpsd, 7)(self, Precision::Double, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vsqrtss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vsqrtss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vsqrtss)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(sqrtss)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vsqrtsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vsqrtsd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vsqrtsd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(sqrtsd)(self, Precision::Double, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcvtss2sd(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcvtss2sd( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match 
self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcvtss2sd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cvtss2sd)(self, Precision::Single, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vcvtsd2ss(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vcvtsd2ss( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_fn!(vcvtsd2ss)(self, src1, src2, dst), Some(CpuFeature::SSE42) => sse_fn!(cvtsd2ss)(self, Precision::Double, src1, src2, dst), _ => {} } + Ok(()) } - fn emit_vroundss_nearest(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vroundss_nearest( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_round_fn!(vroundss, 0)(self, src1, src2, dst), Some(CpuFeature::SSE42) => { @@ -1640,8 +2011,14 @@ impl EmitterX64 for AssemblerX64 { } _ => {} } + Ok(()) } - fn emit_vroundsd_nearest(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vroundsd_nearest( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_round_fn!(vroundsd, 0)(self, src1, src2, dst), Some(CpuFeature::SSE42) => { @@ -1649,8 +2026,14 @@ impl EmitterX64 for AssemblerX64 { } _ => {} } + Ok(()) } - fn emit_vroundss_floor(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vroundss_floor( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_round_fn!(vroundss, 1)(self, src1, src2, dst), Some(CpuFeature::SSE42) => { @@ -1658,8 +2041,14 @@ impl EmitterX64 for AssemblerX64 { } _ => {} } + Ok(()) } - fn emit_vroundsd_floor(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vroundsd_floor( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, 
+ ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_round_fn!(vroundsd, 1)(self, src1, src2, dst), Some(CpuFeature::SSE42) => { @@ -1667,8 +2056,14 @@ impl EmitterX64 for AssemblerX64 { } _ => {} } + Ok(()) } - fn emit_vroundss_ceil(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vroundss_ceil( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_round_fn!(vroundss, 2)(self, src1, src2, dst), Some(CpuFeature::SSE42) => { @@ -1676,8 +2071,14 @@ impl EmitterX64 for AssemblerX64 { } _ => {} } + Ok(()) } - fn emit_vroundsd_ceil(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vroundsd_ceil( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_round_fn!(vroundsd, 2)(self, src1, src2, dst), Some(CpuFeature::SSE42) => { @@ -1685,8 +2086,14 @@ impl EmitterX64 for AssemblerX64 { } _ => {} } + Ok(()) } - fn emit_vroundss_trunc(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vroundss_trunc( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_round_fn!(vroundss, 3)(self, src1, src2, dst), Some(CpuFeature::SSE42) => { @@ -1694,8 +2101,14 @@ impl EmitterX64 for AssemblerX64 { } _ => {} } + Ok(()) } - fn emit_vroundsd_trunc(&mut self, src1: XMM, src2: XMMOrMemory, dst: XMM) { + fn emit_vroundsd_trunc( + &mut self, + src1: XMM, + src2: XMMOrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_round_fn!(vroundsd, 3)(self, src1, src2, dst), Some(CpuFeature::SSE42) => { @@ -1703,8 +2116,14 @@ impl EmitterX64 for AssemblerX64 { } _ => {} } + Ok(()) } - fn emit_vcvtsi2ss_32(&mut self, src1: XMM, src2: GPROrMemory, dst: XMM) { + fn emit_vcvtsi2ss_32( + &mut 
self, + src1: XMM, + src2: GPROrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_i2f_32_fn!(vcvtsi2ss)(self, src1, src2, dst), Some(CpuFeature::SSE42) => { @@ -1712,8 +2131,14 @@ impl EmitterX64 for AssemblerX64 { } _ => {} } + Ok(()) } - fn emit_vcvtsi2sd_32(&mut self, src1: XMM, src2: GPROrMemory, dst: XMM) { + fn emit_vcvtsi2sd_32( + &mut self, + src1: XMM, + src2: GPROrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_i2f_32_fn!(vcvtsi2sd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => { @@ -1721,8 +2146,14 @@ impl EmitterX64 for AssemblerX64 { } _ => {} } + Ok(()) } - fn emit_vcvtsi2ss_64(&mut self, src1: XMM, src2: GPROrMemory, dst: XMM) { + fn emit_vcvtsi2ss_64( + &mut self, + src1: XMM, + src2: GPROrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_i2f_64_fn!(vcvtsi2ss)(self, src1, src2, dst), Some(CpuFeature::SSE42) => { @@ -1730,8 +2161,14 @@ impl EmitterX64 for AssemblerX64 { } _ => {} } + Ok(()) } - fn emit_vcvtsi2sd_64(&mut self, src1: XMM, src2: GPROrMemory, dst: XMM) { + fn emit_vcvtsi2sd_64( + &mut self, + src1: XMM, + src2: GPROrMemory, + dst: XMM, + ) -> Result<(), CodegenError> { match self.get_simd_arch() { Some(CpuFeature::AVX) => avx_i2f_64_fn!(vcvtsi2sd)(self, src1, src2, dst), Some(CpuFeature::SSE42) => { @@ -1739,9 +2176,16 @@ impl EmitterX64 for AssemblerX64 { } _ => {} } + Ok(()) } - fn emit_vblendvps(&mut self, src1: XMM, src2: XMMOrMemory, mask: XMM, dst: XMM) { + fn emit_vblendvps( + &mut self, + src1: XMM, + src2: XMMOrMemory, + mask: XMM, + dst: XMM, + ) -> Result<(), CodegenError> { // this implementation works only for sse 4.1 and greater match self.get_simd_arch() { Some(CpuFeature::AVX) => match src2 { @@ -1765,9 +2209,16 @@ impl EmitterX64 for AssemblerX64 { }, _ => {} } + Ok(()) } - fn emit_vblendvpd(&mut self, src1: XMM, src2: XMMOrMemory, 
mask: XMM, dst: XMM) { + fn emit_vblendvpd( + &mut self, + src1: XMM, + src2: XMMOrMemory, + mask: XMM, + dst: XMM, + ) -> Result<(), CodegenError> { // this implementation works only for sse 4.1 and greater match self.get_simd_arch() { Some(CpuFeature::AVX) => match src2 { @@ -1791,101 +2242,116 @@ impl EmitterX64 for AssemblerX64 { }, _ => {} } + Ok(()) } - fn emit_ucomiss(&mut self, src: XMMOrMemory, dst: XMM) { + fn emit_ucomiss(&mut self, src: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match src { XMMOrMemory::XMM(x) => dynasm!(self ; ucomiss Rx(dst as u8), Rx(x as u8)), XMMOrMemory::Memory(base, disp) => { dynasm!(self ; ucomiss Rx(dst as u8), [Rq(base as u8) + disp]) } } + Ok(()) } - fn emit_ucomisd(&mut self, src: XMMOrMemory, dst: XMM) { + fn emit_ucomisd(&mut self, src: XMMOrMemory, dst: XMM) -> Result<(), CodegenError> { match src { XMMOrMemory::XMM(x) => dynasm!(self ; ucomisd Rx(dst as u8), Rx(x as u8)), XMMOrMemory::Memory(base, disp) => { dynasm!(self ; ucomisd Rx(dst as u8), [Rq(base as u8) + disp]) } } + Ok(()) } - fn emit_cvttss2si_32(&mut self, src: XMMOrMemory, dst: GPR) { + fn emit_cvttss2si_32(&mut self, src: XMMOrMemory, dst: GPR) -> Result<(), CodegenError> { match src { XMMOrMemory::XMM(x) => dynasm!(self ; cvttss2si Rd(dst as u8), Rx(x as u8)), XMMOrMemory::Memory(base, disp) => { dynasm!(self ; cvttss2si Rd(dst as u8), [Rq(base as u8) + disp]) } } + Ok(()) } - fn emit_cvttss2si_64(&mut self, src: XMMOrMemory, dst: GPR) { + fn emit_cvttss2si_64(&mut self, src: XMMOrMemory, dst: GPR) -> Result<(), CodegenError> { match src { XMMOrMemory::XMM(x) => dynasm!(self ; cvttss2si Rq(dst as u8), Rx(x as u8)), XMMOrMemory::Memory(base, disp) => { dynasm!(self ; cvttss2si Rq(dst as u8), [Rq(base as u8) + disp]) } } + Ok(()) } - fn emit_cvttsd2si_32(&mut self, src: XMMOrMemory, dst: GPR) { + fn emit_cvttsd2si_32(&mut self, src: XMMOrMemory, dst: GPR) -> Result<(), CodegenError> { match src { XMMOrMemory::XMM(x) => dynasm!(self ; cvttsd2si 
Rd(dst as u8), Rx(x as u8)), XMMOrMemory::Memory(base, disp) => { dynasm!(self ; cvttsd2si Rd(dst as u8), [Rq(base as u8) + disp]) } } + Ok(()) } - fn emit_cvttsd2si_64(&mut self, src: XMMOrMemory, dst: GPR) { + fn emit_cvttsd2si_64(&mut self, src: XMMOrMemory, dst: GPR) -> Result<(), CodegenError> { match src { XMMOrMemory::XMM(x) => dynasm!(self ; cvttsd2si Rq(dst as u8), Rx(x as u8)), XMMOrMemory::Memory(base, disp) => { dynasm!(self ; cvttsd2si Rq(dst as u8), [Rq(base as u8) + disp]) } } + Ok(()) } - fn emit_test_gpr_64(&mut self, reg: GPR) { + fn emit_test_gpr_64(&mut self, reg: GPR) -> Result<(), CodegenError> { dynasm!(self ; test Rq(reg as u8), Rq(reg as u8)); + Ok(()) } - fn emit_ud2(&mut self) { + fn emit_ud2(&mut self) -> Result<(), CodegenError> { dynasm!(self ; ud2); + Ok(()) } - fn emit_ud1_payload(&mut self, payload: u8) { + fn emit_ud1_payload(&mut self, payload: u8) -> Result<(), CodegenError> { assert!(payload & 0xf0 == 0); dynasm!(self ; ud1 Rd((payload>>3)&1), Rd(payload&7)); + Ok(()) } - fn emit_ret(&mut self) { + fn emit_ret(&mut self) -> Result<(), CodegenError> { dynasm!(self ; ret); + Ok(()) } - fn emit_call_label(&mut self, label: Label) { + fn emit_call_label(&mut self, label: Label) -> Result<(), CodegenError> { dynasm!(self ; call =>label); + Ok(()) } - fn emit_call_location(&mut self, loc: Location) { + fn emit_call_location(&mut self, loc: Location) -> Result<(), CodegenError> { match loc { Location::GPR(x) => dynasm!(self ; call Rq(x as u8)), Location::Memory(base, disp) => dynasm!(self ; call QWORD [Rq(base as u8) + disp]), - _ => panic!("singlepass can't emit CALL {:?}", loc), + _ => codegen_error!("singlepass can't emit CALL {:?}", loc), } + Ok(()) } - fn emit_call_register(&mut self, reg: GPR) { + fn emit_call_register(&mut self, reg: GPR) -> Result<(), CodegenError> { dynasm!(self ; call Rq(reg as u8)); + Ok(()) } - fn emit_bkpt(&mut self) { + fn emit_bkpt(&mut self) -> Result<(), CodegenError> { dynasm!(self ; int3); + Ok(()) } 
- fn emit_host_redirection(&mut self, target: GPR) { - self.emit_jmp_location(Location::GPR(target)); + fn emit_host_redirection(&mut self, target: GPR) -> Result<(), CodegenError> { + self.emit_jmp_location(Location::GPR(target)) } - fn arch_mov64_imm_offset(&self) -> usize { - 2 + fn arch_mov64_imm_offset(&self) -> Result { + Ok(2) } } diff --git a/lib/compiler-singlepass/src/machine.rs b/lib/compiler-singlepass/src/machine.rs index 631c4d7faf1..c2aaed062bf 100644 --- a/lib/compiler-singlepass/src/machine.rs +++ b/lib/compiler-singlepass/src/machine.rs @@ -87,9 +87,9 @@ pub trait Machine { /// reserve a GPR fn reserve_gpr(&mut self, gpr: Self::GPR); /// Push used gpr to the stack. Return the bytes taken on the stack - fn push_used_gpr(&mut self, grps: &[Self::GPR]) -> usize; + fn push_used_gpr(&mut self, grps: &[Self::GPR]) -> Result; /// Pop used gpr to the stack - fn pop_used_gpr(&mut self, grps: &[Self::GPR]); + fn pop_used_gpr(&mut self, grps: &[Self::GPR]) -> Result<(), CodegenError>; /// Picks an unused SIMD register. /// /// This method does not mark the register as used @@ -105,9 +105,9 @@ pub trait Machine { /// Releases a temporary XMM register. fn release_simd(&mut self, simd: Self::SIMD); /// Push used simd regs to the stack. Return bytes taken on the stack - fn push_used_simd(&mut self, simds: &[Self::SIMD]) -> usize; + fn push_used_simd(&mut self, simds: &[Self::SIMD]) -> Result; /// Pop used simd regs to the stack - fn pop_used_simd(&mut self, simds: &[Self::SIMD]); + fn pop_used_simd(&mut self, simds: &[Self::SIMD]) -> Result<(), CodegenError>; /// Return a rounded stack adjustement value (must be multiple of 16bytes on ARM64 for example) fn round_stack_adjust(&self, value: usize) -> usize; /// Set the source location of the Wasm to the given offset. 
@@ -132,15 +132,19 @@ pub trait Machine { fn local_on_stack(&mut self, stack_offset: i32) -> Location; /// Adjust stack for locals /// Like assembler.emit_sub(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP)) - fn adjust_stack(&mut self, delta_stack_offset: u32); + fn adjust_stack(&mut self, delta_stack_offset: u32) -> Result<(), CodegenError>; /// restore stack /// Like assembler.emit_add(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP)) - fn restore_stack(&mut self, delta_stack_offset: u32); + fn restore_stack(&mut self, delta_stack_offset: u32) -> Result<(), CodegenError>; /// Pop stack of locals /// Like assembler.emit_add(Size::S64, Location::Imm32(delta_stack_offset as u32), Location::GPR(GPR::RSP)) - fn pop_stack_locals(&mut self, delta_stack_offset: u32); + fn pop_stack_locals(&mut self, delta_stack_offset: u32) -> Result<(), CodegenError>; /// Zero a location taht is 32bits - fn zero_location(&mut self, size: Size, location: Location); + fn zero_location( + &mut self, + size: Size, + location: Location, + ) -> Result<(), CodegenError>; /// GPR Reg used for local pointer on the stack fn local_pointer(&self) -> Self::GPR; /// push a value on the stack for a native call @@ -149,7 +153,7 @@ pub trait Machine { size: Size, loc: Location, dest: Location, - ); + ) -> Result<(), CodegenError>; /// Determine whether a local should be allocated on the stack. fn is_local_on_stack(&self, idx: usize) -> bool; /// Determine a local's location. 
@@ -160,7 +164,11 @@ pub trait Machine { ) -> Location; /// Move a local to the stack /// Like emit_mov(Size::S64, location, Location::Memory(GPR::RBP, -(self.stack_offset.0 as i32))); - fn move_local(&mut self, stack_offset: i32, location: Location); + fn move_local( + &mut self, + stack_offset: i32, + location: Location, + ) -> Result<(), CodegenError>; /// List of register to save, depending on the CallingConvention fn list_to_save( &self, @@ -194,7 +202,7 @@ pub trait Machine { size: Size, source: Location, dest: Location, - ); + ) -> Result<(), CodegenError>; /// move a location to another, with zero or sign extension fn move_location_extend( &mut self, @@ -203,7 +211,7 @@ pub trait Machine { source: Location, size_op: Size, dest: Location, - ); + ) -> Result<(), CodegenError>; /// Load a memory value to a register, zero extending to 64bits. /// Panic if gpr is not a Location::GPR or if mem is not a Memory(2) fn load_address( @@ -211,17 +219,20 @@ pub trait Machine { size: Size, gpr: Location, mem: Location, - ); + ) -> Result<(), CodegenError>; /// Init the stack loc counter fn init_stack_loc( &mut self, init_stack_loc_cnt: u64, last_stack_loc: Location, - ); + ) -> Result<(), CodegenError>; /// Restore save_area - fn restore_saved_area(&mut self, saved_area_offset: i32); + fn restore_saved_area(&mut self, saved_area_offset: i32) -> Result<(), CodegenError>; /// Pop a location - fn pop_location(&mut self, location: Location); + fn pop_location( + &mut self, + location: Location, + ) -> Result<(), CodegenError>; /// Create a new `MachineState` with default values. 
fn new_machine_state(&self) -> MachineState; @@ -232,21 +243,21 @@ pub trait Machine { fn get_offset(&self) -> Offset; /// finalize a function - fn finalize_function(&mut self); + fn finalize_function(&mut self) -> Result<(), CodegenError>; /// emit native function prolog (depending on the calling Convention, like "PUSH RBP / MOV RSP, RBP") - fn emit_function_prolog(&mut self); + fn emit_function_prolog(&mut self) -> Result<(), CodegenError>; /// emit native function epilog (depending on the calling Convention, like "MOV RBP, RSP / POP RBP") - fn emit_function_epilog(&mut self); + fn emit_function_epilog(&mut self) -> Result<(), CodegenError>; /// handle return value, with optionnal cannonicalization if wanted fn emit_function_return_value( &mut self, ty: WpType, cannonicalize: bool, loc: Location, - ); + ) -> Result<(), CodegenError>; /// Handle copy to SIMD register from ret value (if needed by the arch/calling convention) - fn emit_function_return_float(&mut self); + fn emit_function_return_float(&mut self) -> Result<(), CodegenError>; /// Is NaN canonicalization supported fn arch_supports_canonicalize_nan(&self) -> bool; /// Cannonicalize a NaN (or panic if not supported) @@ -255,37 +266,40 @@ pub trait Machine { sz: Size, input: Location, output: Location, - ); + ) -> Result<(), CodegenError>; /// emit an Illegal Opcode, associated with a trapcode - fn emit_illegal_op(&mut self, trp: TrapCode); + fn emit_illegal_op(&mut self, trp: TrapCode) -> Result<(), CodegenError>; /// create a new label fn get_label(&mut self) -> Label; /// emit a label - fn emit_label(&mut self, label: Label); + fn emit_label(&mut self, label: Label) -> Result<(), CodegenError>; /// get the gpr use for call. 
like RAX on x86_64 fn get_grp_for_call(&self) -> Self::GPR; /// Emit a call using the value in register - fn emit_call_register(&mut self, register: Self::GPR); + fn emit_call_register(&mut self, register: Self::GPR) -> Result<(), CodegenError>; /// Emit a call to a label - fn emit_call_label(&mut self, label: Label); + fn emit_call_label(&mut self, label: Label) -> Result<(), CodegenError>; /// Does an trampoline is neededfor indirect call fn arch_requires_indirect_call_trampoline(&self) -> bool; /// indirect call with trampoline fn arch_emit_indirect_call_with_trampoline( &mut self, location: Location, - ); + ) -> Result<(), CodegenError>; /// emit a call to a location - fn emit_call_location(&mut self, location: Location); + fn emit_call_location( + &mut self, + location: Location, + ) -> Result<(), CodegenError>; /// get the gpr for the return of generic values fn get_gpr_for_ret(&self) -> Self::GPR; /// get the simd for the return of float/double values fn get_simd_for_ret(&self) -> Self::SIMD; /// Emit a debug breakpoint - fn emit_debug_breakpoint(&mut self); + fn emit_debug_breakpoint(&mut self) -> Result<(), CodegenError>; /// load the address of a memory location (will panic if src is not a memory) /// like LEA opcode on x86_64 @@ -294,7 +308,7 @@ pub trait Machine { size: Size, source: Location, dest: Location, - ); + ) -> Result<(), CodegenError>; /// And src & dst -> dst (with or without flags) fn location_and( @@ -303,7 +317,7 @@ pub trait Machine { source: Location, dest: Location, flags: bool, - ); + ) -> Result<(), CodegenError>; /// Xor src & dst -> dst (with or without flags) fn location_xor( &mut self, @@ -311,7 +325,7 @@ pub trait Machine { source: Location, dest: Location, flags: bool, - ); + ) -> Result<(), CodegenError>; /// Or src & dst -> dst (with or without flags) fn location_or( &mut self, @@ -319,7 +333,7 @@ pub trait Machine { source: Location, dest: Location, flags: bool, - ); + ) -> Result<(), CodegenError>; /// Add src+dst -> dst 
(with or without flags) fn location_add( @@ -328,7 +342,7 @@ pub trait Machine { source: Location, dest: Location, flags: bool, - ); + ) -> Result<(), CodegenError>; /// Sub dst-src -> dst (with or without flags) fn location_sub( &mut self, @@ -336,7 +350,7 @@ pub trait Machine { source: Location, dest: Location, flags: bool, - ); + ) -> Result<(), CodegenError>; /// -src -> dst fn location_neg( &mut self, @@ -345,7 +359,7 @@ pub trait Machine { source: Location, size_op: Size, dest: Location, - ); + ) -> Result<(), CodegenError>; /// Cmp src - dst and set flags fn location_cmp( @@ -353,65 +367,77 @@ pub trait Machine { size: Size, source: Location, dest: Location, - ); + ) -> Result<(), CodegenError>; /// Test src & dst and set flags fn location_test( &mut self, size: Size, source: Location, dest: Location, - ); + ) -> Result<(), CodegenError>; /// jmp without condidtion - fn jmp_unconditionnal(&mut self, label: Label); + fn jmp_unconditionnal(&mut self, label: Label) -> Result<(), CodegenError>; /// jmp on equal (src==dst) /// like Equal set on x86_64 - fn jmp_on_equal(&mut self, label: Label); + fn jmp_on_equal(&mut self, label: Label) -> Result<(), CodegenError>; /// jmp on different (src!=dst) /// like NotEqual set on x86_64 - fn jmp_on_different(&mut self, label: Label); + fn jmp_on_different(&mut self, label: Label) -> Result<(), CodegenError>; /// jmp on above (src>dst) /// like Above set on x86_64 - fn jmp_on_above(&mut self, label: Label); + fn jmp_on_above(&mut self, label: Label) -> Result<(), CodegenError>; /// jmp on above (src>=dst) /// like Above or Equal set on x86_64 - fn jmp_on_aboveequal(&mut self, label: Label); + fn jmp_on_aboveequal(&mut self, label: Label) -> Result<(), CodegenError>; /// jmp on above (src<=dst) /// like Below or Equal set on x86_64 - fn jmp_on_belowequal(&mut self, label: Label); + fn jmp_on_belowequal(&mut self, label: Label) -> Result<(), CodegenError>; /// jmp on overflow /// like Carry set on x86_64 - fn 
jmp_on_overflow(&mut self, label: Label); + fn jmp_on_overflow(&mut self, label: Label) -> Result<(), CodegenError>; /// jmp using a jump table at lable with cond as the indice - fn emit_jmp_to_jumptable(&mut self, label: Label, cond: Location); + fn emit_jmp_to_jumptable( + &mut self, + label: Label, + cond: Location, + ) -> Result<(), CodegenError>; /// Align for Loop (may do nothing, depending on the arch) - fn align_for_loop(&mut self); + fn align_for_loop(&mut self) -> Result<(), CodegenError>; /// ret (from a Call) - fn emit_ret(&mut self); + fn emit_ret(&mut self) -> Result<(), CodegenError>; /// Stack push of a location - fn emit_push(&mut self, size: Size, loc: Location); + fn emit_push( + &mut self, + size: Size, + loc: Location, + ) -> Result<(), CodegenError>; /// Stack pop of a location - fn emit_pop(&mut self, size: Size, loc: Location); + fn emit_pop( + &mut self, + size: Size, + loc: Location, + ) -> Result<(), CodegenError>; /// relaxed mov: move from anywhere to anywhere fn emit_relaxed_mov( &mut self, sz: Size, src: Location, dst: Location, - ); + ) -> Result<(), CodegenError>; /// relaxed cmp: compare from anywhere and anywhere fn emit_relaxed_cmp( &mut self, sz: Size, src: Location, dst: Location, - ); + ) -> Result<(), CodegenError>; /// Emit a memory fence. 
Can be nothing for x86_64 or a DMB on ARM64 for example - fn emit_memory_fence(&mut self); + fn emit_memory_fence(&mut self) -> Result<(), CodegenError>; /// relaxed move with zero extension fn emit_relaxed_zero_extension( &mut self, @@ -419,7 +445,7 @@ pub trait Machine { src: Location, sz_dst: Size, dst: Location, - ); + ) -> Result<(), CodegenError>; /// relaxed move with sign extension fn emit_relaxed_sign_extension( &mut self, @@ -427,30 +453,35 @@ pub trait Machine { src: Location, sz_dst: Size, dst: Location, - ); + ) -> Result<(), CodegenError>; /// Multiply location with immediate - fn emit_imul_imm32(&mut self, size: Size, imm32: u32, gpr: Self::GPR); + fn emit_imul_imm32( + &mut self, + size: Size, + imm32: u32, + gpr: Self::GPR, + ) -> Result<(), CodegenError>; /// Add with location directly from the stack fn emit_binop_add32( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Sub with location directly from the stack fn emit_binop_sub32( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Multiply with location directly from the stack fn emit_binop_mul32( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Unsigned Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable. fn emit_binop_udiv32( &mut self, @@ -459,7 +490,7 @@ pub trait Machine { ret: Location, integer_division_by_zero: Label, integer_overflow: Label, - ) -> usize; + ) -> Result; /// Signed Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable. fn emit_binop_sdiv32( &mut self, @@ -468,7 +499,7 @@ pub trait Machine { ret: Location, integer_division_by_zero: Label, integer_overflow: Label, - ) -> usize; + ) -> Result; /// Unsigned Reminder (of a division) with location directly from the stack. 
return the offset of the DIV opcode, to mark as trappable. fn emit_binop_urem32( &mut self, @@ -477,7 +508,7 @@ pub trait Machine { ret: Location, integer_division_by_zero: Label, integer_overflow: Label, - ) -> usize; + ) -> Result; /// Signed Reminder (of a Division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable. fn emit_binop_srem32( &mut self, @@ -486,151 +517,151 @@ pub trait Machine { ret: Location, integer_division_by_zero: Label, integer_overflow: Label, - ) -> usize; + ) -> Result; /// And with location directly from the stack fn emit_binop_and32( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Or with location directly from the stack fn emit_binop_or32( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Xor with location directly from the stack fn emit_binop_xor32( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Signed Greater of Equal Compare 2 i32, result in a GPR fn i32_cmp_ge_s( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Signed Greater Than Compare 2 i32, result in a GPR fn i32_cmp_gt_s( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Signed Less of Equal Compare 2 i32, result in a GPR fn i32_cmp_le_s( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Signed Less Than Compare 2 i32, result in a GPR fn i32_cmp_lt_s( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Unsigned Greater of Equal Compare 2 i32, result in a GPR fn i32_cmp_ge_u( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Unsigned Greater Than Compare 2 i32, result in a GPR fn i32_cmp_gt_u( &mut self, loc_a: Location, 
loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Unsigned Less of Equal Compare 2 i32, result in a GPR fn i32_cmp_le_u( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Unsigned Less Than Compare 2 i32, result in a GPR fn i32_cmp_lt_u( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Not Equal Compare 2 i32, result in a GPR fn i32_cmp_ne( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Equal Compare 2 i32, result in a GPR fn i32_cmp_eq( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Count Leading 0 bit of an i32 fn i32_clz( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Count Trailling 0 bit of an i32 fn i32_ctz( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Count the number of 1 bit of an i32 fn i32_popcnt( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// i32 Logical Shift Left fn i32_shl( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// i32 Logical Shift Right fn i32_shr( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// i32 Arithmetic Shift Right fn i32_sar( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// i32 Roll Left fn i32_rol( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// i32 Roll Right fn i32_ror( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// i32 load #[allow(clippy::too_many_arguments)] fn i32_load( @@ -642,7 +673,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 load 
of an unsigned 8bits #[allow(clippy::too_many_arguments)] fn i32_load_8u( @@ -654,7 +685,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 load of an signed 8bits #[allow(clippy::too_many_arguments)] fn i32_load_8s( @@ -666,7 +697,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 load of an unsigned 16bits #[allow(clippy::too_many_arguments)] fn i32_load_16u( @@ -678,7 +709,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 load of an signed 16bits #[allow(clippy::too_many_arguments)] fn i32_load_16s( @@ -690,7 +721,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic load #[allow(clippy::too_many_arguments)] fn i32_atomic_load( @@ -702,7 +733,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic load of an unsigned 8bits #[allow(clippy::too_many_arguments)] fn i32_atomic_load_8u( @@ -714,7 +745,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic load of an unsigned 16bits #[allow(clippy::too_many_arguments)] fn i32_atomic_load_16u( @@ -726,7 +757,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 save #[allow(clippy::too_many_arguments)] fn i32_save( @@ -738,7 +769,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 save of the lower 8bits #[allow(clippy::too_many_arguments)] fn i32_save_8( @@ -750,7 +781,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> 
Result<(), CodegenError>; /// i32 save of the lower 16bits #[allow(clippy::too_many_arguments)] fn i32_save_16( @@ -762,7 +793,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic save #[allow(clippy::too_many_arguments)] fn i32_atomic_save( @@ -774,7 +805,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic save of a the lower 8bits #[allow(clippy::too_many_arguments)] fn i32_atomic_save_8( @@ -786,7 +817,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic save of a the lower 16bits #[allow(clippy::too_many_arguments)] fn i32_atomic_save_16( @@ -798,7 +829,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Add with i32 #[allow(clippy::too_many_arguments)] fn i32_atomic_add( @@ -811,7 +842,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Add with unsigned 8bits #[allow(clippy::too_many_arguments)] fn i32_atomic_add_8u( @@ -824,7 +855,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Add with unsigned 16bits #[allow(clippy::too_many_arguments)] fn i32_atomic_add_16u( @@ -837,7 +868,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Sub with i32 #[allow(clippy::too_many_arguments)] fn i32_atomic_sub( @@ -850,7 +881,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Sub with unsigned 8bits #[allow(clippy::too_many_arguments)] fn i32_atomic_sub_8u( @@ -863,7 +894,7 @@ pub 
trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Sub with unsigned 16bits #[allow(clippy::too_many_arguments)] fn i32_atomic_sub_16u( @@ -876,7 +907,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic And with i32 #[allow(clippy::too_many_arguments)] fn i32_atomic_and( @@ -889,7 +920,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic And with unsigned 8bits #[allow(clippy::too_many_arguments)] fn i32_atomic_and_8u( @@ -902,7 +933,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic And with unsigned 16bits #[allow(clippy::too_many_arguments)] fn i32_atomic_and_16u( @@ -915,7 +946,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Or with i32 #[allow(clippy::too_many_arguments)] fn i32_atomic_or( @@ -928,7 +959,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Or with unsigned 8bits #[allow(clippy::too_many_arguments)] fn i32_atomic_or_8u( @@ -941,7 +972,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Or with unsigned 16bits #[allow(clippy::too_many_arguments)] fn i32_atomic_or_16u( @@ -954,7 +985,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Xor with i32 #[allow(clippy::too_many_arguments)] fn i32_atomic_xor( @@ -967,7 +998,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Xor 
with unsigned 8bits #[allow(clippy::too_many_arguments)] fn i32_atomic_xor_8u( @@ -980,7 +1011,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Xor with unsigned 16bits #[allow(clippy::too_many_arguments)] fn i32_atomic_xor_16u( @@ -993,7 +1024,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Exchange with i32 #[allow(clippy::too_many_arguments)] fn i32_atomic_xchg( @@ -1006,7 +1037,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Exchange with u8 #[allow(clippy::too_many_arguments)] fn i32_atomic_xchg_8u( @@ -1019,7 +1050,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Exchange with u16 #[allow(clippy::too_many_arguments)] fn i32_atomic_xchg_16u( @@ -1032,7 +1063,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Compare and Exchange with i32 #[allow(clippy::too_many_arguments)] fn i32_atomic_cmpxchg( @@ -1046,7 +1077,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Compare and Exchange with u8 #[allow(clippy::too_many_arguments)] fn i32_atomic_cmpxchg_8u( @@ -1060,7 +1091,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i32 atomic Compare and Exchange with u16 #[allow(clippy::too_many_arguments)] fn i32_atomic_cmpxchg_16u( @@ -1074,35 +1105,35 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// emit a move function address to GPR ready for call, using appropriate relocation fn 
emit_call_with_reloc( &mut self, calling_convention: CallingConvention, reloc_target: RelocationTarget, - ) -> Vec; + ) -> Result, CodegenError>; /// Add with location directly from the stack fn emit_binop_add64( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Sub with location directly from the stack fn emit_binop_sub64( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Multiply with location directly from the stack fn emit_binop_mul64( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Unsigned Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable. fn emit_binop_udiv64( &mut self, @@ -1111,7 +1142,7 @@ pub trait Machine { ret: Location, integer_division_by_zero: Label, integer_overflow: Label, - ) -> usize; + ) -> Result; /// Signed Division with location directly from the stack. return the offset of the DIV opcode, to mark as trappable. fn emit_binop_sdiv64( &mut self, @@ -1120,7 +1151,7 @@ pub trait Machine { ret: Location, integer_division_by_zero: Label, integer_overflow: Label, - ) -> usize; + ) -> Result; /// Unsigned Reminder (of a division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable. fn emit_binop_urem64( &mut self, @@ -1129,7 +1160,7 @@ pub trait Machine { ret: Location, integer_division_by_zero: Label, integer_overflow: Label, - ) -> usize; + ) -> Result; /// Signed Reminder (of a Division) with location directly from the stack. return the offset of the DIV opcode, to mark as trappable. 
fn emit_binop_srem64( &mut self, @@ -1138,151 +1169,151 @@ pub trait Machine { ret: Location, integer_division_by_zero: Label, integer_overflow: Label, - ) -> usize; + ) -> Result; /// And with location directly from the stack fn emit_binop_and64( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Or with location directly from the stack fn emit_binop_or64( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Xor with location directly from the stack fn emit_binop_xor64( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Signed Greater of Equal Compare 2 i64, result in a GPR fn i64_cmp_ge_s( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Signed Greater Than Compare 2 i64, result in a GPR fn i64_cmp_gt_s( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Signed Less of Equal Compare 2 i64, result in a GPR fn i64_cmp_le_s( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Signed Less Than Compare 2 i64, result in a GPR fn i64_cmp_lt_s( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Unsigned Greater of Equal Compare 2 i64, result in a GPR fn i64_cmp_ge_u( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Unsigned Greater Than Compare 2 i64, result in a GPR fn i64_cmp_gt_u( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Unsigned Less of Equal Compare 2 i64, result in a GPR fn i64_cmp_le_u( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Unsigned Less Than Compare 2 i64, result in a GPR fn i64_cmp_lt_u( &mut self, loc_a: Location, loc_b: Location, ret: 
Location, - ); + ) -> Result<(), CodegenError>; /// Not Equal Compare 2 i64, result in a GPR fn i64_cmp_ne( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Equal Compare 2 i64, result in a GPR fn i64_cmp_eq( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Count Leading 0 bit of an i64 fn i64_clz( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Count Trailling 0 bit of an i64 fn i64_ctz( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Count the number of 1 bit of an i64 fn i64_popcnt( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// i64 Logical Shift Left fn i64_shl( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// i64 Logical Shift Right fn i64_shr( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// i64 Arithmetic Shift Right fn i64_sar( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// i64 Roll Left fn i64_rol( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// i64 Roll Right fn i64_ror( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// i64 load #[allow(clippy::too_many_arguments)] fn i64_load( @@ -1294,7 +1325,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 load of an unsigned 8bits #[allow(clippy::too_many_arguments)] fn i64_load_8u( @@ -1306,7 +1337,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 load of an signed 8bits #[allow(clippy::too_many_arguments)] fn i64_load_8s( @@ -1318,7 +1349,7 @@ pub trait Machine { imported_memories: 
bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 load of an unsigned 32bits #[allow(clippy::too_many_arguments)] fn i64_load_32u( @@ -1330,7 +1361,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 load of an signed 32bits #[allow(clippy::too_many_arguments)] fn i64_load_32s( @@ -1342,7 +1373,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 load of an signed 16bits #[allow(clippy::too_many_arguments)] fn i64_load_16u( @@ -1354,7 +1385,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 load of an signed 16bits #[allow(clippy::too_many_arguments)] fn i64_load_16s( @@ -1366,7 +1397,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic load #[allow(clippy::too_many_arguments)] fn i64_atomic_load( @@ -1378,7 +1409,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic load from unsigned 8bits #[allow(clippy::too_many_arguments)] fn i64_atomic_load_8u( @@ -1390,7 +1421,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic load from unsigned 16bits #[allow(clippy::too_many_arguments)] fn i64_atomic_load_16u( @@ -1402,7 +1433,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic load from unsigned 32bits #[allow(clippy::too_many_arguments)] fn i64_atomic_load_32u( @@ -1414,7 +1445,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 save #[allow(clippy::too_many_arguments)] 
fn i64_save( @@ -1426,7 +1457,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 save of the lower 8bits #[allow(clippy::too_many_arguments)] fn i64_save_8( @@ -1438,7 +1469,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 save of the lower 16bits #[allow(clippy::too_many_arguments)] fn i64_save_16( @@ -1450,7 +1481,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 save of the lower 32bits #[allow(clippy::too_many_arguments)] fn i64_save_32( @@ -1462,7 +1493,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic save #[allow(clippy::too_many_arguments)] fn i64_atomic_save( @@ -1474,7 +1505,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic save of a the lower 8bits #[allow(clippy::too_many_arguments)] fn i64_atomic_save_8( @@ -1486,7 +1517,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic save of a the lower 16bits #[allow(clippy::too_many_arguments)] fn i64_atomic_save_16( @@ -1498,7 +1529,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic save of a the lower 32bits #[allow(clippy::too_many_arguments)] fn i64_atomic_save_32( @@ -1510,7 +1541,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Add with i64 #[allow(clippy::too_many_arguments)] fn i64_atomic_add( @@ -1523,7 +1554,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), 
CodegenError>; /// i64 atomic Add with unsigned 8bits #[allow(clippy::too_many_arguments)] fn i64_atomic_add_8u( @@ -1536,7 +1567,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Add with unsigned 16bits #[allow(clippy::too_many_arguments)] fn i64_atomic_add_16u( @@ -1549,7 +1580,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Add with unsigned 32bits #[allow(clippy::too_many_arguments)] fn i64_atomic_add_32u( @@ -1562,7 +1593,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Sub with i64 #[allow(clippy::too_many_arguments)] fn i64_atomic_sub( @@ -1575,7 +1606,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Sub with unsigned 8bits #[allow(clippy::too_many_arguments)] fn i64_atomic_sub_8u( @@ -1588,7 +1619,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Sub with unsigned 16bits #[allow(clippy::too_many_arguments)] fn i64_atomic_sub_16u( @@ -1601,7 +1632,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Sub with unsigned 32bits #[allow(clippy::too_many_arguments)] fn i64_atomic_sub_32u( @@ -1614,7 +1645,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic And with i64 #[allow(clippy::too_many_arguments)] fn i64_atomic_and( @@ -1627,7 +1658,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic And with unsigned 8bits #[allow(clippy::too_many_arguments)] fn 
i64_atomic_and_8u( @@ -1640,7 +1671,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic And with unsigned 16bits #[allow(clippy::too_many_arguments)] fn i64_atomic_and_16u( @@ -1653,7 +1684,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic And with unsigned 32bits #[allow(clippy::too_many_arguments)] fn i64_atomic_and_32u( @@ -1666,7 +1697,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Or with i64 #[allow(clippy::too_many_arguments)] fn i64_atomic_or( @@ -1679,7 +1710,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Or with unsigned 8bits #[allow(clippy::too_many_arguments)] fn i64_atomic_or_8u( @@ -1692,7 +1723,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Or with unsigned 16bits #[allow(clippy::too_many_arguments)] fn i64_atomic_or_16u( @@ -1705,7 +1736,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Or with unsigned 32bits #[allow(clippy::too_many_arguments)] fn i64_atomic_or_32u( @@ -1718,7 +1749,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Xor with i64 #[allow(clippy::too_many_arguments)] fn i64_atomic_xor( @@ -1731,7 +1762,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Xor with unsigned 8bits #[allow(clippy::too_many_arguments)] fn i64_atomic_xor_8u( @@ -1744,7 +1775,7 @@ pub trait Machine { imported_memories: bool, offset: i32, 
heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Xor with unsigned 16bits #[allow(clippy::too_many_arguments)] fn i64_atomic_xor_16u( @@ -1757,7 +1788,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Xor with unsigned 32bits #[allow(clippy::too_many_arguments)] fn i64_atomic_xor_32u( @@ -1770,7 +1801,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Exchange with i64 #[allow(clippy::too_many_arguments)] fn i64_atomic_xchg( @@ -1783,7 +1814,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Exchange with u8 #[allow(clippy::too_many_arguments)] fn i64_atomic_xchg_8u( @@ -1796,7 +1827,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Exchange with u16 #[allow(clippy::too_many_arguments)] fn i64_atomic_xchg_16u( @@ -1809,7 +1840,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Exchange with u32 #[allow(clippy::too_many_arguments)] fn i64_atomic_xchg_32u( @@ -1822,7 +1853,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Compare and Exchange with i32 #[allow(clippy::too_many_arguments)] fn i64_atomic_cmpxchg( @@ -1836,7 +1867,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Compare and Exchange with u8 #[allow(clippy::too_many_arguments)] fn i64_atomic_cmpxchg_8u( @@ -1850,7 +1881,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic 
Compare and Exchange with u16 #[allow(clippy::too_many_arguments)] fn i64_atomic_cmpxchg_16u( @@ -1864,7 +1895,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// i64 atomic Compare and Exchange with u32 #[allow(clippy::too_many_arguments)] fn i64_atomic_cmpxchg_32u( @@ -1878,7 +1909,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// load an F32 #[allow(clippy::too_many_arguments)] @@ -1891,7 +1922,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// f32 save #[allow(clippy::too_many_arguments)] fn f32_save( @@ -1904,7 +1935,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// load an F64 #[allow(clippy::too_many_arguments)] fn f64_load( @@ -1916,7 +1947,7 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// f64 save #[allow(clippy::too_many_arguments)] fn f64_save( @@ -1929,35 +1960,35 @@ pub trait Machine { imported_memories: bool, offset: i32, heap_access_oob: Label, - ); + ) -> Result<(), CodegenError>; /// Convert a F64 from I64, signed or unsigned fn convert_f64_i64( &mut self, loc: Location, signed: bool, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Convert a F64 from I32, signed or unsigned fn convert_f64_i32( &mut self, loc: Location, signed: bool, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Convert a F32 from I64, signed or unsigned fn convert_f32_i64( &mut self, loc: Location, signed: bool, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Convert a F32 from I32, signed or unsigned fn convert_f32_i32( &mut self, loc: Location, signed: bool, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Convert a F64 to I64, signed or unsigned, without or 
without saturation fn convert_i64_f64( &mut self, @@ -1965,7 +1996,7 @@ pub trait Machine { ret: Location, signed: bool, sat: bool, - ); + ) -> Result<(), CodegenError>; /// Convert a F64 to I32, signed or unsigned, without or without saturation fn convert_i32_f64( &mut self, @@ -1973,7 +2004,7 @@ pub trait Machine { ret: Location, signed: bool, sat: bool, - ); + ) -> Result<(), CodegenError>; /// Convert a F32 to I64, signed or unsigned, without or without saturation fn convert_i64_f32( &mut self, @@ -1981,7 +2012,7 @@ pub trait Machine { ret: Location, signed: bool, sat: bool, - ); + ) -> Result<(), CodegenError>; /// Convert a F32 to I32, signed or unsigned, without or without saturation fn convert_i32_f32( &mut self, @@ -1989,289 +2020,289 @@ pub trait Machine { ret: Location, signed: bool, sat: bool, - ); + ) -> Result<(), CodegenError>; /// Convert a F32 to F64 fn convert_f64_f32( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Convert a F64 to F32 fn convert_f32_f64( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Negate an F64 fn f64_neg( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Get the Absolute Value of an F64 fn f64_abs( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Copy sign from tmp1 Self::GPR to tmp2 Self::GPR - fn emit_i64_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR); + fn emit_i64_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR) -> Result<(), CodegenError>; /// Get the Square Root of an F64 fn f64_sqrt( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Trunc of an F64 fn f64_trunc( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Ceil of an F64 fn f64_ceil( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Floor of an F64 fn f64_floor( &mut self, loc: Location, ret: Location, - ); + ) -> 
Result<(), CodegenError>; /// Round at nearest int of an F64 fn f64_nearest( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Greater of Equal Compare 2 F64, result in a GPR fn f64_cmp_ge( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Greater Than Compare 2 F64, result in a GPR fn f64_cmp_gt( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Less of Equal Compare 2 F64, result in a GPR fn f64_cmp_le( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Less Than Compare 2 F64, result in a GPR fn f64_cmp_lt( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Not Equal Compare 2 F64, result in a GPR fn f64_cmp_ne( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Equal Compare 2 F64, result in a GPR fn f64_cmp_eq( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// get Min for 2 F64 values fn f64_min( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// get Max for 2 F64 values fn f64_max( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Add 2 F64 values fn f64_add( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Sub 2 F64 values fn f64_sub( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Multiply 2 F64 values fn f64_mul( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Divide 2 F64 values fn f64_div( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Negate an F32 fn f32_neg( &mut self, loc: Location, ret: Location, - ); 
+ ) -> Result<(), CodegenError>; /// Get the Absolute Value of an F32 fn f32_abs( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Copy sign from tmp1 Self::GPR to tmp2 Self::GPR - fn emit_i32_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR); + fn emit_i32_copysign(&mut self, tmp1: Self::GPR, tmp2: Self::GPR) -> Result<(), CodegenError>; /// Get the Square Root of an F32 fn f32_sqrt( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Trunc of an F32 fn f32_trunc( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Ceil of an F32 fn f32_ceil( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Floor of an F32 fn f32_floor( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Round at nearest int of an F32 fn f32_nearest( &mut self, loc: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Greater of Equal Compare 2 F32, result in a GPR fn f32_cmp_ge( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Greater Than Compare 2 F32, result in a GPR fn f32_cmp_gt( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Less of Equal Compare 2 F32, result in a GPR fn f32_cmp_le( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Less Than Compare 2 F32, result in a GPR fn f32_cmp_lt( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Not Equal Compare 2 F32, result in a GPR fn f32_cmp_ne( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Equal Compare 2 F32, result in a GPR fn f32_cmp_eq( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// get Min for 2 F32 values fn f32_min( &mut self, loc_a: Location, 
loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// get Max for 2 F32 values fn f32_max( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Add 2 F32 values fn f32_add( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Sub 2 F32 values fn f32_sub( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Multiply 2 F32 values fn f32_mul( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Divide 2 F32 values fn f32_div( &mut self, loc_a: Location, loc_b: Location, ret: Location, - ); + ) -> Result<(), CodegenError>; /// Standard function Trampoline generation fn gen_std_trampoline( &self, sig: &FunctionType, calling_convention: CallingConvention, - ) -> FunctionBody; + ) -> Result; /// Generates dynamic import function call trampoline for a function type. fn gen_std_dynamic_import_trampoline( &self, vmoffsets: &VMOffsets, sig: &FunctionType, calling_convention: CallingConvention, - ) -> FunctionBody; + ) -> Result; /// Singlepass calls import functions through a trampoline. 
fn gen_import_call_trampoline( &self, @@ -2279,7 +2310,7 @@ pub trait Machine { index: FunctionIndex, sig: &FunctionType, calling_convention: CallingConvention, - ) -> CustomSection; + ) -> Result; /// generate eh_frame instruction (or None if not possible / supported) fn gen_dwarf_unwind_info(&mut self, code_len: usize) -> Option; /// generate Windows unwind instructions (or None if not possible / supported) @@ -2299,15 +2330,15 @@ pub fn gen_std_trampoline( } else if target.cpu_features().contains(CpuFeature::SSE42) { MachineX86_64::new(Some(CpuFeature::SSE42)) } else { - unimplemented!() + panic!("singlepass unimplement X86_64 variant for gen_std_trampoline") }; - machine.gen_std_trampoline(sig, calling_convention) + machine.gen_std_trampoline(sig, calling_convention).unwrap() } Architecture::Aarch64(_) => { let machine = MachineARM64::new(); - machine.gen_std_trampoline(sig, calling_convention) + machine.gen_std_trampoline(sig, calling_convention).unwrap() } - _ => unimplemented!(), + _ => panic!("singlepass unimplemented arch for gen_std_trampoline"), } } @@ -2325,15 +2356,21 @@ pub fn gen_std_dynamic_import_trampoline( } else if target.cpu_features().contains(CpuFeature::SSE42) { MachineX86_64::new(Some(CpuFeature::SSE42)) } else { - unimplemented!() + panic!( + "singlepass unimplement X86_64 variant for gen_std_dynamic_import_trampoline" + ) }; - machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention) + machine + .gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention) + .unwrap() } Architecture::Aarch64(_) => { let machine = MachineARM64::new(); - machine.gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention) + machine + .gen_std_dynamic_import_trampoline(vmoffsets, sig, calling_convention) + .unwrap() } - _ => unimplemented!(), + _ => panic!("singlepass unimplemented arch for gen_std_dynamic_import_trampoline"), } } /// Singlepass calls import functions through a trampoline. 
@@ -2351,15 +2388,19 @@ pub fn gen_import_call_trampoline( } else if target.cpu_features().contains(CpuFeature::SSE42) { MachineX86_64::new(Some(CpuFeature::SSE42)) } else { - unimplemented!() + panic!("singlepass unimplement X86_64 variant for gen_import_call_trampoline") }; - machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention) + machine + .gen_import_call_trampoline(vmoffsets, index, sig, calling_convention) + .unwrap() } Architecture::Aarch64(_) => { let machine = MachineARM64::new(); - machine.gen_import_call_trampoline(vmoffsets, index, sig, calling_convention) + machine + .gen_import_call_trampoline(vmoffsets, index, sig, calling_convention) + .unwrap() } - _ => unimplemented!(), + _ => panic!("singlepass unimplemented arch for gen_import_call_trampoline"), } } diff --git a/lib/compiler-singlepass/src/machine_arm64.rs b/lib/compiler-singlepass/src/machine_arm64.rs index e70a9515d9f..9d8555eaa3b 100644 --- a/lib/compiler-singlepass/src/machine_arm64.rs +++ b/lib/compiler-singlepass/src/machine_arm64.rs @@ -1,5 +1,6 @@ use crate::arm64_decl::new_machine_state; use crate::arm64_decl::{GPR, NEON}; +use crate::codegen_error; use crate::common_decl::*; use crate::emitter_arm64::*; use crate::location::Location as AbstractLocation; @@ -174,66 +175,76 @@ impl MachineARM64 { allow_imm: ImmType, read_val: bool, wanted: Option, - ) -> Location { + ) -> Result { match src { - Location::GPR(_) | Location::SIMD(_) => src, + Location::GPR(_) | Location::SIMD(_) => Ok(src), Location::Imm8(val) => { if allow_imm == ImmType::NoneXzr && val == 0 { - Location::GPR(GPR::XzrSp) + Ok(Location::GPR(GPR::XzrSp)) } else if self.compatible_imm(val as i64, allow_imm) { - src + Ok(src) } else { let tmp = if let Some(wanted) = wanted { wanted } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(tmp); tmp }; - 
self.assembler.emit_mov_imm(Location::GPR(tmp), val as u64); - Location::GPR(tmp) + self.assembler + .emit_mov_imm(Location::GPR(tmp), val as u64)?; + Ok(Location::GPR(tmp)) } } Location::Imm32(val) => { if allow_imm == ImmType::NoneXzr && val == 0 { - Location::GPR(GPR::XzrSp) + Ok(Location::GPR(GPR::XzrSp)) } else if self.compatible_imm(val as i64, allow_imm) { - src + Ok(src) } else { let tmp = if let Some(wanted) = wanted { wanted } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(tmp); tmp }; self.assembler - .emit_mov_imm(Location::GPR(tmp), (val as i64) as u64); - Location::GPR(tmp) + .emit_mov_imm(Location::GPR(tmp), (val as i64) as u64)?; + Ok(Location::GPR(tmp)) } } Location::Imm64(val) => { if allow_imm == ImmType::NoneXzr && val == 0 { - Location::GPR(GPR::XzrSp) + Ok(Location::GPR(GPR::XzrSp)) } else if self.compatible_imm(val as i64, allow_imm) { - src + Ok(src) } else { let tmp = if let Some(wanted) = wanted { wanted } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(tmp); tmp }; - self.assembler.emit_mov_imm(Location::GPR(tmp), val as u64); - Location::GPR(tmp) + self.assembler + .emit_mov_imm(Location::GPR(tmp), val as u64)?; + Ok(Location::GPR(tmp)) } } Location::Memory(reg, val) => { let tmp = if let Some(wanted) = wanted { wanted } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(tmp); tmp }; @@ -250,18 +261,18 @@ impl MachineARM64 { sz, Location::GPR(tmp), Location::Memory(reg, val as _), - ); + )?; } else { if reg == tmp { - unreachable!(); + codegen_error!("singlepass reg==tmp unreachable"); } self.assembler - 
.emit_mov_imm(Location::GPR(tmp), (val as i64) as u64); + .emit_mov_imm(Location::GPR(tmp), (val as i64) as u64)?; self.assembler.emit_ldrb( sz, Location::GPR(tmp), Location::Memory2(reg, tmp, Multiplier::One, 0), - ); + )?; } } else if sz == Size::S16 { if self.compatible_imm(val as i64, offsize) { @@ -269,43 +280,43 @@ impl MachineARM64 { sz, Location::GPR(tmp), Location::Memory(reg, val as _), - ); + )?; } else { if reg == tmp { - unreachable!(); + codegen_error!("singlepass reg==tmp unreachable"); } self.assembler - .emit_mov_imm(Location::GPR(tmp), (val as i64) as u64); + .emit_mov_imm(Location::GPR(tmp), (val as i64) as u64)?; self.assembler.emit_ldrh( sz, Location::GPR(tmp), Location::Memory2(reg, tmp, Multiplier::One, 0), - ); + )?; } } else if self.compatible_imm(val as i64, offsize) { self.assembler.emit_ldr( sz, Location::GPR(tmp), Location::Memory(reg, val as _), - ); + )?; } else if self.compatible_imm(val as i64, ImmType::UnscaledOffset) { - self.assembler.emit_ldur(sz, Location::GPR(tmp), reg, val); + self.assembler.emit_ldur(sz, Location::GPR(tmp), reg, val)?; } else { if reg == tmp { - unreachable!(); + codegen_error!("singlepass reg == tmp unreachable"); } self.assembler - .emit_mov_imm(Location::GPR(tmp), (val as i64) as u64); + .emit_mov_imm(Location::GPR(tmp), (val as i64) as u64)?; self.assembler.emit_ldr( sz, Location::GPR(tmp), Location::Memory2(reg, tmp, Multiplier::One, 0), - ); + )?; } } - Location::GPR(tmp) + Ok(Location::GPR(tmp)) } - _ => panic!("singlepass can't emit location_to_reg {:?} {:?}", sz, src), + _ => codegen_error!("singlepass can't emit location_to_reg {:?} {:?}", sz, src), } } fn location_to_neon( @@ -315,62 +326,80 @@ impl MachineARM64 { temps: &mut Vec, allow_imm: ImmType, read_val: bool, - ) -> Location { + ) -> Result { match src { - Location::SIMD(_) => src, + Location::SIMD(_) => Ok(src), Location::GPR(_) => { - let tmp = self.acquire_temp_simd().unwrap(); + let tmp = self.acquire_temp_simd().ok_or(CodegenError { + 
message: "singlepass cannot acquire temp simd".to_string(), + })?; temps.push(tmp); if read_val { - self.assembler.emit_mov(sz, src, Location::SIMD(tmp)); + self.assembler.emit_mov(sz, src, Location::SIMD(tmp))?; } - Location::SIMD(tmp) + Ok(Location::SIMD(tmp)) } Location::Imm8(val) => { if self.compatible_imm(val as i64, allow_imm) { - src + Ok(src) } else { - let gpr = self.acquire_temp_gpr().unwrap(); - let tmp = self.acquire_temp_simd().unwrap(); + let gpr = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; temps.push(tmp); - self.assembler.emit_mov_imm(Location::GPR(gpr), val as u64); self.assembler - .emit_mov(sz, Location::GPR(gpr), Location::SIMD(tmp)); + .emit_mov_imm(Location::GPR(gpr), val as u64)?; + self.assembler + .emit_mov(sz, Location::GPR(gpr), Location::SIMD(tmp))?; self.release_gpr(gpr); - Location::SIMD(tmp) + Ok(Location::SIMD(tmp)) } } Location::Imm32(val) => { if self.compatible_imm(val as i64, allow_imm) { - src + Ok(src) } else { - let gpr = self.acquire_temp_gpr().unwrap(); - let tmp = self.acquire_temp_simd().unwrap(); + let gpr = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; temps.push(tmp); self.assembler - .emit_mov_imm(Location::GPR(gpr), (val as i64) as u64); + .emit_mov_imm(Location::GPR(gpr), (val as i64) as u64)?; self.assembler - .emit_mov(sz, Location::GPR(gpr), Location::SIMD(tmp)); + .emit_mov(sz, Location::GPR(gpr), Location::SIMD(tmp))?; self.release_gpr(gpr); - Location::SIMD(tmp) + Ok(Location::SIMD(tmp)) } } Location::Imm64(val) => { if self.compatible_imm(val as i64, allow_imm) { - src + Ok(src) } else { - let gpr = 
self.acquire_temp_gpr().unwrap(); - let tmp = self.acquire_temp_simd().unwrap(); + let gpr = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; temps.push(tmp); - self.assembler.emit_mov_imm(Location::GPR(gpr), val as u64); self.assembler - .emit_mov(sz, Location::GPR(gpr), Location::SIMD(tmp)); + .emit_mov_imm(Location::GPR(gpr), val as u64)?; + self.assembler + .emit_mov(sz, Location::GPR(gpr), Location::SIMD(tmp))?; self.release_gpr(gpr); - Location::SIMD(tmp) + Ok(Location::SIMD(tmp)) } } Location::Memory(reg, val) => { - let tmp = self.acquire_temp_simd().unwrap(); + let tmp = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; temps.push(tmp); if read_val { let offsize = if sz == Size::S32 { @@ -383,416 +412,491 @@ impl MachineARM64 { sz, Location::SIMD(tmp), Location::Memory(reg, val as _), - ); + )?; } else if self.compatible_imm(val as i64, ImmType::UnscaledOffset) { - self.assembler.emit_ldur(sz, Location::SIMD(tmp), reg, val); + self.assembler + .emit_ldur(sz, Location::SIMD(tmp), reg, val)?; } else { - let gpr = self.acquire_temp_gpr().unwrap(); + let gpr = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(gpr), (val as i64) as u64); + .emit_mov_imm(Location::GPR(gpr), (val as i64) as u64)?; self.assembler.emit_ldr( sz, Location::SIMD(tmp), Location::Memory2(reg, gpr, Multiplier::One, 0), - ); + )?; self.release_gpr(gpr); } } - Location::SIMD(tmp) + Ok(Location::SIMD(tmp)) } - _ => panic!("singlepass can't emit location_to_neon {:?} {:?}", sz, src), + _ => codegen_error!("singlepass can't emit location_to_neon {:?} {:?}", sz, src), } } fn emit_relaxed_binop( &mut self, - op: fn(&mut Assembler, Size, 
Location, Location), + op: fn(&mut Assembler, Size, Location, Location) -> Result<(), CodegenError>, sz: Size, src: Location, dst: Location, putback: bool, - ) { + ) -> Result<(), CodegenError> { let mut temps = vec![]; let src_imm = if putback { ImmType::None } else { ImmType::Bits12 }; - let src = self.location_to_reg(sz, src, &mut temps, src_imm, true, None); - let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, !putback, None); - op(&mut self.assembler, sz, src, dest); + let src = self.location_to_reg(sz, src, &mut temps, src_imm, true, None)?; + let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, !putback, None)?; + op(&mut self.assembler, sz, src, dest)?; if dst != dest && putback { - self.move_location(sz, dest, dst); + self.move_location(sz, dest, dst)?; } for r in temps { self.release_gpr(r); } + Ok(()) } fn emit_relaxed_binop_neon( &mut self, - op: fn(&mut Assembler, Size, Location, Location), + op: fn(&mut Assembler, Size, Location, Location) -> Result<(), CodegenError>, sz: Size, src: Location, dst: Location, putback: bool, - ) { + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let src = self.location_to_neon(sz, src, &mut temps, ImmType::None, true); - let dest = self.location_to_neon(sz, dst, &mut temps, ImmType::None, !putback); - op(&mut self.assembler, sz, src, dest); + let src = self.location_to_neon(sz, src, &mut temps, ImmType::None, true)?; + let dest = self.location_to_neon(sz, dst, &mut temps, ImmType::None, !putback)?; + op(&mut self.assembler, sz, src, dest)?; if dst != dest && putback { - self.move_location(sz, dest, dst); + self.move_location(sz, dest, dst)?; } for r in temps { self.release_simd(r); } + Ok(()) } fn emit_relaxed_binop3( &mut self, - op: fn(&mut Assembler, Size, Location, Location, Location), + op: fn(&mut Assembler, Size, Location, Location, Location) -> Result<(), CodegenError>, sz: Size, src1: Location, src2: Location, dst: Location, allow_imm: ImmType, - ) { + ) -> Result<(), 
CodegenError> { let mut temps = vec![]; - let src1 = self.location_to_reg(sz, src1, &mut temps, ImmType::None, true, None); - let src2 = self.location_to_reg(sz, src2, &mut temps, allow_imm, true, None); - let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, false, None); - op(&mut self.assembler, sz, src1, src2, dest); + let src1 = self.location_to_reg(sz, src1, &mut temps, ImmType::None, true, None)?; + let src2 = self.location_to_reg(sz, src2, &mut temps, allow_imm, true, None)?; + let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, false, None)?; + op(&mut self.assembler, sz, src1, src2, dest)?; if dst != dest { - self.move_location(sz, dest, dst); + self.move_location(sz, dest, dst)?; } for r in temps { self.release_gpr(r); } + Ok(()) } fn emit_relaxed_binop3_neon( &mut self, - op: fn(&mut Assembler, Size, Location, Location, Location), + op: fn(&mut Assembler, Size, Location, Location, Location) -> Result<(), CodegenError>, sz: Size, src1: Location, src2: Location, dst: Location, allow_imm: ImmType, - ) { + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let src1 = self.location_to_neon(sz, src1, &mut temps, ImmType::None, true); - let src2 = self.location_to_neon(sz, src2, &mut temps, allow_imm, true); - let dest = self.location_to_neon(sz, dst, &mut temps, ImmType::None, false); - op(&mut self.assembler, sz, src1, src2, dest); + let src1 = self.location_to_neon(sz, src1, &mut temps, ImmType::None, true)?; + let src2 = self.location_to_neon(sz, src2, &mut temps, allow_imm, true)?; + let dest = self.location_to_neon(sz, dst, &mut temps, ImmType::None, false)?; + op(&mut self.assembler, sz, src1, src2, dest)?; if dst != dest { - self.move_location(sz, dest, dst); + self.move_location(sz, dest, dst)?; } for r in temps { self.release_simd(r); } + Ok(()) } - fn emit_relaxed_ldr64(&mut self, sz: Size, dst: Location, src: Location) { + fn emit_relaxed_ldr64( + &mut self, + sz: Size, + dst: Location, + src: Location, + ) -> 
Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, false, None); + let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, false, None)?; match src { Location::Memory(addr, offset) => { if self.compatible_imm(offset as i64, ImmType::OffsetDWord) { - self.assembler.emit_ldr(Size::S64, dest, src); + self.assembler.emit_ldr(Size::S64, dest, src)?; } else if self.compatible_imm(offset as i64, ImmType::UnscaledOffset) { - self.assembler.emit_ldur(Size::S64, dest, addr, offset); + self.assembler.emit_ldur(Size::S64, dest, addr, offset)?; } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64); + .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64)?; self.assembler.emit_ldr( Size::S64, dest, Location::Memory2(addr, tmp, Multiplier::One, 0), - ); + )?; temps.push(tmp); } } - _ => unreachable!(), + _ => codegen_error!("singplass emit_relaxed_ldr64 unreachable"), } if dst != dest { - self.move_location(sz, dest, dst); + self.move_location(sz, dest, dst)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn emit_relaxed_ldr32(&mut self, sz: Size, dst: Location, src: Location) { + fn emit_relaxed_ldr32( + &mut self, + sz: Size, + dst: Location, + src: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, false, None); + let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, false, None)?; match src { Location::Memory(addr, offset) => { if self.compatible_imm(offset as i64, ImmType::OffsetWord) { - self.assembler.emit_ldr(Size::S32, dest, src); + self.assembler.emit_ldr(Size::S32, dest, src)?; } else if self.compatible_imm(offset as i64, ImmType::UnscaledOffset) { - 
self.assembler.emit_ldur(Size::S32, dest, addr, offset); + self.assembler.emit_ldur(Size::S32, dest, addr, offset)?; } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64); + .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64)?; self.assembler.emit_ldr( Size::S32, dest, Location::Memory2(addr, tmp, Multiplier::One, 0), - ); + )?; temps.push(tmp); } } - _ => unreachable!(), + _ => codegen_error!("singlepass emit_relaxed_ldr32 unreachable"), } if dst != dest { - self.move_location(sz, dest, dst); + self.move_location(sz, dest, dst)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn emit_relaxed_ldr32s(&mut self, sz: Size, dst: Location, src: Location) { + fn emit_relaxed_ldr32s( + &mut self, + sz: Size, + dst: Location, + src: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, false, None); + let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, false, None)?; match src { Location::Memory(addr, offset) => { if self.compatible_imm(offset as i64, ImmType::OffsetWord) { - self.assembler.emit_ldrsw(Size::S64, dest, src); + self.assembler.emit_ldrsw(Size::S64, dest, src)?; } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64); + .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64)?; self.assembler.emit_ldrsw( Size::S64, dest, Location::Memory2(addr, tmp, Multiplier::One, 0), - ); + )?; temps.push(tmp); } } - _ => unreachable!(), + _ => codegen_error!("singplepass emit_relaxed_ldr32s unreachable"), } if dst != dest { - self.move_location(sz, dest, dst); + 
self.move_location(sz, dest, dst)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn emit_relaxed_ldr16(&mut self, sz: Size, dst: Location, src: Location) { + fn emit_relaxed_ldr16( + &mut self, + sz: Size, + dst: Location, + src: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, false, None); + let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, false, None)?; match src { Location::Memory(addr, offset) => { if self.compatible_imm(offset as i64, ImmType::OffsetHWord) { - self.assembler.emit_ldrh(Size::S32, dest, src); + self.assembler.emit_ldrh(Size::S32, dest, src)?; } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64); + .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64)?; self.assembler.emit_ldrh( Size::S32, dest, Location::Memory2(addr, tmp, Multiplier::One, 0), - ); + )?; temps.push(tmp); } } - _ => unreachable!(), + _ => codegen_error!("singlpass emit_relaxed_ldr16 unreachable"), } if dst != dest { - self.move_location(sz, dest, dst); + self.move_location(sz, dest, dst)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn emit_relaxed_ldr16s(&mut self, sz: Size, dst: Location, src: Location) { + fn emit_relaxed_ldr16s( + &mut self, + sz: Size, + dst: Location, + src: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, false, None); + let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, false, None)?; match src { Location::Memory(addr, offset) => { if self.compatible_imm(offset as i64, ImmType::OffsetHWord) { - self.assembler.emit_ldrsh(sz, dest, src); + self.assembler.emit_ldrsh(sz, dest, src)?; } else { - let tmp = 
self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64); + .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64)?; self.assembler.emit_ldrsh( sz, dest, Location::Memory2(addr, tmp, Multiplier::One, 0), - ); + )?; temps.push(tmp); } } - _ => unreachable!(), + _ => codegen_error!("singlepass emit_relaxed_ldr16s unreachable"), } if dst != dest { - self.move_location(sz, dest, dst); + self.move_location(sz, dest, dst)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn emit_relaxed_ldr8(&mut self, sz: Size, dst: Location, src: Location) { + fn emit_relaxed_ldr8( + &mut self, + sz: Size, + dst: Location, + src: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, false, None); + let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, false, None)?; match src { Location::Memory(addr, offset) => { if self.compatible_imm(offset as i64, ImmType::OffsetByte) { - self.assembler.emit_ldrb(Size::S32, dest, src); + self.assembler.emit_ldrb(Size::S32, dest, src)?; } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64); + .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64)?; self.assembler.emit_ldrb( Size::S32, dest, Location::Memory2(addr, tmp, Multiplier::One, 0), - ); + )?; temps.push(tmp); } } - _ => unreachable!(), + _ => codegen_error!("singplepass emit_relaxed_ldr8 unreachable"), } if dst != dest { - self.move_location(sz, dest, dst); + self.move_location(sz, dest, dst)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn emit_relaxed_ldr8s(&mut self, sz: Size, dst: Location, src: 
Location) { + fn emit_relaxed_ldr8s( + &mut self, + sz: Size, + dst: Location, + src: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, false, None); + let dest = self.location_to_reg(sz, dst, &mut temps, ImmType::None, false, None)?; match src { Location::Memory(addr, offset) => { if self.compatible_imm(offset as i64, ImmType::OffsetByte) { - self.assembler.emit_ldrsb(sz, dest, src); + self.assembler.emit_ldrsb(sz, dest, src)?; } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64); + .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64)?; self.assembler.emit_ldrsb( sz, dest, Location::Memory2(addr, tmp, Multiplier::One, 0), - ); + )?; temps.push(tmp); } } - _ => unreachable!(), + _ => codegen_error!("singlepass emit_relaxed_ldr8s unreachable"), } if dst != dest { - self.move_location(sz, dest, dst); + self.move_location(sz, dest, dst)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn emit_relaxed_str64(&mut self, dst: Location, src: Location) { + fn emit_relaxed_str64(&mut self, dst: Location, src: Location) -> Result<(), CodegenError> { let mut temps = vec![]; - let dst = self.location_to_reg(Size::S64, dst, &mut temps, ImmType::NoneXzr, true, None); + let dst = self.location_to_reg(Size::S64, dst, &mut temps, ImmType::NoneXzr, true, None)?; match src { Location::Memory(addr, offset) => { if self.compatible_imm(offset as i64, ImmType::OffsetDWord) { - self.assembler.emit_str(Size::S64, dst, src); + self.assembler.emit_str(Size::S64, dst, src)?; } else if self.compatible_imm(offset as i64, ImmType::UnscaledOffset) { - self.assembler.emit_stur(Size::S64, dst, addr, offset); + self.assembler.emit_stur(Size::S64, dst, addr, offset)?; } else { - let tmp = 
self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64); + .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64)?; self.assembler.emit_str( Size::S64, dst, Location::Memory2(addr, tmp, Multiplier::One, 0), - ); + )?; temps.push(tmp); } } - _ => panic!("singlepass can't emit str64 {:?} {:?}", dst, src), + _ => codegen_error!("singlepass can't emit str64 {:?} {:?}", dst, src), } for r in temps { self.release_gpr(r); } + Ok(()) } - fn emit_relaxed_str32(&mut self, dst: Location, src: Location) { + fn emit_relaxed_str32(&mut self, dst: Location, src: Location) -> Result<(), CodegenError> { let mut temps = vec![]; - let dst = self.location_to_reg(Size::S64, dst, &mut temps, ImmType::NoneXzr, true, None); + let dst = self.location_to_reg(Size::S64, dst, &mut temps, ImmType::NoneXzr, true, None)?; match src { Location::Memory(addr, offset) => { if self.compatible_imm(offset as i64, ImmType::OffsetWord) { - self.assembler.emit_str(Size::S32, dst, src); + self.assembler.emit_str(Size::S32, dst, src)?; } else if self.compatible_imm(offset as i64, ImmType::UnscaledOffset) { - self.assembler.emit_stur(Size::S32, dst, addr, offset); + self.assembler.emit_stur(Size::S32, dst, addr, offset)?; } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64); + .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64)?; self.assembler.emit_str( Size::S32, dst, Location::Memory2(addr, tmp, Multiplier::One, 0), - ); + )?; temps.push(tmp); } } - _ => unreachable!(), + _ => codegen_error!("singplepass emit_relaxed_str32 unreachable"), } for r in temps { self.release_gpr(r); } + Ok(()) } - fn emit_relaxed_str16(&mut self, 
dst: Location, src: Location) { + fn emit_relaxed_str16(&mut self, dst: Location, src: Location) -> Result<(), CodegenError> { let mut temps = vec![]; - let dst = self.location_to_reg(Size::S64, dst, &mut temps, ImmType::NoneXzr, true, None); + let dst = self.location_to_reg(Size::S64, dst, &mut temps, ImmType::NoneXzr, true, None)?; match src { Location::Memory(addr, offset) => { if self.compatible_imm(offset as i64, ImmType::OffsetHWord) { - self.assembler.emit_strh(Size::S32, dst, src); + self.assembler.emit_strh(Size::S32, dst, src)?; } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64); + .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64)?; self.assembler.emit_strh( Size::S32, dst, Location::Memory2(addr, tmp, Multiplier::One, 0), - ); + )?; temps.push(tmp); } } - _ => unreachable!(), + _ => codegen_error!("singlepass emit_relaxed_str16 unreachable"), } for r in temps { self.release_gpr(r); } + Ok(()) } - fn emit_relaxed_str8(&mut self, dst: Location, src: Location) { + fn emit_relaxed_str8(&mut self, dst: Location, src: Location) -> Result<(), CodegenError> { let mut temps = vec![]; - let dst = self.location_to_reg(Size::S64, dst, &mut temps, ImmType::NoneXzr, true, None); + let dst = self.location_to_reg(Size::S64, dst, &mut temps, ImmType::NoneXzr, true, None)?; match src { Location::Memory(addr, offset) => { if self.compatible_imm(offset as i64, ImmType::OffsetByte) { self.assembler - .emit_strb(Size::S32, dst, Location::Memory(addr, offset)); + .emit_strb(Size::S32, dst, Location::Memory(addr, offset))?; } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp), (offset as i64) as 
u64); + .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64)?; self.assembler.emit_strb( Size::S32, dst, Location::Memory2(addr, tmp, Multiplier::One, 0), - ); + )?; temps.push(tmp); } } - _ => unreachable!(), + _ => codegen_error!("singlepass emit_relaxed_str8 unreachable"), } for r in temps { self.release_gpr(r); } + Ok(()) } /// I64 comparison with. fn emit_cmpop_i64_dynamic_b( @@ -801,23 +905,26 @@ impl MachineARM64 { loc_a: Location, loc_b: Location, ret: Location, - ) { + ) -> Result<(), CodegenError> { match ret { Location::GPR(_) => { - self.emit_relaxed_cmp(Size::S64, loc_b, loc_a); - self.assembler.emit_cset(Size::S32, ret, c); + self.emit_relaxed_cmp(Size::S64, loc_b, loc_a)?; + self.assembler.emit_cset(Size::S32, ret, c)?; } Location::Memory(_, _) => { - let tmp = self.acquire_temp_gpr().unwrap(); - self.emit_relaxed_cmp(Size::S64, loc_b, loc_a); - self.assembler.emit_cset(Size::S32, Location::GPR(tmp), c); - self.move_location(Size::S32, Location::GPR(tmp), ret); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.emit_relaxed_cmp(Size::S64, loc_b, loc_a)?; + self.assembler.emit_cset(Size::S32, Location::GPR(tmp), c)?; + self.move_location(Size::S32, Location::GPR(tmp), ret)?; self.release_gpr(tmp); } _ => { - unreachable!(); + codegen_error!("singlepass emit_compop_i64_dynamic_b unreachable"); } } + Ok(()) } /// I32 comparison with. 
fn emit_cmpop_i32_dynamic_b( @@ -826,27 +933,30 @@ impl MachineARM64 { loc_a: Location, loc_b: Location, ret: Location, - ) { + ) -> Result<(), CodegenError> { match ret { Location::GPR(_) => { - self.emit_relaxed_cmp(Size::S32, loc_b, loc_a); - self.assembler.emit_cset(Size::S32, ret, c); + self.emit_relaxed_cmp(Size::S32, loc_b, loc_a)?; + self.assembler.emit_cset(Size::S32, ret, c)?; } Location::Memory(_, _) => { - let tmp = self.acquire_temp_gpr().unwrap(); - self.emit_relaxed_cmp(Size::S32, loc_b, loc_a); - self.assembler.emit_cset(Size::S32, Location::GPR(tmp), c); - self.move_location(Size::S32, Location::GPR(tmp), ret); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.emit_relaxed_cmp(Size::S32, loc_b, loc_a)?; + self.assembler.emit_cset(Size::S32, Location::GPR(tmp), c)?; + self.move_location(Size::S32, Location::GPR(tmp), ret)?; self.release_gpr(tmp); } _ => { - unreachable!(); + codegen_error!("singlepass emit_cmpop_i32_dynamic_b unreachable"); } } + Ok(()) } #[allow(clippy::too_many_arguments)] - fn memory_op( + fn memory_op Result<(), CodegenError>>( &mut self, addr: Location, memarg: &MemoryImmediate, @@ -857,8 +967,10 @@ impl MachineARM64 { offset: i32, heap_access_oob: Label, cb: F, - ) { - let tmp_addr = self.acquire_temp_gpr().unwrap(); + ) -> Result<(), CodegenError> { + let tmp_addr = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; // Reusing `tmp_addr` for temporary indirection here, since it's not used before the last reference to `{base,bound}_loc`. 
let (base_loc, bound_loc) = if imported_memories { @@ -869,7 +981,7 @@ impl MachineARM64 { Location::Memory(self.get_vmctx_reg(), offset), Location::GPR(tmp_addr), true, - ); + )?; (Location::Memory(tmp_addr, 0), Location::Memory(tmp_addr, 8)) } else { ( @@ -878,15 +990,19 @@ impl MachineARM64 { ) }; - let tmp_base = self.acquire_temp_gpr().unwrap(); - let tmp_bound = self.acquire_temp_gpr().unwrap(); + let tmp_base = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_bound = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; // Load base into temporary register. - self.emit_relaxed_ldr64(Size::S64, Location::GPR(tmp_base), base_loc); + self.emit_relaxed_ldr64(Size::S64, Location::GPR(tmp_base), base_loc)?; // Load bound into temporary register, if needed. if need_check { - self.emit_relaxed_ldr64(Size::S64, Location::GPR(tmp_bound), bound_loc); + self.emit_relaxed_ldr64(Size::S64, Location::GPR(tmp_bound), bound_loc)?; // Wasm -> Effective. 
// Assuming we never underflow - should always be true on Linux/macOS and Windows >=8, @@ -896,24 +1012,26 @@ impl MachineARM64 { Location::GPR(tmp_bound), Location::GPR(tmp_base), Location::GPR(tmp_bound), - ); + )?; if self.compatible_imm(value_size as _, ImmType::Bits12) { self.assembler.emit_sub( Size::S64, Location::GPR(tmp_bound), Location::GPR(tmp_bound), Location::Imm32(value_size as _), - ); + )?; } else { - let tmp2 = self.acquire_temp_gpr().unwrap(); + let tmp2 = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp2), value_size as u64); + .emit_mov_imm(Location::GPR(tmp2), value_size as u64)?; self.assembler.emit_sub( Size::S64, Location::GPR(tmp_bound), Location::GPR(tmp2), Location::GPR(tmp_bound), - ); + )?; self.release_gpr(tmp2); } } @@ -921,7 +1039,7 @@ impl MachineARM64 { // Load effective address. // `base_loc` and `bound_loc` becomes INVALID after this line, because `tmp_addr` // might be reused. - self.move_location(Size::S32, addr, Location::GPR(tmp_addr)); + self.move_location(Size::S32, addr, Location::GPR(tmp_addr))?; // Add offset to memory address. if memarg.offset != 0 { @@ -931,23 +1049,25 @@ impl MachineARM64 { Location::Imm32(memarg.offset as u32), Location::GPR(tmp_addr), Location::GPR(tmp_addr), - ); + )?; } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp), memarg.offset as _); + .emit_mov_imm(Location::GPR(tmp), memarg.offset as _)?; self.assembler.emit_adds( Size::S32, Location::GPR(tmp_addr), Location::GPR(tmp), Location::GPR(tmp_addr), - ); + )?; self.release_gpr(tmp); } // Trap if offset calculation overflowed. 
self.assembler - .emit_bcond_label_far(Condition::Cs, heap_access_oob); + .emit_bcond_label_far(Condition::Cs, heap_access_oob)?; } // Wasm linear memory -> real memory @@ -956,16 +1076,19 @@ impl MachineARM64 { Location::GPR(tmp_base), Location::GPR(tmp_addr), Location::GPR(tmp_addr), - ); + )?; if need_check { // Trap if the end address of the requested area is above that of the linear memory. - self.assembler - .emit_cmp(Size::S64, Location::GPR(tmp_bound), Location::GPR(tmp_addr)); + self.assembler.emit_cmp( + Size::S64, + Location::GPR(tmp_bound), + Location::GPR(tmp_addr), + )?; // `tmp_bound` is inclusive. So trap only if `tmp_addr > tmp_bound`. self.assembler - .emit_bcond_label_far(Condition::Hi, heap_access_oob); + .emit_bcond_label_far(Condition::Hi, heap_access_oob)?; } self.release_gpr(tmp_bound); @@ -977,16 +1100,17 @@ impl MachineARM64 { Size::S64, Location::Imm32((align - 1).into()), Location::GPR(tmp_addr), - ); + )?; self.assembler - .emit_bcond_label_far(Condition::Ne, heap_access_oob); + .emit_bcond_label_far(Condition::Ne, heap_access_oob)?; } let begin = self.assembler.get_offset().0; - cb(self, tmp_addr); + cb(self, tmp_addr)?; let end = self.assembler.get_offset().0; self.mark_address_range_with_trap_code(TrapCode::HeapAccessOutOfBounds, begin, end); self.release_gpr(tmp_addr); + Ok(()) } /*fn emit_compare_and_swap( @@ -1026,7 +1150,7 @@ impl MachineARM64 { true } - fn emit_push(&mut self, sz: Size, src: Location) { + fn emit_push(&mut self, sz: Size, src: Location) -> Result<(), CodegenError> { match (sz, src) { (Size::S64, Location::GPR(_)) | (Size::S64, Location::SIMD(_)) => { let offset = if self.pushed { @@ -1037,15 +1161,16 @@ impl MachineARM64 { Location::GPR(GPR::XzrSp), Location::Imm8(16), Location::GPR(GPR::XzrSp), - ); + )?; 8 }; - self.assembler.emit_stur(Size::S64, src, GPR::XzrSp, offset); + self.assembler + .emit_stur(Size::S64, src, GPR::XzrSp, offset)?; self.pushed = !self.pushed; } (Size::S64, _) => { let mut temps = 
vec![]; - let src = self.location_to_reg(sz, src, &mut temps, ImmType::None, true, None); + let src = self.location_to_reg(sz, src, &mut temps, ImmType::None, true, None)?; let offset = if self.pushed { 0 } else { @@ -1054,84 +1179,106 @@ impl MachineARM64 { Location::GPR(GPR::XzrSp), Location::Imm8(16), Location::GPR(GPR::XzrSp), - ); + )?; 8 }; - self.assembler.emit_stur(Size::S64, src, GPR::XzrSp, offset); + self.assembler + .emit_stur(Size::S64, src, GPR::XzrSp, offset)?; self.pushed = !self.pushed; for r in temps { self.release_gpr(r); } } - _ => panic!("singlepass can't emit PUSH {:?} {:?}", sz, src), + _ => codegen_error!("singlepass can't emit PUSH {:?} {:?}", sz, src), } + Ok(()) } - fn emit_double_push(&mut self, sz: Size, src1: Location, src2: Location) { + fn emit_double_push( + &mut self, + sz: Size, + src1: Location, + src2: Location, + ) -> Result<(), CodegenError> { if !self.pushed { match (sz, src1, src2) { (Size::S64, Location::GPR(_), Location::GPR(_)) => { self.assembler - .emit_stpdb(Size::S64, src1, src2, GPR::XzrSp, 16); + .emit_stpdb(Size::S64, src1, src2, GPR::XzrSp, 16)?; } _ => { - self.emit_push(sz, src1); - self.emit_push(sz, src2); + self.emit_push(sz, src1)?; + self.emit_push(sz, src2)?; } } } else { - self.emit_push(sz, src1); - self.emit_push(sz, src2); + self.emit_push(sz, src1)?; + self.emit_push(sz, src2)?; } + Ok(()) } - fn emit_pop(&mut self, sz: Size, dst: Location) { + fn emit_pop(&mut self, sz: Size, dst: Location) -> Result<(), CodegenError> { match (sz, dst) { (Size::S64, Location::GPR(_)) | (Size::S64, Location::SIMD(_)) => { let offset = if self.pushed { 8 } else { 0 }; - self.assembler.emit_ldur(Size::S64, dst, GPR::XzrSp, offset); + self.assembler + .emit_ldur(Size::S64, dst, GPR::XzrSp, offset)?; if self.pushed { self.assembler.emit_add( Size::S64, Location::GPR(GPR::XzrSp), Location::Imm8(16), Location::GPR(GPR::XzrSp), - ); + )?; } self.pushed = !self.pushed; } - _ => panic!("singlepass can't emit PUSH {:?} {:?}", 
sz, dst), + _ => codegen_error!("singlepass can't emit PUSH {:?} {:?}", sz, dst), } + Ok(()) } - fn emit_double_pop(&mut self, sz: Size, dst1: Location, dst2: Location) { + fn emit_double_pop( + &mut self, + sz: Size, + dst1: Location, + dst2: Location, + ) -> Result<(), CodegenError> { if !self.pushed { match (sz, dst1, dst2) { (Size::S64, Location::GPR(_), Location::GPR(_)) => { self.assembler - .emit_ldpia(Size::S64, dst1, dst2, GPR::XzrSp, 16); + .emit_ldpia(Size::S64, dst1, dst2, GPR::XzrSp, 16)?; } _ => { - self.emit_pop(sz, dst2); - self.emit_pop(sz, dst1); + self.emit_pop(sz, dst2)?; + self.emit_pop(sz, dst1)?; } } } else { - self.emit_pop(sz, dst2); - self.emit_pop(sz, dst1); + self.emit_pop(sz, dst2)?; + self.emit_pop(sz, dst1)?; } + Ok(()) } - fn set_default_nan(&mut self, temps: &mut Vec) -> GPR { + fn set_default_nan(&mut self, temps: &mut Vec) -> Result { // temporarly set FPCR to DefaultNan - let old_fpcr = self.acquire_temp_gpr().unwrap(); + let old_fpcr = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(old_fpcr); - self.assembler.emit_read_fpcr(old_fpcr); - let new_fpcr = self.acquire_temp_gpr().unwrap(); + self.assembler.emit_read_fpcr(old_fpcr)?; + let new_fpcr = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(new_fpcr); - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(tmp); self.assembler - .emit_mov(Size::S32, Location::Imm32(1), Location::GPR(tmp)); + .emit_mov(Size::S32, Location::Imm32(1), Location::GPR(tmp))?; self.assembler - .emit_mov(Size::S64, Location::GPR(old_fpcr), Location::GPR(new_fpcr)); + .emit_mov(Size::S64, Location::GPR(old_fpcr), Location::GPR(new_fpcr))?; // DN is bit 25 of FPCR self.assembler.emit_bfi( Size::S64, @@ -1139,43 +1286,52 @@ impl 
MachineARM64 { 25, 1, Location::GPR(new_fpcr), - ); - self.assembler.emit_write_fpcr(new_fpcr); - old_fpcr + )?; + self.assembler.emit_write_fpcr(new_fpcr)?; + Ok(old_fpcr) } - fn set_trap_enabled(&mut self, temps: &mut Vec) -> GPR { + fn set_trap_enabled(&mut self, temps: &mut Vec) -> Result { // temporarly set FPCR to DefaultNan - let old_fpcr = self.acquire_temp_gpr().unwrap(); + let old_fpcr = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(old_fpcr); - self.assembler.emit_read_fpcr(old_fpcr); - let new_fpcr = self.acquire_temp_gpr().unwrap(); + self.assembler.emit_read_fpcr(old_fpcr)?; + let new_fpcr = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(new_fpcr); self.assembler - .emit_mov(Size::S64, Location::GPR(old_fpcr), Location::GPR(new_fpcr)); + .emit_mov(Size::S64, Location::GPR(old_fpcr), Location::GPR(new_fpcr))?; // IOE is bit 8 of FPCR self.assembler - .emit_bfc(Size::S64, 8, 1, Location::GPR(new_fpcr)); - self.assembler.emit_write_fpcr(new_fpcr); - old_fpcr + .emit_bfc(Size::S64, 8, 1, Location::GPR(new_fpcr))?; + self.assembler.emit_write_fpcr(new_fpcr)?; + Ok(old_fpcr) } - fn restore_fpcr(&mut self, old_fpcr: GPR) { - self.assembler.emit_write_fpcr(old_fpcr); + fn restore_fpcr(&mut self, old_fpcr: GPR) -> Result<(), CodegenError> { + self.assembler.emit_write_fpcr(old_fpcr) } - fn reset_exception_fpsr(&mut self) { + fn reset_exception_fpsr(&mut self) -> Result<(), CodegenError> { // reset exception count in FPSR - let fpsr = self.acquire_temp_gpr().unwrap(); - self.assembler.emit_read_fpsr(fpsr); + let fpsr = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.assembler.emit_read_fpsr(fpsr)?; // IOC is 0 self.assembler - .emit_bfc(Size::S64, 0, 1, Location::GPR(fpsr)); - self.assembler.emit_write_fpsr(fpsr); + .emit_bfc(Size::S64, 
0, 1, Location::GPR(fpsr))?; + self.assembler.emit_write_fpsr(fpsr)?; self.release_gpr(fpsr); + Ok(()) } - fn read_fpsr(&mut self) -> GPR { - let fpsr = self.acquire_temp_gpr().unwrap(); - self.assembler.emit_read_fpsr(fpsr); - fpsr + fn read_fpsr(&mut self) -> Result { + let fpsr = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.assembler.emit_read_fpsr(fpsr)?; + Ok(fpsr) } fn trap_float_convertion_errors( @@ -1184,29 +1340,30 @@ impl MachineARM64 { sz: Size, f: Location, temps: &mut Vec, - ) { + ) -> Result<(), CodegenError> { let trap_badconv = self.assembler.get_label(); let end = self.assembler.get_label(); - let fpsr = self.read_fpsr(); + let fpsr = self.read_fpsr()?; temps.push(fpsr); // no trap, than all good self.assembler - .emit_tbz_label(Size::S32, Location::GPR(fpsr), 0, end); + .emit_tbz_label(Size::S32, Location::GPR(fpsr), 0, end)?; // now need to check if it's overflow or NaN self.assembler - .emit_bfc(Size::S64, 0, 4, Location::GPR(fpsr)); - self.restore_fpcr(old_fpcr); - self.assembler.emit_fcmp(sz, f, f); - self.assembler.emit_bcond_label(Condition::Vs, trap_badconv); + .emit_bfc(Size::S64, 0, 4, Location::GPR(fpsr))?; + self.restore_fpcr(old_fpcr)?; + self.assembler.emit_fcmp(sz, f, f)?; + self.assembler + .emit_bcond_label(Condition::Vs, trap_badconv)?; // fallthru: trap_overflow - self.emit_illegal_op_internal(TrapCode::IntegerOverflow); + self.emit_illegal_op_internal(TrapCode::IntegerOverflow)?; - self.emit_label(trap_badconv); - self.emit_illegal_op_internal(TrapCode::BadConversionToInteger); + self.emit_label(trap_badconv)?; + self.emit_illegal_op_internal(TrapCode::BadConversionToInteger)?; - self.emit_label(end); - self.restore_fpcr(old_fpcr); + self.emit_label(end)?; + self.restore_fpcr(old_fpcr) } fn used_gprs_contains(&self, r: &GPR) -> bool { @@ -1234,8 +1391,8 @@ impl MachineARM64 { fn emit_unwind_op(&mut self, op: UnwindOps) { 
self.unwind_ops.push((self.get_offset().0, op)); } - fn emit_illegal_op_internal(&mut self, trap: TrapCode) { - self.assembler.emit_udf(0xc0 | (trap as u8) as u16); + fn emit_illegal_op_internal(&mut self, trap: TrapCode) -> Result<(), CodegenError> { + self.assembler.emit_udf(0xc0 | (trap as u8) as u16) } } @@ -1315,22 +1472,23 @@ impl Machine for MachineARM64 { self.used_gprs_insert(gpr); } - fn push_used_gpr(&mut self, used_gprs: &[GPR]) -> usize { + fn push_used_gpr(&mut self, used_gprs: &[GPR]) -> Result { if used_gprs.len() % 2 == 1 { - self.emit_push(Size::S64, Location::GPR(GPR::XzrSp)); + self.emit_push(Size::S64, Location::GPR(GPR::XzrSp))?; } for r in used_gprs.iter() { - self.emit_push(Size::S64, Location::GPR(*r)); + self.emit_push(Size::S64, Location::GPR(*r))?; } - ((used_gprs.len() + 1) / 2) * 16 + Ok(((used_gprs.len() + 1) / 2) * 16) } - fn pop_used_gpr(&mut self, used_gprs: &[GPR]) { + fn pop_used_gpr(&mut self, used_gprs: &[GPR]) -> Result<(), CodegenError> { for r in used_gprs.iter().rev() { - self.emit_pop(Size::S64, Location::GPR(*r)); + self.emit_pop(Size::S64, Location::GPR(*r))?; } if used_gprs.len() % 2 == 1 { - self.emit_pop(Size::S64, Location::GPR(GPR::XzrSp)); + self.emit_pop(Size::S64, Location::GPR(GPR::XzrSp))?; } + Ok(()) } // Picks an unused NEON register. 
@@ -1375,30 +1533,30 @@ impl Machine for MachineARM64 { assert!(self.used_simd_remove(&simd)); } - fn push_used_simd(&mut self, used_neons: &[NEON]) -> usize { + fn push_used_simd(&mut self, used_neons: &[NEON]) -> Result { let stack_adjust = if used_neons.len() & 1 == 1 { (used_neons.len() * 8) as u32 + 8 } else { (used_neons.len() * 8) as u32 }; - self.adjust_stack(stack_adjust); + self.adjust_stack(stack_adjust)?; for (i, r) in used_neons.iter().enumerate() { self.assembler.emit_str( Size::S64, Location::SIMD(*r), Location::Memory(GPR::XzrSp, (i * 8) as i32), - ); + )?; } - stack_adjust as usize + Ok(stack_adjust as usize) } - fn pop_used_simd(&mut self, used_neons: &[NEON]) { + fn pop_used_simd(&mut self, used_neons: &[NEON]) -> Result<(), CodegenError> { for (i, r) in used_neons.iter().enumerate() { self.assembler.emit_ldr( Size::S64, Location::SIMD(*r), Location::Memory(GPR::XzrSp, (i * 8) as i32), - ); + )?; } let stack_adjust = if used_neons.len() & 1 == 1 { (used_neons.len() * 8) as u32 + 8 @@ -1410,7 +1568,7 @@ impl Machine for MachineARM64 { Location::GPR(GPR::XzrSp), Location::Imm32(stack_adjust as _), Location::GPR(GPR::XzrSp), - ); + ) } /// Set the source location of the Wasm to the given offset. 
@@ -1488,13 +1646,13 @@ impl Machine for MachineARM64 { } // Adjust stack for locals - fn adjust_stack(&mut self, delta_stack_offset: u32) { + fn adjust_stack(&mut self, delta_stack_offset: u32) -> Result<(), CodegenError> { let delta = if self.compatible_imm(delta_stack_offset as _, ImmType::Bits12) { Location::Imm32(delta_stack_offset as _) } else { let tmp = GPR::X17; self.assembler - .emit_mov_imm(Location::GPR(tmp), delta_stack_offset as u64); + .emit_mov_imm(Location::GPR(tmp), delta_stack_offset as u64)?; Location::GPR(tmp) }; self.assembler.emit_sub( @@ -1502,16 +1660,16 @@ impl Machine for MachineARM64 { Location::GPR(GPR::XzrSp), delta, Location::GPR(GPR::XzrSp), - ); + ) } // restore stack - fn restore_stack(&mut self, delta_stack_offset: u32) { + fn restore_stack(&mut self, delta_stack_offset: u32) -> Result<(), CodegenError> { let delta = if self.compatible_imm(delta_stack_offset as _, ImmType::Bits12) { Location::Imm32(delta_stack_offset as _) } else { let tmp = GPR::X17; self.assembler - .emit_mov_imm(Location::GPR(tmp), delta_stack_offset as u64); + .emit_mov_imm(Location::GPR(tmp), delta_stack_offset as u64)?; Location::GPR(tmp) }; self.assembler.emit_add( @@ -1519,9 +1677,9 @@ impl Machine for MachineARM64 { Location::GPR(GPR::XzrSp), delta, Location::GPR(GPR::XzrSp), - ); + ) } - fn pop_stack_locals(&mut self, delta_stack_offset: u32) { + fn pop_stack_locals(&mut self, delta_stack_offset: u32) -> Result<(), CodegenError> { let real_delta = if delta_stack_offset & 15 != 0 { delta_stack_offset + 8 } else { @@ -1532,7 +1690,7 @@ impl Machine for MachineARM64 { } else { let tmp = GPR::X17; self.assembler - .emit_mov_imm(Location::GPR(tmp), real_delta as u64); + .emit_mov_imm(Location::GPR(tmp), real_delta as u64)?; Location::GPR(tmp) }; self.assembler.emit_add( @@ -1540,26 +1698,31 @@ impl Machine for MachineARM64 { Location::GPR(GPR::XzrSp), delta, Location::GPR(GPR::XzrSp), - ); + ) } // push a value on the stack for a native call - fn 
move_location_for_native(&mut self, size: Size, loc: Location, dest: Location) { + fn move_location_for_native( + &mut self, + size: Size, + loc: Location, + dest: Location, + ) -> Result<(), CodegenError> { match loc { Location::Imm64(_) | Location::Imm32(_) | Location::Imm8(_) | Location::Memory(_, _) | Location::Memory2(_, _, _, _) => { - self.move_location(size, loc, Location::GPR(GPR::X17)); - self.move_location(size, Location::GPR(GPR::X17), dest); + self.move_location(size, loc, Location::GPR(GPR::X17))?; + self.move_location(size, Location::GPR(GPR::X17), dest) } _ => self.move_location(size, loc, dest), } } // Zero a location that is 32bits - fn zero_location(&mut self, size: Size, location: Location) { - self.move_location(size, Location::GPR(GPR::XzrSp), location); + fn zero_location(&mut self, size: Size, location: Location) -> Result<(), CodegenError> { + self.move_location(size, Location::GPR(GPR::XzrSp), location) } // GPR Reg used for local pointer on the stack @@ -1588,38 +1751,35 @@ impl Machine for MachineARM64 { } } // Move a local to the stack - fn move_local(&mut self, stack_offset: i32, location: Location) { + fn move_local(&mut self, stack_offset: i32, location: Location) -> Result<(), CodegenError> { if stack_offset < 256 { self.assembler - .emit_stur(Size::S64, location, GPR::X29, -stack_offset); + .emit_stur(Size::S64, location, GPR::X29, -stack_offset)?; } else { let tmp = GPR::X17; self.assembler - .emit_mov_imm(Location::GPR(tmp), (stack_offset as i64) as u64); + .emit_mov_imm(Location::GPR(tmp), (stack_offset as i64) as u64)?; self.assembler.emit_sub( Size::S64, Location::GPR(GPR::X29), Location::GPR(tmp), Location::GPR(tmp), - ); + )?; self.assembler - .emit_str(Size::S64, location, Location::GPR(tmp)); + .emit_str(Size::S64, location, Location::GPR(tmp))?; } match location { - Location::GPR(x) => { - self.emit_unwind_op(UnwindOps::SaveRegister { - reg: x.to_dwarf(), - bp_neg_offset: stack_offset, - }); - } - Location::SIMD(x) => { - 
self.emit_unwind_op(UnwindOps::SaveRegister { - reg: x.to_dwarf(), - bp_neg_offset: stack_offset, - }); - } + Location::GPR(x) => self.emit_unwind_op(UnwindOps::SaveRegister { + reg: x.to_dwarf(), + bp_neg_offset: stack_offset, + }), + Location::SIMD(x) => self.emit_unwind_op(UnwindOps::SaveRegister { + reg: x.to_dwarf(), + bp_neg_offset: stack_offset, + }), _ => (), } + Ok(()) } // List of register to save, depending on the CallingConvention @@ -1751,42 +1911,50 @@ impl Machine for MachineARM64 { } } // move a location to another - fn move_location(&mut self, size: Size, source: Location, dest: Location) { + fn move_location( + &mut self, + size: Size, + source: Location, + dest: Location, + ) -> Result<(), CodegenError> { match source { Location::GPR(_) | Location::SIMD(_) => match dest { Location::GPR(_) | Location::SIMD(_) => self.assembler.emit_mov(size, source, dest), Location::Memory(addr, offs) => { if self.offset_is_ok(size, offs) { - self.assembler.emit_str(size, source, dest); + self.assembler.emit_str(size, source, dest) } else if self.compatible_imm(offs as i64, ImmType::UnscaledOffset) { - self.assembler.emit_stur(size, source, addr, offs); + self.assembler.emit_stur(size, source, addr, offs) } else { let tmp = GPR::X17; if offs < 0 { self.assembler - .emit_mov_imm(Location::GPR(tmp), (-offs) as u64); + .emit_mov_imm(Location::GPR(tmp), (-offs) as u64)?; self.assembler.emit_sub( Size::S64, Location::GPR(addr), Location::GPR(tmp), Location::GPR(tmp), - ); + )?; } else { - self.assembler.emit_mov_imm(Location::GPR(tmp), offs as u64); + self.assembler + .emit_mov_imm(Location::GPR(tmp), offs as u64)?; self.assembler.emit_add( Size::S64, Location::GPR(addr), Location::GPR(tmp), Location::GPR(tmp), - ); + )?; } self.assembler - .emit_str(size, source, Location::Memory(tmp, 0)); + .emit_str(size, source, Location::Memory(tmp, 0)) } } - _ => panic!( + _ => codegen_error!( "singlepass can't emit move_location {:?} {:?} => {:?}", - size, source, dest + size, + 
source, + dest ), }, Location::Imm8(_) => match dest { @@ -1797,9 +1965,11 @@ impl Machine for MachineARM64 { Size::S16 => self.emit_relaxed_str16(source, dest), Size::S8 => self.emit_relaxed_str8(source, dest), }, - _ => panic!( + _ => codegen_error!( "singlepass can't emit move_location {:?} {:?} => {:?}", - size, source, dest + size, + source, + dest ), }, Location::Imm32(val) => match dest { @@ -1810,9 +1980,11 @@ impl Machine for MachineARM64 { Size::S16 => self.emit_relaxed_str16(source, dest), Size::S8 => self.emit_relaxed_str8(source, dest), }, - _ => panic!( + _ => codegen_error!( "singlepass can't emit move_location {:?} {:?} => {:?}", - size, source, dest + size, + source, + dest ), }, Location::Imm64(val) => match dest { @@ -1823,54 +1995,60 @@ impl Machine for MachineARM64 { Size::S16 => self.emit_relaxed_str16(source, dest), Size::S8 => self.emit_relaxed_str8(source, dest), }, - _ => panic!( + _ => codegen_error!( "singlepass can't emit move_location {:?} {:?} => {:?}", - size, source, dest + size, + source, + dest ), }, Location::Memory(addr, offs) => match dest { Location::GPR(_) | Location::SIMD(_) => { if self.offset_is_ok(size, offs) { - self.assembler.emit_ldr(size, dest, source); + self.assembler.emit_ldr(size, dest, source) } else if offs > -256 && offs < 256 { - self.assembler.emit_ldur(size, dest, addr, offs); + self.assembler.emit_ldur(size, dest, addr, offs) } else { let tmp = GPR::X17; if offs < 0 { self.assembler - .emit_mov_imm(Location::GPR(tmp), (-offs) as u64); + .emit_mov_imm(Location::GPR(tmp), (-offs) as u64)?; self.assembler.emit_sub( Size::S64, Location::GPR(addr), Location::GPR(tmp), Location::GPR(tmp), - ); + )?; } else { - self.assembler.emit_mov_imm(Location::GPR(tmp), offs as u64); + self.assembler + .emit_mov_imm(Location::GPR(tmp), offs as u64)?; self.assembler.emit_add( Size::S64, Location::GPR(addr), Location::GPR(tmp), Location::GPR(tmp), - ); + )?; } self.assembler - .emit_ldr(size, dest, Location::Memory(tmp, 0)); + 
.emit_ldr(size, dest, Location::Memory(tmp, 0)) } } _ => { let mut temps = vec![]; let src = - self.location_to_reg(size, source, &mut temps, ImmType::None, true, None); - self.move_location(size, src, dest); + self.location_to_reg(size, source, &mut temps, ImmType::None, true, None)?; + self.move_location(size, src, dest)?; for r in temps { self.release_gpr(r); } + Ok(()) } }, - _ => panic!( + _ => codegen_error!( "singlepass can't emit move_location {:?} {:?} => {:?}", - size, source, dest + size, + source, + dest ), } } @@ -1882,53 +2060,69 @@ impl Machine for MachineARM64 { source: Location, size_op: Size, dest: Location, - ) { + ) -> Result<(), CodegenError> { if size_op != Size::S64 { - unreachable!(); + codegen_error!("singlepass move_location_extend unreachable"); } let mut temps = vec![]; - let dst = self.location_to_reg(size_op, dest, &mut temps, ImmType::None, false, None); + let dst = self.location_to_reg(size_op, dest, &mut temps, ImmType::None, false, None)?; let src = match (size_val, signed, source) { (Size::S64, _, _) => source, (Size::S32, false, Location::GPR(_)) => { - self.assembler.emit_mov(size_val, source, dst); + self.assembler.emit_mov(size_val, source, dst)?; dst } (Size::S32, true, Location::GPR(_)) => { - self.assembler.emit_sxtw(size_val, source, dst); + self.assembler.emit_sxtw(size_val, source, dst)?; dst } (Size::S32, false, Location::Memory(_, _)) => { - self.emit_relaxed_ldr32(size_op, dst, source); + self.emit_relaxed_ldr32(size_op, dst, source)?; dst } (Size::S32, true, Location::Memory(_, _)) => { - self.emit_relaxed_ldr32s(size_op, dst, source); + self.emit_relaxed_ldr32s(size_op, dst, source)?; dst } - _ => panic!( + _ => codegen_error!( "singlepass can't emit move_location_extend {:?} {:?} {:?} => {:?} {:?}", - size_val, signed, source, size_op, dest + size_val, + signed, + source, + size_op, + dest ), }; if src != dst { - self.move_location(size_op, src, dst); + self.move_location(size_op, src, dst)?; } if dst != dest { - 
self.move_location(size_op, dst, dest); + self.move_location(size_op, dst, dest)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn load_address(&mut self, _size: Size, _reg: Location, _mem: Location) { - unimplemented!(); + fn load_address( + &mut self, + _size: Size, + _reg: Location, + _mem: Location, + ) -> Result<(), CodegenError> { + codegen_error!("singlepass load_address unimplemented"); } // Init the stack loc counter - fn init_stack_loc(&mut self, init_stack_loc_cnt: u64, last_stack_loc: Location) { + fn init_stack_loc( + &mut self, + init_stack_loc_cnt: u64, + last_stack_loc: Location, + ) -> Result<(), CodegenError> { let label = self.assembler.get_label(); let mut temps = vec![]; - let dest = self.acquire_temp_gpr().unwrap(); + let dest = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(dest); let cnt = self.location_to_reg( Size::S64, @@ -1937,10 +2131,10 @@ impl Machine for MachineARM64 { ImmType::None, true, None, - ); + )?; let dest = match last_stack_loc { - Location::GPR(_) => unreachable!(), - Location::SIMD(_) => unreachable!(), + Location::GPR(_) => codegen_error!("singlepass init_stack_loc unreachable"), + Location::SIMD(_) => codegen_error!("singlepass init_stack_loc unreachable"), Location::Memory(reg, offset) => { if offset < 0 { let offset = (-offset) as u32; @@ -1950,17 +2144,19 @@ impl Machine for MachineARM64 { Location::GPR(reg), Location::Imm32(offset), Location::GPR(dest), - ); + )?; } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64); + .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64)?; self.assembler.emit_sub( Size::S64, Location::GPR(reg), Location::GPR(tmp), Location::GPR(dest), - ); + )?; temps.push(tmp); } dest @@ -1972,36 +2168,39 @@ 
impl Machine for MachineARM64 { Location::GPR(reg), Location::Imm32(offset), Location::GPR(dest), - ); + )?; } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64); + .emit_mov_imm(Location::GPR(tmp), (offset as i64) as u64)?; self.assembler.emit_add( Size::S64, Location::GPR(reg), Location::GPR(tmp), Location::GPR(dest), - ); + )?; temps.push(tmp); } dest } } - _ => panic!("singlepass can't emit init_stack_loc {:?}", last_stack_loc), + _ => codegen_error!("singlepass can't emit init_stack_loc {:?}", last_stack_loc), }; - self.assembler.emit_label(label); + self.assembler.emit_label(label)?; self.assembler - .emit_stria(Size::S64, Location::GPR(GPR::XzrSp), dest, 8); + .emit_stria(Size::S64, Location::GPR(GPR::XzrSp), dest, 8)?; self.assembler - .emit_sub(Size::S64, cnt, Location::Imm8(1), cnt); - self.assembler.emit_cbnz_label(Size::S64, cnt, label); + .emit_sub(Size::S64, cnt, Location::Imm8(1), cnt)?; + self.assembler.emit_cbnz_label(Size::S64, cnt, label)?; for r in temps { self.release_gpr(r); } + Ok(()) } // Restore save_area - fn restore_saved_area(&mut self, saved_area_offset: i32) { + fn restore_saved_area(&mut self, saved_area_offset: i32) -> Result<(), CodegenError> { let real_delta = if saved_area_offset & 15 != 0 { self.pushed = true; saved_area_offset + 8 @@ -2015,23 +2214,26 @@ impl Machine for MachineARM64 { Location::GPR(GPR::X29), Location::Imm32(real_delta as _), Location::GPR(GPR::XzrSp), - ); + )?; } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp), real_delta as u64); + .emit_mov_imm(Location::GPR(tmp), real_delta as u64)?; self.assembler.emit_sub( Size::S64, 
Location::GPR(GPR::X29), Location::GPR(tmp), Location::GPR(GPR::XzrSp), - ); + )?; self.release_gpr(tmp); } + Ok(()) } // Pop a location - fn pop_location(&mut self, location: Location) { - self.emit_pop(Size::S64, location); + fn pop_location(&mut self, location: Location) -> Result<(), CodegenError> { + self.emit_pop(Size::S64, location) } // Create a new `MachineState` with default values. fn new_machine_state(&self) -> MachineState { @@ -2047,18 +2249,19 @@ impl Machine for MachineARM64 { self.assembler.get_offset() } - fn finalize_function(&mut self) { + fn finalize_function(&mut self) -> Result<(), CodegenError> { self.assembler.finalize_function(); + Ok(()) } - fn emit_function_prolog(&mut self) { - self.emit_double_push(Size::S64, Location::GPR(GPR::X29), Location::GPR(GPR::X30)); // save LR too + fn emit_function_prolog(&mut self) -> Result<(), CodegenError> { + self.emit_double_push(Size::S64, Location::GPR(GPR::X29), Location::GPR(GPR::X30))?; // save LR too self.emit_unwind_op(UnwindOps::Push2Regs { reg1: GPR::X29.to_dwarf(), reg2: GPR::X30.to_dwarf(), up_to_sp: 16, }); - self.emit_double_push(Size::S64, Location::GPR(GPR::X27), Location::GPR(GPR::X28)); + self.emit_double_push(Size::S64, Location::GPR(GPR::X27), Location::GPR(GPR::X28))?; self.emit_unwind_op(UnwindOps::Push2Regs { reg1: GPR::X27.to_dwarf(), reg2: GPR::X28.to_dwarf(), @@ -2070,24 +2273,31 @@ impl Machine for MachineARM64 { Location::GPR(GPR::XzrSp), Location::Imm8(0), Location::GPR(GPR::X29), - ); + )?; self.emit_unwind_op(UnwindOps::DefineNewFrame); + Ok(()) } - fn emit_function_epilog(&mut self) { + fn emit_function_epilog(&mut self) -> Result<(), CodegenError> { // cannot use mov, because XSP is XZR there. 
Need to use ADD with #0 self.assembler.emit_add( Size::S64, Location::GPR(GPR::X29), Location::Imm8(0), Location::GPR(GPR::XzrSp), - ); + )?; self.pushed = false; // SP is restored, consider it aligned - self.emit_double_pop(Size::S64, Location::GPR(GPR::X27), Location::GPR(GPR::X28)); - self.emit_double_pop(Size::S64, Location::GPR(GPR::X29), Location::GPR(GPR::X30)); + self.emit_double_pop(Size::S64, Location::GPR(GPR::X27), Location::GPR(GPR::X28))?; + self.emit_double_pop(Size::S64, Location::GPR(GPR::X29), Location::GPR(GPR::X30))?; + Ok(()) } - fn emit_function_return_value(&mut self, ty: WpType, canonicalize: bool, loc: Location) { + fn emit_function_return_value( + &mut self, + ty: WpType, + canonicalize: bool, + loc: Location, + ) -> Result<(), CodegenError> { if canonicalize { self.canonicalize_nan( match ty { @@ -2097,79 +2307,89 @@ impl Machine for MachineARM64 { }, loc, Location::GPR(GPR::X0), - ); + )?; } else { - self.emit_relaxed_mov(Size::S64, loc, Location::GPR(GPR::X0)); + self.emit_relaxed_mov(Size::S64, loc, Location::GPR(GPR::X0))?; } + Ok(()) } - fn emit_function_return_float(&mut self) { + fn emit_function_return_float(&mut self) -> Result<(), CodegenError> { self.assembler - .emit_mov(Size::S64, Location::GPR(GPR::X0), Location::SIMD(NEON::V0)); + .emit_mov(Size::S64, Location::GPR(GPR::X0), Location::SIMD(NEON::V0)) } fn arch_supports_canonicalize_nan(&self) -> bool { self.assembler.arch_supports_canonicalize_nan() } - fn canonicalize_nan(&mut self, sz: Size, input: Location, output: Location) { + fn canonicalize_nan( + &mut self, + sz: Size, + input: Location, + output: Location, + ) -> Result<(), CodegenError> { let mut tempn = vec![]; let mut temps = vec![]; - let old_fpcr = self.set_default_nan(&mut temps); + let old_fpcr = self.set_default_nan(&mut temps)?; // use FMAX (input, intput) => output to automaticaly normalize the NaN match (sz, input, output) { (Size::S32, Location::SIMD(_), Location::SIMD(_)) => { - 
self.assembler.emit_fmax(sz, input, input, output); + self.assembler.emit_fmax(sz, input, input, output)?; } (Size::S64, Location::SIMD(_), Location::SIMD(_)) => { - self.assembler.emit_fmax(sz, input, input, output); + self.assembler.emit_fmax(sz, input, input, output)?; } (Size::S32, Location::SIMD(_), _) | (Size::S64, Location::SIMD(_), _) => { - let tmp = self.location_to_neon(sz, output, &mut tempn, ImmType::None, false); - self.assembler.emit_fmax(sz, input, input, tmp); - self.move_location(sz, tmp, output); + let tmp = self.location_to_neon(sz, output, &mut tempn, ImmType::None, false)?; + self.assembler.emit_fmax(sz, input, input, tmp)?; + self.move_location(sz, tmp, output)?; } (Size::S32, Location::Memory(_, _), _) | (Size::S64, Location::Memory(_, _), _) => { - let src = self.location_to_neon(sz, input, &mut tempn, ImmType::None, true); - let tmp = self.location_to_neon(sz, output, &mut tempn, ImmType::None, false); - self.assembler.emit_fmax(sz, src, src, tmp); + let src = self.location_to_neon(sz, input, &mut tempn, ImmType::None, true)?; + let tmp = self.location_to_neon(sz, output, &mut tempn, ImmType::None, false)?; + self.assembler.emit_fmax(sz, src, src, tmp)?; if tmp != output { - self.move_location(sz, tmp, output); + self.move_location(sz, tmp, output)?; } } - _ => panic!( + _ => codegen_error!( "singlepass can't emit canonicalize_nan {:?} {:?} {:?}", - sz, input, output + sz, + input, + output ), } - self.restore_fpcr(old_fpcr); + self.restore_fpcr(old_fpcr)?; for r in temps { self.release_gpr(r); } for r in tempn { self.release_simd(r); } + Ok(()) } - fn emit_illegal_op(&mut self, trap: TrapCode) { + fn emit_illegal_op(&mut self, trap: TrapCode) -> Result<(), CodegenError> { let offset = self.assembler.get_offset().0; - self.assembler.emit_udf(0xc0 | (trap as u8) as u16); + self.assembler.emit_udf(0xc0 | (trap as u8) as u16)?; self.mark_instruction_address_end(offset); + Ok(()) } fn get_label(&mut self) -> Label { 
self.assembler.new_dynamic_label() } - fn emit_label(&mut self, label: Label) { - self.assembler.emit_label(label); + fn emit_label(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_label(label) } fn get_grp_for_call(&self) -> GPR { GPR::X27 } - fn emit_call_register(&mut self, reg: GPR) { - self.assembler.emit_call_register(reg); + fn emit_call_register(&mut self, reg: GPR) -> Result<(), CodegenError> { + self.assembler.emit_call_register(reg) } - fn emit_call_label(&mut self, label: Label) { - self.assembler.emit_call_label(label); + fn emit_call_label(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_call_label(label) } fn get_gpr_for_ret(&self) -> GPR { GPR::X0 @@ -2182,16 +2402,19 @@ impl Machine for MachineARM64 { self.assembler.arch_requires_indirect_call_trampoline() } - fn arch_emit_indirect_call_with_trampoline(&mut self, location: Location) { + fn arch_emit_indirect_call_with_trampoline( + &mut self, + location: Location, + ) -> Result<(), CodegenError> { self.assembler - .arch_emit_indirect_call_with_trampoline(location); + .arch_emit_indirect_call_with_trampoline(location) } - fn emit_debug_breakpoint(&mut self) { - self.assembler.emit_brk(); + fn emit_debug_breakpoint(&mut self) -> Result<(), CodegenError> { + self.assembler.emit_brk() } - fn emit_call_location(&mut self, location: Location) { + fn emit_call_location(&mut self, location: Location) -> Result<(), CodegenError> { let mut temps = vec![]; let loc = self.location_to_reg( Size::S64, @@ -2200,97 +2423,149 @@ impl Machine for MachineARM64 { ImmType::None, true, Some(GPR::X27), - ); + )?; match loc { Location::GPR(reg) => self.assembler.emit_call_register(reg), - _ => unreachable!(), - } + _ => codegen_error!("singlepass can't emit CALL Location"), + }?; for r in temps { self.release_gpr(r); } + Ok(()) } - fn location_address(&mut self, _size: Size, _source: Location, _dest: Location) { - unimplemented!(); + fn location_address( + &mut self, 
+ _size: Size, + _source: Location, + _dest: Location, + ) -> Result<(), CodegenError> { + codegen_error!("singlepass location_address not implemented") } // logic - fn location_and(&mut self, _size: Size, _source: Location, _dest: Location, _flags: bool) { - unimplemented!(); + fn location_and( + &mut self, + _size: Size, + _source: Location, + _dest: Location, + _flags: bool, + ) -> Result<(), CodegenError> { + codegen_error!("singlepass location_and not implemented") } - fn location_xor(&mut self, _size: Size, _source: Location, _dest: Location, _flags: bool) { - unimplemented!(); + fn location_xor( + &mut self, + _size: Size, + _source: Location, + _dest: Location, + _flags: bool, + ) -> Result<(), CodegenError> { + codegen_error!("singlepass location_xor not implemented") } - fn location_or(&mut self, _size: Size, _source: Location, _dest: Location, _flags: bool) { - unimplemented!(); + fn location_or( + &mut self, + _size: Size, + _source: Location, + _dest: Location, + _flags: bool, + ) -> Result<(), CodegenError> { + codegen_error!("singlepass location_or not implemented") } - fn location_test(&mut self, _size: Size, _source: Location, _dest: Location) { - unimplemented!(); + fn location_test( + &mut self, + _size: Size, + _source: Location, + _dest: Location, + ) -> Result<(), CodegenError> { + codegen_error!("singlepass location_test not implemented") } // math - fn location_add(&mut self, size: Size, source: Location, dest: Location, flags: bool) { + fn location_add( + &mut self, + size: Size, + source: Location, + dest: Location, + flags: bool, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let src = self.location_to_reg(size, source, &mut temps, ImmType::Bits12, true, None); - let dst = self.location_to_reg(size, dest, &mut temps, ImmType::None, true, None); + let src = self.location_to_reg(size, source, &mut temps, ImmType::Bits12, true, None)?; + let dst = self.location_to_reg(size, dest, &mut temps, ImmType::None, true, None)?; if flags 
{ - self.assembler.emit_adds(size, dst, src, dst); + self.assembler.emit_adds(size, dst, src, dst)?; } else { - self.assembler.emit_add(size, dst, src, dst); + self.assembler.emit_add(size, dst, src, dst)?; } if dst != dest { - self.move_location(size, dst, dest); + self.move_location(size, dst, dest)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn location_sub(&mut self, size: Size, source: Location, dest: Location, flags: bool) { + fn location_sub( + &mut self, + size: Size, + source: Location, + dest: Location, + flags: bool, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let src = self.location_to_reg(size, source, &mut temps, ImmType::Bits12, true, None); - let dst = self.location_to_reg(size, dest, &mut temps, ImmType::None, true, None); + let src = self.location_to_reg(size, source, &mut temps, ImmType::Bits12, true, None)?; + let dst = self.location_to_reg(size, dest, &mut temps, ImmType::None, true, None)?; if flags { - self.assembler.emit_subs(size, dst, src, dst); + self.assembler.emit_subs(size, dst, src, dst)?; } else { - self.assembler.emit_sub(size, dst, src, dst); + self.assembler.emit_sub(size, dst, src, dst)?; } if dst != dest { - self.move_location(size, dst, dest); + self.move_location(size, dst, dest)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn location_cmp(&mut self, size: Size, source: Location, dest: Location) { - self.emit_relaxed_binop(Assembler::emit_cmp, size, source, dest, false); + fn location_cmp( + &mut self, + size: Size, + source: Location, + dest: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_binop(Assembler::emit_cmp, size, source, dest, false) } - fn jmp_unconditionnal(&mut self, label: Label) { - self.assembler.emit_b_label(label); + fn jmp_unconditionnal(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_b_label(label) } - fn jmp_on_equal(&mut self, label: Label) { - self.assembler.emit_bcond_label_far(Condition::Eq, label); + fn 
jmp_on_equal(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_bcond_label_far(Condition::Eq, label) } - fn jmp_on_different(&mut self, label: Label) { - self.assembler.emit_bcond_label_far(Condition::Ne, label); + fn jmp_on_different(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_bcond_label_far(Condition::Ne, label) } - fn jmp_on_above(&mut self, label: Label) { - self.assembler.emit_bcond_label_far(Condition::Hi, label); + fn jmp_on_above(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_bcond_label_far(Condition::Hi, label) } - fn jmp_on_aboveequal(&mut self, label: Label) { - self.assembler.emit_bcond_label_far(Condition::Cs, label); + fn jmp_on_aboveequal(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_bcond_label_far(Condition::Cs, label) } - fn jmp_on_belowequal(&mut self, label: Label) { - self.assembler.emit_bcond_label_far(Condition::Ls, label); + fn jmp_on_belowequal(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_bcond_label_far(Condition::Ls, label) } - fn jmp_on_overflow(&mut self, label: Label) { - self.assembler.emit_bcond_label_far(Condition::Cs, label); + fn jmp_on_overflow(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_bcond_label_far(Condition::Cs, label) } // jmp table - fn emit_jmp_to_jumptable(&mut self, label: Label, cond: Location) { - let tmp1 = self.acquire_temp_gpr().unwrap(); - let tmp2 = self.acquire_temp_gpr().unwrap(); + fn emit_jmp_to_jumptable(&mut self, label: Label, cond: Location) -> Result<(), CodegenError> { + let tmp1 = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp2 = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; - self.assembler.emit_load_label(tmp1, label); - self.move_location(Size::S32, cond, 
Location::GPR(tmp2)); + self.assembler.emit_load_label(tmp1, label)?; + self.move_location(Size::S32, cond, Location::GPR(tmp2))?; self.assembler.emit_add_lsl( Size::S64, @@ -2298,29 +2573,31 @@ impl Machine for MachineARM64 { Location::GPR(tmp2), 2, Location::GPR(tmp2), - ); - self.assembler.emit_b_register(tmp2); + )?; + self.assembler.emit_b_register(tmp2)?; self.release_gpr(tmp2); self.release_gpr(tmp1); + Ok(()) } - fn align_for_loop(&mut self) { + fn align_for_loop(&mut self) -> Result<(), CodegenError> { // noting to do on ARM64 + Ok(()) } - fn emit_ret(&mut self) { - self.assembler.emit_ret(); + fn emit_ret(&mut self) -> Result<(), CodegenError> { + self.assembler.emit_ret() } - fn emit_push(&mut self, size: Size, loc: Location) { - self.emit_push(size, loc); + fn emit_push(&mut self, size: Size, loc: Location) -> Result<(), CodegenError> { + self.emit_push(size, loc) } - fn emit_pop(&mut self, size: Size, loc: Location) { - self.emit_pop(size, loc); + fn emit_pop(&mut self, size: Size, loc: Location) -> Result<(), CodegenError> { + self.emit_pop(size, loc) } - fn emit_memory_fence(&mut self) { - self.assembler.emit_dmb(); + fn emit_memory_fence(&mut self) -> Result<(), CodegenError> { + self.assembler.emit_dmb() } fn location_neg( @@ -2330,29 +2607,42 @@ impl Machine for MachineARM64 { _source: Location, _size_op: Size, _dest: Location, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass location_neg unimplemented"); } - fn emit_imul_imm32(&mut self, size: Size, imm32: u32, gpr: GPR) { - let tmp = self.acquire_temp_gpr().unwrap(); + fn emit_imul_imm32(&mut self, size: Size, imm32: u32, gpr: GPR) -> Result<(), CodegenError> { + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov_imm(Location::GPR(tmp), imm32 as u64); + .emit_mov_imm(Location::GPR(tmp), imm32 as u64)?; self.assembler.emit_mul( size, Location::GPR(gpr), 
Location::GPR(tmp), Location::GPR(gpr), - ); + )?; self.release_gpr(tmp); + Ok(()) } // relaxed binop based... - fn emit_relaxed_mov(&mut self, sz: Size, src: Location, dst: Location) { - self.emit_relaxed_binop(Assembler::emit_mov, sz, src, dst, true); + fn emit_relaxed_mov( + &mut self, + sz: Size, + src: Location, + dst: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_binop(Assembler::emit_mov, sz, src, dst, true) } - fn emit_relaxed_cmp(&mut self, sz: Size, src: Location, dst: Location) { - self.emit_relaxed_binop(Assembler::emit_cmp, sz, src, dst, false); + fn emit_relaxed_cmp( + &mut self, + sz: Size, + src: Location, + dst: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_binop(Assembler::emit_cmp, sz, src, dst, false) } fn emit_relaxed_zero_extension( &mut self, @@ -2360,8 +2650,8 @@ impl Machine for MachineARM64 { _src: Location, _sz_dst: Size, _dst: Location, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass emit_relaxed_zero_extension unimplemented"); } fn emit_relaxed_sign_extension( &mut self, @@ -2369,36 +2659,43 @@ impl Machine for MachineARM64 { src: Location, sz_dst: Size, dst: Location, - ) { + ) -> Result<(), CodegenError> { match (src, dst) { (Location::Memory(_, _), Location::GPR(_)) => match sz_src { Size::S8 => self.emit_relaxed_ldr8s(sz_dst, dst, src), Size::S16 => self.emit_relaxed_ldr16s(sz_dst, dst, src), Size::S32 => self.emit_relaxed_ldr32s(sz_dst, dst, src), - _ => unreachable!(), + _ => codegen_error!("singlepass emit_relaxed_sign_extension unreachable"), }, _ => { let mut temps = vec![]; - let src = self.location_to_reg(sz_src, src, &mut temps, ImmType::None, true, None); + let src = + self.location_to_reg(sz_src, src, &mut temps, ImmType::None, true, None)?; let dest = - self.location_to_reg(sz_dst, dst, &mut temps, ImmType::None, false, None); + self.location_to_reg(sz_dst, dst, &mut temps, ImmType::None, false, None)?; match sz_src { Size::S8 => 
self.assembler.emit_sxtb(sz_dst, src, dest), Size::S16 => self.assembler.emit_sxth(sz_dst, src, dest), Size::S32 => self.assembler.emit_sxtw(sz_dst, src, dest), - _ => unreachable!(), - }; + _ => codegen_error!("singlepass emit_relaxed_sign_extension unreachable"), + }?; if dst != dest { - self.move_location(sz_dst, dest, dst); + self.move_location(sz_dst, dest, dst)?; } for r in temps { self.release_gpr(r); } + Ok(()) } } } - fn emit_binop_add32(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn emit_binop_add32( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_add, Size::S32, @@ -2406,9 +2703,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Bits12, - ); + ) } - fn emit_binop_sub32(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn emit_binop_sub32( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_sub, Size::S32, @@ -2416,9 +2718,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Bits12, - ); + ) } - fn emit_binop_mul32(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn emit_binop_mul32( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_mul, Size::S32, @@ -2426,7 +2733,7 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::None, - ); + ) } fn emit_binop_udiv32( &mut self, @@ -2435,23 +2742,23 @@ impl Machine for MachineARM64 { ret: Location, integer_division_by_zero: Label, _integer_overflow: Label, - ) -> usize { + ) -> Result { let mut temps = vec![]; - let src1 = self.location_to_reg(Size::S32, loc_a, &mut temps, ImmType::None, true, None); - let src2 = self.location_to_reg(Size::S32, loc_b, &mut temps, ImmType::None, true, None); - let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, 
None); + let src1 = self.location_to_reg(Size::S32, loc_a, &mut temps, ImmType::None, true, None)?; + let src2 = self.location_to_reg(Size::S32, loc_b, &mut temps, ImmType::None, true, None)?; + let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None)?; self.assembler - .emit_cbz_label(Size::S32, src2, integer_division_by_zero); + .emit_cbz_label(Size::S32, src2, integer_division_by_zero)?; let offset = self.mark_instruction_with_trap_code(TrapCode::IntegerOverflow); - self.assembler.emit_udiv(Size::S32, src1, src2, dest); + self.assembler.emit_udiv(Size::S32, src1, src2, dest)?; if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } - offset + Ok(offset) } fn emit_binop_sdiv32( &mut self, @@ -2460,14 +2767,14 @@ impl Machine for MachineARM64 { ret: Location, integer_division_by_zero: Label, integer_overflow: Label, - ) -> usize { + ) -> Result { let mut temps = vec![]; - let src1 = self.location_to_reg(Size::S32, loc_a, &mut temps, ImmType::None, true, None); - let src2 = self.location_to_reg(Size::S32, loc_b, &mut temps, ImmType::None, true, None); - let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None); + let src1 = self.location_to_reg(Size::S32, loc_a, &mut temps, ImmType::None, true, None)?; + let src2 = self.location_to_reg(Size::S32, loc_b, &mut temps, ImmType::None, true, None)?; + let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None)?; self.assembler - .emit_cbz_label(Size::S32, src2, integer_division_by_zero); + .emit_cbz_label(Size::S32, src2, integer_division_by_zero)?; let label_nooverflow = self.assembler.get_label(); let tmp = self.location_to_reg( Size::S32, @@ -2476,24 +2783,24 @@ impl Machine for MachineARM64 { ImmType::None, true, None, - ); - self.assembler.emit_cmp(Size::S32, tmp, src1); + )?; + self.assembler.emit_cmp(Size::S32, tmp, src1)?; 
self.assembler - .emit_bcond_label(Condition::Ne, label_nooverflow); - self.assembler.emit_movn(Size::S32, tmp, 0); - self.assembler.emit_cmp(Size::S32, tmp, src2); + .emit_bcond_label(Condition::Ne, label_nooverflow)?; + self.assembler.emit_movn(Size::S32, tmp, 0)?; + self.assembler.emit_cmp(Size::S32, tmp, src2)?; self.assembler - .emit_bcond_label_far(Condition::Eq, integer_overflow); + .emit_bcond_label_far(Condition::Eq, integer_overflow)?; let offset = self.mark_instruction_with_trap_code(TrapCode::IntegerOverflow); - self.assembler.emit_label(label_nooverflow); - self.assembler.emit_sdiv(Size::S32, src1, src2, dest); + self.assembler.emit_label(label_nooverflow)?; + self.assembler.emit_sdiv(Size::S32, src1, src2, dest)?; if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } - offset + Ok(offset) } fn emit_binop_urem32( &mut self, @@ -2502,32 +2809,36 @@ impl Machine for MachineARM64 { ret: Location, integer_division_by_zero: Label, _integer_overflow: Label, - ) -> usize { + ) -> Result { let mut temps = vec![]; - let src1 = self.location_to_reg(Size::S32, loc_a, &mut temps, ImmType::None, true, None); - let src2 = self.location_to_reg(Size::S32, loc_b, &mut temps, ImmType::None, true, None); - let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None); + let src1 = self.location_to_reg(Size::S32, loc_a, &mut temps, ImmType::None, true, None)?; + let src2 = self.location_to_reg(Size::S32, loc_b, &mut temps, ImmType::None, true, None)?; + let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None)?; let dest = if dest == src1 || dest == src2 { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(tmp); - self.assembler.emit_mov(Size::S32, dest, Location::GPR(tmp)); + self.assembler + 
.emit_mov(Size::S32, dest, Location::GPR(tmp))?; Location::GPR(tmp) } else { dest }; self.assembler - .emit_cbz_label(Size::S32, src2, integer_division_by_zero); + .emit_cbz_label(Size::S32, src2, integer_division_by_zero)?; let offset = self.mark_instruction_with_trap_code(TrapCode::IntegerOverflow); - self.assembler.emit_udiv(Size::S32, src1, src2, dest); + self.assembler.emit_udiv(Size::S32, src1, src2, dest)?; // unsigned remainder : src1 - (src1/src2)*src2 - self.assembler.emit_msub(Size::S32, dest, src2, src1, dest); + self.assembler + .emit_msub(Size::S32, dest, src2, src1, dest)?; if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } - offset + Ok(offset) } fn emit_binop_srem32( &mut self, @@ -2536,34 +2847,43 @@ impl Machine for MachineARM64 { ret: Location, integer_division_by_zero: Label, _integer_overflow: Label, - ) -> usize { + ) -> Result { let mut temps = vec![]; - let src1 = self.location_to_reg(Size::S32, loc_a, &mut temps, ImmType::None, true, None); - let src2 = self.location_to_reg(Size::S32, loc_b, &mut temps, ImmType::None, true, None); - let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None); + let src1 = self.location_to_reg(Size::S32, loc_a, &mut temps, ImmType::None, true, None)?; + let src2 = self.location_to_reg(Size::S32, loc_b, &mut temps, ImmType::None, true, None)?; + let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None)?; let dest = if dest == src1 || dest == src2 { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(tmp); - self.assembler.emit_mov(Size::S32, dest, Location::GPR(tmp)); + self.assembler + .emit_mov(Size::S32, dest, Location::GPR(tmp))?; Location::GPR(tmp) } else { dest }; self.assembler - .emit_cbz_label(Size::S32, src2, 
integer_division_by_zero); + .emit_cbz_label(Size::S32, src2, integer_division_by_zero)?; let offset = self.mark_instruction_with_trap_code(TrapCode::IntegerOverflow); - self.assembler.emit_sdiv(Size::S32, src1, src2, dest); + self.assembler.emit_sdiv(Size::S32, src1, src2, dest)?; // unsigned remainder : src1 - (src1/src2)*src2 - self.assembler.emit_msub(Size::S32, dest, src2, src1, dest); + self.assembler + .emit_msub(Size::S32, dest, src2, src1, dest)?; if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } - offset + Ok(offset) } - fn emit_binop_and32(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn emit_binop_and32( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_and, Size::S32, @@ -2571,9 +2891,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Logical32, - ); + ) } - fn emit_binop_or32(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn emit_binop_or32( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_or, Size::S32, @@ -2581,9 +2906,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Logical32, - ); + ) } - fn emit_binop_xor32(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn emit_binop_xor32( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_eor, Size::S32, @@ -2591,95 +2921,157 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Logical32, - ); + ) } - fn i32_cmp_ge_s(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::Ge, loc_a, loc_b, ret); + fn i32_cmp_ge_s( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + 
self.emit_cmpop_i32_dynamic_b(Condition::Ge, loc_a, loc_b, ret) } - fn i32_cmp_gt_s(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::Gt, loc_a, loc_b, ret); + fn i32_cmp_gt_s( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::Gt, loc_a, loc_b, ret) } - fn i32_cmp_le_s(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::Le, loc_a, loc_b, ret); + fn i32_cmp_le_s( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::Le, loc_a, loc_b, ret) } - fn i32_cmp_lt_s(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::Lt, loc_a, loc_b, ret); + fn i32_cmp_lt_s( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::Lt, loc_a, loc_b, ret) } - fn i32_cmp_ge_u(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::Cs, loc_a, loc_b, ret); + fn i32_cmp_ge_u( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::Cs, loc_a, loc_b, ret) } - fn i32_cmp_gt_u(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::Hi, loc_a, loc_b, ret); + fn i32_cmp_gt_u( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::Hi, loc_a, loc_b, ret) } - fn i32_cmp_le_u(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::Ls, loc_a, loc_b, ret); + fn i32_cmp_le_u( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), 
CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::Ls, loc_a, loc_b, ret) } - fn i32_cmp_lt_u(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::Cc, loc_a, loc_b, ret); + fn i32_cmp_lt_u( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::Cc, loc_a, loc_b, ret) } - fn i32_cmp_ne(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::Ne, loc_a, loc_b, ret); + fn i32_cmp_ne( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::Ne, loc_a, loc_b, ret) } - fn i32_cmp_eq(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::Eq, loc_a, loc_b, ret); + fn i32_cmp_eq( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::Eq, loc_a, loc_b, ret) } - fn i32_clz(&mut self, src: Location, dst: Location) { - self.emit_relaxed_binop(Assembler::emit_clz, Size::S32, src, dst, true); + fn i32_clz(&mut self, src: Location, dst: Location) -> Result<(), CodegenError> { + self.emit_relaxed_binop(Assembler::emit_clz, Size::S32, src, dst, true) } - fn i32_ctz(&mut self, src: Location, dst: Location) { + fn i32_ctz(&mut self, src: Location, dst: Location) -> Result<(), CodegenError> { let mut temps = vec![]; - let src = self.location_to_reg(Size::S32, src, &mut temps, ImmType::None, true, None); - let dest = self.location_to_reg(Size::S32, dst, &mut temps, ImmType::None, false, None); - self.assembler.emit_rbit(Size::S32, src, dest); - self.assembler.emit_clz(Size::S32, dest, dest); + let src = self.location_to_reg(Size::S32, src, &mut temps, ImmType::None, true, None)?; + let dest = self.location_to_reg(Size::S32, dst, &mut temps, ImmType::None, false, 
None)?; + self.assembler.emit_rbit(Size::S32, src, dest)?; + self.assembler.emit_clz(Size::S32, dest, dest)?; if dst != dest { - self.move_location(Size::S32, dest, dst); + self.move_location(Size::S32, dest, dst)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn i32_popcnt(&mut self, loc: Location, ret: Location) { + fn i32_popcnt(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { // no opcode for that. // 2 solutions: using NEON CNT, that count bits per Byte, or using clz with some shift and loop let mut temps = vec![]; - let src = self.location_to_reg(Size::S32, loc, &mut temps, ImmType::None, true, None); - let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None); + let src = self.location_to_reg(Size::S32, loc, &mut temps, ImmType::None, true, None)?; + let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None)?; let src = if src == loc { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(tmp); - self.assembler.emit_mov(Size::S32, src, Location::GPR(tmp)); + self.assembler + .emit_mov(Size::S32, src, Location::GPR(tmp))?; Location::GPR(tmp) } else { src }; let tmp = { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(tmp); Location::GPR(tmp) }; let label_loop = self.assembler.get_label(); let label_exit = self.assembler.get_label(); self.assembler - .emit_mov(Size::S32, Location::GPR(GPR::XzrSp), dest); // 0 => dest - self.assembler.emit_cbz_label(Size::S32, src, label_exit); // src==0, exit - self.assembler.emit_label(label_loop); // loop: + .emit_mov(Size::S32, Location::GPR(GPR::XzrSp), dest)?; // 0 => dest + self.assembler.emit_cbz_label(Size::S32, src, label_exit)?; // src==0, exit + 
self.assembler.emit_label(label_loop)?; // loop: self.assembler - .emit_add(Size::S32, dest, Location::Imm8(1), dest); // inc dest - self.assembler.emit_clz(Size::S32, src, tmp); // clz src => tmp + .emit_add(Size::S32, dest, Location::Imm8(1), dest)?; // inc dest + self.assembler.emit_clz(Size::S32, src, tmp)?; // clz src => tmp self.assembler - .emit_add(Size::S32, tmp, Location::Imm8(1), tmp); // inc tmp - self.assembler.emit_lsl(Size::S32, src, tmp, src); // src << tmp => src - self.assembler.emit_cbnz_label(Size::S32, src, label_loop); // if src!=0 goto loop - self.assembler.emit_label(label_exit); + .emit_add(Size::S32, tmp, Location::Imm8(1), tmp)?; // inc tmp + self.assembler.emit_lsl(Size::S32, src, tmp, src)?; // src << tmp => src + self.assembler.emit_cbnz_label(Size::S32, src, label_loop)?; // if src!=0 goto loop + self.assembler.emit_label(label_exit)?; if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn i32_shl(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn i32_shl( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_lsl, Size::S32, @@ -2687,9 +3079,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Shift32No0, - ); + ) } - fn i32_shr(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn i32_shr( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_lsr, Size::S32, @@ -2697,9 +3094,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Shift32No0, - ); + ) } - fn i32_sar(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn i32_sar( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_asr, Size::S32, @@ -2707,9 
+3109,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Shift32No0, - ); + ) } - fn i32_rol(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn i32_rol( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; let src2 = match loc_b { Location::Imm8(imm) => Location::Imm8(32 - (imm & 31)), @@ -2723,10 +3130,10 @@ impl Machine for MachineARM64 { ImmType::None, true, None, - ); + )?; let tmp2 = - self.location_to_reg(Size::S32, loc_b, &mut temps, ImmType::None, true, None); - self.assembler.emit_sub(Size::S32, tmp1, tmp2, tmp1); + self.location_to_reg(Size::S32, loc_b, &mut temps, ImmType::None, true, None)?; + self.assembler.emit_sub(Size::S32, tmp1, tmp2, tmp1)?; tmp1 } }; @@ -2737,12 +3144,18 @@ impl Machine for MachineARM64 { src2, ret, ImmType::Shift32No0, - ); + )?; for r in temps { self.release_gpr(r); } + Ok(()) } - fn i32_ror(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn i32_ror( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_ror, Size::S32, @@ -2750,7 +3163,7 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Shift32No0, - ); + ) } fn i32_load( &mut self, @@ -2761,7 +3174,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -2771,10 +3184,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_ldr32(Size::S32, ret, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_ldr32(Size::S32, ret, Location::Memory(addr, 0)), + ) } fn i32_load_8u( &mut self, @@ -2785,7 +3196,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -2795,10 +3206,8 
@@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_ldr8(Size::S32, ret, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_ldr8(Size::S32, ret, Location::Memory(addr, 0)), + ) } fn i32_load_8s( &mut self, @@ -2809,7 +3218,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -2819,10 +3228,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_ldr8s(Size::S32, ret, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_ldr8s(Size::S32, ret, Location::Memory(addr, 0)), + ) } fn i32_load_16u( &mut self, @@ -2833,7 +3240,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -2843,10 +3250,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_ldr16(Size::S32, ret, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_ldr16(Size::S32, ret, Location::Memory(addr, 0)), + ) } fn i32_load_16s( &mut self, @@ -2857,7 +3262,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -2867,10 +3272,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_ldr16s(Size::S32, ret, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_ldr16s(Size::S32, ret, Location::Memory(addr, 0)), + ) } fn i32_atomic_load( &mut self, @@ -2881,8 +3284,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass 
i32_atomic_load unimplemented"); } fn i32_atomic_load_8u( &mut self, @@ -2893,8 +3296,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_load_8u unimplemented"); } fn i32_atomic_load_16u( &mut self, @@ -2905,8 +3308,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_load_16u unimplemented"); } fn i32_save( &mut self, @@ -2917,7 +3320,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -2927,10 +3330,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_str32(target_value, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_str32(target_value, Location::Memory(addr, 0)), + ) } fn i32_save_8( &mut self, @@ -2941,7 +3342,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -2951,10 +3352,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_str8(target_value, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_str8(target_value, Location::Memory(addr, 0)), + ) } fn i32_save_16( &mut self, @@ -2965,7 +3364,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -2975,10 +3374,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_str16(target_value, Location::Memory(addr, 0)); - 
}, - ); + |this, addr| this.emit_relaxed_str16(target_value, Location::Memory(addr, 0)), + ) } fn i32_atomic_save( &mut self, @@ -2989,8 +3386,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_save unimplemented"); } fn i32_atomic_save_8( &mut self, @@ -3001,8 +3398,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_save_8 unimplemented"); } fn i32_atomic_save_16( &mut self, @@ -3013,8 +3410,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_save_16 unimplemented"); } // i32 atomic Add with i32 fn i32_atomic_add( @@ -3027,8 +3424,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_add unimplemented"); } // i32 atomic Add with u8 fn i32_atomic_add_8u( @@ -3041,8 +3438,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_add_8u unimplemented"); } // i32 atomic Add with u16 fn i32_atomic_add_16u( @@ -3055,8 +3452,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_add_16u unimplemented"); } // i32 atomic Sub with i32 fn i32_atomic_sub( @@ -3069,8 +3466,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) 
-> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_sub unimplemented"); } // i32 atomic Sub with u8 fn i32_atomic_sub_8u( @@ -3083,8 +3480,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_sub_8u unimplemented"); } // i32 atomic Sub with u16 fn i32_atomic_sub_16u( @@ -3097,8 +3494,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_sub_16u unimplemented"); } // i32 atomic And with i32 fn i32_atomic_and( @@ -3111,8 +3508,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_and unimplemented"); } // i32 atomic And with u8 fn i32_atomic_and_8u( @@ -3125,8 +3522,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_and_8u unimplemented"); } // i32 atomic And with u16 fn i32_atomic_and_16u( @@ -3139,8 +3536,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_and_16u unimplemented"); } // i32 atomic Or with i32 fn i32_atomic_or( @@ -3153,8 +3550,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_or unimplemented"); } // i32 atomic Or with u8 fn i32_atomic_or_8u( @@ -3167,8 +3564,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, 
_heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_or_8u unimplemented"); } // i32 atomic Or with u16 fn i32_atomic_or_16u( @@ -3181,8 +3578,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_or_16u unimplemented"); } // i32 atomic Xor with i32 fn i32_atomic_xor( @@ -3195,8 +3592,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_xor unimplemented"); } // i32 atomic Xor with u8 fn i32_atomic_xor_8u( @@ -3209,8 +3606,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_xor_8u unimplemented"); } // i32 atomic Xor with u16 fn i32_atomic_xor_16u( @@ -3223,8 +3620,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_xor_16u unimplemented"); } // i32 atomic Exchange with i32 fn i32_atomic_xchg( @@ -3237,8 +3634,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_xchg unimplemented"); } // i32 atomic Exchange with u8 fn i32_atomic_xchg_8u( @@ -3251,8 +3648,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_xchg_8u unimplemented"); } // i32 atomic Exchange with u16 fn i32_atomic_xchg_16u( @@ -3265,8 +3662,8 @@ impl 
Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_xchg_16u unimplemented"); } // i32 atomic Exchange with i32 fn i32_atomic_cmpxchg( @@ -3280,8 +3677,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_cmpxchg unimplemented"); } // i32 atomic Exchange with u8 fn i32_atomic_cmpxchg_8u( @@ -3295,8 +3692,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_cmpxchg_8u unimplemented"); } // i32 atomic Exchange with u16 fn i32_atomic_cmpxchg_16u( @@ -3310,30 +3707,35 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i32_atomic_cmpxchg_16u unimplemented"); } fn emit_call_with_reloc( &mut self, _calling_convention: CallingConvention, reloc_target: RelocationTarget, - ) -> Vec { + ) -> Result, CodegenError> { let mut relocations = vec![]; let next = self.get_label(); let reloc_at = self.assembler.get_offset().0; - self.emit_label(next); // this is to be sure the current imm26 value is 0 - self.assembler.emit_call_label(next); + self.emit_label(next)?; // this is to be sure the current imm26 value is 0 + self.assembler.emit_call_label(next)?; relocations.push(Relocation { kind: RelocationKind::Arm64Call, reloc_target, offset: reloc_at as u32, addend: 0, }); - relocations + Ok(relocations) } - fn emit_binop_add64(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn emit_binop_add64( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { 
self.emit_relaxed_binop3( Assembler::emit_add, Size::S64, @@ -3341,9 +3743,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Bits12, - ); + ) } - fn emit_binop_sub64(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn emit_binop_sub64( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_sub, Size::S64, @@ -3351,9 +3758,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Bits12, - ); + ) } - fn emit_binop_mul64(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn emit_binop_mul64( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_mul, Size::S64, @@ -3361,7 +3773,7 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::None, - ); + ) } fn emit_binop_udiv64( &mut self, @@ -3370,23 +3782,23 @@ impl Machine for MachineARM64 { ret: Location, integer_division_by_zero: Label, _integer_overflow: Label, - ) -> usize { + ) -> Result { let mut temps = vec![]; - let src1 = self.location_to_reg(Size::S64, loc_a, &mut temps, ImmType::None, true, None); - let src2 = self.location_to_reg(Size::S64, loc_b, &mut temps, ImmType::None, true, None); - let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None); + let src1 = self.location_to_reg(Size::S64, loc_a, &mut temps, ImmType::None, true, None)?; + let src2 = self.location_to_reg(Size::S64, loc_b, &mut temps, ImmType::None, true, None)?; + let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None)?; self.assembler - .emit_cbz_label(Size::S64, src2, integer_division_by_zero); + .emit_cbz_label(Size::S64, src2, integer_division_by_zero)?; let offset = self.mark_instruction_with_trap_code(TrapCode::IntegerOverflow); - self.assembler.emit_udiv(Size::S64, src1, src2, dest); + self.assembler.emit_udiv(Size::S64, src1, src2, dest)?; if ret != dest { - 
self.move_location(Size::S64, dest, ret); + self.move_location(Size::S64, dest, ret)?; } for r in temps { self.release_gpr(r); } - offset + Ok(offset) } fn emit_binop_sdiv64( &mut self, @@ -3395,14 +3807,14 @@ impl Machine for MachineARM64 { ret: Location, integer_division_by_zero: Label, integer_overflow: Label, - ) -> usize { + ) -> Result { let mut temps = vec![]; - let src1 = self.location_to_reg(Size::S64, loc_a, &mut temps, ImmType::None, true, None); - let src2 = self.location_to_reg(Size::S64, loc_b, &mut temps, ImmType::None, true, None); - let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None); + let src1 = self.location_to_reg(Size::S64, loc_a, &mut temps, ImmType::None, true, None)?; + let src2 = self.location_to_reg(Size::S64, loc_b, &mut temps, ImmType::None, true, None)?; + let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None)?; self.assembler - .emit_cbz_label(Size::S64, src2, integer_division_by_zero); + .emit_cbz_label(Size::S64, src2, integer_division_by_zero)?; let label_nooverflow = self.assembler.get_label(); let tmp = self.location_to_reg( Size::S64, @@ -3411,24 +3823,24 @@ impl Machine for MachineARM64 { ImmType::None, true, None, - ); - self.assembler.emit_cmp(Size::S64, tmp, src1); + )?; + self.assembler.emit_cmp(Size::S64, tmp, src1)?; self.assembler - .emit_bcond_label(Condition::Ne, label_nooverflow); - self.assembler.emit_movn(Size::S64, tmp, 0); - self.assembler.emit_cmp(Size::S64, tmp, src2); + .emit_bcond_label(Condition::Ne, label_nooverflow)?; + self.assembler.emit_movn(Size::S64, tmp, 0)?; + self.assembler.emit_cmp(Size::S64, tmp, src2)?; self.assembler - .emit_bcond_label_far(Condition::Eq, integer_overflow); + .emit_bcond_label_far(Condition::Eq, integer_overflow)?; let offset = self.mark_instruction_with_trap_code(TrapCode::IntegerOverflow); - self.assembler.emit_label(label_nooverflow); - self.assembler.emit_sdiv(Size::S64, src1, src2, dest); + 
self.assembler.emit_label(label_nooverflow)?; + self.assembler.emit_sdiv(Size::S64, src1, src2, dest)?; if ret != dest { - self.move_location(Size::S64, dest, ret); + self.move_location(Size::S64, dest, ret)?; } for r in temps { self.release_gpr(r); } - offset + Ok(offset) } fn emit_binop_urem64( &mut self, @@ -3437,32 +3849,36 @@ impl Machine for MachineARM64 { ret: Location, integer_division_by_zero: Label, _integer_overflow: Label, - ) -> usize { + ) -> Result { let mut temps = vec![]; - let src1 = self.location_to_reg(Size::S64, loc_a, &mut temps, ImmType::None, true, None); - let src2 = self.location_to_reg(Size::S64, loc_b, &mut temps, ImmType::None, true, None); - let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None); + let src1 = self.location_to_reg(Size::S64, loc_a, &mut temps, ImmType::None, true, None)?; + let src2 = self.location_to_reg(Size::S64, loc_b, &mut temps, ImmType::None, true, None)?; + let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None)?; let dest = if dest == src1 || dest == src2 { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(tmp); - self.assembler.emit_mov(Size::S32, dest, Location::GPR(tmp)); + self.assembler + .emit_mov(Size::S32, dest, Location::GPR(tmp))?; Location::GPR(tmp) } else { dest }; self.assembler - .emit_cbz_label(Size::S64, src2, integer_division_by_zero); + .emit_cbz_label(Size::S64, src2, integer_division_by_zero)?; let offset = self.mark_instruction_with_trap_code(TrapCode::IntegerOverflow); - self.assembler.emit_udiv(Size::S64, src1, src2, dest); + self.assembler.emit_udiv(Size::S64, src1, src2, dest)?; // unsigned remainder : src1 - (src1/src2)*src2 - self.assembler.emit_msub(Size::S64, dest, src2, src1, dest); + self.assembler + .emit_msub(Size::S64, dest, src2, src1, dest)?; if ret != dest { - 
self.move_location(Size::S64, dest, ret); + self.move_location(Size::S64, dest, ret)?; } for r in temps { self.release_gpr(r); } - offset + Ok(offset) } fn emit_binop_srem64( &mut self, @@ -3471,34 +3887,43 @@ impl Machine for MachineARM64 { ret: Location, integer_division_by_zero: Label, _integer_overflow: Label, - ) -> usize { + ) -> Result { let mut temps = vec![]; - let src1 = self.location_to_reg(Size::S64, loc_a, &mut temps, ImmType::None, true, None); - let src2 = self.location_to_reg(Size::S64, loc_b, &mut temps, ImmType::None, true, None); - let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None); + let src1 = self.location_to_reg(Size::S64, loc_a, &mut temps, ImmType::None, true, None)?; + let src2 = self.location_to_reg(Size::S64, loc_b, &mut temps, ImmType::None, true, None)?; + let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None)?; let dest = if dest == src1 || dest == src2 { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(tmp); - self.assembler.emit_mov(Size::S64, dest, Location::GPR(tmp)); + self.assembler + .emit_mov(Size::S64, dest, Location::GPR(tmp))?; Location::GPR(tmp) } else { dest }; self.assembler - .emit_cbz_label(Size::S64, src2, integer_division_by_zero); + .emit_cbz_label(Size::S64, src2, integer_division_by_zero)?; let offset = self.mark_instruction_with_trap_code(TrapCode::IntegerOverflow); - self.assembler.emit_sdiv(Size::S64, src1, src2, dest); + self.assembler.emit_sdiv(Size::S64, src1, src2, dest)?; // unsigned remainder : src1 - (src1/src2)*src2 - self.assembler.emit_msub(Size::S64, dest, src2, src1, dest); + self.assembler + .emit_msub(Size::S64, dest, src2, src1, dest)?; if ret != dest { - self.move_location(Size::S64, dest, ret); + self.move_location(Size::S64, dest, ret)?; } for r in temps { self.release_gpr(r); } - offset + 
Ok(offset) } - fn emit_binop_and64(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn emit_binop_and64( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_and, Size::S64, @@ -3506,9 +3931,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Logical64, - ); + ) } - fn emit_binop_or64(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn emit_binop_or64( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_or, Size::S64, @@ -3516,9 +3946,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Logical64, - ); + ) } - fn emit_binop_xor64(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn emit_binop_xor64( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_eor, Size::S64, @@ -3526,93 +3961,155 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Logical64, - ); + ) } - fn i64_cmp_ge_s(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::Ge, loc_a, loc_b, ret); + fn i64_cmp_ge_s( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::Ge, loc_a, loc_b, ret) } - fn i64_cmp_gt_s(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::Gt, loc_a, loc_b, ret); + fn i64_cmp_gt_s( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::Gt, loc_a, loc_b, ret) } - fn i64_cmp_le_s(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::Le, loc_a, loc_b, ret); + fn i64_cmp_le_s( + &mut self, + loc_a: Location, + loc_b: Location, + 
ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::Le, loc_a, loc_b, ret) } - fn i64_cmp_lt_s(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::Lt, loc_a, loc_b, ret); + fn i64_cmp_lt_s( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::Lt, loc_a, loc_b, ret) } - fn i64_cmp_ge_u(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::Cs, loc_a, loc_b, ret); + fn i64_cmp_ge_u( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::Cs, loc_a, loc_b, ret) } - fn i64_cmp_gt_u(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::Hi, loc_a, loc_b, ret); + fn i64_cmp_gt_u( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::Hi, loc_a, loc_b, ret) } - fn i64_cmp_le_u(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::Ls, loc_a, loc_b, ret); + fn i64_cmp_le_u( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::Ls, loc_a, loc_b, ret) } - fn i64_cmp_lt_u(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::Cc, loc_a, loc_b, ret); + fn i64_cmp_lt_u( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::Cc, loc_a, loc_b, ret) } - fn i64_cmp_ne(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::Ne, loc_a, loc_b, ret); + fn i64_cmp_ne( + &mut self, + loc_a: Location, + loc_b: 
Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::Ne, loc_a, loc_b, ret) } - fn i64_cmp_eq(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::Eq, loc_a, loc_b, ret); + fn i64_cmp_eq( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::Eq, loc_a, loc_b, ret) } - fn i64_clz(&mut self, src: Location, dst: Location) { - self.emit_relaxed_binop(Assembler::emit_clz, Size::S64, src, dst, true); + fn i64_clz(&mut self, src: Location, dst: Location) -> Result<(), CodegenError> { + self.emit_relaxed_binop(Assembler::emit_clz, Size::S64, src, dst, true) } - fn i64_ctz(&mut self, src: Location, dst: Location) { + fn i64_ctz(&mut self, src: Location, dst: Location) -> Result<(), CodegenError> { let mut temps = vec![]; - let src = self.location_to_reg(Size::S64, src, &mut temps, ImmType::None, true, None); - let dest = self.location_to_reg(Size::S64, dst, &mut temps, ImmType::None, false, None); - self.assembler.emit_rbit(Size::S64, src, dest); - self.assembler.emit_clz(Size::S64, dest, dest); + let src = self.location_to_reg(Size::S64, src, &mut temps, ImmType::None, true, None)?; + let dest = self.location_to_reg(Size::S64, dst, &mut temps, ImmType::None, false, None)?; + self.assembler.emit_rbit(Size::S64, src, dest)?; + self.assembler.emit_clz(Size::S64, dest, dest)?; if dst != dest { - self.move_location(Size::S64, dest, dst); + self.move_location(Size::S64, dest, dst)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn i64_popcnt(&mut self, loc: Location, ret: Location) { + fn i64_popcnt(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { let mut temps = vec![]; - let src = self.location_to_reg(Size::S64, loc, &mut temps, ImmType::None, true, None); - let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None); + let src 
= self.location_to_reg(Size::S64, loc, &mut temps, ImmType::None, true, None)?; + let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None)?; let src = if src == loc { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(tmp); - self.assembler.emit_mov(Size::S64, src, Location::GPR(tmp)); + self.assembler + .emit_mov(Size::S64, src, Location::GPR(tmp))?; Location::GPR(tmp) } else { src }; let tmp = { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; temps.push(tmp); Location::GPR(tmp) }; let label_loop = self.assembler.get_label(); let label_exit = self.assembler.get_label(); self.assembler - .emit_mov(Size::S32, Location::GPR(GPR::XzrSp), dest); - self.assembler.emit_cbz_label(Size::S64, src, label_exit); - self.assembler.emit_label(label_loop); + .emit_mov(Size::S32, Location::GPR(GPR::XzrSp), dest)?; + self.assembler.emit_cbz_label(Size::S64, src, label_exit)?; + self.assembler.emit_label(label_loop)?; self.assembler - .emit_add(Size::S32, dest, Location::Imm8(1), dest); - self.assembler.emit_clz(Size::S64, src, tmp); + .emit_add(Size::S32, dest, Location::Imm8(1), dest)?; + self.assembler.emit_clz(Size::S64, src, tmp)?; self.assembler - .emit_add(Size::S32, tmp, Location::Imm8(1), tmp); - self.assembler.emit_lsl(Size::S64, src, tmp, src); - self.assembler.emit_cbnz_label(Size::S64, src, label_loop); - self.assembler.emit_label(label_exit); + .emit_add(Size::S32, tmp, Location::Imm8(1), tmp)?; + self.assembler.emit_lsl(Size::S64, src, tmp, src)?; + self.assembler.emit_cbnz_label(Size::S64, src, label_loop)?; + self.assembler.emit_label(label_exit)?; if ret != dest { - self.move_location(Size::S64, dest, ret); + self.move_location(Size::S64, dest, ret)?; } for r in temps { 
self.release_gpr(r); } + Ok(()) } - fn i64_shl(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn i64_shl( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_lsl, Size::S64, @@ -3620,9 +4117,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Shift64No0, - ); + ) } - fn i64_shr(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn i64_shr( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_lsr, Size::S64, @@ -3630,9 +4132,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Shift64No0, - ); + ) } - fn i64_sar(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn i64_sar( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_asr, Size::S64, @@ -3640,9 +4147,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Shift64No0, - ); + ) } - fn i64_rol(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn i64_rol( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { // there is no ROL on ARM64. 
We use ROR with 64-value instead let mut temps = vec![]; let src2 = match loc_b { @@ -3657,10 +4169,10 @@ impl Machine for MachineARM64 { ImmType::None, true, None, - ); + )?; let tmp2 = - self.location_to_reg(Size::S64, loc_b, &mut temps, ImmType::None, true, None); - self.assembler.emit_sub(Size::S64, tmp1, tmp2, tmp1); + self.location_to_reg(Size::S64, loc_b, &mut temps, ImmType::None, true, None)?; + self.assembler.emit_sub(Size::S64, tmp1, tmp2, tmp1)?; tmp1 } }; @@ -3671,12 +4183,18 @@ impl Machine for MachineARM64 { src2, ret, ImmType::Shift64No0, - ); + )?; for r in temps { self.release_gpr(r); } + Ok(()) } - fn i64_ror(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn i64_ror( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3( Assembler::emit_ror, Size::S64, @@ -3684,7 +4202,7 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::Shift64No0, - ); + ) } fn i64_load( &mut self, @@ -3695,7 +4213,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -3705,10 +4223,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_ldr64(Size::S64, ret, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_ldr64(Size::S64, ret, Location::Memory(addr, 0)), + ) } fn i64_load_8u( &mut self, @@ -3719,7 +4235,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -3729,10 +4245,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_ldr8(Size::S64, ret, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_ldr8(Size::S64, ret, Location::Memory(addr, 0)), + ) } fn i64_load_8s( &mut self, @@ -3743,7 
+4257,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -3753,10 +4267,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_ldr8s(Size::S64, ret, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_ldr8s(Size::S64, ret, Location::Memory(addr, 0)), + ) } fn i64_load_16u( &mut self, @@ -3767,7 +4279,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -3777,10 +4289,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_ldr16(Size::S64, ret, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_ldr16(Size::S64, ret, Location::Memory(addr, 0)), + ) } fn i64_load_16s( &mut self, @@ -3791,7 +4301,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -3801,10 +4311,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_ldr16s(Size::S64, ret, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_ldr16s(Size::S64, ret, Location::Memory(addr, 0)), + ) } fn i64_load_32u( &mut self, @@ -3815,7 +4323,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -3825,10 +4333,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_ldr32(Size::S64, ret, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_ldr32(Size::S64, ret, Location::Memory(addr, 0)), + ) } fn i64_load_32s( &mut self, @@ -3839,7 
+4345,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -3849,10 +4355,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_ldr32s(Size::S64, ret, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_ldr32s(Size::S64, ret, Location::Memory(addr, 0)), + ) } fn i64_atomic_load( &mut self, @@ -3863,8 +4367,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_load unimplemented"); } fn i64_atomic_load_8u( &mut self, @@ -3875,8 +4379,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_load_8u unimplemented"); } fn i64_atomic_load_16u( &mut self, @@ -3887,8 +4391,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_load_16u unimplemented"); } fn i64_atomic_load_32u( &mut self, @@ -3899,8 +4403,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_load_32u unimplemented"); } fn i64_save( &mut self, @@ -3911,7 +4415,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -3921,10 +4425,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_str64(target_value, Location::Memory(addr, 0)); - }, - ); + |this, 
addr| this.emit_relaxed_str64(target_value, Location::Memory(addr, 0)), + ) } fn i64_save_8( &mut self, @@ -3935,7 +4437,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -3945,10 +4447,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_str8(target_value, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_str8(target_value, Location::Memory(addr, 0)), + ) } fn i64_save_16( &mut self, @@ -3959,7 +4459,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -3969,10 +4469,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_str16(target_value, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_str16(target_value, Location::Memory(addr, 0)), + ) } fn i64_save_32( &mut self, @@ -3983,7 +4481,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -3993,10 +4491,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_str32(target_value, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_str32(target_value, Location::Memory(addr, 0)), + ) } fn i64_atomic_save( &mut self, @@ -4007,8 +4503,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_save unimplemented"); } fn i64_atomic_save_8( &mut self, @@ -4019,8 +4515,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: 
Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_save_8 unimplemented"); } fn i64_atomic_save_16( &mut self, @@ -4031,8 +4527,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_save_16 unimplemented"); } fn i64_atomic_save_32( &mut self, @@ -4043,8 +4539,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_save_32 unimplemented"); } // i64 atomic Add with i64 fn i64_atomic_add( @@ -4057,8 +4553,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_add unimplemented"); } // i64 atomic Add with u8 fn i64_atomic_add_8u( @@ -4071,8 +4567,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_add_8u unimplemented"); } // i64 atomic Add with u16 fn i64_atomic_add_16u( @@ -4085,8 +4581,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_add_16u unimplemented"); } // i64 atomic Add with u32 fn i64_atomic_add_32u( @@ -4099,8 +4595,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_add_32u unimplemented"); } // i64 atomic Sub with i64 fn i64_atomic_sub( @@ -4113,8 +4609,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, 
_offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_sub unimplemented"); } // i64 atomic Sub with u8 fn i64_atomic_sub_8u( @@ -4127,8 +4623,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_sub_8u unimplemented"); } // i64 atomic Sub with u16 fn i64_atomic_sub_16u( @@ -4141,8 +4637,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_sub_16u unimplemented"); } // i64 atomic Sub with u32 fn i64_atomic_sub_32u( @@ -4155,8 +4651,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_sub_32u unimplemented"); } // i64 atomic And with i64 fn i64_atomic_and( @@ -4169,8 +4665,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_and unimplemented"); } // i64 atomic And with u8 fn i64_atomic_and_8u( @@ -4183,8 +4679,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_and_8u unimplemented"); } // i64 atomic And with u16 fn i64_atomic_and_16u( @@ -4197,8 +4693,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_and_16u unimplemented"); } // i64 atomic And with u32 fn i64_atomic_and_32u( @@ -4211,8 +4707,8 @@ 
impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_and_32u unimplemented"); } // i64 atomic Or with i64 fn i64_atomic_or( @@ -4225,8 +4721,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_or unimplemented"); } // i64 atomic Or with u8 fn i64_atomic_or_8u( @@ -4239,8 +4735,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_or_8u unimplemented"); } // i64 atomic Or with u16 fn i64_atomic_or_16u( @@ -4253,8 +4749,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_or_16u unimplemented"); } // i64 atomic Or with u32 fn i64_atomic_or_32u( @@ -4267,8 +4763,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_or_32u unimplemented"); } // i64 atomic xor with i64 fn i64_atomic_xor( @@ -4281,8 +4777,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_xor unimplemented"); } // i64 atomic xor with u8 fn i64_atomic_xor_8u( @@ -4295,8 +4791,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_xor_8u unimplemented"); } // i64 atomic xor with u16 fn 
i64_atomic_xor_16u( @@ -4309,8 +4805,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_xor_16u unimplemented"); } // i64 atomic xor with u32 fn i64_atomic_xor_32u( @@ -4323,8 +4819,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_xor_32u unimplemented"); } // i64 atomic Exchange with i64 fn i64_atomic_xchg( @@ -4337,8 +4833,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_xchg unimplemented"); } // i64 atomic Exchange with u8 fn i64_atomic_xchg_8u( @@ -4351,8 +4847,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_xchg_8u unimplemented"); } // i64 atomic Exchange with u16 fn i64_atomic_xchg_16u( @@ -4365,8 +4861,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_xchg_16u unimplemented"); } // i64 atomic Exchange with u32 fn i64_atomic_xchg_32u( @@ -4379,8 +4875,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_xchg_32u unimplemented"); } // i64 atomic Exchange with i64 fn i64_atomic_cmpxchg( @@ -4394,8 +4890,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + 
codegen_error!("singlepass i64_atomic_cmpxchg unimplemented"); } // i64 atomic Exchange with u8 fn i64_atomic_cmpxchg_8u( @@ -4409,8 +4905,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_cmpxchg_8u unimplemented"); } // i64 atomic Exchange with u16 fn i64_atomic_cmpxchg_16u( @@ -4424,8 +4920,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_cmpxchg_16u unimplemented"); } // i64 atomic Exchange with u32 fn i64_atomic_cmpxchg_32u( @@ -4439,8 +4935,8 @@ impl Machine for MachineARM64 { _imported_memories: bool, _offset: i32, _heap_access_oob: Label, - ) { - unimplemented!(); + ) -> Result<(), CodegenError> { + codegen_error!("singlepass i64_atomic_cmpxchg_32u unimplemented"); } fn f32_load( @@ -4452,7 +4948,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -4462,10 +4958,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_ldr32(Size::S32, ret, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_ldr32(Size::S32, ret, Location::Memory(addr, 0)), + ) } fn f32_save( &mut self, @@ -4477,7 +4971,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { let canonicalize = canonicalize && self.arch_supports_canonicalize_nan(); self.memory_op( target_addr, @@ -4490,12 +4984,12 @@ impl Machine for MachineARM64 { heap_access_oob, |this, addr| { if !canonicalize { - this.emit_relaxed_str32(target_value, Location::Memory(addr, 0)); + this.emit_relaxed_str32(target_value, Location::Memory(addr, 0)) } 
else { - this.canonicalize_nan(Size::S32, target_value, Location::Memory(addr, 0)); + this.canonicalize_nan(Size::S32, target_value, Location::Memory(addr, 0)) } }, - ); + ) } fn f64_load( &mut self, @@ -4506,7 +5000,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -4516,10 +5010,8 @@ impl Machine for MachineARM64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_ldr64(Size::S64, ret, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_ldr64(Size::S64, ret, Location::Memory(addr, 0)), + ) } fn f64_save( &mut self, @@ -4531,7 +5023,7 @@ impl Machine for MachineARM64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { let canonicalize = canonicalize && self.arch_supports_canonicalize_nan(); self.memory_op( target_addr, @@ -4544,26 +5036,31 @@ impl Machine for MachineARM64 { heap_access_oob, |this, addr| { if !canonicalize { - this.emit_relaxed_str64(target_value, Location::Memory(addr, 0)); + this.emit_relaxed_str64(target_value, Location::Memory(addr, 0)) } else { - this.canonicalize_nan(Size::S64, target_value, Location::Memory(addr, 0)); + this.canonicalize_nan(Size::S64, target_value, Location::Memory(addr, 0)) } }, - ); + ) } - fn convert_f64_i64(&mut self, loc: Location, signed: bool, ret: Location) { + fn convert_f64_i64( + &mut self, + loc: Location, + signed: bool, + ret: Location, + ) -> Result<(), CodegenError> { let mut gprs = vec![]; let mut neons = vec![]; - let src = self.location_to_reg(Size::S64, loc, &mut gprs, ImmType::NoneXzr, true, None); - let dest = self.location_to_neon(Size::S64, ret, &mut neons, ImmType::None, false); + let src = self.location_to_reg(Size::S64, loc, &mut gprs, ImmType::NoneXzr, true, None)?; + let dest = self.location_to_neon(Size::S64, ret, &mut neons, ImmType::None, false)?; if signed { - 
self.assembler.emit_scvtf(Size::S64, src, Size::S64, dest); + self.assembler.emit_scvtf(Size::S64, src, Size::S64, dest)?; } else { - self.assembler.emit_ucvtf(Size::S64, src, Size::S64, dest); + self.assembler.emit_ucvtf(Size::S64, src, Size::S64, dest)?; } if ret != dest { - self.move_location(Size::S64, dest, ret); + self.move_location(Size::S64, dest, ret)?; } for r in gprs { self.release_gpr(r); @@ -4571,19 +5068,25 @@ impl Machine for MachineARM64 { for r in neons { self.release_simd(r); } + Ok(()) } - fn convert_f64_i32(&mut self, loc: Location, signed: bool, ret: Location) { + fn convert_f64_i32( + &mut self, + loc: Location, + signed: bool, + ret: Location, + ) -> Result<(), CodegenError> { let mut gprs = vec![]; let mut neons = vec![]; - let src = self.location_to_reg(Size::S32, loc, &mut gprs, ImmType::NoneXzr, true, None); - let dest = self.location_to_neon(Size::S64, ret, &mut neons, ImmType::None, false); + let src = self.location_to_reg(Size::S32, loc, &mut gprs, ImmType::NoneXzr, true, None)?; + let dest = self.location_to_neon(Size::S64, ret, &mut neons, ImmType::None, false)?; if signed { - self.assembler.emit_scvtf(Size::S32, src, Size::S64, dest); + self.assembler.emit_scvtf(Size::S32, src, Size::S64, dest)?; } else { - self.assembler.emit_ucvtf(Size::S32, src, Size::S64, dest); + self.assembler.emit_ucvtf(Size::S32, src, Size::S64, dest)?; } if ret != dest { - self.move_location(Size::S64, dest, ret); + self.move_location(Size::S64, dest, ret)?; } for r in gprs { self.release_gpr(r); @@ -4591,19 +5094,25 @@ impl Machine for MachineARM64 { for r in neons { self.release_simd(r); } + Ok(()) } - fn convert_f32_i64(&mut self, loc: Location, signed: bool, ret: Location) { + fn convert_f32_i64( + &mut self, + loc: Location, + signed: bool, + ret: Location, + ) -> Result<(), CodegenError> { let mut gprs = vec![]; let mut neons = vec![]; - let src = self.location_to_reg(Size::S64, loc, &mut gprs, ImmType::NoneXzr, true, None); - let dest = 
self.location_to_neon(Size::S32, ret, &mut neons, ImmType::None, false); + let src = self.location_to_reg(Size::S64, loc, &mut gprs, ImmType::NoneXzr, true, None)?; + let dest = self.location_to_neon(Size::S32, ret, &mut neons, ImmType::None, false)?; if signed { - self.assembler.emit_scvtf(Size::S64, src, Size::S32, dest); + self.assembler.emit_scvtf(Size::S64, src, Size::S32, dest)?; } else { - self.assembler.emit_ucvtf(Size::S64, src, Size::S32, dest); + self.assembler.emit_ucvtf(Size::S64, src, Size::S32, dest)?; } if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in gprs { self.release_gpr(r); @@ -4611,19 +5120,25 @@ impl Machine for MachineARM64 { for r in neons { self.release_simd(r); } + Ok(()) } - fn convert_f32_i32(&mut self, loc: Location, signed: bool, ret: Location) { + fn convert_f32_i32( + &mut self, + loc: Location, + signed: bool, + ret: Location, + ) -> Result<(), CodegenError> { let mut gprs = vec![]; let mut neons = vec![]; - let src = self.location_to_reg(Size::S32, loc, &mut gprs, ImmType::NoneXzr, true, None); - let dest = self.location_to_neon(Size::S32, ret, &mut neons, ImmType::None, false); + let src = self.location_to_reg(Size::S32, loc, &mut gprs, ImmType::NoneXzr, true, None)?; + let dest = self.location_to_neon(Size::S32, ret, &mut neons, ImmType::None, false)?; if signed { - self.assembler.emit_scvtf(Size::S32, src, Size::S32, dest); + self.assembler.emit_scvtf(Size::S32, src, Size::S32, dest)?; } else { - self.assembler.emit_ucvtf(Size::S32, src, Size::S32, dest); + self.assembler.emit_ucvtf(Size::S32, src, Size::S32, dest)?; } if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in gprs { self.release_gpr(r); @@ -4631,28 +5146,37 @@ impl Machine for MachineARM64 { for r in neons { self.release_simd(r); } + Ok(()) } - fn convert_i64_f64(&mut self, loc: Location, ret: Location, signed: bool, sat: bool) { + fn 
convert_i64_f64( + &mut self, + loc: Location, + ret: Location, + signed: bool, + sat: bool, + ) -> Result<(), CodegenError> { let mut gprs = vec![]; let mut neons = vec![]; - let src = self.location_to_neon(Size::S64, loc, &mut neons, ImmType::None, true); - let dest = self.location_to_reg(Size::S64, ret, &mut gprs, ImmType::None, false, None); + let src = self.location_to_neon(Size::S64, loc, &mut neons, ImmType::None, true)?; + let dest = self.location_to_reg(Size::S64, ret, &mut gprs, ImmType::None, false, None)?; let old_fpcr = if !sat { - self.reset_exception_fpsr(); - self.set_trap_enabled(&mut gprs) + self.reset_exception_fpsr()?; + self.set_trap_enabled(&mut gprs)? } else { GPR::XzrSp }; if signed { - self.assembler.emit_fcvtzs(Size::S64, src, Size::S64, dest); + self.assembler + .emit_fcvtzs(Size::S64, src, Size::S64, dest)?; } else { - self.assembler.emit_fcvtzu(Size::S64, src, Size::S64, dest); + self.assembler + .emit_fcvtzu(Size::S64, src, Size::S64, dest)?; } if !sat { - self.trap_float_convertion_errors(old_fpcr, Size::S64, src, &mut gprs); + self.trap_float_convertion_errors(old_fpcr, Size::S64, src, &mut gprs)?; } if ret != dest { - self.move_location(Size::S64, dest, ret); + self.move_location(Size::S64, dest, ret)?; } for r in gprs { self.release_gpr(r); @@ -4660,28 +5184,37 @@ impl Machine for MachineARM64 { for r in neons { self.release_simd(r); } + Ok(()) } - fn convert_i32_f64(&mut self, loc: Location, ret: Location, signed: bool, sat: bool) { + fn convert_i32_f64( + &mut self, + loc: Location, + ret: Location, + signed: bool, + sat: bool, + ) -> Result<(), CodegenError> { let mut gprs = vec![]; let mut neons = vec![]; - let src = self.location_to_neon(Size::S64, loc, &mut neons, ImmType::None, true); - let dest = self.location_to_reg(Size::S32, ret, &mut gprs, ImmType::None, false, None); + let src = self.location_to_neon(Size::S64, loc, &mut neons, ImmType::None, true)?; + let dest = self.location_to_reg(Size::S32, ret, &mut gprs, 
ImmType::None, false, None)?; let old_fpcr = if !sat { - self.reset_exception_fpsr(); - self.set_trap_enabled(&mut gprs) + self.reset_exception_fpsr()?; + self.set_trap_enabled(&mut gprs)? } else { GPR::XzrSp }; if signed { - self.assembler.emit_fcvtzs(Size::S64, src, Size::S32, dest); + self.assembler + .emit_fcvtzs(Size::S64, src, Size::S32, dest)?; } else { - self.assembler.emit_fcvtzu(Size::S64, src, Size::S32, dest); + self.assembler + .emit_fcvtzu(Size::S64, src, Size::S32, dest)?; } if !sat { - self.trap_float_convertion_errors(old_fpcr, Size::S64, src, &mut gprs); + self.trap_float_convertion_errors(old_fpcr, Size::S64, src, &mut gprs)?; } if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in gprs { self.release_gpr(r); @@ -4689,28 +5222,37 @@ impl Machine for MachineARM64 { for r in neons { self.release_simd(r); } + Ok(()) } - fn convert_i64_f32(&mut self, loc: Location, ret: Location, signed: bool, sat: bool) { + fn convert_i64_f32( + &mut self, + loc: Location, + ret: Location, + signed: bool, + sat: bool, + ) -> Result<(), CodegenError> { let mut gprs = vec![]; let mut neons = vec![]; - let src = self.location_to_neon(Size::S32, loc, &mut neons, ImmType::None, true); - let dest = self.location_to_reg(Size::S64, ret, &mut gprs, ImmType::None, false, None); + let src = self.location_to_neon(Size::S32, loc, &mut neons, ImmType::None, true)?; + let dest = self.location_to_reg(Size::S64, ret, &mut gprs, ImmType::None, false, None)?; let old_fpcr = if !sat { - self.reset_exception_fpsr(); - self.set_trap_enabled(&mut gprs) + self.reset_exception_fpsr()?; + self.set_trap_enabled(&mut gprs)? 
} else { GPR::XzrSp }; if signed { - self.assembler.emit_fcvtzs(Size::S32, src, Size::S64, dest); + self.assembler + .emit_fcvtzs(Size::S32, src, Size::S64, dest)?; } else { - self.assembler.emit_fcvtzu(Size::S32, src, Size::S64, dest); + self.assembler + .emit_fcvtzu(Size::S32, src, Size::S64, dest)?; } if !sat { - self.trap_float_convertion_errors(old_fpcr, Size::S32, src, &mut gprs); + self.trap_float_convertion_errors(old_fpcr, Size::S32, src, &mut gprs)?; } if ret != dest { - self.move_location(Size::S64, dest, ret); + self.move_location(Size::S64, dest, ret)?; } for r in gprs { self.release_gpr(r); @@ -4718,28 +5260,37 @@ impl Machine for MachineARM64 { for r in neons { self.release_simd(r); } + Ok(()) } - fn convert_i32_f32(&mut self, loc: Location, ret: Location, signed: bool, sat: bool) { + fn convert_i32_f32( + &mut self, + loc: Location, + ret: Location, + signed: bool, + sat: bool, + ) -> Result<(), CodegenError> { let mut gprs = vec![]; let mut neons = vec![]; - let src = self.location_to_neon(Size::S32, loc, &mut neons, ImmType::None, true); - let dest = self.location_to_reg(Size::S32, ret, &mut gprs, ImmType::None, false, None); + let src = self.location_to_neon(Size::S32, loc, &mut neons, ImmType::None, true)?; + let dest = self.location_to_reg(Size::S32, ret, &mut gprs, ImmType::None, false, None)?; let old_fpcr = if !sat { - self.reset_exception_fpsr(); - self.set_trap_enabled(&mut gprs) + self.reset_exception_fpsr()?; + self.set_trap_enabled(&mut gprs)? 
} else { GPR::XzrSp }; if signed { - self.assembler.emit_fcvtzs(Size::S32, src, Size::S32, dest); + self.assembler + .emit_fcvtzs(Size::S32, src, Size::S32, dest)?; } else { - self.assembler.emit_fcvtzu(Size::S32, src, Size::S32, dest); + self.assembler + .emit_fcvtzu(Size::S32, src, Size::S32, dest)?; } if !sat { - self.trap_float_convertion_errors(old_fpcr, Size::S32, src, &mut gprs); + self.trap_float_convertion_errors(old_fpcr, Size::S32, src, &mut gprs)?; } if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in gprs { self.release_gpr(r); @@ -4747,142 +5298,187 @@ impl Machine for MachineARM64 { for r in neons { self.release_simd(r); } + Ok(()) } - fn convert_f64_f32(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_binop_neon(Assembler::emit_fcvt, Size::S32, loc, ret, true); + fn convert_f64_f32(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_binop_neon(Assembler::emit_fcvt, Size::S32, loc, ret, true) } - fn convert_f32_f64(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_binop_neon(Assembler::emit_fcvt, Size::S64, loc, ret, true); + fn convert_f32_f64(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_binop_neon(Assembler::emit_fcvt, Size::S64, loc, ret, true) } - fn f64_neg(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_binop_neon(Assembler::emit_fneg, Size::S64, loc, ret, true); + fn f64_neg(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_binop_neon(Assembler::emit_fneg, Size::S64, loc, ret, true) } - fn f64_abs(&mut self, loc: Location, ret: Location) { - let tmp = self.acquire_temp_gpr().unwrap(); + fn f64_abs(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; - 
self.move_location(Size::S64, loc, Location::GPR(tmp)); + self.move_location(Size::S64, loc, Location::GPR(tmp))?; self.assembler.emit_and( Size::S64, Location::GPR(tmp), Location::Imm64(0x7fffffffffffffffu64), Location::GPR(tmp), - ); - self.move_location(Size::S64, Location::GPR(tmp), ret); + )?; + self.move_location(Size::S64, Location::GPR(tmp), ret)?; self.release_gpr(tmp); + Ok(()) } - fn emit_i64_copysign(&mut self, tmp1: GPR, tmp2: GPR) { + fn emit_i64_copysign(&mut self, tmp1: GPR, tmp2: GPR) -> Result<(), CodegenError> { self.assembler.emit_and( Size::S64, Location::GPR(tmp1), Location::Imm64(0x7fffffffffffffffu64), Location::GPR(tmp1), - ); + )?; self.assembler.emit_and( Size::S64, Location::GPR(tmp2), Location::Imm64(0x8000000000000000u64), Location::GPR(tmp2), - ); + )?; self.assembler.emit_or( Size::S64, Location::GPR(tmp1), Location::GPR(tmp2), Location::GPR(tmp1), - ); + ) } - fn f64_sqrt(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_binop_neon(Assembler::emit_fsqrt, Size::S64, loc, ret, true); + fn f64_sqrt(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_binop_neon(Assembler::emit_fsqrt, Size::S64, loc, ret, true) } - fn f64_trunc(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_binop_neon(Assembler::emit_frintz, Size::S64, loc, ret, true); + fn f64_trunc(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_binop_neon(Assembler::emit_frintz, Size::S64, loc, ret, true) } - fn f64_ceil(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_binop_neon(Assembler::emit_frintp, Size::S64, loc, ret, true); + fn f64_ceil(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_binop_neon(Assembler::emit_frintp, Size::S64, loc, ret, true) } - fn f64_floor(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_binop_neon(Assembler::emit_frintm, Size::S64, loc, ret, true); + fn f64_floor(&mut self, 
loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_binop_neon(Assembler::emit_frintm, Size::S64, loc, ret, true) } - fn f64_nearest(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_binop_neon(Assembler::emit_frintn, Size::S64, loc, ret, true); + fn f64_nearest(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_binop_neon(Assembler::emit_frintn, Size::S64, loc, ret, true) } - fn f64_cmp_ge(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f64_cmp_ge( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None); - self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S64, loc_b, loc_a, false); - self.assembler.emit_cset(Size::S32, dest, Condition::Ls); + let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None)?; + self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S64, loc_b, loc_a, false)?; + self.assembler.emit_cset(Size::S32, dest, Condition::Ls)?; if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn f64_cmp_gt(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f64_cmp_gt( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None); - self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S64, loc_b, loc_a, false); - self.assembler.emit_cset(Size::S32, dest, Condition::Cc); + let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None)?; + self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S64, loc_b, loc_a, false)?; + self.assembler.emit_cset(Size::S32, dest, 
Condition::Cc)?; if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn f64_cmp_le(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f64_cmp_le( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None); - self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S64, loc_a, loc_b, false); - self.assembler.emit_cset(Size::S32, dest, Condition::Ls); + let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None)?; + self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S64, loc_a, loc_b, false)?; + self.assembler.emit_cset(Size::S32, dest, Condition::Ls)?; if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn f64_cmp_lt(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f64_cmp_lt( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None); - self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S64, loc_a, loc_b, false); - self.assembler.emit_cset(Size::S32, dest, Condition::Cc); + let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None)?; + self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S64, loc_a, loc_b, false)?; + self.assembler.emit_cset(Size::S32, dest, Condition::Cc)?; if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn f64_cmp_ne(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f64_cmp_ne( + &mut self, + 
loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None); - self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S64, loc_a, loc_b, false); - self.assembler.emit_cset(Size::S32, dest, Condition::Ne); + let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None)?; + self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S64, loc_a, loc_b, false)?; + self.assembler.emit_cset(Size::S32, dest, Condition::Ne)?; if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn f64_cmp_eq(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f64_cmp_eq( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None); - self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S64, loc_a, loc_b, false); - self.assembler.emit_cset(Size::S32, dest, Condition::Eq); + let dest = self.location_to_reg(Size::S64, ret, &mut temps, ImmType::None, false, None)?; + self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S64, loc_a, loc_b, false)?; + self.assembler.emit_cset(Size::S32, dest, Condition::Eq)?; if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn f64_min(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f64_min( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let old_fpcr = self.set_default_nan(&mut temps); + let old_fpcr = self.set_default_nan(&mut temps)?; self.emit_relaxed_binop3_neon( Assembler::emit_fmin, Size::S64, @@ -4890,15 
+5486,21 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::None, - ); - self.restore_fpcr(old_fpcr); + )?; + self.restore_fpcr(old_fpcr)?; for r in temps { self.release_gpr(r); } + Ok(()) } - fn f64_max(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f64_max( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let old_fpcr = self.set_default_nan(&mut temps); + let old_fpcr = self.set_default_nan(&mut temps)?; self.emit_relaxed_binop3_neon( Assembler::emit_fmax, Size::S64, @@ -4906,13 +5508,19 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::None, - ); - self.restore_fpcr(old_fpcr); + )?; + self.restore_fpcr(old_fpcr)?; for r in temps { self.release_gpr(r); } + Ok(()) } - fn f64_add(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f64_add( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3_neon( Assembler::emit_fadd, Size::S64, @@ -4920,9 +5528,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::None, - ); + ) } - fn f64_sub(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f64_sub( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3_neon( Assembler::emit_fsub, Size::S64, @@ -4930,9 +5543,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::None, - ); + ) } - fn f64_mul(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f64_mul( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3_neon( Assembler::emit_fmul, Size::S64, @@ -4940,9 +5558,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::None, - ); + ) } - fn f64_div(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f64_div( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> 
Result<(), CodegenError> { self.emit_relaxed_binop3_neon( Assembler::emit_fdiv, Size::S64, @@ -4950,133 +5573,177 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::None, - ); + ) } - fn f32_neg(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_binop_neon(Assembler::emit_fneg, Size::S32, loc, ret, true); + fn f32_neg(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_binop_neon(Assembler::emit_fneg, Size::S32, loc, ret, true) } - fn f32_abs(&mut self, loc: Location, ret: Location) { - let tmp = self.acquire_temp_gpr().unwrap(); - self.move_location(Size::S32, loc, Location::GPR(tmp)); + fn f32_abs(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location(Size::S32, loc, Location::GPR(tmp))?; self.assembler.emit_and( Size::S32, Location::GPR(tmp), Location::Imm32(0x7fffffffu32), Location::GPR(tmp), - ); - self.move_location(Size::S32, Location::GPR(tmp), ret); + )?; + self.move_location(Size::S32, Location::GPR(tmp), ret)?; self.release_gpr(tmp); + Ok(()) } - fn emit_i32_copysign(&mut self, tmp1: GPR, tmp2: GPR) { + fn emit_i32_copysign(&mut self, tmp1: GPR, tmp2: GPR) -> Result<(), CodegenError> { self.assembler.emit_and( Size::S32, Location::GPR(tmp1), Location::Imm32(0x7fffffffu32), Location::GPR(tmp1), - ); + )?; self.assembler.emit_and( Size::S32, Location::GPR(tmp2), Location::Imm32(0x80000000u32), Location::GPR(tmp2), - ); + )?; self.assembler.emit_or( Size::S32, Location::GPR(tmp1), Location::GPR(tmp2), Location::GPR(tmp1), - ); + ) } - fn f32_sqrt(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_binop_neon(Assembler::emit_fsqrt, Size::S32, loc, ret, true); + fn f32_sqrt(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_binop_neon(Assembler::emit_fsqrt, Size::S32, loc, ret, true) } - fn 
f32_trunc(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_binop_neon(Assembler::emit_frintz, Size::S32, loc, ret, true); + fn f32_trunc(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_binop_neon(Assembler::emit_frintz, Size::S32, loc, ret, true) } - fn f32_ceil(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_binop_neon(Assembler::emit_frintp, Size::S32, loc, ret, true); + fn f32_ceil(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_binop_neon(Assembler::emit_frintp, Size::S32, loc, ret, true) } - fn f32_floor(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_binop_neon(Assembler::emit_frintm, Size::S32, loc, ret, true); + fn f32_floor(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_binop_neon(Assembler::emit_frintm, Size::S32, loc, ret, true) } - fn f32_nearest(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_binop_neon(Assembler::emit_frintn, Size::S32, loc, ret, true); + fn f32_nearest(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_binop_neon(Assembler::emit_frintn, Size::S32, loc, ret, true) } - fn f32_cmp_ge(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f32_cmp_ge( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None); - self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S32, loc_b, loc_a, false); - self.assembler.emit_cset(Size::S32, dest, Condition::Ls); + let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None)?; + self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S32, loc_b, loc_a, false)?; + self.assembler.emit_cset(Size::S32, dest, Condition::Ls)?; if ret != dest { - self.move_location(Size::S32, dest, ret); 
+ self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn f32_cmp_gt(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f32_cmp_gt( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None); - self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S32, loc_b, loc_a, false); - self.assembler.emit_cset(Size::S32, dest, Condition::Cc); + let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None)?; + self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S32, loc_b, loc_a, false)?; + self.assembler.emit_cset(Size::S32, dest, Condition::Cc)?; if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn f32_cmp_le(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f32_cmp_le( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None); - self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S32, loc_a, loc_b, false); - self.assembler.emit_cset(Size::S32, dest, Condition::Ls); + let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None)?; + self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S32, loc_a, loc_b, false)?; + self.assembler.emit_cset(Size::S32, dest, Condition::Ls)?; if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn f32_cmp_lt(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f32_cmp_lt( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), 
CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None); - self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S32, loc_a, loc_b, false); - self.assembler.emit_cset(Size::S32, dest, Condition::Cc); + let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None)?; + self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S32, loc_a, loc_b, false)?; + self.assembler.emit_cset(Size::S32, dest, Condition::Cc)?; if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn f32_cmp_ne(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f32_cmp_ne( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None); - self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S32, loc_a, loc_b, false); - self.assembler.emit_cset(Size::S32, dest, Condition::Ne); + let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None)?; + self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S32, loc_a, loc_b, false)?; + self.assembler.emit_cset(Size::S32, dest, Condition::Ne)?; if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn f32_cmp_eq(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f32_cmp_eq( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None); - self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S32, loc_a, loc_b, false); - self.assembler.emit_cset(Size::S32, dest, Condition::Eq); + let 
dest = self.location_to_reg(Size::S32, ret, &mut temps, ImmType::None, false, None)?; + self.emit_relaxed_binop_neon(Assembler::emit_fcmp, Size::S32, loc_a, loc_b, false)?; + self.assembler.emit_cset(Size::S32, dest, Condition::Eq)?; if ret != dest { - self.move_location(Size::S32, dest, ret); + self.move_location(Size::S32, dest, ret)?; } for r in temps { self.release_gpr(r); } + Ok(()) } - fn f32_min(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f32_min( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let old_fpcr = self.set_default_nan(&mut temps); + let old_fpcr = self.set_default_nan(&mut temps)?; self.emit_relaxed_binop3_neon( Assembler::emit_fmin, Size::S32, @@ -5084,15 +5751,21 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::None, - ); - self.restore_fpcr(old_fpcr); + )?; + self.restore_fpcr(old_fpcr)?; for r in temps { self.release_gpr(r); } + Ok(()) } - fn f32_max(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f32_max( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { let mut temps = vec![]; - let old_fpcr = self.set_default_nan(&mut temps); + let old_fpcr = self.set_default_nan(&mut temps)?; self.emit_relaxed_binop3_neon( Assembler::emit_fmax, Size::S32, @@ -5100,13 +5773,19 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::None, - ); - self.restore_fpcr(old_fpcr); + )?; + self.restore_fpcr(old_fpcr)?; for r in temps { self.release_gpr(r); } + Ok(()) } - fn f32_add(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f32_add( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3_neon( Assembler::emit_fadd, Size::S32, @@ -5114,9 +5793,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::None, - ); + ) } - fn f32_sub(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + 
fn f32_sub( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3_neon( Assembler::emit_fsub, Size::S32, @@ -5124,9 +5808,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::None, - ); + ) } - fn f32_mul(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f32_mul( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3_neon( Assembler::emit_fmul, Size::S32, @@ -5134,9 +5823,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::None, - ); + ) } - fn f32_div(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f32_div( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { self.emit_relaxed_binop3_neon( Assembler::emit_fdiv, Size::S32, @@ -5144,14 +5838,14 @@ impl Machine for MachineARM64 { loc_b, ret, ImmType::None, - ); + ) } fn gen_std_trampoline( &self, sig: &FunctionType, calling_convention: CallingConvention, - ) -> FunctionBody { + ) -> Result { gen_std_trampoline_arm64(sig, calling_convention) } // Generates dynamic import function call trampoline for a function type. @@ -5160,7 +5854,7 @@ impl Machine for MachineARM64 { vmoffsets: &VMOffsets, sig: &FunctionType, calling_convention: CallingConvention, - ) -> FunctionBody { + ) -> Result { gen_std_dynamic_import_trampoline_arm64(vmoffsets, sig, calling_convention) } // Singlepass calls import functions through a trampoline. 
@@ -5170,7 +5864,7 @@ impl Machine for MachineARM64 { index: FunctionIndex, sig: &FunctionType, calling_convention: CallingConvention, - ) -> CustomSection { + ) -> Result { gen_import_call_trampoline_arm64(vmoffsets, index, sig, calling_convention) } #[cfg(feature = "unwind")] diff --git a/lib/compiler-singlepass/src/machine_x64.rs b/lib/compiler-singlepass/src/machine_x64.rs index 66ac19d648b..0bc827f2480 100644 --- a/lib/compiler-singlepass/src/machine_x64.rs +++ b/lib/compiler-singlepass/src/machine_x64.rs @@ -1,3 +1,4 @@ +use crate::codegen_error; use crate::common_decl::*; use crate::emitter_x64::*; use crate::location::Location as AbstractLocation; @@ -134,11 +135,11 @@ impl MachineX86_64 { } pub fn emit_relaxed_binop( &mut self, - op: fn(&mut AssemblerX64, Size, Location, Location), + op: fn(&mut AssemblerX64, Size, Location, Location) -> Result<(), CodegenError>, sz: Size, src: Location, dst: Location, - ) { + ) -> Result<(), CodegenError> { enum RelaxMode { Direct, SrcToGPR, @@ -172,31 +173,39 @@ impl MachineX86_64 { match mode { RelaxMode::SrcToGPR => { - let temp = self.acquire_temp_gpr().unwrap(); - self.move_location(sz, src, Location::GPR(temp)); - op(&mut self.assembler, sz, Location::GPR(temp), dst); + let temp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location(sz, src, Location::GPR(temp))?; + op(&mut self.assembler, sz, Location::GPR(temp), dst)?; self.release_gpr(temp); } RelaxMode::DstToGPR => { - let temp = self.acquire_temp_gpr().unwrap(); - self.move_location(sz, dst, Location::GPR(temp)); - op(&mut self.assembler, sz, src, Location::GPR(temp)); + let temp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location(sz, dst, Location::GPR(temp))?; + op(&mut self.assembler, sz, src, Location::GPR(temp))?; self.release_gpr(temp); } RelaxMode::BothToGPR => { - let temp_src = 
self.acquire_temp_gpr().unwrap(); - let temp_dst = self.acquire_temp_gpr().unwrap(); - self.move_location(sz, src, Location::GPR(temp_src)); - self.move_location(sz, dst, Location::GPR(temp_dst)); + let temp_src = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let temp_dst = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location(sz, src, Location::GPR(temp_src))?; + self.move_location(sz, dst, Location::GPR(temp_dst))?; op( &mut self.assembler, sz, Location::GPR(temp_src), Location::GPR(temp_dst), - ); + )?; match dst { Location::Memory(_, _) | Location::GPR(_) => { - self.move_location(sz, Location::GPR(temp_dst), dst); + self.move_location(sz, Location::GPR(temp_dst), dst)?; } _ => {} } @@ -204,45 +213,50 @@ impl MachineX86_64 { self.release_gpr(temp_src); } RelaxMode::Direct => { - op(&mut self.assembler, sz, src, dst); + op(&mut self.assembler, sz, src, dst)?; } } + Ok(()) } pub fn emit_relaxed_zx_sx( &mut self, - op: fn(&mut AssemblerX64, Size, Location, Size, Location), + op: fn(&mut AssemblerX64, Size, Location, Size, Location) -> Result<(), CodegenError>, sz_src: Size, src: Location, sz_dst: Size, dst: Location, - ) { + ) -> Result<(), CodegenError> { match src { Location::Imm32(_) | Location::Imm64(_) => { - let tmp_src = self.acquire_temp_gpr().unwrap(); + let tmp_src = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_mov(Size::S64, src, Location::GPR(tmp_src)); + .emit_mov(Size::S64, src, Location::GPR(tmp_src))?; let src = Location::GPR(tmp_src); match dst { Location::Imm32(_) | Location::Imm64(_) => unreachable!(), Location::Memory(_, _) => { - let tmp_dst = self.acquire_temp_gpr().unwrap(); + let tmp_dst = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; op( 
&mut self.assembler, sz_src, src, sz_dst, Location::GPR(tmp_dst), - ); - self.move_location(Size::S64, Location::GPR(tmp_dst), dst); + )?; + self.move_location(Size::S64, Location::GPR(tmp_dst), dst)?; self.release_gpr(tmp_dst); } Location::GPR(_) => { - op(&mut self.assembler, sz_src, src, sz_dst, dst); + op(&mut self.assembler, sz_src, src, sz_dst, dst)?; } _ => { - unreachable!(); + codegen_error!("singlepass emit_relaxed_zx_sx unreachable"); } }; @@ -252,66 +266,75 @@ impl MachineX86_64 { match dst { Location::Imm32(_) | Location::Imm64(_) => unreachable!(), Location::Memory(_, _) => { - let tmp_dst = self.acquire_temp_gpr().unwrap(); + let tmp_dst = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; op( &mut self.assembler, sz_src, src, sz_dst, Location::GPR(tmp_dst), - ); - self.move_location(Size::S64, Location::GPR(tmp_dst), dst); + )?; + self.move_location(Size::S64, Location::GPR(tmp_dst), dst)?; self.release_gpr(tmp_dst); } Location::GPR(_) => { - op(&mut self.assembler, sz_src, src, sz_dst, dst); + op(&mut self.assembler, sz_src, src, sz_dst, dst)?; } _ => { - unreachable!(); + codegen_error!("singlepass emit_relaxed_zx_sx unreachable"); } }; } _ => { - unreachable!(); + codegen_error!("singlepass emit_relaxed_zx_sx unreachable"); } } + Ok(()) } /// I32 binary operation with both operands popped from the virtual stack. 
fn emit_binop_i32( &mut self, - f: fn(&mut AssemblerX64, Size, Location, Location), + f: fn(&mut AssemblerX64, Size, Location, Location) -> Result<(), CodegenError>, loc_a: Location, loc_b: Location, ret: Location, - ) { + ) -> Result<(), CodegenError> { if loc_a != ret { - let tmp = self.acquire_temp_gpr().unwrap(); - self.emit_relaxed_mov(Size::S32, loc_a, Location::GPR(tmp)); - self.emit_relaxed_binop(f, Size::S32, loc_b, Location::GPR(tmp)); - self.emit_relaxed_mov(Size::S32, Location::GPR(tmp), ret); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.emit_relaxed_mov(Size::S32, loc_a, Location::GPR(tmp))?; + self.emit_relaxed_binop(f, Size::S32, loc_b, Location::GPR(tmp))?; + self.emit_relaxed_mov(Size::S32, Location::GPR(tmp), ret)?; self.release_gpr(tmp); } else { - self.emit_relaxed_binop(f, Size::S32, loc_b, ret); + self.emit_relaxed_binop(f, Size::S32, loc_b, ret)?; } + Ok(()) } /// I64 binary operation with both operands popped from the virtual stack. 
fn emit_binop_i64( &mut self, - f: fn(&mut AssemblerX64, Size, Location, Location), + f: fn(&mut AssemblerX64, Size, Location, Location) -> Result<(), CodegenError>, loc_a: Location, loc_b: Location, ret: Location, - ) { + ) -> Result<(), CodegenError> { if loc_a != ret { - let tmp = self.acquire_temp_gpr().unwrap(); - self.emit_relaxed_mov(Size::S64, loc_a, Location::GPR(tmp)); - self.emit_relaxed_binop(f, Size::S64, loc_b, Location::GPR(tmp)); - self.emit_relaxed_mov(Size::S64, Location::GPR(tmp), ret); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.emit_relaxed_mov(Size::S64, loc_a, Location::GPR(tmp))?; + self.emit_relaxed_binop(f, Size::S64, loc_b, Location::GPR(tmp))?; + self.emit_relaxed_mov(Size::S64, Location::GPR(tmp), ret)?; self.release_gpr(tmp); } else { - self.emit_relaxed_binop(f, Size::S64, loc_b, ret); + self.emit_relaxed_binop(f, Size::S64, loc_b, ret)?; } + Ok(()) } /// I64 comparison with. 
fn emit_cmpop_i64_dynamic_b( @@ -320,70 +343,73 @@ impl MachineX86_64 { loc_a: Location, loc_b: Location, ret: Location, - ) { + ) -> Result<(), CodegenError> { match ret { Location::GPR(x) => { - self.emit_relaxed_cmp(Size::S64, loc_b, loc_a); - self.assembler.emit_set(c, x); + self.emit_relaxed_cmp(Size::S64, loc_b, loc_a)?; + self.assembler.emit_set(c, x)?; self.assembler - .emit_and(Size::S32, Location::Imm32(0xff), Location::GPR(x)); + .emit_and(Size::S32, Location::Imm32(0xff), Location::GPR(x))?; } Location::Memory(_, _) => { - let tmp = self.acquire_temp_gpr().unwrap(); - self.emit_relaxed_cmp(Size::S64, loc_b, loc_a); - self.assembler.emit_set(c, tmp); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.emit_relaxed_cmp(Size::S64, loc_b, loc_a)?; + self.assembler.emit_set(c, tmp)?; self.assembler - .emit_and(Size::S32, Location::Imm32(0xff), Location::GPR(tmp)); - self.move_location(Size::S32, Location::GPR(tmp), ret); + .emit_and(Size::S32, Location::Imm32(0xff), Location::GPR(tmp))?; + self.move_location(Size::S32, Location::GPR(tmp), ret)?; self.release_gpr(tmp); } _ => { - unreachable!(); + codegen_error!("singlepass emit_cmpop_i64_dynamic_b unreachable"); } } + Ok(()) } /// I64 shift with both operands popped from the virtual stack. 
fn emit_shift_i64( &mut self, - f: fn(&mut AssemblerX64, Size, Location, Location), + f: fn(&mut AssemblerX64, Size, Location, Location) -> Result<(), CodegenError>, loc_a: Location, loc_b: Location, ret: Location, - ) { + ) -> Result<(), CodegenError> { self.assembler - .emit_mov(Size::S64, loc_b, Location::GPR(GPR::RCX)); + .emit_mov(Size::S64, loc_b, Location::GPR(GPR::RCX))?; if loc_a != ret { - self.emit_relaxed_mov(Size::S64, loc_a, ret); + self.emit_relaxed_mov(Size::S64, loc_a, ret)?; } - f(&mut self.assembler, Size::S64, Location::GPR(GPR::RCX), ret); + f(&mut self.assembler, Size::S64, Location::GPR(GPR::RCX), ret) } /// Moves `loc` to a valid location for `div`/`idiv`. fn emit_relaxed_xdiv( &mut self, - op: fn(&mut AssemblerX64, Size, Location), + op: fn(&mut AssemblerX64, Size, Location) -> Result<(), CodegenError>, sz: Size, loc: Location, integer_division_by_zero: Label, - ) -> usize { - self.assembler.emit_cmp(sz, Location::Imm32(0), loc); + ) -> Result { + self.assembler.emit_cmp(sz, Location::Imm32(0), loc)?; self.assembler - .emit_jmp(Condition::Equal, integer_division_by_zero); + .emit_jmp(Condition::Equal, integer_division_by_zero)?; match loc { Location::Imm64(_) | Location::Imm32(_) => { - self.move_location(sz, loc, Location::GPR(GPR::RCX)); // must not be used during div (rax, rdx) + self.move_location(sz, loc, Location::GPR(GPR::RCX))?; // must not be used during div (rax, rdx) let offset = self.mark_instruction_with_trap_code(TrapCode::IntegerOverflow); - op(&mut self.assembler, sz, Location::GPR(GPR::RCX)); + op(&mut self.assembler, sz, Location::GPR(GPR::RCX))?; self.mark_instruction_address_end(offset); - offset + Ok(offset) } _ => { let offset = self.mark_instruction_with_trap_code(TrapCode::IntegerOverflow); - op(&mut self.assembler, sz, loc); + op(&mut self.assembler, sz, loc)?; self.mark_instruction_address_end(offset); - offset + Ok(offset) } } } @@ -394,48 +420,51 @@ impl MachineX86_64 { loc_a: Location, loc_b: Location, ret: 
Location, - ) { + ) -> Result<(), CodegenError> { match ret { Location::GPR(x) => { - self.emit_relaxed_cmp(Size::S32, loc_b, loc_a); - self.assembler.emit_set(c, x); + self.emit_relaxed_cmp(Size::S32, loc_b, loc_a)?; + self.assembler.emit_set(c, x)?; self.assembler - .emit_and(Size::S32, Location::Imm32(0xff), Location::GPR(x)); + .emit_and(Size::S32, Location::Imm32(0xff), Location::GPR(x))?; } Location::Memory(_, _) => { - let tmp = self.acquire_temp_gpr().unwrap(); - self.emit_relaxed_cmp(Size::S32, loc_b, loc_a); - self.assembler.emit_set(c, tmp); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.emit_relaxed_cmp(Size::S32, loc_b, loc_a)?; + self.assembler.emit_set(c, tmp)?; self.assembler - .emit_and(Size::S32, Location::Imm32(0xff), Location::GPR(tmp)); - self.move_location(Size::S32, Location::GPR(tmp), ret); + .emit_and(Size::S32, Location::Imm32(0xff), Location::GPR(tmp))?; + self.move_location(Size::S32, Location::GPR(tmp), ret)?; self.release_gpr(tmp); } _ => { - unreachable!(); + codegen_error!("singlepass emit_cmpop_i32_dynamic_b unreachable"); } } + Ok(()) } /// I32 shift with both operands popped from the virtual stack. 
fn emit_shift_i32( &mut self, - f: fn(&mut AssemblerX64, Size, Location, Location), + f: fn(&mut AssemblerX64, Size, Location, Location) -> Result<(), CodegenError>, loc_a: Location, loc_b: Location, ret: Location, - ) { + ) -> Result<(), CodegenError> { self.assembler - .emit_mov(Size::S32, loc_b, Location::GPR(GPR::RCX)); + .emit_mov(Size::S32, loc_b, Location::GPR(GPR::RCX))?; if loc_a != ret { - self.emit_relaxed_mov(Size::S32, loc_a, ret); + self.emit_relaxed_mov(Size::S32, loc_a, ret)?; } - f(&mut self.assembler, Size::S32, Location::GPR(GPR::RCX), ret); + f(&mut self.assembler, Size::S32, Location::GPR(GPR::RCX), ret) } #[allow(clippy::too_many_arguments)] - fn memory_op( + fn memory_op Result<(), CodegenError>>( &mut self, addr: Location, memarg: &MemoryImmediate, @@ -446,14 +475,18 @@ impl MachineX86_64 { offset: i32, heap_access_oob: Label, cb: F, - ) { + ) -> Result<(), CodegenError> { // This function as been re-writen to use only 2 temporary register instead of 3 // without compromisong on the perfomances. // The number of memory move should be equivalent to previous 3-temp regs version // Register pressure is high on x86_64, and this is needed to be able to use // instruction that neead RAX, like cmpxchg for example - let tmp_addr = self.acquire_temp_gpr().unwrap(); - let tmp2 = self.acquire_temp_gpr().unwrap(); + let tmp_addr = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp2 = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; // Reusing `tmp_addr` for temporary indirection here, since it's not used before the last reference to `{base,bound}_loc`. 
let base_loc = if imported_memories { @@ -463,7 +496,7 @@ impl MachineX86_64 { Size::S64, Location::Memory(self.get_vmctx_reg(), offset), Location::GPR(tmp2), - ); + )?; Location::Memory(tmp2, 0) } else { Location::Memory(self.get_vmctx_reg(), offset) @@ -471,13 +504,13 @@ impl MachineX86_64 { // Load base into temporary register. self.assembler - .emit_mov(Size::S64, base_loc, Location::GPR(tmp2)); + .emit_mov(Size::S64, base_loc, Location::GPR(tmp2))?; // Load effective address. // `base_loc` and `bound_loc` becomes INVALID after this line, because `tmp_addr` // might be reused. self.assembler - .emit_mov(Size::S32, addr, Location::GPR(tmp_addr)); + .emit_mov(Size::S32, addr, Location::GPR(tmp_addr))?; // Add offset to memory address. if memarg.offset != 0 { @@ -485,10 +518,10 @@ impl MachineX86_64 { Size::S32, Location::Imm32(memarg.offset as u32), Location::GPR(tmp_addr), - ); + )?; // Trap if offset calculation overflowed. - self.assembler.emit_jmp(Condition::Carry, heap_access_oob); + self.assembler.emit_jmp(Condition::Carry, heap_access_oob)?; } if need_check { @@ -499,26 +532,26 @@ impl MachineX86_64 { Size::S64, Location::Memory(self.get_vmctx_reg(), offset), Location::GPR(tmp2), - ); + )?; Location::Memory(tmp2, 8) } else { Location::Memory(self.get_vmctx_reg(), offset + 8) }; self.assembler - .emit_mov(Size::S64, bound_loc, Location::GPR(tmp2)); + .emit_mov(Size::S64, bound_loc, Location::GPR(tmp2))?; // We will compare the upper bound limit without having add the "temp_base" value, as it's a constant self.assembler.emit_lea( Size::S64, Location::Memory(tmp2, -(value_size as i32)), Location::GPR(tmp2), - ); + )?; // Trap if the end address of the requested area is above that of the linear memory. self.assembler - .emit_cmp(Size::S64, Location::GPR(tmp2), Location::GPR(tmp_addr)); + .emit_cmp(Size::S64, Location::GPR(tmp2), Location::GPR(tmp_addr))?; // `tmp_bound` is inclusive. So trap only if `tmp_addr > tmp_bound`. 
- self.assembler.emit_jmp(Condition::Above, heap_access_oob); + self.assembler.emit_jmp(Condition::Above, heap_access_oob)?; } // get back baseloc, as it might have been destroid with the upper memory test let base_loc = if imported_memories { @@ -528,44 +561,47 @@ impl MachineX86_64 { Size::S64, Location::Memory(self.get_vmctx_reg(), offset), Location::GPR(tmp2), - ); + )?; Location::Memory(tmp2, 0) } else { Location::Memory(self.get_vmctx_reg(), offset) }; // Wasm linear memory -> real memory self.assembler - .emit_add(Size::S64, base_loc, Location::GPR(tmp_addr)); + .emit_add(Size::S64, base_loc, Location::GPR(tmp_addr))?; self.release_gpr(tmp2); let align = memarg.align; if check_alignment && align != 1 { - let tmp_aligncheck = self.acquire_temp_gpr().unwrap(); + let tmp_aligncheck = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler.emit_mov( Size::S32, Location::GPR(tmp_addr), Location::GPR(tmp_aligncheck), - ); + )?; self.assembler.emit_and( Size::S64, Location::Imm32((align - 1).into()), Location::GPR(tmp_aligncheck), - ); + )?; self.assembler - .emit_jmp(Condition::NotEqual, heap_access_oob); + .emit_jmp(Condition::NotEqual, heap_access_oob)?; self.release_gpr(tmp_aligncheck); } let begin = self.assembler.get_offset().0; - cb(self, tmp_addr); + cb(self, tmp_addr)?; let end = self.assembler.get_offset().0; self.mark_address_range_with_trap_code(TrapCode::HeapAccessOutOfBounds, begin, end); self.release_gpr(tmp_addr); + Ok(()) } #[allow(clippy::too_many_arguments)] - fn emit_compare_and_swap( + fn emit_compare_and_swap Result<(), CodegenError>>( &mut self, loc: Location, target: Location, @@ -579,9 +615,9 @@ impl MachineX86_64 { offset: i32, heap_access_oob: Label, cb: F, - ) { + ) -> Result<(), CodegenError> { if memory_sz > stack_sz { - unreachable!(); + codegen_error!("singlepass emit_compare_and_swap unreachable"); } let compare = self.reserve_unused_temp_gpr(GPR::RAX); @@ 
-590,12 +626,12 @@ impl MachineX86_64 { } else { GPR::R14 }; - self.assembler.emit_push(Size::S64, Location::GPR(value)); + self.assembler.emit_push(Size::S64, Location::GPR(value))?; - self.move_location(stack_sz, loc, Location::GPR(value)); + self.move_location(stack_sz, loc, Location::GPR(value))?; let retry = self.assembler.get_label(); - self.emit_label(retry); + self.emit_label(retry)?; self.memory_op( target, @@ -607,21 +643,22 @@ impl MachineX86_64 { offset, heap_access_oob, |this, addr| { - this.load_address(memory_sz, Location::GPR(compare), Location::Memory(addr, 0)); - this.move_location(stack_sz, Location::GPR(compare), ret); - cb(this, compare, value); + this.load_address(memory_sz, Location::GPR(compare), Location::Memory(addr, 0))?; + this.move_location(stack_sz, Location::GPR(compare), ret)?; + cb(this, compare, value)?; this.assembler.emit_lock_cmpxchg( memory_sz, Location::GPR(value), Location::Memory(addr, 0), - ); + ) }, - ); + )?; - self.jmp_on_different(retry); + self.jmp_on_different(retry)?; - self.assembler.emit_pop(Size::S64, Location::GPR(value)); + self.assembler.emit_pop(Size::S64, Location::GPR(value))?; self.release_gpr(compare); + Ok(()) } // Checks for underflow/overflow/nan. @@ -635,50 +672,61 @@ impl MachineX86_64 { overflow_label: Label, nan_label: Label, succeed_label: Label, - ) { + ) -> Result<(), CodegenError> { let lower_bound = f32::to_bits(lower_bound); let upper_bound = f32::to_bits(upper_bound); - let tmp = self.acquire_temp_gpr().unwrap(); - let tmp_x = self.acquire_temp_simd().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_x = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; // Underflow. 
- self.move_location(Size::S32, Location::Imm32(lower_bound), Location::GPR(tmp)); - self.move_location(Size::S32, Location::GPR(tmp), Location::SIMD(tmp_x)); + self.move_location(Size::S32, Location::Imm32(lower_bound), Location::GPR(tmp))?; + self.move_location(Size::S32, Location::GPR(tmp), Location::SIMD(tmp_x))?; self.assembler - .emit_vcmpless(reg, XMMOrMemory::XMM(tmp_x), tmp_x); - self.move_location(Size::S32, Location::SIMD(tmp_x), Location::GPR(tmp)); + .emit_vcmpless(reg, XMMOrMemory::XMM(tmp_x), tmp_x)?; + self.move_location(Size::S32, Location::SIMD(tmp_x), Location::GPR(tmp))?; self.assembler - .emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); + .emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp))?; self.assembler - .emit_jmp(Condition::NotEqual, underflow_label); + .emit_jmp(Condition::NotEqual, underflow_label)?; // Overflow. - self.move_location(Size::S32, Location::Imm32(upper_bound), Location::GPR(tmp)); - self.move_location(Size::S32, Location::GPR(tmp), Location::SIMD(tmp_x)); + self.move_location(Size::S32, Location::Imm32(upper_bound), Location::GPR(tmp))?; + self.move_location(Size::S32, Location::GPR(tmp), Location::SIMD(tmp_x))?; self.assembler - .emit_vcmpgess(reg, XMMOrMemory::XMM(tmp_x), tmp_x); - self.move_location(Size::S32, Location::SIMD(tmp_x), Location::GPR(tmp)); + .emit_vcmpgess(reg, XMMOrMemory::XMM(tmp_x), tmp_x)?; + self.move_location(Size::S32, Location::SIMD(tmp_x), Location::GPR(tmp))?; self.assembler - .emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); - self.assembler.emit_jmp(Condition::NotEqual, overflow_label); + .emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp))?; + self.assembler + .emit_jmp(Condition::NotEqual, overflow_label)?; // NaN. 
self.assembler - .emit_vcmpeqss(reg, XMMOrMemory::XMM(reg), tmp_x); - self.move_location(Size::S32, Location::SIMD(tmp_x), Location::GPR(tmp)); + .emit_vcmpeqss(reg, XMMOrMemory::XMM(reg), tmp_x)?; + self.move_location(Size::S32, Location::SIMD(tmp_x), Location::GPR(tmp))?; self.assembler - .emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); - self.assembler.emit_jmp(Condition::Equal, nan_label); + .emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp))?; + self.assembler.emit_jmp(Condition::Equal, nan_label)?; - self.assembler.emit_jmp(Condition::None, succeed_label); + self.assembler.emit_jmp(Condition::None, succeed_label)?; self.release_simd(tmp_x); self.release_gpr(tmp); + Ok(()) } // Checks for underflow/overflow/nan before IxxTrunc{U/S}F32. - fn emit_f32_int_conv_check_trap(&mut self, reg: XMM, lower_bound: f32, upper_bound: f32) { + fn emit_f32_int_conv_check_trap( + &mut self, + reg: XMM, + lower_bound: f32, + upper_bound: f32, + ) -> Result<(), CodegenError> { let trap_overflow = self.assembler.get_label(); let trap_badconv = self.assembler.get_label(); let end = self.assembler.get_label(); @@ -691,24 +739,25 @@ impl MachineX86_64 { trap_overflow, trap_badconv, end, - ); + )?; - self.emit_label(trap_overflow); + self.emit_label(trap_overflow)?; - self.emit_illegal_op_internal(TrapCode::IntegerOverflow); + self.emit_illegal_op_internal(TrapCode::IntegerOverflow)?; - self.emit_label(trap_badconv); + self.emit_label(trap_badconv)?; - self.emit_illegal_op_internal(TrapCode::BadConversionToInteger); + self.emit_illegal_op_internal(TrapCode::BadConversionToInteger)?; - self.emit_label(end); + self.emit_label(end)?; + Ok(()) } #[allow(clippy::too_many_arguments)] fn emit_f32_int_conv_check_sat< - F1: FnOnce(&mut Self), - F2: FnOnce(&mut Self), - F3: FnOnce(&mut Self), - F4: FnOnce(&mut Self), + F1: FnOnce(&mut Self) -> Result<(), CodegenError>, + F2: FnOnce(&mut Self) -> Result<(), CodegenError>, + F3: FnOnce(&mut Self) -> Result<(), 
CodegenError>, + F4: FnOnce(&mut Self) -> Result<(), CodegenError>, >( &mut self, reg: XMM, @@ -718,7 +767,7 @@ impl MachineX86_64 { overflow_cb: F2, nan_cb: Option, convert_cb: F4, - ) { + ) -> Result<(), CodegenError> { // As an optimization nan_cb is optional, and when set to None we turn // use 'underflow' as the 'nan' label. This is useful for callers who // set the return value to zero for both underflow and nan. @@ -741,25 +790,25 @@ impl MachineX86_64 { overflow, nan, convert, - ); + )?; - self.emit_label(underflow); - underflow_cb(self); - self.assembler.emit_jmp(Condition::None, end); + self.emit_label(underflow)?; + underflow_cb(self)?; + self.assembler.emit_jmp(Condition::None, end)?; - self.emit_label(overflow); - overflow_cb(self); - self.assembler.emit_jmp(Condition::None, end); + self.emit_label(overflow)?; + overflow_cb(self)?; + self.assembler.emit_jmp(Condition::None, end)?; if let Some(cb) = nan_cb { - self.emit_label(nan); - cb(self); - self.assembler.emit_jmp(Condition::None, end); + self.emit_label(nan)?; + cb(self)?; + self.assembler.emit_jmp(Condition::None, end)?; } - self.emit_label(convert); - convert_cb(self); - self.emit_label(end); + self.emit_label(convert)?; + convert_cb(self)?; + self.emit_label(end) } // Checks for underflow/overflow/nan. #[allow(clippy::too_many_arguments)] @@ -772,49 +821,60 @@ impl MachineX86_64 { overflow_label: Label, nan_label: Label, succeed_label: Label, - ) { + ) -> Result<(), CodegenError> { let lower_bound = f64::to_bits(lower_bound); let upper_bound = f64::to_bits(upper_bound); - let tmp = self.acquire_temp_gpr().unwrap(); - let tmp_x = self.acquire_temp_simd().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_x = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; // Underflow. 
- self.move_location(Size::S64, Location::Imm64(lower_bound), Location::GPR(tmp)); - self.move_location(Size::S64, Location::GPR(tmp), Location::SIMD(tmp_x)); + self.move_location(Size::S64, Location::Imm64(lower_bound), Location::GPR(tmp))?; + self.move_location(Size::S64, Location::GPR(tmp), Location::SIMD(tmp_x))?; self.assembler - .emit_vcmplesd(reg, XMMOrMemory::XMM(tmp_x), tmp_x); - self.move_location(Size::S32, Location::SIMD(tmp_x), Location::GPR(tmp)); + .emit_vcmplesd(reg, XMMOrMemory::XMM(tmp_x), tmp_x)?; + self.move_location(Size::S32, Location::SIMD(tmp_x), Location::GPR(tmp))?; self.assembler - .emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); + .emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp))?; self.assembler - .emit_jmp(Condition::NotEqual, underflow_label); + .emit_jmp(Condition::NotEqual, underflow_label)?; // Overflow. - self.move_location(Size::S64, Location::Imm64(upper_bound), Location::GPR(tmp)); - self.move_location(Size::S64, Location::GPR(tmp), Location::SIMD(tmp_x)); + self.move_location(Size::S64, Location::Imm64(upper_bound), Location::GPR(tmp))?; + self.move_location(Size::S64, Location::GPR(tmp), Location::SIMD(tmp_x))?; + self.assembler + .emit_vcmpgesd(reg, XMMOrMemory::XMM(tmp_x), tmp_x)?; + self.move_location(Size::S32, Location::SIMD(tmp_x), Location::GPR(tmp))?; self.assembler - .emit_vcmpgesd(reg, XMMOrMemory::XMM(tmp_x), tmp_x); - self.move_location(Size::S32, Location::SIMD(tmp_x), Location::GPR(tmp)); + .emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp))?; self.assembler - .emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); - self.assembler.emit_jmp(Condition::NotEqual, overflow_label); + .emit_jmp(Condition::NotEqual, overflow_label)?; // NaN. 
self.assembler - .emit_vcmpeqsd(reg, XMMOrMemory::XMM(reg), tmp_x); - self.move_location(Size::S32, Location::SIMD(tmp_x), Location::GPR(tmp)); + .emit_vcmpeqsd(reg, XMMOrMemory::XMM(reg), tmp_x)?; + self.move_location(Size::S32, Location::SIMD(tmp_x), Location::GPR(tmp))?; self.assembler - .emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp)); - self.assembler.emit_jmp(Condition::Equal, nan_label); + .emit_cmp(Size::S32, Location::Imm32(0), Location::GPR(tmp))?; + self.assembler.emit_jmp(Condition::Equal, nan_label)?; - self.assembler.emit_jmp(Condition::None, succeed_label); + self.assembler.emit_jmp(Condition::None, succeed_label)?; self.release_simd(tmp_x); self.release_gpr(tmp); + Ok(()) } // Checks for underflow/overflow/nan before IxxTrunc{U/S}F64.. return offset/len for trap_overflow and trap_badconv - fn emit_f64_int_conv_check_trap(&mut self, reg: XMM, lower_bound: f64, upper_bound: f64) { + fn emit_f64_int_conv_check_trap( + &mut self, + reg: XMM, + lower_bound: f64, + upper_bound: f64, + ) -> Result<(), CodegenError> { let trap_overflow = self.assembler.get_label(); let trap_badconv = self.assembler.get_label(); let end = self.assembler.get_label(); @@ -827,22 +887,22 @@ impl MachineX86_64 { trap_overflow, trap_badconv, end, - ); + )?; - self.emit_label(trap_overflow); - self.emit_illegal_op_internal(TrapCode::IntegerOverflow); + self.emit_label(trap_overflow)?; + self.emit_illegal_op_internal(TrapCode::IntegerOverflow)?; - self.emit_label(trap_badconv); - self.emit_illegal_op_internal(TrapCode::BadConversionToInteger); + self.emit_label(trap_badconv)?; + self.emit_illegal_op_internal(TrapCode::BadConversionToInteger)?; - self.emit_label(end); + self.emit_label(end) } #[allow(clippy::too_many_arguments)] fn emit_f64_int_conv_check_sat< - F1: FnOnce(&mut Self), - F2: FnOnce(&mut Self), - F3: FnOnce(&mut Self), - F4: FnOnce(&mut Self), + F1: FnOnce(&mut Self) -> Result<(), CodegenError>, + F2: FnOnce(&mut Self) -> Result<(), CodegenError>, + F3: 
FnOnce(&mut Self) -> Result<(), CodegenError>, + F4: FnOnce(&mut Self) -> Result<(), CodegenError>, >( &mut self, reg: XMM, @@ -852,7 +912,7 @@ impl MachineX86_64 { overflow_cb: F2, nan_cb: Option, convert_cb: F4, - ) { + ) -> Result<(), CodegenError> { // As an optimization nan_cb is optional, and when set to None we turn // use 'underflow' as the 'nan' label. This is useful for callers who // set the return value to zero for both underflow and nan. @@ -875,76 +935,86 @@ impl MachineX86_64 { overflow, nan, convert, - ); + )?; - self.emit_label(underflow); - underflow_cb(self); - self.assembler.emit_jmp(Condition::None, end); + self.emit_label(underflow)?; + underflow_cb(self)?; + self.assembler.emit_jmp(Condition::None, end)?; - self.emit_label(overflow); - overflow_cb(self); - self.assembler.emit_jmp(Condition::None, end); + self.emit_label(overflow)?; + overflow_cb(self)?; + self.assembler.emit_jmp(Condition::None, end)?; if let Some(cb) = nan_cb { - self.emit_label(nan); - cb(self); - self.assembler.emit_jmp(Condition::None, end); + self.emit_label(nan)?; + cb(self)?; + self.assembler.emit_jmp(Condition::None, end)?; } - self.emit_label(convert); - convert_cb(self); - self.emit_label(end); + self.emit_label(convert)?; + convert_cb(self)?; + self.emit_label(end) } /// Moves `src1` and `src2` to valid locations and possibly adds a layer of indirection for `dst` for AVX instructions. fn emit_relaxed_avx( &mut self, - op: fn(&mut AssemblerX64, XMM, XMMOrMemory, XMM), + op: fn(&mut AssemblerX64, XMM, XMMOrMemory, XMM) -> Result<(), CodegenError>, src1: Location, src2: Location, dst: Location, - ) { + ) -> Result<(), CodegenError> { self.emit_relaxed_avx_base( |this, src1, src2, dst| op(&mut this.assembler, src1, src2, dst), src1, src2, dst, - ); + ) } /// Moves `src1` and `src2` to valid locations and possibly adds a layer of indirection for `dst` for AVX instructions. 
- fn emit_relaxed_avx_base( + fn emit_relaxed_avx_base< + F: FnOnce(&mut Self, XMM, XMMOrMemory, XMM) -> Result<(), CodegenError>, + >( &mut self, op: F, src1: Location, src2: Location, dst: Location, - ) { - let tmp1 = self.acquire_temp_simd().unwrap(); - let tmp2 = self.acquire_temp_simd().unwrap(); - let tmp3 = self.acquire_temp_simd().unwrap(); - let tmpg = self.acquire_temp_gpr().unwrap(); + ) -> Result<(), CodegenError> { + let tmp1 = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmp2 = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmp3 = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmpg = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; let src1 = match src1 { Location::SIMD(x) => x, Location::GPR(_) | Location::Memory(_, _) => { self.assembler - .emit_mov(Size::S64, src1, Location::SIMD(tmp1)); + .emit_mov(Size::S64, src1, Location::SIMD(tmp1))?; tmp1 } Location::Imm32(_) => { self.assembler - .emit_mov(Size::S32, src1, Location::GPR(tmpg)); - self.move_location(Size::S32, Location::GPR(tmpg), Location::SIMD(tmp1)); + .emit_mov(Size::S32, src1, Location::GPR(tmpg))?; + self.move_location(Size::S32, Location::GPR(tmpg), Location::SIMD(tmp1))?; tmp1 } Location::Imm64(_) => { self.assembler - .emit_mov(Size::S64, src1, Location::GPR(tmpg)); - self.move_location(Size::S64, Location::GPR(tmpg), Location::SIMD(tmp1)); + .emit_mov(Size::S64, src1, Location::GPR(tmpg))?; + self.move_location(Size::S64, Location::GPR(tmpg), Location::SIMD(tmp1))?; tmp1 } _ => { - unreachable!() + codegen_error!("singlepass emit_relaxed_avx_base unreachable") } }; @@ -953,37 +1023,37 @@ impl MachineX86_64 { Location::Memory(base, disp) => XMMOrMemory::Memory(base, disp), Location::GPR(_) => { 
self.assembler - .emit_mov(Size::S64, src2, Location::SIMD(tmp2)); + .emit_mov(Size::S64, src2, Location::SIMD(tmp2))?; XMMOrMemory::XMM(tmp2) } Location::Imm32(_) => { self.assembler - .emit_mov(Size::S32, src2, Location::GPR(tmpg)); - self.move_location(Size::S32, Location::GPR(tmpg), Location::SIMD(tmp2)); + .emit_mov(Size::S32, src2, Location::GPR(tmpg))?; + self.move_location(Size::S32, Location::GPR(tmpg), Location::SIMD(tmp2))?; XMMOrMemory::XMM(tmp2) } Location::Imm64(_) => { self.assembler - .emit_mov(Size::S64, src2, Location::GPR(tmpg)); - self.move_location(Size::S64, Location::GPR(tmpg), Location::SIMD(tmp2)); + .emit_mov(Size::S64, src2, Location::GPR(tmpg))?; + self.move_location(Size::S64, Location::GPR(tmpg), Location::SIMD(tmp2))?; XMMOrMemory::XMM(tmp2) } _ => { - unreachable!() + codegen_error!("singlepass emit_relaxed_avx_base unreachable") } }; match dst { Location::SIMD(x) => { - op(self, src1, src2, x); + op(self, src1, src2, x)?; } Location::Memory(_, _) | Location::GPR(_) => { - op(self, src1, src2, tmp3); + op(self, src1, src2, tmp3)?; self.assembler - .emit_mov(Size::S64, Location::SIMD(tmp3), dst); + .emit_mov(Size::S64, Location::SIMD(tmp3), dst)?; } _ => { - unreachable!() + codegen_error!("singlepass emit_relaxed_avx_base unreachable") } } @@ -991,122 +1061,155 @@ impl MachineX86_64 { self.release_simd(tmp3); self.release_simd(tmp2); self.release_simd(tmp1); + Ok(()) } - fn convert_i64_f64_u_s(&mut self, loc: Location, ret: Location) { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); + fn convert_i64_f64_u_s(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; - self.emit_relaxed_mov(Size::S64, loc, 
Location::SIMD(tmp_in)); + self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in))?; self.emit_f64_int_conv_check_sat( tmp_in, GEF64_LT_U64_MIN, LEF64_GT_U64_MAX, |this| { this.assembler - .emit_mov(Size::S64, Location::Imm64(0), Location::GPR(tmp_out)); + .emit_mov(Size::S64, Location::Imm64(0), Location::GPR(tmp_out)) }, |this| { this.assembler.emit_mov( Size::S64, Location::Imm64(std::u64::MAX), Location::GPR(tmp_out), - ); + ) }, - None::, + None:: Result<(), CodegenError>>, |this| { if this.assembler.arch_has_itruncf() { - this.assembler.arch_emit_i64_trunc_uf64(tmp_in, tmp_out); + this.assembler.arch_emit_i64_trunc_uf64(tmp_in, tmp_out) } else { - let tmp = this.acquire_temp_gpr().unwrap(); - let tmp_x1 = this.acquire_temp_simd().unwrap(); - let tmp_x2 = this.acquire_temp_simd().unwrap(); + let tmp = this.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_x1 = this.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmp_x2 = this.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; this.assembler.emit_mov( Size::S64, Location::Imm64(4890909195324358656u64), Location::GPR(tmp), - ); //double 9.2233720368547758E+18 - this.assembler - .emit_mov(Size::S64, Location::GPR(tmp), Location::SIMD(tmp_x1)); + )?; //double 9.2233720368547758E+18 + this.assembler.emit_mov( + Size::S64, + Location::GPR(tmp), + Location::SIMD(tmp_x1), + )?; this.assembler.emit_mov( Size::S64, Location::SIMD(tmp_in), Location::SIMD(tmp_x2), - ); + )?; this.assembler - .emit_vsubsd(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in); + .emit_vsubsd(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in)?; this.assembler - .emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + .emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out)?; this.assembler.emit_mov( Size::S64, Location::Imm64(0x8000000000000000u64), 
Location::GPR(tmp), - ); - this.assembler - .emit_xor(Size::S64, Location::GPR(tmp_out), Location::GPR(tmp)); + )?; + this.assembler.emit_xor( + Size::S64, + Location::GPR(tmp_out), + Location::GPR(tmp), + )?; this.assembler - .emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out); + .emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out)?; this.assembler - .emit_ucomisd(XMMOrMemory::XMM(tmp_x1), tmp_x2); - this.assembler.emit_cmovae_gpr_64(tmp, tmp_out); + .emit_ucomisd(XMMOrMemory::XMM(tmp_x1), tmp_x2)?; + this.assembler.emit_cmovae_gpr_64(tmp, tmp_out)?; this.release_simd(tmp_x2); this.release_simd(tmp_x1); this.release_gpr(tmp); + Ok(()) } }, - ); + )?; self.assembler - .emit_mov(Size::S64, Location::GPR(tmp_out), ret); + .emit_mov(Size::S64, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); + Ok(()) } - fn convert_i64_f64_u_u(&mut self, loc: Location, ret: Location) { + fn convert_i64_f64_u_u(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { if self.assembler.arch_has_itruncf() { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); - self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in)); - self.assembler.arch_emit_i64_trunc_uf64(tmp_in, tmp_out); - self.emit_relaxed_mov(Size::S64, Location::GPR(tmp_out), ret); + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in))?; + self.assembler.arch_emit_i64_trunc_uf64(tmp_in, tmp_out)?; + self.emit_relaxed_mov(Size::S64, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); } else { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); // xmm2 - - 
self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in)); - self.emit_f64_int_conv_check_trap(tmp_in, GEF64_LT_U64_MIN, LEF64_GT_U64_MAX); - - let tmp = self.acquire_temp_gpr().unwrap(); // r15 - let tmp_x1 = self.acquire_temp_simd().unwrap(); // xmm1 - let tmp_x2 = self.acquire_temp_simd().unwrap(); // xmm3 + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; // xmm2 + + self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in))?; + self.emit_f64_int_conv_check_trap(tmp_in, GEF64_LT_U64_MIN, LEF64_GT_U64_MAX)?; + + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; // r15 + let tmp_x1 = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; // xmm1 + let tmp_x2 = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; // xmm3 self.move_location( Size::S64, Location::Imm64(4890909195324358656u64), Location::GPR(tmp), - ); //double 9.2233720368547758E+18 - self.move_location(Size::S64, Location::GPR(tmp), Location::SIMD(tmp_x1)); - self.move_location(Size::S64, Location::SIMD(tmp_in), Location::SIMD(tmp_x2)); + )?; //double 9.2233720368547758E+18 + self.move_location(Size::S64, Location::GPR(tmp), Location::SIMD(tmp_x1))?; + self.move_location(Size::S64, Location::SIMD(tmp_in), Location::SIMD(tmp_x2))?; self.assembler - .emit_vsubsd(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in); + .emit_vsubsd(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in)?; self.assembler - .emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + .emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out)?; self.move_location( Size::S64, Location::Imm64(0x8000000000000000u64), Location::GPR(tmp), - ); + )?; 
self.assembler - .emit_xor(Size::S64, Location::GPR(tmp_out), Location::GPR(tmp)); + .emit_xor(Size::S64, Location::GPR(tmp_out), Location::GPR(tmp))?; self.assembler - .emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out); + .emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out)?; self.assembler - .emit_ucomisd(XMMOrMemory::XMM(tmp_x1), tmp_x2); - self.assembler.emit_cmovae_gpr_64(tmp, tmp_out); - self.move_location(Size::S64, Location::GPR(tmp_out), ret); + .emit_ucomisd(XMMOrMemory::XMM(tmp_x1), tmp_x2)?; + self.assembler.emit_cmovae_gpr_64(tmp, tmp_out)?; + self.move_location(Size::S64, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_x2); self.release_simd(tmp_x1); @@ -1114,12 +1217,17 @@ impl MachineX86_64 { self.release_simd(tmp_in); self.release_gpr(tmp_out); } + Ok(()) } - fn convert_i64_f64_s_s(&mut self, loc: Location, ret: Location) { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); + fn convert_i64_f64_s_s(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; - self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in)); + self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in))?; self.emit_f64_int_conv_check_sat( tmp_in, GEF64_LT_I64_MIN, @@ -1129,71 +1237,85 @@ impl MachineX86_64 { Size::S64, Location::Imm64(std::i64::MIN as u64), Location::GPR(tmp_out), - ); + ) }, |this| { this.assembler.emit_mov( Size::S64, Location::Imm64(std::i64::MAX as u64), Location::GPR(tmp_out), - ); + ) }, Some(|this: &mut Self| { this.assembler - .emit_mov(Size::S64, Location::Imm64(0), Location::GPR(tmp_out)); + .emit_mov(Size::S64, Location::Imm64(0), Location::GPR(tmp_out)) }), |this| { if this.assembler.arch_has_itruncf() { - 
this.assembler.arch_emit_i64_trunc_sf64(tmp_in, tmp_out); + this.assembler.arch_emit_i64_trunc_sf64(tmp_in, tmp_out) } else { this.assembler - .emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + .emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out) } }, - ); + )?; self.assembler - .emit_mov(Size::S64, Location::GPR(tmp_out), ret); + .emit_mov(Size::S64, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); + Ok(()) } - fn convert_i64_f64_s_u(&mut self, loc: Location, ret: Location) { + fn convert_i64_f64_s_u(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { if self.assembler.arch_has_itruncf() { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); - self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in)); - self.assembler.arch_emit_i64_trunc_sf64(tmp_in, tmp_out); - self.emit_relaxed_mov(Size::S64, Location::GPR(tmp_out), ret); + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in))?; + self.assembler.arch_emit_i64_trunc_sf64(tmp_in, tmp_out)?; + self.emit_relaxed_mov(Size::S64, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); } else { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; - self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in)); - self.emit_f64_int_conv_check_trap(tmp_in, GEF64_LT_I64_MIN, LEF64_GT_I64_MAX); + 
self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in))?; + self.emit_f64_int_conv_check_trap(tmp_in, GEF64_LT_I64_MIN, LEF64_GT_I64_MAX)?; self.assembler - .emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); - self.move_location(Size::S64, Location::GPR(tmp_out), ret); + .emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out)?; + self.move_location(Size::S64, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); } + Ok(()) } - fn convert_i32_f64_s_s(&mut self, loc: Location, ret: Location) { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); + fn convert_i32_f64_s_s(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; let real_in = match loc { Location::Imm32(_) | Location::Imm64(_) => { - self.move_location(Size::S64, loc, Location::GPR(tmp_out)); - self.move_location(Size::S64, Location::GPR(tmp_out), Location::SIMD(tmp_in)); + self.move_location(Size::S64, loc, Location::GPR(tmp_out))?; + self.move_location(Size::S64, Location::GPR(tmp_out), Location::SIMD(tmp_in))?; tmp_in } Location::SIMD(x) => x, _ => { - self.move_location(Size::S64, loc, Location::SIMD(tmp_in)); + self.move_location(Size::S64, loc, Location::SIMD(tmp_in))?; tmp_in } }; @@ -1207,244 +1329,300 @@ impl MachineX86_64 { Size::S32, Location::Imm32(std::i32::MIN as u32), Location::GPR(tmp_out), - ); + ) }, |this| { this.assembler.emit_mov( Size::S32, Location::Imm32(std::i32::MAX as u32), Location::GPR(tmp_out), - ); + ) }, Some(|this: &mut Self| { this.assembler - .emit_mov(Size::S32, Location::Imm32(0), Location::GPR(tmp_out)); + .emit_mov(Size::S32, Location::Imm32(0), Location::GPR(tmp_out)) }), |this| { if 
this.assembler.arch_has_itruncf() { - this.assembler.arch_emit_i32_trunc_sf64(tmp_in, tmp_out); + this.assembler.arch_emit_i32_trunc_sf64(tmp_in, tmp_out) } else { this.assembler - .emit_cvttsd2si_32(XMMOrMemory::XMM(real_in), tmp_out); + .emit_cvttsd2si_32(XMMOrMemory::XMM(real_in), tmp_out) } }, - ); + )?; self.assembler - .emit_mov(Size::S32, Location::GPR(tmp_out), ret); + .emit_mov(Size::S32, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); + Ok(()) } - fn convert_i32_f64_s_u(&mut self, loc: Location, ret: Location) { + fn convert_i32_f64_s_u(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { if self.assembler.arch_has_itruncf() { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); - self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in)); - self.assembler.arch_emit_i32_trunc_sf64(tmp_in, tmp_out); - self.emit_relaxed_mov(Size::S32, Location::GPR(tmp_out), ret); + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in))?; + self.assembler.arch_emit_i32_trunc_sf64(tmp_in, tmp_out)?; + self.emit_relaxed_mov(Size::S32, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); } else { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; let real_in = match loc { Location::Imm32(_) | Location::Imm64(_) => { - self.move_location(Size::S64, loc, Location::GPR(tmp_out)); - 
self.move_location(Size::S64, Location::GPR(tmp_out), Location::SIMD(tmp_in)); + self.move_location(Size::S64, loc, Location::GPR(tmp_out))?; + self.move_location(Size::S64, Location::GPR(tmp_out), Location::SIMD(tmp_in))?; tmp_in } Location::SIMD(x) => x, _ => { - self.move_location(Size::S64, loc, Location::SIMD(tmp_in)); + self.move_location(Size::S64, loc, Location::SIMD(tmp_in))?; tmp_in } }; - self.emit_f64_int_conv_check_trap(real_in, GEF64_LT_I32_MIN, LEF64_GT_I32_MAX); + self.emit_f64_int_conv_check_trap(real_in, GEF64_LT_I32_MIN, LEF64_GT_I32_MAX)?; self.assembler - .emit_cvttsd2si_32(XMMOrMemory::XMM(real_in), tmp_out); - self.move_location(Size::S32, Location::GPR(tmp_out), ret); + .emit_cvttsd2si_32(XMMOrMemory::XMM(real_in), tmp_out)?; + self.move_location(Size::S32, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); } + Ok(()) } - fn convert_i32_f64_u_s(&mut self, loc: Location, ret: Location) { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); + fn convert_i32_f64_u_s(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; - self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in)); + self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in))?; self.emit_f64_int_conv_check_sat( tmp_in, GEF64_LT_U32_MIN, LEF64_GT_U32_MAX, |this| { this.assembler - .emit_mov(Size::S32, Location::Imm32(0), Location::GPR(tmp_out)); + .emit_mov(Size::S32, Location::Imm32(0), Location::GPR(tmp_out)) }, |this| { this.assembler.emit_mov( Size::S32, Location::Imm32(std::u32::MAX), Location::GPR(tmp_out), - ); + ) }, - None::, + None:: Result<(), CodegenError>>, |this| { if this.assembler.arch_has_itruncf() { - 
this.assembler.arch_emit_i32_trunc_uf64(tmp_in, tmp_out); + this.assembler.arch_emit_i32_trunc_uf64(tmp_in, tmp_out) } else { this.assembler - .emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + .emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out) } }, - ); + )?; self.assembler - .emit_mov(Size::S32, Location::GPR(tmp_out), ret); + .emit_mov(Size::S32, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); + Ok(()) } - fn convert_i32_f64_u_u(&mut self, loc: Location, ret: Location) { + fn convert_i32_f64_u_u(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { if self.assembler.arch_has_itruncf() { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); - self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in)); - self.assembler.arch_emit_i32_trunc_uf64(tmp_in, tmp_out); - self.emit_relaxed_mov(Size::S32, Location::GPR(tmp_out), ret); + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in))?; + self.assembler.arch_emit_i32_trunc_uf64(tmp_in, tmp_out)?; + self.emit_relaxed_mov(Size::S32, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); } else { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; - self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in)); - self.emit_f64_int_conv_check_trap(tmp_in, GEF64_LT_U32_MIN, LEF64_GT_U32_MAX); + 
self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp_in))?; + self.emit_f64_int_conv_check_trap(tmp_in, GEF64_LT_U32_MIN, LEF64_GT_U32_MAX)?; self.assembler - .emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); - self.move_location(Size::S32, Location::GPR(tmp_out), ret); + .emit_cvttsd2si_64(XMMOrMemory::XMM(tmp_in), tmp_out)?; + self.move_location(Size::S32, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); } + Ok(()) } - fn convert_i64_f32_u_s(&mut self, loc: Location, ret: Location) { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); + fn convert_i64_f32_u_s(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; - self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in)); + self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in))?; self.emit_f32_int_conv_check_sat( tmp_in, GEF32_LT_U64_MIN, LEF32_GT_U64_MAX, |this| { this.assembler - .emit_mov(Size::S64, Location::Imm64(0), Location::GPR(tmp_out)); + .emit_mov(Size::S64, Location::Imm64(0), Location::GPR(tmp_out)) }, |this| { this.assembler.emit_mov( Size::S64, Location::Imm64(std::u64::MAX), Location::GPR(tmp_out), - ); + ) }, - None::, + None:: Result<(), CodegenError>>, |this| { if this.assembler.arch_has_itruncf() { - this.assembler.arch_emit_i64_trunc_uf32(tmp_in, tmp_out); + this.assembler.arch_emit_i64_trunc_uf32(tmp_in, tmp_out) } else { - let tmp = this.acquire_temp_gpr().unwrap(); - let tmp_x1 = this.acquire_temp_simd().unwrap(); - let tmp_x2 = this.acquire_temp_simd().unwrap(); + let tmp = this.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_x1 = 
this.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmp_x2 = this.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; this.assembler.emit_mov( Size::S32, Location::Imm32(1593835520u32), Location::GPR(tmp), - ); //float 9.22337203E+18 - this.assembler - .emit_mov(Size::S32, Location::GPR(tmp), Location::SIMD(tmp_x1)); + )?; //float 9.22337203E+18 + this.assembler.emit_mov( + Size::S32, + Location::GPR(tmp), + Location::SIMD(tmp_x1), + )?; this.assembler.emit_mov( Size::S32, Location::SIMD(tmp_in), Location::SIMD(tmp_x2), - ); + )?; this.assembler - .emit_vsubss(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in); + .emit_vsubss(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in)?; this.assembler - .emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + .emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out)?; this.assembler.emit_mov( Size::S64, Location::Imm64(0x8000000000000000u64), Location::GPR(tmp), - ); - this.assembler - .emit_xor(Size::S64, Location::GPR(tmp_out), Location::GPR(tmp)); + )?; + this.assembler.emit_xor( + Size::S64, + Location::GPR(tmp_out), + Location::GPR(tmp), + )?; this.assembler - .emit_cvttss2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out); + .emit_cvttss2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out)?; this.assembler - .emit_ucomiss(XMMOrMemory::XMM(tmp_x1), tmp_x2); - this.assembler.emit_cmovae_gpr_64(tmp, tmp_out); + .emit_ucomiss(XMMOrMemory::XMM(tmp_x1), tmp_x2)?; + this.assembler.emit_cmovae_gpr_64(tmp, tmp_out)?; this.release_simd(tmp_x2); this.release_simd(tmp_x1); this.release_gpr(tmp); + Ok(()) } }, - ); + )?; self.assembler - .emit_mov(Size::S64, Location::GPR(tmp_out), ret); + .emit_mov(Size::S64, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); + Ok(()) } - fn convert_i64_f32_u_u(&mut self, loc: Location, ret: Location) { + fn convert_i64_f32_u_u(&mut self, loc: Location, ret: Location) -> Result<(), 
CodegenError> { if self.assembler.arch_has_itruncf() { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); - self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in)); - self.assembler.arch_emit_i64_trunc_uf32(tmp_in, tmp_out); - self.emit_relaxed_mov(Size::S64, Location::GPR(tmp_out), ret); + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in))?; + self.assembler.arch_emit_i64_trunc_uf32(tmp_in, tmp_out)?; + self.emit_relaxed_mov(Size::S64, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); } else { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); // xmm2 - - self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in)); - self.emit_f32_int_conv_check_trap(tmp_in, GEF32_LT_U64_MIN, LEF32_GT_U64_MAX); - - let tmp = self.acquire_temp_gpr().unwrap(); // r15 - let tmp_x1 = self.acquire_temp_simd().unwrap(); // xmm1 - let tmp_x2 = self.acquire_temp_simd().unwrap(); // xmm3 + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; // xmm2 + + self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in))?; + self.emit_f32_int_conv_check_trap(tmp_in, GEF32_LT_U64_MIN, LEF32_GT_U64_MAX)?; + + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; // r15 + let tmp_x1 = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; // xmm1 + let tmp_x2 = 
self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; // xmm3 self.move_location( Size::S32, Location::Imm32(1593835520u32), Location::GPR(tmp), - ); //float 9.22337203E+18 - self.move_location(Size::S32, Location::GPR(tmp), Location::SIMD(tmp_x1)); - self.move_location(Size::S32, Location::SIMD(tmp_in), Location::SIMD(tmp_x2)); + )?; //float 9.22337203E+18 + self.move_location(Size::S32, Location::GPR(tmp), Location::SIMD(tmp_x1))?; + self.move_location(Size::S32, Location::SIMD(tmp_in), Location::SIMD(tmp_x2))?; self.assembler - .emit_vsubss(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in); + .emit_vsubss(tmp_in, XMMOrMemory::XMM(tmp_x1), tmp_in)?; self.assembler - .emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + .emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out)?; self.move_location( Size::S64, Location::Imm64(0x8000000000000000u64), Location::GPR(tmp), - ); + )?; self.assembler - .emit_xor(Size::S64, Location::GPR(tmp_out), Location::GPR(tmp)); + .emit_xor(Size::S64, Location::GPR(tmp_out), Location::GPR(tmp))?; self.assembler - .emit_cvttss2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out); + .emit_cvttss2si_64(XMMOrMemory::XMM(tmp_x2), tmp_out)?; self.assembler - .emit_ucomiss(XMMOrMemory::XMM(tmp_x1), tmp_x2); - self.assembler.emit_cmovae_gpr_64(tmp, tmp_out); - self.move_location(Size::S64, Location::GPR(tmp_out), ret); + .emit_ucomiss(XMMOrMemory::XMM(tmp_x1), tmp_x2)?; + self.assembler.emit_cmovae_gpr_64(tmp, tmp_out)?; + self.move_location(Size::S64, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_x2); self.release_simd(tmp_x1); @@ -1452,12 +1630,17 @@ impl MachineX86_64 { self.release_simd(tmp_in); self.release_gpr(tmp_out); } + Ok(()) } - fn convert_i64_f32_s_s(&mut self, loc: Location, ret: Location) { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); + fn convert_i64_f32_s_s(&mut self, loc: Location, ret: Location) -> Result<(), 
CodegenError> { + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; - self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in)); + self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in))?; self.emit_f32_int_conv_check_sat( tmp_in, GEF32_LT_I64_MIN, @@ -1467,62 +1650,76 @@ impl MachineX86_64 { Size::S64, Location::Imm64(std::i64::MIN as u64), Location::GPR(tmp_out), - ); + ) }, |this| { this.assembler.emit_mov( Size::S64, Location::Imm64(std::i64::MAX as u64), Location::GPR(tmp_out), - ); + ) }, Some(|this: &mut Self| { this.assembler - .emit_mov(Size::S64, Location::Imm64(0), Location::GPR(tmp_out)); + .emit_mov(Size::S64, Location::Imm64(0), Location::GPR(tmp_out)) }), |this| { if this.assembler.arch_has_itruncf() { - this.assembler.arch_emit_i64_trunc_sf32(tmp_in, tmp_out); + this.assembler.arch_emit_i64_trunc_sf32(tmp_in, tmp_out) } else { this.assembler - .emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + .emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out) } }, - ); + )?; self.assembler - .emit_mov(Size::S64, Location::GPR(tmp_out), ret); + .emit_mov(Size::S64, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); + Ok(()) } - fn convert_i64_f32_s_u(&mut self, loc: Location, ret: Location) { + fn convert_i64_f32_s_u(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { if self.assembler.arch_has_itruncf() { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); - self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in)); - self.assembler.arch_emit_i64_trunc_sf32(tmp_in, tmp_out); - self.emit_relaxed_mov(Size::S64, Location::GPR(tmp_out), ret); + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp 
gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in))?; + self.assembler.arch_emit_i64_trunc_sf32(tmp_in, tmp_out)?; + self.emit_relaxed_mov(Size::S64, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); } else { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); - - self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in)); - self.emit_f32_int_conv_check_trap(tmp_in, GEF32_LT_I64_MIN, LEF32_GT_I64_MAX); + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + + self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in))?; + self.emit_f32_int_conv_check_trap(tmp_in, GEF32_LT_I64_MIN, LEF32_GT_I64_MAX)?; self.assembler - .emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); - self.move_location(Size::S64, Location::GPR(tmp_out), ret); + .emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out)?; + self.move_location(Size::S64, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); } + Ok(()) } - fn convert_i32_f32_s_s(&mut self, loc: Location, ret: Location) { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); + fn convert_i32_f32_s_s(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; - self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in)); + 
self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in))?; self.emit_f32_int_conv_check_sat( tmp_in, GEF32_LT_I32_MIN, @@ -1532,119 +1729,148 @@ impl MachineX86_64 { Size::S32, Location::Imm32(std::i32::MIN as u32), Location::GPR(tmp_out), - ); + ) }, |this| { this.assembler.emit_mov( Size::S32, Location::Imm32(std::i32::MAX as u32), Location::GPR(tmp_out), - ); + ) }, Some(|this: &mut Self| { this.assembler - .emit_mov(Size::S32, Location::Imm32(0), Location::GPR(tmp_out)); + .emit_mov(Size::S32, Location::Imm32(0), Location::GPR(tmp_out)) }), |this| { if this.assembler.arch_has_itruncf() { - this.assembler.arch_emit_i32_trunc_sf32(tmp_in, tmp_out); + this.assembler.arch_emit_i32_trunc_sf32(tmp_in, tmp_out) } else { this.assembler - .emit_cvttss2si_32(XMMOrMemory::XMM(tmp_in), tmp_out); + .emit_cvttss2si_32(XMMOrMemory::XMM(tmp_in), tmp_out) } }, - ); + )?; self.assembler - .emit_mov(Size::S32, Location::GPR(tmp_out), ret); + .emit_mov(Size::S32, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); + Ok(()) } - fn convert_i32_f32_s_u(&mut self, loc: Location, ret: Location) { + fn convert_i32_f32_s_u(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { if self.assembler.arch_has_itruncf() { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); - self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in)); - self.assembler.arch_emit_i32_trunc_sf32(tmp_in, tmp_out); - self.emit_relaxed_mov(Size::S32, Location::GPR(tmp_out), ret); + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in))?; + self.assembler.arch_emit_i32_trunc_sf32(tmp_in, tmp_out)?; + self.emit_relaxed_mov(Size::S32, 
Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); } else { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; - self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in)); - self.emit_f32_int_conv_check_trap(tmp_in, GEF32_LT_I32_MIN, LEF32_GT_I32_MAX); + self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in))?; + self.emit_f32_int_conv_check_trap(tmp_in, GEF32_LT_I32_MIN, LEF32_GT_I32_MAX)?; self.assembler - .emit_cvttss2si_32(XMMOrMemory::XMM(tmp_in), tmp_out); - self.move_location(Size::S32, Location::GPR(tmp_out), ret); + .emit_cvttss2si_32(XMMOrMemory::XMM(tmp_in), tmp_out)?; + self.move_location(Size::S32, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); } - } - fn convert_i32_f32_u_s(&mut self, loc: Location, ret: Location) { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); - self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in)); + Ok(()) + } + fn convert_i32_f32_u_s(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in))?; self.emit_f32_int_conv_check_sat( tmp_in, GEF32_LT_U32_MIN, LEF32_GT_U32_MAX, |this| { this.assembler - .emit_mov(Size::S32, Location::Imm32(0), Location::GPR(tmp_out)); + .emit_mov(Size::S32, Location::Imm32(0), Location::GPR(tmp_out)) }, |this| { 
this.assembler.emit_mov( Size::S32, Location::Imm32(std::u32::MAX), Location::GPR(tmp_out), - ); + ) }, - None::, + None:: Result<(), CodegenError>>, |this| { if this.assembler.arch_has_itruncf() { - this.assembler.arch_emit_i32_trunc_uf32(tmp_in, tmp_out); + this.assembler.arch_emit_i32_trunc_uf32(tmp_in, tmp_out) } else { this.assembler - .emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); + .emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out) } }, - ); + )?; self.assembler - .emit_mov(Size::S32, Location::GPR(tmp_out), ret); + .emit_mov(Size::S32, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); + Ok(()) } - fn convert_i32_f32_u_u(&mut self, loc: Location, ret: Location) { + fn convert_i32_f32_u_u(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { if self.assembler.arch_has_itruncf() { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); - self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in)); - self.assembler.arch_emit_i32_trunc_uf32(tmp_in, tmp_out); - self.emit_relaxed_mov(Size::S32, Location::GPR(tmp_out), ret); + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in))?; + self.assembler.arch_emit_i32_trunc_uf32(tmp_in, tmp_out)?; + self.emit_relaxed_mov(Size::S32, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); } else { - let tmp_out = self.acquire_temp_gpr().unwrap(); - let tmp_in = self.acquire_temp_simd().unwrap(); - self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in)); - self.emit_f32_int_conv_check_trap(tmp_in, GEF32_LT_U32_MIN, LEF32_GT_U32_MAX); + let tmp_out = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass 
cannot acquire temp gpr".to_string(), + })?; + let tmp_in = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp_in))?; + self.emit_f32_int_conv_check_trap(tmp_in, GEF32_LT_U32_MIN, LEF32_GT_U32_MAX)?; self.assembler - .emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out); - self.move_location(Size::S32, Location::GPR(tmp_out), ret); + .emit_cvttss2si_64(XMMOrMemory::XMM(tmp_in), tmp_out)?; + self.move_location(Size::S32, Location::GPR(tmp_out), ret)?; self.release_simd(tmp_in); self.release_gpr(tmp_out); } + Ok(()) } - fn emit_relaxed_atomic_xchg(&mut self, sz: Size, src: Location, dst: Location) { - self.emit_relaxed_binop(AssemblerX64::emit_xchg, sz, src, dst); + fn emit_relaxed_atomic_xchg( + &mut self, + sz: Size, + src: Location, + dst: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_binop(AssemblerX64::emit_xchg, sz, src, dst) } fn used_gprs_contains(&self, r: &GPR) -> bool { @@ -1669,12 +1895,13 @@ impl MachineX86_64 { self.used_simd &= !(1 << r.into_index()); ret } - fn emit_unwind_op(&mut self, op: UnwindOps) { + fn emit_unwind_op(&mut self, op: UnwindOps) -> Result<(), CodegenError> { self.unwind_ops.push((self.get_offset().0, op)); + Ok(()) } - fn emit_illegal_op_internal(&mut self, trap: TrapCode) { + fn emit_illegal_op_internal(&mut self, trap: TrapCode) -> Result<(), CodegenError> { let v = trap as u8; - self.assembler.emit_ud1_payload(v); + self.assembler.emit_ud1_payload(v) } } @@ -1754,16 +1981,17 @@ impl Machine for MachineX86_64 { self.used_gprs_insert(gpr); } - fn push_used_gpr(&mut self, used_gprs: &[GPR]) -> usize { + fn push_used_gpr(&mut self, used_gprs: &[GPR]) -> Result { for r in used_gprs.iter() { - self.assembler.emit_push(Size::S64, Location::GPR(*r)); + self.assembler.emit_push(Size::S64, Location::GPR(*r))?; } - used_gprs.len() * 8 + Ok(used_gprs.len() * 8) } - fn pop_used_gpr(&mut self, used_gprs: 
&[GPR]) { + fn pop_used_gpr(&mut self, used_gprs: &[GPR]) -> Result<(), CodegenError> { for r in used_gprs.iter().rev() { - self.assembler.emit_pop(Size::S64, Location::GPR(*r)); + self.assembler.emit_pop(Size::S64, Location::GPR(*r))?; } + Ok(()) } // Picks an unused XMM register. @@ -1808,32 +2036,32 @@ impl Machine for MachineX86_64 { assert!(self.used_simd_remove(&simd)); } - fn push_used_simd(&mut self, used_xmms: &[XMM]) -> usize { - self.adjust_stack((used_xmms.len() * 8) as u32); + fn push_used_simd(&mut self, used_xmms: &[XMM]) -> Result { + self.adjust_stack((used_xmms.len() * 8) as u32)?; for (i, r) in used_xmms.iter().enumerate() { self.move_location( Size::S64, Location::SIMD(*r), Location::Memory(GPR::RSP, (i * 8) as i32), - ); + )?; } - used_xmms.len() * 8 + Ok(used_xmms.len() * 8) } - fn pop_used_simd(&mut self, used_xmms: &[XMM]) { + fn pop_used_simd(&mut self, used_xmms: &[XMM]) -> Result<(), CodegenError> { for (i, r) in used_xmms.iter().enumerate() { self.move_location( Size::S64, Location::Memory(GPR::RSP, (i * 8) as i32), Location::SIMD(*r), - ); + )?; } self.assembler.emit_add( Size::S64, Location::Imm32((used_xmms.len() * 8) as u32), Location::GPR(GPR::RSP), - ); + ) } /// Set the source location of the Wasm to the given offset. 
@@ -1907,43 +2135,48 @@ impl Machine for MachineX86_64 { } // Adjust stack for locals - fn adjust_stack(&mut self, delta_stack_offset: u32) { + fn adjust_stack(&mut self, delta_stack_offset: u32) -> Result<(), CodegenError> { self.assembler.emit_sub( Size::S64, Location::Imm32(delta_stack_offset), Location::GPR(GPR::RSP), - ); + ) } // restore stack - fn restore_stack(&mut self, delta_stack_offset: u32) { + fn restore_stack(&mut self, delta_stack_offset: u32) -> Result<(), CodegenError> { self.assembler.emit_add( Size::S64, Location::Imm32(delta_stack_offset), Location::GPR(GPR::RSP), - ); + ) } - fn pop_stack_locals(&mut self, delta_stack_offset: u32) { + fn pop_stack_locals(&mut self, delta_stack_offset: u32) -> Result<(), CodegenError> { self.assembler.emit_add( Size::S64, Location::Imm32(delta_stack_offset), Location::GPR(GPR::RSP), - ); + ) } // push a value on the stack for a native call - fn move_location_for_native(&mut self, _size: Size, loc: Location, dest: Location) { + fn move_location_for_native( + &mut self, + _size: Size, + loc: Location, + dest: Location, + ) -> Result<(), CodegenError> { match loc { Location::Imm64(_) | Location::Memory(_, _) | Location::Memory2(_, _, _, _) => { let tmp = self.pick_temp_gpr(); if let Some(x) = tmp { - self.assembler.emit_mov(Size::S64, loc, Location::GPR(x)); - self.assembler.emit_mov(Size::S64, Location::GPR(x), dest); + self.assembler.emit_mov(Size::S64, loc, Location::GPR(x))?; + self.assembler.emit_mov(Size::S64, Location::GPR(x), dest) } else { self.assembler - .emit_mov(Size::S64, Location::GPR(GPR::RAX), dest); + .emit_mov(Size::S64, Location::GPR(GPR::RAX), dest)?; self.assembler - .emit_mov(Size::S64, loc, Location::GPR(GPR::RAX)); + .emit_mov(Size::S64, loc, Location::GPR(GPR::RAX))?; self.assembler - .emit_xchg(Size::S64, Location::GPR(GPR::RAX), dest); + .emit_xchg(Size::S64, Location::GPR(GPR::RAX), dest) } } _ => self.assembler.emit_mov(Size::S64, loc, dest), @@ -1951,8 +2184,8 @@ impl Machine for 
MachineX86_64 { } // Zero a location that is 32bits - fn zero_location(&mut self, size: Size, location: Location) { - self.assembler.emit_mov(size, Location::Imm32(0), location); + fn zero_location(&mut self, size: Size, location: Location) -> Result<(), CodegenError> { + self.assembler.emit_mov(size, Location::Imm32(0), location) } // GPR Reg used for local pointer on the stack @@ -1977,26 +2210,22 @@ impl Machine for MachineX86_64 { } } // Move a local to the stack - fn move_local(&mut self, stack_offset: i32, location: Location) { + fn move_local(&mut self, stack_offset: i32, location: Location) -> Result<(), CodegenError> { self.assembler.emit_mov( Size::S64, location, Location::Memory(GPR::RBP, -stack_offset), - ); + )?; match location { - Location::GPR(x) => { - self.emit_unwind_op(UnwindOps::SaveRegister { - reg: x.to_dwarf(), - bp_neg_offset: stack_offset, - }); - } - Location::SIMD(x) => { - self.emit_unwind_op(UnwindOps::SaveRegister { - reg: x.to_dwarf(), - bp_neg_offset: stack_offset, - }); - } - _ => (), + Location::GPR(x) => self.emit_unwind_op(UnwindOps::SaveRegister { + reg: x.to_dwarf(), + bp_neg_offset: stack_offset, + }), + Location::SIMD(x) => self.emit_unwind_op(UnwindOps::SaveRegister { + reg: x.to_dwarf(), + bp_neg_offset: stack_offset, + }), + _ => Ok(()), } } @@ -2098,48 +2327,49 @@ impl Machine for MachineX86_64 { } } // move a location to another - fn move_location(&mut self, size: Size, source: Location, dest: Location) { + fn move_location( + &mut self, + size: Size, + source: Location, + dest: Location, + ) -> Result<(), CodegenError> { match source { - Location::GPR(_) => { - self.assembler.emit_mov(size, source, dest); - } + Location::GPR(_) => self.assembler.emit_mov(size, source, dest), Location::Memory(_, _) => match dest { - Location::GPR(_) | Location::SIMD(_) => { - self.assembler.emit_mov(size, source, dest); - } + Location::GPR(_) | Location::SIMD(_) => self.assembler.emit_mov(size, source, dest), Location::Memory(_, _) | 
Location::Memory2(_, _, _, _) => { - let tmp = self.pick_temp_gpr().unwrap(); - self.assembler.emit_mov(size, source, Location::GPR(tmp)); - self.assembler.emit_mov(size, Location::GPR(tmp), dest); + let tmp = self.pick_temp_gpr().ok_or(CodegenError { + message: "singlepass can't pick a temp gpr".to_string(), + })?; + self.assembler.emit_mov(size, source, Location::GPR(tmp))?; + self.assembler.emit_mov(size, Location::GPR(tmp), dest) } - _ => unreachable!(), + _ => codegen_error!("singlepass move_location unreachable"), }, Location::Memory2(_, _, _, _) => match dest { - Location::GPR(_) | Location::SIMD(_) => { - self.assembler.emit_mov(size, source, dest); - } + Location::GPR(_) | Location::SIMD(_) => self.assembler.emit_mov(size, source, dest), Location::Memory(_, _) | Location::Memory2(_, _, _, _) => { - let tmp = self.pick_temp_gpr().unwrap(); - self.assembler.emit_mov(size, source, Location::GPR(tmp)); - self.assembler.emit_mov(size, Location::GPR(tmp), dest); + let tmp = self.pick_temp_gpr().ok_or(CodegenError { + message: "singlepass can't pick a temp gpr".to_string(), + })?; + self.assembler.emit_mov(size, source, Location::GPR(tmp))?; + self.assembler.emit_mov(size, Location::GPR(tmp), dest) } - _ => unreachable!(), + _ => codegen_error!("singlepass move_location unreachable"), }, Location::Imm8(_) | Location::Imm32(_) | Location::Imm64(_) => match dest { - Location::GPR(_) | Location::SIMD(_) => { - self.assembler.emit_mov(size, source, dest); - } + Location::GPR(_) | Location::SIMD(_) => self.assembler.emit_mov(size, source, dest), Location::Memory(_, _) | Location::Memory2(_, _, _, _) => { - let tmp = self.pick_temp_gpr().unwrap(); - self.assembler.emit_mov(size, source, Location::GPR(tmp)); - self.assembler.emit_mov(size, Location::GPR(tmp), dest); + let tmp = self.pick_temp_gpr().ok_or(CodegenError { + message: "singlepass can't pick a temp gpr".to_string(), + })?; + self.assembler.emit_mov(size, source, Location::GPR(tmp))?; + 
self.assembler.emit_mov(size, Location::GPR(tmp), dest) } - _ => unreachable!(), + _ => codegen_error!("singlepass move_location unreachable"), }, - Location::SIMD(_) => { - self.assembler.emit_mov(size, source, dest); - } - _ => unreachable!(), + Location::SIMD(_) => self.assembler.emit_mov(size, source, dest), + _ => codegen_error!("singlepass move_location unreachable"), } } // move a location to another @@ -2150,13 +2380,15 @@ impl Machine for MachineX86_64 { source: Location, size_op: Size, dest: Location, - ) { + ) -> Result<(), CodegenError> { let dst = match dest { Location::Memory(_, _) | Location::Memory2(_, _, _, _) => { - Location::GPR(self.acquire_temp_gpr().unwrap()) + Location::GPR(self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?) } Location::GPR(_) | Location::SIMD(_) => dest, - _ => unreachable!(), + _ => codegen_error!("singlepass move_location_extend unreachable"), }; match source { Location::GPR(_) @@ -2176,57 +2408,68 @@ impl Machine for MachineX86_64 { "unimplemented move_location_extend({:?}, {}, {:?}, {:?}, {:?}", size_val, signed, source, size_op, dest ), - } + }?; if dst != dest { - self.assembler.emit_mov(size_op, dst, dest); + self.assembler.emit_mov(size_op, dst, dest)?; match dst { Location::GPR(x) => self.release_gpr(x), - _ => unreachable!(), + _ => codegen_error!("singlepass move_location_extend unreachable"), }; } + Ok(()) } - fn load_address(&mut self, size: Size, reg: Location, mem: Location) { + fn load_address( + &mut self, + size: Size, + reg: Location, + mem: Location, + ) -> Result<(), CodegenError> { match reg { Location::GPR(_) => { match mem { Location::Memory(_, _) | Location::Memory2(_, _, _, _) => { // Memory moves with size < 32b do not zero upper bits. 
if size < Size::S32 { - self.assembler.emit_xor(Size::S32, reg, reg); + self.assembler.emit_xor(Size::S32, reg, reg)?; } - self.assembler.emit_mov(size, mem, reg); + self.assembler.emit_mov(size, mem, reg)?; } - _ => unreachable!(), + _ => codegen_error!("singlepass load_address unreachable"), } } - _ => unreachable!(), + _ => codegen_error!("singlepass load_address unreachable"), } + Ok(()) } // Init the stack loc counter - fn init_stack_loc(&mut self, init_stack_loc_cnt: u64, last_stack_loc: Location) { + fn init_stack_loc( + &mut self, + init_stack_loc_cnt: u64, + last_stack_loc: Location, + ) -> Result<(), CodegenError> { // Since these assemblies take up to 24 bytes, if more than 2 slots are initialized, then they are smaller. self.assembler.emit_mov( Size::S64, Location::Imm64(init_stack_loc_cnt), Location::GPR(GPR::RCX), - ); + )?; self.assembler - .emit_xor(Size::S64, Location::GPR(GPR::RAX), Location::GPR(GPR::RAX)); + .emit_xor(Size::S64, Location::GPR(GPR::RAX), Location::GPR(GPR::RAX))?; self.assembler - .emit_lea(Size::S64, last_stack_loc, Location::GPR(GPR::RDI)); - self.assembler.emit_rep_stosq(); + .emit_lea(Size::S64, last_stack_loc, Location::GPR(GPR::RDI))?; + self.assembler.emit_rep_stosq() } // Restore save_area - fn restore_saved_area(&mut self, saved_area_offset: i32) { + fn restore_saved_area(&mut self, saved_area_offset: i32) -> Result<(), CodegenError> { self.assembler.emit_lea( Size::S64, Location::Memory(GPR::RBP, -saved_area_offset), Location::GPR(GPR::RSP), - ); + ) } // Pop a location - fn pop_location(&mut self, location: Location) { - self.assembler.emit_pop(Size::S64, location); + fn pop_location(&mut self, location: Location) -> Result<(), CodegenError> { + self.assembler.emit_pop(Size::S64, location) } // Create a new `MachineState` with default values. 
fn new_machine_state(&self) -> MachineState { @@ -2242,94 +2485,114 @@ impl Machine for MachineX86_64 { self.assembler.get_offset() } - fn finalize_function(&mut self) { - self.assembler.finalize_function(); + fn finalize_function(&mut self) -> Result<(), CodegenError> { + self.assembler.finalize_function()?; + Ok(()) } - fn emit_function_prolog(&mut self) { - self.emit_push(Size::S64, Location::GPR(GPR::RBP)); - self.emit_unwind_op(UnwindOps::PushFP { up_to_sp: 16 }); - self.move_location(Size::S64, Location::GPR(GPR::RSP), Location::GPR(GPR::RBP)); - self.emit_unwind_op(UnwindOps::DefineNewFrame); + fn emit_function_prolog(&mut self) -> Result<(), CodegenError> { + self.emit_push(Size::S64, Location::GPR(GPR::RBP))?; + self.emit_unwind_op(UnwindOps::PushFP { up_to_sp: 16 })?; + self.move_location(Size::S64, Location::GPR(GPR::RSP), Location::GPR(GPR::RBP))?; + self.emit_unwind_op(UnwindOps::DefineNewFrame) } - fn emit_function_epilog(&mut self) { - self.move_location(Size::S64, Location::GPR(GPR::RBP), Location::GPR(GPR::RSP)); - self.emit_pop(Size::S64, Location::GPR(GPR::RBP)); + fn emit_function_epilog(&mut self) -> Result<(), CodegenError> { + self.move_location(Size::S64, Location::GPR(GPR::RBP), Location::GPR(GPR::RSP))?; + self.emit_pop(Size::S64, Location::GPR(GPR::RBP)) } - fn emit_function_return_value(&mut self, ty: WpType, canonicalize: bool, loc: Location) { + fn emit_function_return_value( + &mut self, + ty: WpType, + canonicalize: bool, + loc: Location, + ) -> Result<(), CodegenError> { if canonicalize { self.canonicalize_nan( match ty { WpType::F32 => Size::S32, WpType::F64 => Size::S64, - _ => unreachable!(), + _ => codegen_error!("singlepass emit_function_return_value unreachable"), }, loc, Location::GPR(GPR::RAX), - ); + ) } else { - self.emit_relaxed_mov(Size::S64, loc, Location::GPR(GPR::RAX)); + self.emit_relaxed_mov(Size::S64, loc, Location::GPR(GPR::RAX)) } } - fn emit_function_return_float(&mut self) { + fn emit_function_return_float(&mut 
self) -> Result<(), CodegenError> { self.move_location( Size::S64, Location::GPR(GPR::RAX), Location::SIMD(XMM::XMM0), - ); + ) } fn arch_supports_canonicalize_nan(&self) -> bool { self.assembler.arch_supports_canonicalize_nan() } - fn canonicalize_nan(&mut self, sz: Size, input: Location, output: Location) { - let tmp1 = self.acquire_temp_simd().unwrap(); - let tmp2 = self.acquire_temp_simd().unwrap(); - let tmp3 = self.acquire_temp_simd().unwrap(); - - self.emit_relaxed_mov(sz, input, Location::SIMD(tmp1)); - let tmpg1 = self.acquire_temp_gpr().unwrap(); + fn canonicalize_nan( + &mut self, + sz: Size, + input: Location, + output: Location, + ) -> Result<(), CodegenError> { + let tmp1 = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmp2 = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmp3 = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + + self.emit_relaxed_mov(sz, input, Location::SIMD(tmp1))?; + let tmpg1 = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; match sz { Size::S32 => { self.assembler - .emit_vcmpunordss(tmp1, XMMOrMemory::XMM(tmp1), tmp2); + .emit_vcmpunordss(tmp1, XMMOrMemory::XMM(tmp1), tmp2)?; self.move_location( Size::S32, Location::Imm32(0x7FC0_0000), // Canonical NaN Location::GPR(tmpg1), - ); - self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp3)); + )?; + self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp3))?; self.assembler - .emit_vblendvps(tmp2, XMMOrMemory::XMM(tmp3), tmp1, tmp1); + .emit_vblendvps(tmp2, XMMOrMemory::XMM(tmp3), tmp1, tmp1)?; } Size::S64 => { self.assembler - .emit_vcmpunordsd(tmp1, XMMOrMemory::XMM(tmp1), tmp2); + .emit_vcmpunordsd(tmp1, XMMOrMemory::XMM(tmp1), tmp2)?; self.move_location( Size::S64, 
Location::Imm64(0x7FF8_0000_0000_0000), // Canonical NaN Location::GPR(tmpg1), - ); - self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp3)); + )?; + self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp3))?; self.assembler - .emit_vblendvpd(tmp2, XMMOrMemory::XMM(tmp3), tmp1, tmp1); + .emit_vblendvpd(tmp2, XMMOrMemory::XMM(tmp3), tmp1, tmp1)?; } - _ => unreachable!(), + _ => codegen_error!("singlepass canonicalize_nan unreachable"), } - self.emit_relaxed_mov(sz, Location::SIMD(tmp1), output); + self.emit_relaxed_mov(sz, Location::SIMD(tmp1), output)?; self.release_gpr(tmpg1); self.release_simd(tmp3); self.release_simd(tmp2); self.release_simd(tmp1); + Ok(()) } - fn emit_illegal_op(&mut self, trap: TrapCode) { + fn emit_illegal_op(&mut self, trap: TrapCode) -> Result<(), CodegenError> { // code below is kept as a reference on how to emit illegal op with trap info // without an Undefined opcode with payload /* @@ -2343,23 +2606,24 @@ impl Machine for MachineX86_64 { // payload needs to be between 0-15 // this will emit an 40 0F B9 Cx opcode, with x the payload let offset = self.assembler.get_offset().0; - self.assembler.emit_ud1_payload(v); + self.assembler.emit_ud1_payload(v)?; self.mark_instruction_address_end(offset); + Ok(()) } fn get_label(&mut self) -> Label { self.assembler.new_dynamic_label() } - fn emit_label(&mut self, label: Label) { - self.assembler.emit_label(label); + fn emit_label(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_label(label) } fn get_grp_for_call(&self) -> GPR { GPR::RAX } - fn emit_call_register(&mut self, reg: GPR) { - self.assembler.emit_call_register(reg); + fn emit_call_register(&mut self, reg: GPR) -> Result<(), CodegenError> { + self.assembler.emit_call_register(reg) } - fn emit_call_label(&mut self, label: Label) { - self.assembler.emit_call_label(label); + fn emit_call_label(&mut self, label: Label) -> Result<(), CodegenError> { + 
self.assembler.emit_call_label(label) } fn get_gpr_for_ret(&self) -> GPR { GPR::RAX @@ -2372,114 +2636,170 @@ impl Machine for MachineX86_64 { self.assembler.arch_requires_indirect_call_trampoline() } - fn arch_emit_indirect_call_with_trampoline(&mut self, location: Location) { + fn arch_emit_indirect_call_with_trampoline( + &mut self, + location: Location, + ) -> Result<(), CodegenError> { self.assembler - .arch_emit_indirect_call_with_trampoline(location); + .arch_emit_indirect_call_with_trampoline(location) } - fn emit_debug_breakpoint(&mut self) { - self.assembler.emit_bkpt(); + fn emit_debug_breakpoint(&mut self) -> Result<(), CodegenError> { + self.assembler.emit_bkpt() } - fn emit_call_location(&mut self, location: Location) { - self.assembler.emit_call_location(location); + fn emit_call_location(&mut self, location: Location) -> Result<(), CodegenError> { + self.assembler.emit_call_location(location) } - fn location_address(&mut self, size: Size, source: Location, dest: Location) { - self.assembler.emit_lea(size, source, dest); + fn location_address( + &mut self, + size: Size, + source: Location, + dest: Location, + ) -> Result<(), CodegenError> { + self.assembler.emit_lea(size, source, dest) } // logic - fn location_and(&mut self, size: Size, source: Location, dest: Location, _flags: bool) { - self.assembler.emit_and(size, source, dest); + fn location_and( + &mut self, + size: Size, + source: Location, + dest: Location, + _flags: bool, + ) -> Result<(), CodegenError> { + self.assembler.emit_and(size, source, dest) } - fn location_xor(&mut self, size: Size, source: Location, dest: Location, _flags: bool) { - self.assembler.emit_xor(size, source, dest); + fn location_xor( + &mut self, + size: Size, + source: Location, + dest: Location, + _flags: bool, + ) -> Result<(), CodegenError> { + self.assembler.emit_xor(size, source, dest) } - fn location_or(&mut self, size: Size, source: Location, dest: Location, _flags: bool) { - self.assembler.emit_or(size, source, 
dest); + fn location_or( + &mut self, + size: Size, + source: Location, + dest: Location, + _flags: bool, + ) -> Result<(), CodegenError> { + self.assembler.emit_or(size, source, dest) } - fn location_test(&mut self, size: Size, source: Location, dest: Location) { - self.assembler.emit_test(size, source, dest); + fn location_test( + &mut self, + size: Size, + source: Location, + dest: Location, + ) -> Result<(), CodegenError> { + self.assembler.emit_test(size, source, dest) } // math - fn location_add(&mut self, size: Size, source: Location, dest: Location, _flags: bool) { - self.assembler.emit_add(size, source, dest); + fn location_add( + &mut self, + size: Size, + source: Location, + dest: Location, + _flags: bool, + ) -> Result<(), CodegenError> { + self.assembler.emit_add(size, source, dest) } - fn location_sub(&mut self, size: Size, source: Location, dest: Location, _flags: bool) { - self.assembler.emit_sub(size, source, dest); + fn location_sub( + &mut self, + size: Size, + source: Location, + dest: Location, + _flags: bool, + ) -> Result<(), CodegenError> { + self.assembler.emit_sub(size, source, dest) } - fn location_cmp(&mut self, size: Size, source: Location, dest: Location) { - self.assembler.emit_cmp(size, source, dest); + fn location_cmp( + &mut self, + size: Size, + source: Location, + dest: Location, + ) -> Result<(), CodegenError> { + self.assembler.emit_cmp(size, source, dest) } // (un)conditionnal jmp // (un)conditionnal jmp - fn jmp_unconditionnal(&mut self, label: Label) { - self.assembler.emit_jmp(Condition::None, label); + fn jmp_unconditionnal(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_jmp(Condition::None, label) } - fn jmp_on_equal(&mut self, label: Label) { - self.assembler.emit_jmp(Condition::Equal, label); + fn jmp_on_equal(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_jmp(Condition::Equal, label) } - fn jmp_on_different(&mut self, label: Label) { - 
self.assembler.emit_jmp(Condition::NotEqual, label); + fn jmp_on_different(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_jmp(Condition::NotEqual, label) } - fn jmp_on_above(&mut self, label: Label) { - self.assembler.emit_jmp(Condition::Above, label); + fn jmp_on_above(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_jmp(Condition::Above, label) } - fn jmp_on_aboveequal(&mut self, label: Label) { - self.assembler.emit_jmp(Condition::AboveEqual, label); + fn jmp_on_aboveequal(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_jmp(Condition::AboveEqual, label) } - fn jmp_on_belowequal(&mut self, label: Label) { - self.assembler.emit_jmp(Condition::BelowEqual, label); + fn jmp_on_belowequal(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_jmp(Condition::BelowEqual, label) } - fn jmp_on_overflow(&mut self, label: Label) { - self.assembler.emit_jmp(Condition::Carry, label); + fn jmp_on_overflow(&mut self, label: Label) -> Result<(), CodegenError> { + self.assembler.emit_jmp(Condition::Carry, label) } // jmp table - fn emit_jmp_to_jumptable(&mut self, label: Label, cond: Location) { - let tmp1 = self.pick_temp_gpr().unwrap(); + fn emit_jmp_to_jumptable(&mut self, label: Label, cond: Location) -> Result<(), CodegenError> { + let tmp1 = self.pick_temp_gpr().ok_or(CodegenError { + message: "singlepass can't pick a temp gpr".to_string(), + })?; self.reserve_gpr(tmp1); - let tmp2 = self.pick_temp_gpr().unwrap(); + let tmp2 = self.pick_temp_gpr().ok_or(CodegenError { + message: "singlepass can't pick a temp gpr".to_string(), + })?; self.reserve_gpr(tmp2); - self.assembler.emit_lea_label(label, Location::GPR(tmp1)); - self.move_location(Size::S32, cond, Location::GPR(tmp2)); + self.assembler.emit_lea_label(label, Location::GPR(tmp1))?; + self.move_location(Size::S32, cond, Location::GPR(tmp2))?; let instr_size = self.assembler.get_jmp_instr_size(); - 
self.assembler.emit_imul_imm32_gpr64(instr_size as _, tmp2); self.assembler - .emit_add(Size::S64, Location::GPR(tmp1), Location::GPR(tmp2)); - self.assembler.emit_jmp_location(Location::GPR(tmp2)); + .emit_imul_imm32_gpr64(instr_size as _, tmp2)?; + self.assembler + .emit_add(Size::S64, Location::GPR(tmp1), Location::GPR(tmp2))?; + self.assembler.emit_jmp_location(Location::GPR(tmp2))?; self.release_gpr(tmp2); self.release_gpr(tmp1); + Ok(()) } - fn align_for_loop(&mut self) { + fn align_for_loop(&mut self) -> Result<(), CodegenError> { // Pad with NOPs to the next 16-byte boundary. // Here we don't use the dynasm `.align 16` attribute because it pads the alignment with single-byte nops // which may lead to efficiency problems. match self.assembler.get_offset().0 % 16 { 0 => {} x => { - self.assembler.emit_nop_n(16 - x); + self.assembler.emit_nop_n(16 - x)?; } } assert_eq!(self.assembler.get_offset().0 % 16, 0); + Ok(()) } - fn emit_ret(&mut self) { - self.assembler.emit_ret(); + fn emit_ret(&mut self) -> Result<(), CodegenError> { + self.assembler.emit_ret() } - fn emit_push(&mut self, size: Size, loc: Location) { - self.assembler.emit_push(size, loc); + fn emit_push(&mut self, size: Size, loc: Location) -> Result<(), CodegenError> { + self.assembler.emit_push(size, loc) } - fn emit_pop(&mut self, size: Size, loc: Location) { - self.assembler.emit_pop(size, loc); + fn emit_pop(&mut self, size: Size, loc: Location) -> Result<(), CodegenError> { + self.assembler.emit_pop(size, loc) } - fn emit_memory_fence(&mut self) { + fn emit_memory_fence(&mut self) -> Result<(), CodegenError> { // nothing on x86_64 + Ok(()) } fn location_neg( @@ -2489,28 +2809,36 @@ impl Machine for MachineX86_64 { source: Location, size_op: Size, dest: Location, - ) { - self.move_location_extend(size_val, signed, source, size_op, dest); - self.assembler.emit_neg(size_val, dest); + ) -> Result<(), CodegenError> { + self.move_location_extend(size_val, signed, source, size_op, dest)?; + 
self.assembler.emit_neg(size_val, dest) } - fn emit_imul_imm32(&mut self, size: Size, imm32: u32, gpr: GPR) { + fn emit_imul_imm32(&mut self, size: Size, imm32: u32, gpr: GPR) -> Result<(), CodegenError> { match size { - Size::S64 => { - self.assembler.emit_imul_imm32_gpr64(imm32, gpr); - } + Size::S64 => self.assembler.emit_imul_imm32_gpr64(imm32, gpr), _ => { - unreachable!(); + codegen_error!("singlepass emit_imul_imm32 unreachable"); } } } // relaxed binop based... - fn emit_relaxed_mov(&mut self, sz: Size, src: Location, dst: Location) { - self.emit_relaxed_binop(AssemblerX64::emit_mov, sz, src, dst); + fn emit_relaxed_mov( + &mut self, + sz: Size, + src: Location, + dst: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_binop(AssemblerX64::emit_mov, sz, src, dst) } - fn emit_relaxed_cmp(&mut self, sz: Size, src: Location, dst: Location) { - self.emit_relaxed_binop(AssemblerX64::emit_cmp, sz, src, dst); + fn emit_relaxed_cmp( + &mut self, + sz: Size, + src: Location, + dst: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_binop(AssemblerX64::emit_cmp, sz, src, dst) } fn emit_relaxed_zero_extension( &mut self, @@ -2518,11 +2846,11 @@ impl Machine for MachineX86_64 { src: Location, sz_dst: Size, dst: Location, - ) { + ) -> Result<(), CodegenError> { if (sz_src == Size::S32 || sz_src == Size::S64) && sz_dst == Size::S64 { - self.emit_relaxed_binop(AssemblerX64::emit_mov, sz_src, src, dst); + self.emit_relaxed_binop(AssemblerX64::emit_mov, sz_src, src, dst) } else { - self.emit_relaxed_zx_sx(AssemblerX64::emit_movzx, sz_src, src, sz_dst, dst); + self.emit_relaxed_zx_sx(AssemblerX64::emit_movzx, sz_src, src, sz_dst, dst) } } fn emit_relaxed_sign_extension( @@ -2531,18 +2859,33 @@ impl Machine for MachineX86_64 { src: Location, sz_dst: Size, dst: Location, - ) { - self.emit_relaxed_zx_sx(AssemblerX64::emit_movsx, sz_src, src, sz_dst, dst); + ) -> Result<(), CodegenError> { + self.emit_relaxed_zx_sx(AssemblerX64::emit_movsx, sz_src, src, 
sz_dst, dst) } - fn emit_binop_add32(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_binop_i32(AssemblerX64::emit_add, loc_a, loc_b, ret); + fn emit_binop_add32( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_binop_i32(AssemblerX64::emit_add, loc_a, loc_b, ret) } - fn emit_binop_sub32(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_binop_i32(AssemblerX64::emit_sub, loc_a, loc_b, ret); + fn emit_binop_sub32( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_binop_i32(AssemblerX64::emit_sub, loc_a, loc_b, ret) } - fn emit_binop_mul32(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_binop_i32(AssemblerX64::emit_imul, loc_a, loc_b, ret); + fn emit_binop_mul32( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_binop_i32(AssemblerX64::emit_imul, loc_a, loc_b, ret) } fn emit_binop_udiv32( &mut self, @@ -2551,21 +2894,21 @@ impl Machine for MachineX86_64 { ret: Location, integer_division_by_zero: Label, _integer_overflow: Label, - ) -> usize { + ) -> Result { // We assume that RAX and RDX are temporary registers here. 
self.assembler - .emit_mov(Size::S32, loc_a, Location::GPR(GPR::RAX)); + .emit_mov(Size::S32, loc_a, Location::GPR(GPR::RAX))?; self.assembler - .emit_xor(Size::S32, Location::GPR(GPR::RDX), Location::GPR(GPR::RDX)); + .emit_xor(Size::S32, Location::GPR(GPR::RDX), Location::GPR(GPR::RDX))?; let offset = self.emit_relaxed_xdiv( AssemblerX64::emit_div, Size::S32, loc_b, integer_division_by_zero, - ); + )?; self.assembler - .emit_mov(Size::S32, Location::GPR(GPR::RAX), ret); - offset + .emit_mov(Size::S32, Location::GPR(GPR::RAX), ret)?; + Ok(offset) } fn emit_binop_sdiv32( &mut self, @@ -2574,20 +2917,20 @@ impl Machine for MachineX86_64 { ret: Location, integer_division_by_zero: Label, _integer_overflow: Label, - ) -> usize { + ) -> Result { // We assume that RAX and RDX are temporary registers here. self.assembler - .emit_mov(Size::S32, loc_a, Location::GPR(GPR::RAX)); - self.assembler.emit_cdq(); + .emit_mov(Size::S32, loc_a, Location::GPR(GPR::RAX))?; + self.assembler.emit_cdq()?; let offset = self.emit_relaxed_xdiv( AssemblerX64::emit_idiv, Size::S32, loc_b, integer_division_by_zero, - ); + )?; self.assembler - .emit_mov(Size::S32, Location::GPR(GPR::RAX), ret); - offset + .emit_mov(Size::S32, Location::GPR(GPR::RAX), ret)?; + Ok(offset) } fn emit_binop_urem32( &mut self, @@ -2596,21 +2939,21 @@ impl Machine for MachineX86_64 { ret: Location, integer_division_by_zero: Label, _integer_overflow: Label, - ) -> usize { + ) -> Result { // We assume that RAX and RDX are temporary registers here. 
self.assembler - .emit_mov(Size::S32, loc_a, Location::GPR(GPR::RAX)); + .emit_mov(Size::S32, loc_a, Location::GPR(GPR::RAX))?; self.assembler - .emit_xor(Size::S32, Location::GPR(GPR::RDX), Location::GPR(GPR::RDX)); + .emit_xor(Size::S32, Location::GPR(GPR::RDX), Location::GPR(GPR::RDX))?; let offset = self.emit_relaxed_xdiv( AssemblerX64::emit_div, Size::S32, loc_b, integer_division_by_zero, - ); + )?; self.assembler - .emit_mov(Size::S32, Location::GPR(GPR::RDX), ret); - offset + .emit_mov(Size::S32, Location::GPR(GPR::RDX), ret)?; + Ok(offset) } fn emit_binop_srem32( &mut self, @@ -2619,110 +2962,179 @@ impl Machine for MachineX86_64 { ret: Location, integer_division_by_zero: Label, _integer_overflow: Label, - ) -> usize { + ) -> Result { // We assume that RAX and RDX are temporary registers here. let normal_path = self.assembler.get_label(); let end = self.assembler.get_label(); - self.emit_relaxed_cmp(Size::S32, Location::Imm32(0x80000000), loc_a); - self.assembler.emit_jmp(Condition::NotEqual, normal_path); - self.emit_relaxed_cmp(Size::S32, Location::Imm32(0xffffffff), loc_b); - self.assembler.emit_jmp(Condition::NotEqual, normal_path); - self.move_location(Size::S32, Location::Imm32(0), ret); - self.assembler.emit_jmp(Condition::None, end); + self.emit_relaxed_cmp(Size::S32, Location::Imm32(0x80000000), loc_a)?; + self.assembler.emit_jmp(Condition::NotEqual, normal_path)?; + self.emit_relaxed_cmp(Size::S32, Location::Imm32(0xffffffff), loc_b)?; + self.assembler.emit_jmp(Condition::NotEqual, normal_path)?; + self.move_location(Size::S32, Location::Imm32(0), ret)?; + self.assembler.emit_jmp(Condition::None, end)?; - self.emit_label(normal_path); + self.emit_label(normal_path)?; self.assembler - .emit_mov(Size::S32, loc_a, Location::GPR(GPR::RAX)); - self.assembler.emit_cdq(); + .emit_mov(Size::S32, loc_a, Location::GPR(GPR::RAX))?; + self.assembler.emit_cdq()?; let offset = self.emit_relaxed_xdiv( AssemblerX64::emit_idiv, Size::S32, loc_b, 
integer_division_by_zero, - ); + )?; self.assembler - .emit_mov(Size::S32, Location::GPR(GPR::RDX), ret); + .emit_mov(Size::S32, Location::GPR(GPR::RDX), ret)?; - self.emit_label(end); - offset + self.emit_label(end)?; + Ok(offset) } - fn emit_binop_and32(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_binop_i32(AssemblerX64::emit_and, loc_a, loc_b, ret); + fn emit_binop_and32( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_binop_i32(AssemblerX64::emit_and, loc_a, loc_b, ret) } - fn emit_binop_or32(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_binop_i32(AssemblerX64::emit_or, loc_a, loc_b, ret); + fn emit_binop_or32( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_binop_i32(AssemblerX64::emit_or, loc_a, loc_b, ret) } - fn emit_binop_xor32(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_binop_i32(AssemblerX64::emit_xor, loc_a, loc_b, ret); + fn emit_binop_xor32( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_binop_i32(AssemblerX64::emit_xor, loc_a, loc_b, ret) } - fn i32_cmp_ge_s(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::GreaterEqual, loc_a, loc_b, ret); + fn i32_cmp_ge_s( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::GreaterEqual, loc_a, loc_b, ret) } - fn i32_cmp_gt_s(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::Greater, loc_a, loc_b, ret); + fn i32_cmp_gt_s( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::Greater, loc_a, loc_b, ret) } - fn i32_cmp_le_s(&mut self, loc_a: 
Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::LessEqual, loc_a, loc_b, ret); + fn i32_cmp_le_s( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::LessEqual, loc_a, loc_b, ret) } - fn i32_cmp_lt_s(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::Less, loc_a, loc_b, ret); + fn i32_cmp_lt_s( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::Less, loc_a, loc_b, ret) } - fn i32_cmp_ge_u(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::AboveEqual, loc_a, loc_b, ret); + fn i32_cmp_ge_u( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::AboveEqual, loc_a, loc_b, ret) } - fn i32_cmp_gt_u(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::Above, loc_a, loc_b, ret); + fn i32_cmp_gt_u( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::Above, loc_a, loc_b, ret) } - fn i32_cmp_le_u(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::BelowEqual, loc_a, loc_b, ret); + fn i32_cmp_le_u( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::BelowEqual, loc_a, loc_b, ret) } - fn i32_cmp_lt_u(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::Below, loc_a, loc_b, ret); + fn i32_cmp_lt_u( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + 
self.emit_cmpop_i32_dynamic_b(Condition::Below, loc_a, loc_b, ret) } - fn i32_cmp_ne(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::NotEqual, loc_a, loc_b, ret); + fn i32_cmp_ne( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::NotEqual, loc_a, loc_b, ret) } - fn i32_cmp_eq(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i32_dynamic_b(Condition::Equal, loc_a, loc_b, ret); + fn i32_cmp_eq( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i32_dynamic_b(Condition::Equal, loc_a, loc_b, ret) } - fn i32_clz(&mut self, loc: Location, ret: Location) { + fn i32_clz(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { let src = match loc { Location::Imm32(_) | Location::Memory(_, _) => { - let tmp = self.acquire_temp_gpr().unwrap(); - self.move_location(Size::S32, loc, Location::GPR(tmp)); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location(Size::S32, loc, Location::GPR(tmp))?; tmp } Location::GPR(reg) => reg, _ => { - unreachable!(); + codegen_error!("singlepass i32_clz unreachable"); } }; let dst = match ret { - Location::Memory(_, _) => self.acquire_temp_gpr().unwrap(), + Location::Memory(_, _) => self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?, Location::GPR(reg) => reg, _ => { - unreachable!(); + codegen_error!("singlepass i32_clz unreachable"); } }; if self.assembler.arch_has_xzcnt() { self.assembler - .arch_emit_lzcnt(Size::S32, Location::GPR(src), Location::GPR(dst)); + .arch_emit_lzcnt(Size::S32, Location::GPR(src), Location::GPR(dst))?; } else { let zero_path = self.assembler.get_label(); let end = self.assembler.get_label(); - 
self.assembler.emit_test_gpr_64(src); - self.assembler.emit_jmp(Condition::Equal, zero_path); + self.assembler.emit_test_gpr_64(src)?; + self.assembler.emit_jmp(Condition::Equal, zero_path)?; self.assembler - .emit_bsr(Size::S32, Location::GPR(src), Location::GPR(dst)); + .emit_bsr(Size::S32, Location::GPR(src), Location::GPR(dst))?; self.assembler - .emit_xor(Size::S32, Location::Imm32(31), Location::GPR(dst)); - self.assembler.emit_jmp(Condition::None, end); - self.emit_label(zero_path); - self.move_location(Size::S32, Location::Imm32(32), Location::GPR(dst)); - self.emit_label(end); + .emit_xor(Size::S32, Location::Imm32(31), Location::GPR(dst))?; + self.assembler.emit_jmp(Condition::None, end)?; + self.emit_label(zero_path)?; + self.move_location(Size::S32, Location::Imm32(32), Location::GPR(dst))?; + self.emit_label(end)?; } match loc { Location::Imm32(_) | Location::Memory(_, _) => { @@ -2731,45 +3143,50 @@ impl Machine for MachineX86_64 { _ => {} }; if let Location::Memory(_, _) = ret { - self.move_location(Size::S32, Location::GPR(dst), ret); + self.move_location(Size::S32, Location::GPR(dst), ret)?; self.release_gpr(dst); }; + Ok(()) } - fn i32_ctz(&mut self, loc: Location, ret: Location) { + fn i32_ctz(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { let src = match loc { Location::Imm32(_) | Location::Memory(_, _) => { - let tmp = self.acquire_temp_gpr().unwrap(); - self.move_location(Size::S32, loc, Location::GPR(tmp)); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location(Size::S32, loc, Location::GPR(tmp))?; tmp } Location::GPR(reg) => reg, _ => { - unreachable!(); + codegen_error!("singlepass i32_ctz unreachable"); } }; let dst = match ret { - Location::Memory(_, _) => self.acquire_temp_gpr().unwrap(), + Location::Memory(_, _) => self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + 
})?, Location::GPR(reg) => reg, _ => { - unreachable!(); + codegen_error!("singlepass i32_ctz unreachable"); } }; if self.assembler.arch_has_xzcnt() { self.assembler - .arch_emit_tzcnt(Size::S32, Location::GPR(src), Location::GPR(dst)); + .arch_emit_tzcnt(Size::S32, Location::GPR(src), Location::GPR(dst))?; } else { let zero_path = self.assembler.get_label(); let end = self.assembler.get_label(); - self.assembler.emit_test_gpr_64(src); - self.assembler.emit_jmp(Condition::Equal, zero_path); + self.assembler.emit_test_gpr_64(src)?; + self.assembler.emit_jmp(Condition::Equal, zero_path)?; self.assembler - .emit_bsf(Size::S32, Location::GPR(src), Location::GPR(dst)); - self.assembler.emit_jmp(Condition::None, end); - self.emit_label(zero_path); - self.move_location(Size::S32, Location::Imm32(32), Location::GPR(dst)); - self.emit_label(end); + .emit_bsf(Size::S32, Location::GPR(src), Location::GPR(dst))?; + self.assembler.emit_jmp(Condition::None, end)?; + self.emit_label(zero_path)?; + self.move_location(Size::S32, Location::Imm32(32), Location::GPR(dst))?; + self.emit_label(end)?; } match loc { @@ -2779,60 +3196,93 @@ impl Machine for MachineX86_64 { _ => {} }; if let Location::Memory(_, _) = ret { - self.move_location(Size::S32, Location::GPR(dst), ret); + self.move_location(Size::S32, Location::GPR(dst), ret)?; self.release_gpr(dst); }; + Ok(()) } - fn i32_popcnt(&mut self, loc: Location, ret: Location) { + fn i32_popcnt(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { match loc { Location::Imm32(_) => { - let tmp = self.acquire_temp_gpr().unwrap(); - self.move_location(Size::S32, loc, Location::GPR(tmp)); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location(Size::S32, loc, Location::GPR(tmp))?; if let Location::Memory(_, _) = ret { - let out_tmp = self.acquire_temp_gpr().unwrap(); + let out_tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: 
"singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler.emit_popcnt( Size::S32, Location::GPR(tmp), Location::GPR(out_tmp), - ); - self.move_location(Size::S32, Location::GPR(out_tmp), ret); + )?; + self.move_location(Size::S32, Location::GPR(out_tmp), ret)?; self.release_gpr(out_tmp); } else { self.assembler - .emit_popcnt(Size::S32, Location::GPR(tmp), ret); + .emit_popcnt(Size::S32, Location::GPR(tmp), ret)?; } self.release_gpr(tmp); } Location::Memory(_, _) | Location::GPR(_) => { if let Location::Memory(_, _) = ret { - let out_tmp = self.acquire_temp_gpr().unwrap(); + let out_tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_popcnt(Size::S32, loc, Location::GPR(out_tmp)); - self.move_location(Size::S32, Location::GPR(out_tmp), ret); + .emit_popcnt(Size::S32, loc, Location::GPR(out_tmp))?; + self.move_location(Size::S32, Location::GPR(out_tmp), ret)?; self.release_gpr(out_tmp); } else { - self.assembler.emit_popcnt(Size::S32, loc, ret); + self.assembler.emit_popcnt(Size::S32, loc, ret)?; } } _ => { - unreachable!(); + codegen_error!("singlepass i32_popcnt unreachable"); } } + Ok(()) } - fn i32_shl(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_shift_i32(AssemblerX64::emit_shl, loc_a, loc_b, ret); + fn i32_shl( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_shift_i32(AssemblerX64::emit_shl, loc_a, loc_b, ret) } - fn i32_shr(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_shift_i32(AssemblerX64::emit_shr, loc_a, loc_b, ret); + fn i32_shr( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_shift_i32(AssemblerX64::emit_shr, loc_a, loc_b, ret) } - fn i32_sar(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_shift_i32(AssemblerX64::emit_sar, loc_a, loc_b, 
ret); + fn i32_sar( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_shift_i32(AssemblerX64::emit_sar, loc_a, loc_b, ret) } - fn i32_rol(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_shift_i32(AssemblerX64::emit_rol, loc_a, loc_b, ret); + fn i32_rol( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_shift_i32(AssemblerX64::emit_rol, loc_a, loc_b, ret) } - fn i32_ror(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_shift_i32(AssemblerX64::emit_ror, loc_a, loc_b, ret); + fn i32_ror( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_shift_i32(AssemblerX64::emit_ror, loc_a, loc_b, ret) } fn i32_load( &mut self, @@ -2843,7 +3293,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -2859,9 +3309,9 @@ impl Machine for MachineX86_64 { Size::S32, Location::Memory(addr, 0), ret, - ); + ) }, - ); + ) } fn i32_load_8u( &mut self, @@ -2872,7 +3322,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -2889,9 +3339,9 @@ impl Machine for MachineX86_64 { Location::Memory(addr, 0), Size::S32, ret, - ); + ) }, - ); + ) } fn i32_load_8s( &mut self, @@ -2902,7 +3352,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -2919,9 +3369,9 @@ impl Machine for MachineX86_64 { Location::Memory(addr, 0), Size::S32, ret, - ); + ) }, - ); + ) } fn i32_load_16u( &mut self, @@ -2932,7 +3382,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> 
Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -2949,9 +3399,9 @@ impl Machine for MachineX86_64 { Location::Memory(addr, 0), Size::S32, ret, - ); + ) }, - ); + ) } fn i32_load_16s( &mut self, @@ -2962,7 +3412,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -2979,9 +3429,9 @@ impl Machine for MachineX86_64 { Location::Memory(addr, 0), Size::S32, ret, - ); + ) }, - ); + ) } fn i32_atomic_load( &mut self, @@ -2992,7 +3442,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -3002,10 +3452,8 @@ impl Machine for MachineX86_64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_mov(Size::S32, Location::Memory(addr, 0), ret); - }, - ); + |this, addr| this.emit_relaxed_mov(Size::S32, Location::Memory(addr, 0), ret), + ) } fn i32_atomic_load_8u( &mut self, @@ -3016,7 +3464,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -3032,9 +3480,9 @@ impl Machine for MachineX86_64 { Location::Memory(addr, 0), Size::S32, ret, - ); + ) }, - ); + ) } fn i32_atomic_load_16u( &mut self, @@ -3045,7 +3493,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -3061,9 +3509,9 @@ impl Machine for MachineX86_64 { Location::Memory(addr, 0), Size::S32, ret, - ); + ) }, - ); + ) } fn i32_save( &mut self, @@ -3074,7 +3522,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -3090,9 +3538,9 @@ impl Machine for MachineX86_64 { Size::S32, target_value, 
Location::Memory(addr, 0), - ); + ) }, - ); + ) } fn i32_save_8( &mut self, @@ -3103,7 +3551,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -3119,9 +3567,9 @@ impl Machine for MachineX86_64 { Size::S8, target_value, Location::Memory(addr, 0), - ); + ) }, - ); + ) } fn i32_save_16( &mut self, @@ -3132,7 +3580,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -3148,9 +3596,9 @@ impl Machine for MachineX86_64 { Size::S16, target_value, Location::Memory(addr, 0), - ); + ) }, - ); + ) } // x86_64 have a strong memory model, so coherency between all threads (core) is garantied // and aligned move is guarantied to be atomic, too or from memory @@ -3164,7 +3612,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -3180,9 +3628,9 @@ impl Machine for MachineX86_64 { Size::S32, value, Location::Memory(addr, 0), - ); + ) }, - ); + ) } fn i32_atomic_save_8( &mut self, @@ -3193,7 +3641,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -3209,9 +3657,9 @@ impl Machine for MachineX86_64 { Size::S8, value, Location::Memory(addr, 0), - ); + ) }, - ); + ) } fn i32_atomic_save_16( &mut self, @@ -3222,7 +3670,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -3238,9 +3686,9 @@ impl Machine for MachineX86_64 { Size::S16, value, Location::Memory(addr, 0), - ); + ) }, - ); + ) } // i32 atomic Add with i32 fn i32_atomic_add( @@ -3253,9 +3701,11 @@ impl 
Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); - self.move_location(Size::S32, loc, Location::GPR(value)); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location(Size::S32, loc, Location::GPR(value))?; self.memory_op( target, memarg, @@ -3270,11 +3720,12 @@ impl Machine for MachineX86_64 { Size::S32, Location::GPR(value), Location::Memory(addr, 0), - ); + ) }, - ); - self.move_location(Size::S32, Location::GPR(value), ret); + )?; + self.move_location(Size::S32, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i32 atomic Add with u8 fn i32_atomic_add_8u( @@ -3287,9 +3738,11 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); - self.move_location_extend(Size::S8, false, loc, Size::S32, Location::GPR(value)); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location_extend(Size::S8, false, loc, Size::S32, Location::GPR(value))?; self.memory_op( target, memarg, @@ -3304,11 +3757,12 @@ impl Machine for MachineX86_64 { Size::S8, Location::GPR(value), Location::Memory(addr, 0), - ); + ) }, - ); - self.move_location(Size::S32, Location::GPR(value), ret); + )?; + self.move_location(Size::S32, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i32 atomic Add with u16 fn i32_atomic_add_16u( @@ -3321,9 +3775,11 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); - self.move_location_extend(Size::S16, false, loc, Size::S32, Location::GPR(value)); + ) -> Result<(), CodegenError> { + let value = 
self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location_extend(Size::S16, false, loc, Size::S32, Location::GPR(value))?; self.memory_op( target, memarg, @@ -3338,11 +3794,12 @@ impl Machine for MachineX86_64 { Size::S16, Location::GPR(value), Location::Memory(addr, 0), - ); + ) }, - ); - self.move_location(Size::S32, Location::GPR(value), ret); + )?; + self.move_location(Size::S32, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i32 atomic Sub with i32 fn i32_atomic_sub( @@ -3355,9 +3812,11 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); - self.location_neg(Size::S32, false, loc, Size::S32, Location::GPR(value)); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.location_neg(Size::S32, false, loc, Size::S32, Location::GPR(value))?; self.memory_op( target, memarg, @@ -3372,11 +3831,12 @@ impl Machine for MachineX86_64 { Size::S32, Location::GPR(value), Location::Memory(addr, 0), - ); + ) }, - ); - self.move_location(Size::S32, Location::GPR(value), ret); + )?; + self.move_location(Size::S32, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i32 atomic Sub with u8 fn i32_atomic_sub_8u( @@ -3389,9 +3849,11 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); - self.location_neg(Size::S8, false, loc, Size::S32, Location::GPR(value)); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.location_neg(Size::S8, false, loc, Size::S32, Location::GPR(value))?; self.memory_op( target, memarg, @@ -3406,11 +3868,12 @@ impl Machine for 
MachineX86_64 { Size::S8, Location::GPR(value), Location::Memory(addr, 0), - ); + ) }, - ); - self.move_location(Size::S32, Location::GPR(value), ret); + )?; + self.move_location(Size::S32, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i32 atomic Sub with u16 fn i32_atomic_sub_16u( @@ -3423,9 +3886,11 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); - self.location_neg(Size::S16, false, loc, Size::S32, Location::GPR(value)); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.location_neg(Size::S16, false, loc, Size::S32, Location::GPR(value))?; self.memory_op( target, memarg, @@ -3440,11 +3905,12 @@ impl Machine for MachineX86_64 { Size::S16, Location::GPR(value), Location::Memory(addr, 0), - ); + ) }, - ); - self.move_location(Size::S32, Location::GPR(value), ret); + )?; + self.move_location(Size::S32, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i32 atomic And with i32 fn i32_atomic_and( @@ -3457,7 +3923,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -3472,9 +3938,9 @@ impl Machine for MachineX86_64 { heap_access_oob, |this, src, dst| { this.assembler - .emit_and(Size::S32, Location::GPR(src), Location::GPR(dst)); + .emit_and(Size::S32, Location::GPR(src), Location::GPR(dst)) }, - ); + ) } // i32 atomic And with u8 fn i32_atomic_and_8u( @@ -3487,7 +3953,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -3502,9 +3968,9 @@ impl Machine for MachineX86_64 { heap_access_oob, |this, src, dst| { this.assembler - .emit_and(Size::S32, 
Location::GPR(src), Location::GPR(dst)); + .emit_and(Size::S32, Location::GPR(src), Location::GPR(dst)) }, - ); + ) } // i32 atomic And with u16 fn i32_atomic_and_16u( @@ -3517,7 +3983,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -3532,9 +3998,9 @@ impl Machine for MachineX86_64 { heap_access_oob, |this, src, dst| { this.assembler - .emit_and(Size::S32, Location::GPR(src), Location::GPR(dst)); + .emit_and(Size::S32, Location::GPR(src), Location::GPR(dst)) }, - ); + ) } // i32 atomic Or with i32 fn i32_atomic_or( @@ -3547,7 +4013,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -3562,9 +4028,9 @@ impl Machine for MachineX86_64 { heap_access_oob, |this, src, dst| { this.assembler - .emit_or(Size::S32, Location::GPR(src), Location::GPR(dst)); + .emit_or(Size::S32, Location::GPR(src), Location::GPR(dst)) }, - ); + ) } // i32 atomic Or with u8 fn i32_atomic_or_8u( @@ -3577,7 +4043,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -3592,9 +4058,9 @@ impl Machine for MachineX86_64 { heap_access_oob, |this, src, dst| { this.assembler - .emit_or(Size::S32, Location::GPR(src), Location::GPR(dst)); + .emit_or(Size::S32, Location::GPR(src), Location::GPR(dst)) }, - ); + ) } // i32 atomic Or with u16 fn i32_atomic_or_16u( @@ -3607,7 +4073,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -3622,9 +4088,9 @@ impl Machine for MachineX86_64 { heap_access_oob, |this, src, dst| { this.assembler - .emit_or(Size::S32, Location::GPR(src), 
Location::GPR(dst)); + .emit_or(Size::S32, Location::GPR(src), Location::GPR(dst)) }, - ); + ) } // i32 atomic Xor with i32 fn i32_atomic_xor( @@ -3637,7 +4103,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -3652,9 +4118,9 @@ impl Machine for MachineX86_64 { heap_access_oob, |this, src, dst| { this.assembler - .emit_xor(Size::S32, Location::GPR(src), Location::GPR(dst)); + .emit_xor(Size::S32, Location::GPR(src), Location::GPR(dst)) }, - ); + ) } // i32 atomic Xor with u8 fn i32_atomic_xor_8u( @@ -3667,7 +4133,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -3682,9 +4148,9 @@ impl Machine for MachineX86_64 { heap_access_oob, |this, src, dst| { this.assembler - .emit_xor(Size::S32, Location::GPR(src), Location::GPR(dst)); + .emit_xor(Size::S32, Location::GPR(src), Location::GPR(dst)) }, - ); + ) } // i32 atomic Xor with u16 fn i32_atomic_xor_16u( @@ -3697,7 +4163,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -3712,9 +4178,9 @@ impl Machine for MachineX86_64 { heap_access_oob, |this, src, dst| { this.assembler - .emit_xor(Size::S32, Location::GPR(src), Location::GPR(dst)); + .emit_xor(Size::S32, Location::GPR(src), Location::GPR(dst)) }, - ); + ) } // i32 atomic Exchange with i32 fn i32_atomic_xchg( @@ -3727,9 +4193,11 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); - self.move_location(Size::S32, loc, Location::GPR(value)); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp 
gpr".to_string(), + })?; + self.move_location(Size::S32, loc, Location::GPR(value))?; self.memory_op( target, memarg, @@ -3740,15 +4208,13 @@ impl Machine for MachineX86_64 { offset, heap_access_oob, |this, addr| { - this.assembler.emit_xchg( - Size::S32, - Location::GPR(value), - Location::Memory(addr, 0), - ); + this.assembler + .emit_xchg(Size::S32, Location::GPR(value), Location::Memory(addr, 0)) }, - ); - self.move_location(Size::S32, Location::GPR(value), ret); + )?; + self.move_location(Size::S32, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i32 atomic Exchange with u8 fn i32_atomic_xchg_8u( @@ -3761,10 +4227,12 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_movzx(Size::S8, loc, Size::S32, Location::GPR(value)); + .emit_movzx(Size::S8, loc, Size::S32, Location::GPR(value))?; self.memory_op( target, memarg, @@ -3776,11 +4244,12 @@ impl Machine for MachineX86_64 { heap_access_oob, |this, addr| { this.assembler - .emit_xchg(Size::S8, Location::GPR(value), Location::Memory(addr, 0)); + .emit_xchg(Size::S8, Location::GPR(value), Location::Memory(addr, 0)) }, - ); - self.move_location(Size::S32, Location::GPR(value), ret); + )?; + self.move_location(Size::S32, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i32 atomic Exchange with u16 fn i32_atomic_xchg_16u( @@ -3793,10 +4262,12 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_movzx(Size::S16, loc, 
Size::S32, Location::GPR(value)); + .emit_movzx(Size::S16, loc, Size::S32, Location::GPR(value))?; self.memory_op( target, memarg, @@ -3807,15 +4278,13 @@ impl Machine for MachineX86_64 { offset, heap_access_oob, |this, addr| { - this.assembler.emit_xchg( - Size::S16, - Location::GPR(value), - Location::Memory(addr, 0), - ); + this.assembler + .emit_xchg(Size::S16, Location::GPR(value), Location::Memory(addr, 0)) }, - ); - self.move_location(Size::S32, Location::GPR(value), ret); + )?; + self.move_location(Size::S32, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i32 atomic Exchange with i32 fn i32_atomic_cmpxchg( @@ -3829,7 +4298,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { let compare = self.reserve_unused_temp_gpr(GPR::RAX); let value = if cmp == Location::GPR(GPR::R14) { if new == Location::GPR(GPR::R13) { @@ -3840,11 +4309,11 @@ impl Machine for MachineX86_64 { } else { GPR::R14 }; - self.assembler.emit_push(Size::S64, Location::GPR(value)); + self.assembler.emit_push(Size::S64, Location::GPR(value))?; self.assembler - .emit_mov(Size::S32, cmp, Location::GPR(compare)); + .emit_mov(Size::S32, cmp, Location::GPR(compare))?; self.assembler - .emit_mov(Size::S32, new, Location::GPR(value)); + .emit_mov(Size::S32, new, Location::GPR(value))?; self.memory_op( target, @@ -3860,13 +4329,14 @@ impl Machine for MachineX86_64 { Size::S32, Location::GPR(value), Location::Memory(addr, 0), - ); + )?; this.assembler - .emit_mov(Size::S32, Location::GPR(compare), ret); + .emit_mov(Size::S32, Location::GPR(compare), ret) }, - ); - self.assembler.emit_pop(Size::S64, Location::GPR(value)); + )?; + self.assembler.emit_pop(Size::S64, Location::GPR(value))?; self.release_gpr(compare); + Ok(()) } // i32 atomic Exchange with u8 fn i32_atomic_cmpxchg_8u( @@ -3880,7 +4350,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - 
) { + ) -> Result<(), CodegenError> { let compare = self.reserve_unused_temp_gpr(GPR::RAX); let value = if cmp == Location::GPR(GPR::R14) { if new == Location::GPR(GPR::R13) { @@ -3891,11 +4361,11 @@ impl Machine for MachineX86_64 { } else { GPR::R14 }; - self.assembler.emit_push(Size::S64, Location::GPR(value)); + self.assembler.emit_push(Size::S64, Location::GPR(value))?; self.assembler - .emit_mov(Size::S32, cmp, Location::GPR(compare)); + .emit_mov(Size::S32, cmp, Location::GPR(compare))?; self.assembler - .emit_mov(Size::S32, new, Location::GPR(value)); + .emit_mov(Size::S32, new, Location::GPR(value))?; self.memory_op( target, @@ -3911,13 +4381,14 @@ impl Machine for MachineX86_64 { Size::S8, Location::GPR(value), Location::Memory(addr, 0), - ); + )?; this.assembler - .emit_movzx(Size::S8, Location::GPR(compare), Size::S32, ret); + .emit_movzx(Size::S8, Location::GPR(compare), Size::S32, ret) }, - ); - self.assembler.emit_pop(Size::S64, Location::GPR(value)); + )?; + self.assembler.emit_pop(Size::S64, Location::GPR(value))?; self.release_gpr(compare); + Ok(()) } // i32 atomic Exchange with u16 fn i32_atomic_cmpxchg_16u( @@ -3931,7 +4402,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { let compare = self.reserve_unused_temp_gpr(GPR::RAX); let value = if cmp == Location::GPR(GPR::R14) { if new == Location::GPR(GPR::R13) { @@ -3942,11 +4413,11 @@ impl Machine for MachineX86_64 { } else { GPR::R14 }; - self.assembler.emit_push(Size::S64, Location::GPR(value)); + self.assembler.emit_push(Size::S64, Location::GPR(value))?; self.assembler - .emit_mov(Size::S32, cmp, Location::GPR(compare)); + .emit_mov(Size::S32, cmp, Location::GPR(compare))?; self.assembler - .emit_mov(Size::S32, new, Location::GPR(value)); + .emit_mov(Size::S32, new, Location::GPR(value))?; self.memory_op( target, @@ -3962,42 +4433,58 @@ impl Machine for MachineX86_64 { Size::S16, Location::GPR(value), 
Location::Memory(addr, 0), - ); + )?; this.assembler - .emit_movzx(Size::S16, Location::GPR(compare), Size::S32, ret); + .emit_movzx(Size::S16, Location::GPR(compare), Size::S32, ret) }, - ); - self.assembler.emit_pop(Size::S64, Location::GPR(value)); + )?; + self.assembler.emit_pop(Size::S64, Location::GPR(value))?; self.release_gpr(compare); + Ok(()) } fn emit_call_with_reloc( &mut self, _calling_convention: CallingConvention, reloc_target: RelocationTarget, - ) -> Vec { + ) -> Result, CodegenError> { let mut relocations = vec![]; let next = self.get_label(); let reloc_at = self.assembler.get_offset().0 + 1; // skip E8 - self.assembler.emit_call_label(next); - self.emit_label(next); + self.assembler.emit_call_label(next)?; + self.emit_label(next)?; relocations.push(Relocation { kind: RelocationKind::X86CallPCRel4, reloc_target, offset: reloc_at as u32, addend: -4, }); - relocations + Ok(relocations) } - fn emit_binop_add64(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_binop_i64(AssemblerX64::emit_add, loc_a, loc_b, ret); + fn emit_binop_add64( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_binop_i64(AssemblerX64::emit_add, loc_a, loc_b, ret) } - fn emit_binop_sub64(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_binop_i64(AssemblerX64::emit_sub, loc_a, loc_b, ret); + fn emit_binop_sub64( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_binop_i64(AssemblerX64::emit_sub, loc_a, loc_b, ret) } - fn emit_binop_mul64(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_binop_i64(AssemblerX64::emit_imul, loc_a, loc_b, ret); + fn emit_binop_mul64( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_binop_i64(AssemblerX64::emit_imul, loc_a, loc_b, ret) } fn emit_binop_udiv64( &mut self, @@ -4006,21 +4493,21 
@@ impl Machine for MachineX86_64 { ret: Location, integer_division_by_zero: Label, _integer_overflow: Label, - ) -> usize { + ) -> Result { // We assume that RAX and RDX are temporary registers here. self.assembler - .emit_mov(Size::S64, loc_a, Location::GPR(GPR::RAX)); + .emit_mov(Size::S64, loc_a, Location::GPR(GPR::RAX))?; self.assembler - .emit_xor(Size::S64, Location::GPR(GPR::RDX), Location::GPR(GPR::RDX)); + .emit_xor(Size::S64, Location::GPR(GPR::RDX), Location::GPR(GPR::RDX))?; let offset = self.emit_relaxed_xdiv( AssemblerX64::emit_div, Size::S64, loc_b, integer_division_by_zero, - ); + )?; self.assembler - .emit_mov(Size::S64, Location::GPR(GPR::RAX), ret); - offset + .emit_mov(Size::S64, Location::GPR(GPR::RAX), ret)?; + Ok(offset) } fn emit_binop_sdiv64( &mut self, @@ -4029,20 +4516,20 @@ impl Machine for MachineX86_64 { ret: Location, integer_division_by_zero: Label, _integer_overflow: Label, - ) -> usize { + ) -> Result { // We assume that RAX and RDX are temporary registers here. self.assembler - .emit_mov(Size::S64, loc_a, Location::GPR(GPR::RAX)); - self.assembler.emit_cqo(); + .emit_mov(Size::S64, loc_a, Location::GPR(GPR::RAX))?; + self.assembler.emit_cqo()?; let offset = self.emit_relaxed_xdiv( AssemblerX64::emit_idiv, Size::S64, loc_b, integer_division_by_zero, - ); + )?; self.assembler - .emit_mov(Size::S64, Location::GPR(GPR::RAX), ret); - offset + .emit_mov(Size::S64, Location::GPR(GPR::RAX), ret)?; + Ok(offset) } fn emit_binop_urem64( &mut self, @@ -4051,21 +4538,21 @@ impl Machine for MachineX86_64 { ret: Location, integer_division_by_zero: Label, _integer_overflow: Label, - ) -> usize { + ) -> Result { // We assume that RAX and RDX are temporary registers here. 
self.assembler - .emit_mov(Size::S64, loc_a, Location::GPR(GPR::RAX)); + .emit_mov(Size::S64, loc_a, Location::GPR(GPR::RAX))?; self.assembler - .emit_xor(Size::S64, Location::GPR(GPR::RDX), Location::GPR(GPR::RDX)); + .emit_xor(Size::S64, Location::GPR(GPR::RDX), Location::GPR(GPR::RDX))?; let offset = self.emit_relaxed_xdiv( AssemblerX64::emit_div, Size::S64, loc_b, integer_division_by_zero, - ); + )?; self.assembler - .emit_mov(Size::S64, Location::GPR(GPR::RDX), ret); - offset + .emit_mov(Size::S64, Location::GPR(GPR::RDX), ret)?; + Ok(offset) } fn emit_binop_srem64( &mut self, @@ -4074,110 +4561,179 @@ impl Machine for MachineX86_64 { ret: Location, integer_division_by_zero: Label, _integer_overflow: Label, - ) -> usize { + ) -> Result { // We assume that RAX and RDX are temporary registers here. let normal_path = self.assembler.get_label(); let end = self.assembler.get_label(); - self.emit_relaxed_cmp(Size::S64, Location::Imm64(0x8000000000000000u64), loc_a); - self.assembler.emit_jmp(Condition::NotEqual, normal_path); - self.emit_relaxed_cmp(Size::S64, Location::Imm64(0xffffffffffffffffu64), loc_b); - self.assembler.emit_jmp(Condition::NotEqual, normal_path); - self.move_location(Size::S64, Location::Imm64(0), ret); - self.assembler.emit_jmp(Condition::None, end); + self.emit_relaxed_cmp(Size::S64, Location::Imm64(0x8000000000000000u64), loc_a)?; + self.assembler.emit_jmp(Condition::NotEqual, normal_path)?; + self.emit_relaxed_cmp(Size::S64, Location::Imm64(0xffffffffffffffffu64), loc_b)?; + self.assembler.emit_jmp(Condition::NotEqual, normal_path)?; + self.move_location(Size::S64, Location::Imm64(0), ret)?; + self.assembler.emit_jmp(Condition::None, end)?; - self.emit_label(normal_path); + self.emit_label(normal_path)?; self.assembler - .emit_mov(Size::S64, loc_a, Location::GPR(GPR::RAX)); - self.assembler.emit_cqo(); + .emit_mov(Size::S64, loc_a, Location::GPR(GPR::RAX))?; + self.assembler.emit_cqo()?; let offset = self.emit_relaxed_xdiv( 
AssemblerX64::emit_idiv, Size::S64, loc_b, integer_division_by_zero, - ); + )?; self.assembler - .emit_mov(Size::S64, Location::GPR(GPR::RDX), ret); + .emit_mov(Size::S64, Location::GPR(GPR::RDX), ret)?; - self.emit_label(end); - offset + self.emit_label(end)?; + Ok(offset) } - fn emit_binop_and64(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_binop_i64(AssemblerX64::emit_and, loc_a, loc_b, ret); + fn emit_binop_and64( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_binop_i64(AssemblerX64::emit_and, loc_a, loc_b, ret) } - fn emit_binop_or64(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_binop_i64(AssemblerX64::emit_or, loc_a, loc_b, ret); + fn emit_binop_or64( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_binop_i64(AssemblerX64::emit_or, loc_a, loc_b, ret) } - fn emit_binop_xor64(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_binop_i64(AssemblerX64::emit_xor, loc_a, loc_b, ret); + fn emit_binop_xor64( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_binop_i64(AssemblerX64::emit_xor, loc_a, loc_b, ret) } - fn i64_cmp_ge_s(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::GreaterEqual, loc_a, loc_b, ret); + fn i64_cmp_ge_s( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::GreaterEqual, loc_a, loc_b, ret) } - fn i64_cmp_gt_s(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::Greater, loc_a, loc_b, ret); + fn i64_cmp_gt_s( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::Greater, loc_a, loc_b, ret) 
} - fn i64_cmp_le_s(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::LessEqual, loc_a, loc_b, ret); + fn i64_cmp_le_s( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::LessEqual, loc_a, loc_b, ret) } - fn i64_cmp_lt_s(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::Less, loc_a, loc_b, ret); + fn i64_cmp_lt_s( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::Less, loc_a, loc_b, ret) } - fn i64_cmp_ge_u(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::AboveEqual, loc_a, loc_b, ret); + fn i64_cmp_ge_u( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::AboveEqual, loc_a, loc_b, ret) } - fn i64_cmp_gt_u(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::Above, loc_a, loc_b, ret); + fn i64_cmp_gt_u( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::Above, loc_a, loc_b, ret) } - fn i64_cmp_le_u(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::BelowEqual, loc_a, loc_b, ret); + fn i64_cmp_le_u( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::BelowEqual, loc_a, loc_b, ret) } - fn i64_cmp_lt_u(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::Below, loc_a, loc_b, ret); + fn i64_cmp_lt_u( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), 
CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::Below, loc_a, loc_b, ret) } - fn i64_cmp_ne(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::NotEqual, loc_a, loc_b, ret); + fn i64_cmp_ne( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::NotEqual, loc_a, loc_b, ret) } - fn i64_cmp_eq(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_cmpop_i64_dynamic_b(Condition::Equal, loc_a, loc_b, ret); + fn i64_cmp_eq( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_cmpop_i64_dynamic_b(Condition::Equal, loc_a, loc_b, ret) } - fn i64_clz(&mut self, loc: Location, ret: Location) { + fn i64_clz(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { let src = match loc { Location::Imm64(_) | Location::Imm32(_) | Location::Memory(_, _) => { - let tmp = self.acquire_temp_gpr().unwrap(); - self.move_location(Size::S64, loc, Location::GPR(tmp)); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location(Size::S64, loc, Location::GPR(tmp))?; tmp } Location::GPR(reg) => reg, _ => { - unreachable!(); + codegen_error!("singlepass i64_clz unreachable"); } }; let dst = match ret { - Location::Memory(_, _) => self.acquire_temp_gpr().unwrap(), + Location::Memory(_, _) => self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?, Location::GPR(reg) => reg, _ => { - unreachable!(); + codegen_error!("singlepass i64_clz unreachable"); } }; if self.assembler.arch_has_xzcnt() { self.assembler - .arch_emit_lzcnt(Size::S64, Location::GPR(src), Location::GPR(dst)); + .arch_emit_lzcnt(Size::S64, Location::GPR(src), Location::GPR(dst))?; } else { let zero_path = self.assembler.get_label(); let end = 
self.assembler.get_label(); - self.assembler.emit_test_gpr_64(src); - self.assembler.emit_jmp(Condition::Equal, zero_path); + self.assembler.emit_test_gpr_64(src)?; + self.assembler.emit_jmp(Condition::Equal, zero_path)?; self.assembler - .emit_bsr(Size::S64, Location::GPR(src), Location::GPR(dst)); + .emit_bsr(Size::S64, Location::GPR(src), Location::GPR(dst))?; self.assembler - .emit_xor(Size::S64, Location::Imm32(63), Location::GPR(dst)); - self.assembler.emit_jmp(Condition::None, end); - self.emit_label(zero_path); - self.move_location(Size::S64, Location::Imm32(64), Location::GPR(dst)); - self.emit_label(end); + .emit_xor(Size::S64, Location::Imm32(63), Location::GPR(dst))?; + self.assembler.emit_jmp(Condition::None, end)?; + self.emit_label(zero_path)?; + self.move_location(Size::S64, Location::Imm32(64), Location::GPR(dst))?; + self.emit_label(end)?; } match loc { Location::Imm64(_) | Location::Memory(_, _) => { @@ -4186,45 +4742,50 @@ impl Machine for MachineX86_64 { _ => {} }; if let Location::Memory(_, _) = ret { - self.move_location(Size::S64, Location::GPR(dst), ret); + self.move_location(Size::S64, Location::GPR(dst), ret)?; self.release_gpr(dst); }; + Ok(()) } - fn i64_ctz(&mut self, loc: Location, ret: Location) { + fn i64_ctz(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { let src = match loc { Location::Imm64(_) | Location::Imm32(_) | Location::Memory(_, _) => { - let tmp = self.acquire_temp_gpr().unwrap(); - self.move_location(Size::S64, loc, Location::GPR(tmp)); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location(Size::S64, loc, Location::GPR(tmp))?; tmp } Location::GPR(reg) => reg, _ => { - unreachable!(); + codegen_error!("singlepass i64_ctz unreachable"); } }; let dst = match ret { - Location::Memory(_, _) => self.acquire_temp_gpr().unwrap(), + Location::Memory(_, _) => self.acquire_temp_gpr().ok_or(CodegenError { + message: 
"singlepass cannot acquire temp gpr".to_string(), + })?, Location::GPR(reg) => reg, _ => { - unreachable!(); + codegen_error!("singlepass i64_ctz unreachable"); } }; if self.assembler.arch_has_xzcnt() { self.assembler - .arch_emit_tzcnt(Size::S64, Location::GPR(src), Location::GPR(dst)); + .arch_emit_tzcnt(Size::S64, Location::GPR(src), Location::GPR(dst))?; } else { let zero_path = self.assembler.get_label(); let end = self.assembler.get_label(); - self.assembler.emit_test_gpr_64(src); - self.assembler.emit_jmp(Condition::Equal, zero_path); + self.assembler.emit_test_gpr_64(src)?; + self.assembler.emit_jmp(Condition::Equal, zero_path)?; self.assembler - .emit_bsf(Size::S64, Location::GPR(src), Location::GPR(dst)); - self.assembler.emit_jmp(Condition::None, end); - self.emit_label(zero_path); - self.move_location(Size::S64, Location::Imm64(64), Location::GPR(dst)); - self.emit_label(end); + .emit_bsf(Size::S64, Location::GPR(src), Location::GPR(dst))?; + self.assembler.emit_jmp(Condition::None, end)?; + self.emit_label(zero_path)?; + self.move_location(Size::S64, Location::Imm64(64), Location::GPR(dst))?; + self.emit_label(end)?; } match loc { @@ -4234,60 +4795,93 @@ impl Machine for MachineX86_64 { _ => {} }; if let Location::Memory(_, _) = ret { - self.move_location(Size::S64, Location::GPR(dst), ret); + self.move_location(Size::S64, Location::GPR(dst), ret)?; self.release_gpr(dst); }; + Ok(()) } - fn i64_popcnt(&mut self, loc: Location, ret: Location) { + fn i64_popcnt(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { match loc { Location::Imm64(_) | Location::Imm32(_) => { - let tmp = self.acquire_temp_gpr().unwrap(); - self.move_location(Size::S64, loc, Location::GPR(tmp)); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location(Size::S64, loc, Location::GPR(tmp))?; if let Location::Memory(_, _) = ret { - let out_tmp = 
self.acquire_temp_gpr().unwrap(); + let out_tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler.emit_popcnt( Size::S64, Location::GPR(tmp), Location::GPR(out_tmp), - ); - self.move_location(Size::S64, Location::GPR(out_tmp), ret); + )?; + self.move_location(Size::S64, Location::GPR(out_tmp), ret)?; self.release_gpr(out_tmp); } else { self.assembler - .emit_popcnt(Size::S64, Location::GPR(tmp), ret); + .emit_popcnt(Size::S64, Location::GPR(tmp), ret)?; } self.release_gpr(tmp); } Location::Memory(_, _) | Location::GPR(_) => { if let Location::Memory(_, _) = ret { - let out_tmp = self.acquire_temp_gpr().unwrap(); + let out_tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_popcnt(Size::S64, loc, Location::GPR(out_tmp)); - self.move_location(Size::S64, Location::GPR(out_tmp), ret); + .emit_popcnt(Size::S64, loc, Location::GPR(out_tmp))?; + self.move_location(Size::S64, Location::GPR(out_tmp), ret)?; self.release_gpr(out_tmp); } else { - self.assembler.emit_popcnt(Size::S64, loc, ret); + self.assembler.emit_popcnt(Size::S64, loc, ret)?; } } _ => { - unreachable!(); + codegen_error!("singlepass i64_popcnt unreachable"); } } + Ok(()) } - fn i64_shl(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_shift_i64(AssemblerX64::emit_shl, loc_a, loc_b, ret); + fn i64_shl( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_shift_i64(AssemblerX64::emit_shl, loc_a, loc_b, ret) } - fn i64_shr(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_shift_i64(AssemblerX64::emit_shr, loc_a, loc_b, ret); + fn i64_shr( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_shift_i64(AssemblerX64::emit_shr, loc_a, loc_b, ret) } - fn i64_sar(&mut self, loc_a: 
Location, loc_b: Location, ret: Location) { - self.emit_shift_i64(AssemblerX64::emit_sar, loc_a, loc_b, ret); + fn i64_sar( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_shift_i64(AssemblerX64::emit_sar, loc_a, loc_b, ret) } - fn i64_rol(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_shift_i64(AssemblerX64::emit_rol, loc_a, loc_b, ret); + fn i64_rol( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_shift_i64(AssemblerX64::emit_rol, loc_a, loc_b, ret) } - fn i64_ror(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_shift_i64(AssemblerX64::emit_ror, loc_a, loc_b, ret); + fn i64_ror( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_shift_i64(AssemblerX64::emit_ror, loc_a, loc_b, ret) } fn i64_load( &mut self, @@ -4298,7 +4892,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -4314,9 +4908,9 @@ impl Machine for MachineX86_64 { Size::S64, Location::Memory(addr, 0), ret, - ); + ) }, - ); + ) } fn i64_load_8u( &mut self, @@ -4327,7 +4921,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -4344,9 +4938,9 @@ impl Machine for MachineX86_64 { Location::Memory(addr, 0), Size::S64, ret, - ); + ) }, - ); + ) } fn i64_load_8s( &mut self, @@ -4357,7 +4951,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -4374,9 +4968,9 @@ impl Machine for MachineX86_64 { Location::Memory(addr, 0), Size::S64, ret, - ); + ) }, - ); + ) } fn i64_load_16u( &mut self, @@ -4387,7 +4981,7 @@ impl 
Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -4404,9 +4998,9 @@ impl Machine for MachineX86_64 { Location::Memory(addr, 0), Size::S64, ret, - ); + ) }, - ); + ) } fn i64_load_16s( &mut self, @@ -4417,7 +5011,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -4434,9 +5028,9 @@ impl Machine for MachineX86_64 { Location::Memory(addr, 0), Size::S64, ret, - ); + ) }, - ); + ) } fn i64_load_32u( &mut self, @@ -4447,7 +5041,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -4465,10 +5059,10 @@ impl Machine for MachineX86_64 { Size::S32, Location::Imm32(0), Location::Memory(base, offset + 4), - ); // clear upper bits + )?; // clear upper bits } _ => { - unreachable!(); + codegen_error!("singlepass i64_load_32u unreacahble"); } } this.emit_relaxed_binop( @@ -4476,9 +5070,9 @@ impl Machine for MachineX86_64 { Size::S32, Location::Memory(addr, 0), ret, - ); + ) }, - ); + ) } fn i64_load_32s( &mut self, @@ -4489,7 +5083,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -4506,9 +5100,9 @@ impl Machine for MachineX86_64 { Location::Memory(addr, 0), Size::S64, ret, - ); + ) }, - ); + ) } fn i64_atomic_load( &mut self, @@ -4519,7 +5113,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -4529,10 +5123,8 @@ impl Machine for MachineX86_64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_mov(Size::S64, Location::Memory(addr, 0), ret); - }, - ); + |this, 
addr| this.emit_relaxed_mov(Size::S64, Location::Memory(addr, 0), ret), + ) } fn i64_atomic_load_8u( &mut self, @@ -4543,7 +5135,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -4559,9 +5151,9 @@ impl Machine for MachineX86_64 { Location::Memory(addr, 0), Size::S64, ret, - ); + ) }, - ); + ) } fn i64_atomic_load_16u( &mut self, @@ -4572,7 +5164,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -4588,9 +5180,9 @@ impl Machine for MachineX86_64 { Location::Memory(addr, 0), Size::S64, ret, - ); + ) }, - ); + ) } fn i64_atomic_load_32u( &mut self, @@ -4601,7 +5193,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -4619,10 +5211,10 @@ impl Machine for MachineX86_64 { Size::S32, Location::Imm32(0), Location::Memory(base, offset + 4), - ); // clear upper bits + )?; // clear upper bits } _ => { - unreachable!(); + codegen_error!("singlepass i64_atomic_load_32u unreachable"); } } this.emit_relaxed_zero_extension( @@ -4630,9 +5222,9 @@ impl Machine for MachineX86_64 { Location::Memory(addr, 0), Size::S64, ret, - ); + ) }, - ); + ) } fn i64_save( &mut self, @@ -4643,7 +5235,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -4659,9 +5251,9 @@ impl Machine for MachineX86_64 { Size::S64, target_value, Location::Memory(addr, 0), - ); + ) }, - ); + ) } fn i64_save_8( &mut self, @@ -4672,7 +5264,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -4688,9 +5280,9 @@ 
impl Machine for MachineX86_64 { Size::S8, target_value, Location::Memory(addr, 0), - ); + ) }, - ); + ) } fn i64_save_16( &mut self, @@ -4701,7 +5293,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -4717,9 +5309,9 @@ impl Machine for MachineX86_64 { Size::S16, target_value, Location::Memory(addr, 0), - ); + ) }, - ); + ) } fn i64_save_32( &mut self, @@ -4730,7 +5322,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -4746,9 +5338,9 @@ impl Machine for MachineX86_64 { Size::S32, target_value, Location::Memory(addr, 0), - ); + ) }, - ); + ) } fn i64_atomic_save( &mut self, @@ -4759,7 +5351,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -4769,10 +5361,8 @@ impl Machine for MachineX86_64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_atomic_xchg(Size::S64, value, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_atomic_xchg(Size::S64, value, Location::Memory(addr, 0)), + ) } fn i64_atomic_save_8( &mut self, @@ -4783,7 +5373,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -4793,10 +5383,8 @@ impl Machine for MachineX86_64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_atomic_xchg(Size::S8, value, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_atomic_xchg(Size::S8, value, Location::Memory(addr, 0)), + ) } fn i64_atomic_save_16( &mut self, @@ -4807,7 +5395,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, 
heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -4817,10 +5405,8 @@ impl Machine for MachineX86_64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_atomic_xchg(Size::S16, value, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_atomic_xchg(Size::S16, value, Location::Memory(addr, 0)), + ) } fn i64_atomic_save_32( &mut self, @@ -4831,7 +5417,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( target_addr, memarg, @@ -4841,10 +5427,8 @@ impl Machine for MachineX86_64 { imported_memories, offset, heap_access_oob, - |this, addr| { - this.emit_relaxed_atomic_xchg(Size::S32, value, Location::Memory(addr, 0)); - }, - ); + |this, addr| this.emit_relaxed_atomic_xchg(Size::S32, value, Location::Memory(addr, 0)), + ) } // i64 atomic Add with i64 fn i64_atomic_add( @@ -4857,9 +5441,11 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); - self.move_location(Size::S64, loc, Location::GPR(value)); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location(Size::S64, loc, Location::GPR(value))?; self.memory_op( target, memarg, @@ -4874,11 +5460,12 @@ impl Machine for MachineX86_64 { Size::S32, Location::GPR(value), Location::Memory(addr, 0), - ); + ) }, - ); - self.move_location(Size::S64, Location::GPR(value), ret); + )?; + self.move_location(Size::S64, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i64 atomic Add with u8 fn i64_atomic_add_8u( @@ -4891,9 +5478,11 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); - 
self.move_location_extend(Size::S8, false, loc, Size::S64, Location::GPR(value)); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location_extend(Size::S8, false, loc, Size::S64, Location::GPR(value))?; self.memory_op( target, memarg, @@ -4908,11 +5497,12 @@ impl Machine for MachineX86_64 { Size::S8, Location::GPR(value), Location::Memory(addr, 0), - ); + ) }, - ); - self.move_location(Size::S64, Location::GPR(value), ret); + )?; + self.move_location(Size::S64, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i64 atomic Add with u16 fn i64_atomic_add_16u( @@ -4925,9 +5515,11 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); - self.move_location_extend(Size::S16, false, loc, Size::S64, Location::GPR(value)); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location_extend(Size::S16, false, loc, Size::S64, Location::GPR(value))?; self.memory_op( target, memarg, @@ -4942,11 +5534,12 @@ impl Machine for MachineX86_64 { Size::S16, Location::GPR(value), Location::Memory(addr, 0), - ); + ) }, - ); - self.move_location(Size::S64, Location::GPR(value), ret); + )?; + self.move_location(Size::S64, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i64 atomic Add with u32 fn i64_atomic_add_32u( @@ -4959,9 +5552,11 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); - self.move_location_extend(Size::S32, false, loc, Size::S64, Location::GPR(value)); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), 
+ })?; + self.move_location_extend(Size::S32, false, loc, Size::S64, Location::GPR(value))?; self.memory_op( target, memarg, @@ -4976,11 +5571,12 @@ impl Machine for MachineX86_64 { Size::S32, Location::GPR(value), Location::Memory(addr, 0), - ); + ) }, - ); - self.move_location(Size::S64, Location::GPR(value), ret); + )?; + self.move_location(Size::S64, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i64 atomic Sub with i64 fn i64_atomic_sub( @@ -4993,9 +5589,11 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); - self.location_neg(Size::S64, false, loc, Size::S64, Location::GPR(value)); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.location_neg(Size::S64, false, loc, Size::S64, Location::GPR(value))?; self.memory_op( target, memarg, @@ -5010,11 +5608,12 @@ impl Machine for MachineX86_64 { Size::S64, Location::GPR(value), Location::Memory(addr, 0), - ); + ) }, - ); - self.move_location(Size::S64, Location::GPR(value), ret); + )?; + self.move_location(Size::S64, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i64 atomic Sub with u8 fn i64_atomic_sub_8u( @@ -5027,9 +5626,11 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); - self.location_neg(Size::S8, false, loc, Size::S64, Location::GPR(value)); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.location_neg(Size::S8, false, loc, Size::S64, Location::GPR(value))?; self.memory_op( target, memarg, @@ -5044,11 +5645,12 @@ impl Machine for MachineX86_64 { Size::S8, Location::GPR(value), Location::Memory(addr, 0), - ); + ) }, - ); - 
self.move_location(Size::S64, Location::GPR(value), ret); + )?; + self.move_location(Size::S64, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i64 atomic Sub with u16 fn i64_atomic_sub_16u( @@ -5061,9 +5663,11 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); - self.location_neg(Size::S16, false, loc, Size::S64, Location::GPR(value)); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.location_neg(Size::S16, false, loc, Size::S64, Location::GPR(value))?; self.memory_op( target, memarg, @@ -5078,11 +5682,12 @@ impl Machine for MachineX86_64 { Size::S16, Location::GPR(value), Location::Memory(addr, 0), - ); + ) }, - ); - self.move_location(Size::S64, Location::GPR(value), ret); + )?; + self.move_location(Size::S64, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i64 atomic Sub with u32 fn i64_atomic_sub_32u( @@ -5095,9 +5700,11 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); - self.location_neg(Size::S32, false, loc, Size::S64, Location::GPR(value)); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.location_neg(Size::S32, false, loc, Size::S64, Location::GPR(value))?; self.memory_op( target, memarg, @@ -5112,11 +5719,12 @@ impl Machine for MachineX86_64 { Size::S32, Location::GPR(value), Location::Memory(addr, 0), - ); + ) }, - ); - self.move_location(Size::S64, Location::GPR(value), ret); + )?; + self.move_location(Size::S64, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i64 atomic And with i64 fn i64_atomic_and( @@ -5129,7 +5737,7 @@ impl Machine for MachineX86_64 { 
imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -5144,9 +5752,9 @@ impl Machine for MachineX86_64 { heap_access_oob, |this, src, dst| { this.assembler - .emit_and(Size::S64, Location::GPR(src), Location::GPR(dst)); + .emit_and(Size::S64, Location::GPR(src), Location::GPR(dst)) }, - ); + ) } // i64 atomic And with u8 fn i64_atomic_and_8u( @@ -5159,7 +5767,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -5174,9 +5782,9 @@ impl Machine for MachineX86_64 { heap_access_oob, |this, src, dst| { this.assembler - .emit_and(Size::S64, Location::GPR(src), Location::GPR(dst)); + .emit_and(Size::S64, Location::GPR(src), Location::GPR(dst)) }, - ); + ) } // i64 atomic And with u16 fn i64_atomic_and_16u( @@ -5189,7 +5797,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -5204,9 +5812,9 @@ impl Machine for MachineX86_64 { heap_access_oob, |this, src, dst| { this.assembler - .emit_and(Size::S64, Location::GPR(src), Location::GPR(dst)); + .emit_and(Size::S64, Location::GPR(src), Location::GPR(dst)) }, - ); + ) } // i64 atomic And with u32 fn i64_atomic_and_32u( @@ -5219,7 +5827,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -5234,9 +5842,9 @@ impl Machine for MachineX86_64 { heap_access_oob, |this, src, dst| { this.assembler - .emit_and(Size::S64, Location::GPR(src), Location::GPR(dst)); + .emit_and(Size::S64, Location::GPR(src), Location::GPR(dst)) }, - ); + ) } // i64 atomic Or with i64 fn i64_atomic_or( @@ -5249,7 +5857,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, 
offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -5263,9 +5871,9 @@ impl Machine for MachineX86_64 { offset, heap_access_oob, |this, src, dst| { - this.location_or(Size::S64, Location::GPR(src), Location::GPR(dst), false); + this.location_or(Size::S64, Location::GPR(src), Location::GPR(dst), false) }, - ); + ) } // i64 atomic Or with u8 fn i64_atomic_or_8u( @@ -5278,7 +5886,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -5292,9 +5900,9 @@ impl Machine for MachineX86_64 { offset, heap_access_oob, |this, src, dst| { - this.location_or(Size::S64, Location::GPR(src), Location::GPR(dst), false); + this.location_or(Size::S64, Location::GPR(src), Location::GPR(dst), false) }, - ); + ) } // i64 atomic Or with u16 fn i64_atomic_or_16u( @@ -5307,7 +5915,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -5321,9 +5929,9 @@ impl Machine for MachineX86_64 { offset, heap_access_oob, |this, src, dst| { - this.location_or(Size::S64, Location::GPR(src), Location::GPR(dst), false); + this.location_or(Size::S64, Location::GPR(src), Location::GPR(dst), false) }, - ); + ) } // i64 atomic Or with u32 fn i64_atomic_or_32u( @@ -5336,7 +5944,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -5350,9 +5958,9 @@ impl Machine for MachineX86_64 { offset, heap_access_oob, |this, src, dst| { - this.location_or(Size::S64, Location::GPR(src), Location::GPR(dst), false); + this.location_or(Size::S64, Location::GPR(src), Location::GPR(dst), false) }, - ); + ) } // i64 atomic xor with i64 fn i64_atomic_xor( @@ -5365,7 +5973,7 @@ impl 
Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -5379,9 +5987,9 @@ impl Machine for MachineX86_64 { offset, heap_access_oob, |this, src, dst| { - this.location_xor(Size::S64, Location::GPR(src), Location::GPR(dst), false); + this.location_xor(Size::S64, Location::GPR(src), Location::GPR(dst), false) }, - ); + ) } // i64 atomic xor with u8 fn i64_atomic_xor_8u( @@ -5394,7 +6002,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -5408,9 +6016,9 @@ impl Machine for MachineX86_64 { offset, heap_access_oob, |this, src, dst| { - this.location_xor(Size::S64, Location::GPR(src), Location::GPR(dst), false); + this.location_xor(Size::S64, Location::GPR(src), Location::GPR(dst), false) }, - ); + ) } // i64 atomic xor with u16 fn i64_atomic_xor_16u( @@ -5423,7 +6031,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -5437,9 +6045,9 @@ impl Machine for MachineX86_64 { offset, heap_access_oob, |this, src, dst| { - this.location_xor(Size::S64, Location::GPR(src), Location::GPR(dst), false); + this.location_xor(Size::S64, Location::GPR(src), Location::GPR(dst), false) }, - ); + ) } // i64 atomic xor with u32 fn i64_atomic_xor_32u( @@ -5452,7 +6060,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.emit_compare_and_swap( loc, target, @@ -5466,9 +6074,9 @@ impl Machine for MachineX86_64 { offset, heap_access_oob, |this, src, dst| { - this.location_xor(Size::S64, Location::GPR(src), Location::GPR(dst), false); + this.location_xor(Size::S64, Location::GPR(src), Location::GPR(dst), false) }, - ); + ) } // i64 
atomic Exchange with i64 fn i64_atomic_xchg( @@ -5481,9 +6089,11 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); - self.move_location(Size::S64, loc, Location::GPR(value)); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location(Size::S64, loc, Location::GPR(value))?; self.memory_op( target, memarg, @@ -5494,15 +6104,13 @@ impl Machine for MachineX86_64 { offset, heap_access_oob, |this, addr| { - this.assembler.emit_xchg( - Size::S64, - Location::GPR(value), - Location::Memory(addr, 0), - ); + this.assembler + .emit_xchg(Size::S64, Location::GPR(value), Location::Memory(addr, 0)) }, - ); - self.move_location(Size::S64, Location::GPR(value), ret); + )?; + self.move_location(Size::S64, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i64 atomic Exchange with u8 fn i64_atomic_xchg_8u( @@ -5515,10 +6123,12 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_movzx(Size::S8, loc, Size::S64, Location::GPR(value)); + .emit_movzx(Size::S8, loc, Size::S64, Location::GPR(value))?; self.memory_op( target, memarg, @@ -5530,11 +6140,12 @@ impl Machine for MachineX86_64 { heap_access_oob, |this, addr| { this.assembler - .emit_xchg(Size::S8, Location::GPR(value), Location::Memory(addr, 0)); + .emit_xchg(Size::S8, Location::GPR(value), Location::Memory(addr, 0)) }, - ); - self.move_location(Size::S64, Location::GPR(value), ret); + )?; + self.move_location(Size::S64, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i64 atomic Exchange with 
u16 fn i64_atomic_xchg_16u( @@ -5547,10 +6158,12 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_movzx(Size::S16, loc, Size::S64, Location::GPR(value)); + .emit_movzx(Size::S16, loc, Size::S64, Location::GPR(value))?; self.memory_op( target, memarg, @@ -5561,15 +6174,13 @@ impl Machine for MachineX86_64 { offset, heap_access_oob, |this, addr| { - this.assembler.emit_xchg( - Size::S16, - Location::GPR(value), - Location::Memory(addr, 0), - ); + this.assembler + .emit_xchg(Size::S16, Location::GPR(value), Location::Memory(addr, 0)) }, - ); - self.move_location(Size::S64, Location::GPR(value), ret); + )?; + self.move_location(Size::S64, Location::GPR(value), ret)?; self.release_gpr(value); + Ok(()) } // i64 atomic Exchange with u32 fn i64_atomic_xchg_32u( @@ -5582,10 +6193,12 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { - let value = self.acquire_temp_gpr().unwrap(); + ) -> Result<(), CodegenError> { + let value = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.assembler - .emit_movzx(Size::S32, loc, Size::S64, Location::GPR(value)); + .emit_movzx(Size::S32, loc, Size::S64, Location::GPR(value))?; self.memory_op( target, memarg, @@ -5596,15 +6209,13 @@ impl Machine for MachineX86_64 { offset, heap_access_oob, |this, addr| { - this.assembler.emit_xchg( - Size::S32, - Location::GPR(value), - Location::Memory(addr, 0), - ); + this.assembler + .emit_xchg(Size::S32, Location::GPR(value), Location::Memory(addr, 0)) }, - ); - self.move_location(Size::S64, Location::GPR(value), ret); + )?; + self.move_location(Size::S64, Location::GPR(value), ret)?; 
self.release_gpr(value); + Ok(()) } // i64 atomic Exchange with i64 fn i64_atomic_cmpxchg( @@ -5618,7 +6229,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { let compare = self.reserve_unused_temp_gpr(GPR::RAX); let value = if cmp == Location::GPR(GPR::R14) { if new == Location::GPR(GPR::R13) { @@ -5629,11 +6240,11 @@ impl Machine for MachineX86_64 { } else { GPR::R14 }; - self.assembler.emit_push(Size::S64, Location::GPR(value)); + self.assembler.emit_push(Size::S64, Location::GPR(value))?; self.assembler - .emit_mov(Size::S64, cmp, Location::GPR(compare)); + .emit_mov(Size::S64, cmp, Location::GPR(compare))?; self.assembler - .emit_mov(Size::S64, new, Location::GPR(value)); + .emit_mov(Size::S64, new, Location::GPR(value))?; self.memory_op( target, @@ -5649,13 +6260,14 @@ impl Machine for MachineX86_64 { Size::S64, Location::GPR(value), Location::Memory(addr, 0), - ); + )?; this.assembler - .emit_mov(Size::S64, Location::GPR(compare), ret); + .emit_mov(Size::S64, Location::GPR(compare), ret) }, - ); - self.assembler.emit_pop(Size::S64, Location::GPR(value)); + )?; + self.assembler.emit_pop(Size::S64, Location::GPR(value))?; self.release_gpr(compare); + Ok(()) } // i64 atomic Exchange with u8 fn i64_atomic_cmpxchg_8u( @@ -5669,7 +6281,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { let compare = self.reserve_unused_temp_gpr(GPR::RAX); let value = if cmp == Location::GPR(GPR::R14) { if new == Location::GPR(GPR::R13) { @@ -5680,11 +6292,11 @@ impl Machine for MachineX86_64 { } else { GPR::R14 }; - self.assembler.emit_push(Size::S64, Location::GPR(value)); + self.assembler.emit_push(Size::S64, Location::GPR(value))?; self.assembler - .emit_mov(Size::S64, cmp, Location::GPR(compare)); + .emit_mov(Size::S64, cmp, Location::GPR(compare))?; self.assembler - .emit_mov(Size::S64, new, 
Location::GPR(value)); + .emit_mov(Size::S64, new, Location::GPR(value))?; self.memory_op( target, @@ -5700,13 +6312,14 @@ impl Machine for MachineX86_64 { Size::S8, Location::GPR(value), Location::Memory(addr, 0), - ); + )?; this.assembler - .emit_movzx(Size::S8, Location::GPR(compare), Size::S64, ret); + .emit_movzx(Size::S8, Location::GPR(compare), Size::S64, ret) }, - ); - self.assembler.emit_pop(Size::S64, Location::GPR(value)); + )?; + self.assembler.emit_pop(Size::S64, Location::GPR(value))?; self.release_gpr(compare); + Ok(()) } // i64 atomic Exchange with u16 fn i64_atomic_cmpxchg_16u( @@ -5720,7 +6333,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { let compare = self.reserve_unused_temp_gpr(GPR::RAX); let value = if cmp == Location::GPR(GPR::R14) { if new == Location::GPR(GPR::R13) { @@ -5731,11 +6344,11 @@ impl Machine for MachineX86_64 { } else { GPR::R14 }; - self.assembler.emit_push(Size::S64, Location::GPR(value)); + self.assembler.emit_push(Size::S64, Location::GPR(value))?; self.assembler - .emit_mov(Size::S64, cmp, Location::GPR(compare)); + .emit_mov(Size::S64, cmp, Location::GPR(compare))?; self.assembler - .emit_mov(Size::S64, new, Location::GPR(value)); + .emit_mov(Size::S64, new, Location::GPR(value))?; self.memory_op( target, @@ -5751,13 +6364,14 @@ impl Machine for MachineX86_64 { Size::S16, Location::GPR(value), Location::Memory(addr, 0), - ); + )?; this.assembler - .emit_movzx(Size::S16, Location::GPR(compare), Size::S64, ret); + .emit_movzx(Size::S16, Location::GPR(compare), Size::S64, ret) }, - ); - self.assembler.emit_pop(Size::S64, Location::GPR(value)); + )?; + self.assembler.emit_pop(Size::S64, Location::GPR(value))?; self.release_gpr(compare); + Ok(()) } // i64 atomic Exchange with u32 fn i64_atomic_cmpxchg_32u( @@ -5771,7 +6385,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> 
Result<(), CodegenError> { let compare = self.reserve_unused_temp_gpr(GPR::RAX); let value = if cmp == Location::GPR(GPR::R14) { if new == Location::GPR(GPR::R13) { @@ -5782,11 +6396,11 @@ impl Machine for MachineX86_64 { } else { GPR::R14 }; - self.assembler.emit_push(Size::S64, Location::GPR(value)); + self.assembler.emit_push(Size::S64, Location::GPR(value))?; self.assembler - .emit_mov(Size::S64, cmp, Location::GPR(compare)); + .emit_mov(Size::S64, cmp, Location::GPR(compare))?; self.assembler - .emit_mov(Size::S64, new, Location::GPR(value)); + .emit_mov(Size::S64, new, Location::GPR(value))?; self.memory_op( target, @@ -5802,13 +6416,14 @@ impl Machine for MachineX86_64 { Size::S16, Location::GPR(value), Location::Memory(addr, 0), - ); + )?; this.assembler - .emit_mov(Size::S32, Location::GPR(compare), ret); + .emit_mov(Size::S32, Location::GPR(compare), ret) }, - ); - self.assembler.emit_pop(Size::S64, Location::GPR(value)); + )?; + self.assembler.emit_pop(Size::S64, Location::GPR(value))?; self.release_gpr(compare); + Ok(()) } fn f32_load( @@ -5820,7 +6435,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -5836,9 +6451,9 @@ impl Machine for MachineX86_64 { Size::S32, Location::Memory(addr, 0), ret, - ); + ) }, - ); + ) } fn f32_save( &mut self, @@ -5850,7 +6465,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { let canonicalize = canonicalize && self.arch_supports_canonicalize_nan(); self.memory_op( target_addr, @@ -5868,12 +6483,12 @@ impl Machine for MachineX86_64 { Size::S32, target_value, Location::Memory(addr, 0), - ); + ) } else { - this.canonicalize_nan(Size::S32, target_value, Location::Memory(addr, 0)); + this.canonicalize_nan(Size::S32, target_value, Location::Memory(addr, 0)) } }, - ); + ) } fn f64_load( &mut self, @@ -5884,7 +6499,7 @@ 
impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { self.memory_op( addr, memarg, @@ -5900,9 +6515,9 @@ impl Machine for MachineX86_64 { Size::S64, Location::Memory(addr, 0), ret, - ); + ) }, - ); + ) } fn f64_save( &mut self, @@ -5914,7 +6529,7 @@ impl Machine for MachineX86_64 { imported_memories: bool, offset: i32, heap_access_oob: Label, - ) { + ) -> Result<(), CodegenError> { let canonicalize = canonicalize && self.arch_supports_canonicalize_nan(); self.memory_op( target_addr, @@ -5932,192 +6547,217 @@ impl Machine for MachineX86_64 { Size::S64, target_value, Location::Memory(addr, 0), - ); + ) } else { - this.canonicalize_nan(Size::S64, target_value, Location::Memory(addr, 0)); + this.canonicalize_nan(Size::S64, target_value, Location::Memory(addr, 0)) } }, - ); + ) } - fn convert_f64_i64(&mut self, loc: Location, signed: bool, ret: Location) { + fn convert_f64_i64( + &mut self, + loc: Location, + signed: bool, + ret: Location, + ) -> Result<(), CodegenError> { + let tmp_out = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmp_in = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; if self.assembler.arch_has_fconverti() { - let tmp_out = self.acquire_temp_simd().unwrap(); - let tmp_in = self.acquire_temp_gpr().unwrap(); - self.emit_relaxed_mov(Size::S64, loc, Location::GPR(tmp_in)); + self.emit_relaxed_mov(Size::S64, loc, Location::GPR(tmp_in))?; if signed { - self.assembler.arch_emit_f64_convert_si64(tmp_in, tmp_out); + self.assembler.arch_emit_f64_convert_si64(tmp_in, tmp_out)?; } else { - self.assembler.arch_emit_f64_convert_ui64(tmp_in, tmp_out); + self.assembler.arch_emit_f64_convert_ui64(tmp_in, tmp_out)?; } - self.emit_relaxed_mov(Size::S64, Location::SIMD(tmp_out), ret); - self.release_gpr(tmp_in); - self.release_simd(tmp_out); + 
self.emit_relaxed_mov(Size::S64, Location::SIMD(tmp_out), ret)?; + } else if signed { + self.assembler + .emit_mov(Size::S64, loc, Location::GPR(tmp_in))?; + self.assembler + .emit_vcvtsi2sd_64(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out)?; + self.move_location(Size::S64, Location::SIMD(tmp_out), ret)?; } else { - let tmp_out = self.acquire_temp_simd().unwrap(); - let tmp_in = self.acquire_temp_gpr().unwrap(); - if signed { - self.assembler - .emit_mov(Size::S64, loc, Location::GPR(tmp_in)); - self.assembler - .emit_vcvtsi2sd_64(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out); - self.move_location(Size::S64, Location::SIMD(tmp_out), ret); - } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; - let do_convert = self.assembler.get_label(); - let end_convert = self.assembler.get_label(); - - self.assembler - .emit_mov(Size::S64, loc, Location::GPR(tmp_in)); - self.assembler.emit_test_gpr_64(tmp_in); - self.assembler.emit_jmp(Condition::Signed, do_convert); - self.assembler - .emit_vcvtsi2sd_64(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out); - self.assembler.emit_jmp(Condition::None, end_convert); - self.emit_label(do_convert); - self.move_location(Size::S64, Location::GPR(tmp_in), Location::GPR(tmp)); - self.assembler - .emit_and(Size::S64, Location::Imm32(1), Location::GPR(tmp)); - self.assembler - .emit_shr(Size::S64, Location::Imm8(1), Location::GPR(tmp_in)); - self.assembler - .emit_or(Size::S64, Location::GPR(tmp), Location::GPR(tmp_in)); - self.assembler - .emit_vcvtsi2sd_64(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out); - self.assembler - .emit_vaddsd(tmp_out, XMMOrMemory::XMM(tmp_out), tmp_out); - self.emit_label(end_convert); - self.move_location(Size::S64, Location::SIMD(tmp_out), ret); + let do_convert = self.assembler.get_label(); + let end_convert = self.assembler.get_label(); - self.release_gpr(tmp); - } + self.assembler + 
.emit_mov(Size::S64, loc, Location::GPR(tmp_in))?; + self.assembler.emit_test_gpr_64(tmp_in)?; + self.assembler.emit_jmp(Condition::Signed, do_convert)?; + self.assembler + .emit_vcvtsi2sd_64(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out)?; + self.assembler.emit_jmp(Condition::None, end_convert)?; + self.emit_label(do_convert)?; + self.move_location(Size::S64, Location::GPR(tmp_in), Location::GPR(tmp))?; + self.assembler + .emit_and(Size::S64, Location::Imm32(1), Location::GPR(tmp))?; + self.assembler + .emit_shr(Size::S64, Location::Imm8(1), Location::GPR(tmp_in))?; + self.assembler + .emit_or(Size::S64, Location::GPR(tmp), Location::GPR(tmp_in))?; + self.assembler + .emit_vcvtsi2sd_64(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out)?; + self.assembler + .emit_vaddsd(tmp_out, XMMOrMemory::XMM(tmp_out), tmp_out)?; + self.emit_label(end_convert)?; + self.move_location(Size::S64, Location::SIMD(tmp_out), ret)?; - self.release_gpr(tmp_in); - self.release_simd(tmp_out); + self.release_gpr(tmp); } + self.release_gpr(tmp_in); + self.release_simd(tmp_out); + Ok(()) } - fn convert_f64_i32(&mut self, loc: Location, signed: bool, ret: Location) { + fn convert_f64_i32( + &mut self, + loc: Location, + signed: bool, + ret: Location, + ) -> Result<(), CodegenError> { + let tmp_out = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmp_in = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; if self.assembler.arch_has_fconverti() { - let tmp_out = self.acquire_temp_simd().unwrap(); - let tmp_in = self.acquire_temp_gpr().unwrap(); - self.emit_relaxed_mov(Size::S32, loc, Location::GPR(tmp_in)); + self.emit_relaxed_mov(Size::S32, loc, Location::GPR(tmp_in))?; if signed { - self.assembler.arch_emit_f64_convert_si32(tmp_in, tmp_out); + self.assembler.arch_emit_f64_convert_si32(tmp_in, tmp_out)?; } else { - self.assembler.arch_emit_f64_convert_ui32(tmp_in, 
tmp_out); + self.assembler.arch_emit_f64_convert_ui32(tmp_in, tmp_out)?; } - self.emit_relaxed_mov(Size::S64, Location::SIMD(tmp_out), ret); - self.release_gpr(tmp_in); - self.release_simd(tmp_out); + self.emit_relaxed_mov(Size::S64, Location::SIMD(tmp_out), ret)?; } else { - let tmp_out = self.acquire_temp_simd().unwrap(); - let tmp_in = self.acquire_temp_gpr().unwrap(); - self.assembler - .emit_mov(Size::S32, loc, Location::GPR(tmp_in)); + .emit_mov(Size::S32, loc, Location::GPR(tmp_in))?; if signed { self.assembler - .emit_vcvtsi2sd_32(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out); + .emit_vcvtsi2sd_32(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out)?; } else { self.assembler - .emit_vcvtsi2sd_64(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out); + .emit_vcvtsi2sd_64(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out)?; } - self.move_location(Size::S64, Location::SIMD(tmp_out), ret); - - self.release_gpr(tmp_in); - self.release_simd(tmp_out); + self.move_location(Size::S64, Location::SIMD(tmp_out), ret)?; } + self.release_gpr(tmp_in); + self.release_simd(tmp_out); + Ok(()) } - fn convert_f32_i64(&mut self, loc: Location, signed: bool, ret: Location) { + fn convert_f32_i64( + &mut self, + loc: Location, + signed: bool, + ret: Location, + ) -> Result<(), CodegenError> { + let tmp_out = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmp_in = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; if self.assembler.arch_has_fconverti() { - let tmp_out = self.acquire_temp_simd().unwrap(); - let tmp_in = self.acquire_temp_gpr().unwrap(); - self.emit_relaxed_mov(Size::S64, loc, Location::GPR(tmp_in)); + self.emit_relaxed_mov(Size::S64, loc, Location::GPR(tmp_in))?; if signed { - self.assembler.arch_emit_f32_convert_si64(tmp_in, tmp_out); + self.assembler.arch_emit_f32_convert_si64(tmp_in, tmp_out)?; } else { - self.assembler.arch_emit_f32_convert_ui64(tmp_in, 
tmp_out); + self.assembler.arch_emit_f32_convert_ui64(tmp_in, tmp_out)?; } - self.emit_relaxed_mov(Size::S32, Location::SIMD(tmp_out), ret); - self.release_gpr(tmp_in); - self.release_simd(tmp_out); + self.emit_relaxed_mov(Size::S32, Location::SIMD(tmp_out), ret)?; + } else if signed { + self.assembler + .emit_mov(Size::S64, loc, Location::GPR(tmp_in))?; + self.assembler + .emit_vcvtsi2ss_64(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out)?; + self.move_location(Size::S32, Location::SIMD(tmp_out), ret)?; } else { - let tmp_out = self.acquire_temp_simd().unwrap(); - let tmp_in = self.acquire_temp_gpr().unwrap(); - if signed { - self.assembler - .emit_mov(Size::S64, loc, Location::GPR(tmp_in)); - self.assembler - .emit_vcvtsi2ss_64(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out); - self.move_location(Size::S32, Location::SIMD(tmp_out), ret); - } else { - let tmp = self.acquire_temp_gpr().unwrap(); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; - let do_convert = self.assembler.get_label(); - let end_convert = self.assembler.get_label(); + let do_convert = self.assembler.get_label(); + let end_convert = self.assembler.get_label(); - self.assembler - .emit_mov(Size::S64, loc, Location::GPR(tmp_in)); - self.assembler.emit_test_gpr_64(tmp_in); - self.assembler.emit_jmp(Condition::Signed, do_convert); - self.assembler - .emit_vcvtsi2ss_64(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out); - self.assembler.emit_jmp(Condition::None, end_convert); - self.emit_label(do_convert); - self.move_location(Size::S64, Location::GPR(tmp_in), Location::GPR(tmp)); - self.assembler - .emit_and(Size::S64, Location::Imm32(1), Location::GPR(tmp)); - self.assembler - .emit_shr(Size::S64, Location::Imm8(1), Location::GPR(tmp_in)); - self.assembler - .emit_or(Size::S64, Location::GPR(tmp), Location::GPR(tmp_in)); - self.assembler - .emit_vcvtsi2ss_64(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out); - self.assembler - 
.emit_vaddss(tmp_out, XMMOrMemory::XMM(tmp_out), tmp_out); - self.emit_label(end_convert); - self.move_location(Size::S32, Location::SIMD(tmp_out), ret); + self.assembler + .emit_mov(Size::S64, loc, Location::GPR(tmp_in))?; + self.assembler.emit_test_gpr_64(tmp_in)?; + self.assembler.emit_jmp(Condition::Signed, do_convert)?; + self.assembler + .emit_vcvtsi2ss_64(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out)?; + self.assembler.emit_jmp(Condition::None, end_convert)?; + self.emit_label(do_convert)?; + self.move_location(Size::S64, Location::GPR(tmp_in), Location::GPR(tmp))?; + self.assembler + .emit_and(Size::S64, Location::Imm32(1), Location::GPR(tmp))?; + self.assembler + .emit_shr(Size::S64, Location::Imm8(1), Location::GPR(tmp_in))?; + self.assembler + .emit_or(Size::S64, Location::GPR(tmp), Location::GPR(tmp_in))?; + self.assembler + .emit_vcvtsi2ss_64(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out)?; + self.assembler + .emit_vaddss(tmp_out, XMMOrMemory::XMM(tmp_out), tmp_out)?; + self.emit_label(end_convert)?; + self.move_location(Size::S32, Location::SIMD(tmp_out), ret)?; - self.release_gpr(tmp); - } - self.release_gpr(tmp_in); - self.release_simd(tmp_out); + self.release_gpr(tmp); } + self.release_gpr(tmp_in); + self.release_simd(tmp_out); + Ok(()) } - fn convert_f32_i32(&mut self, loc: Location, signed: bool, ret: Location) { + fn convert_f32_i32( + &mut self, + loc: Location, + signed: bool, + ret: Location, + ) -> Result<(), CodegenError> { + let tmp_out = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmp_in = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; if self.assembler.arch_has_fconverti() { - let tmp_out = self.acquire_temp_simd().unwrap(); - let tmp_in = self.acquire_temp_gpr().unwrap(); - self.emit_relaxed_mov(Size::S32, loc, Location::GPR(tmp_in)); + self.emit_relaxed_mov(Size::S32, loc, Location::GPR(tmp_in))?; if 
signed { - self.assembler.arch_emit_f32_convert_si32(tmp_in, tmp_out); + self.assembler.arch_emit_f32_convert_si32(tmp_in, tmp_out)?; } else { - self.assembler.arch_emit_f32_convert_ui32(tmp_in, tmp_out); + self.assembler.arch_emit_f32_convert_ui32(tmp_in, tmp_out)?; } - self.emit_relaxed_mov(Size::S32, Location::SIMD(tmp_out), ret); - self.release_gpr(tmp_in); - self.release_simd(tmp_out); + self.emit_relaxed_mov(Size::S32, Location::SIMD(tmp_out), ret)?; } else { - let tmp_out = self.acquire_temp_simd().unwrap(); - let tmp_in = self.acquire_temp_gpr().unwrap(); - self.assembler - .emit_mov(Size::S32, loc, Location::GPR(tmp_in)); + .emit_mov(Size::S32, loc, Location::GPR(tmp_in))?; if signed { self.assembler - .emit_vcvtsi2ss_32(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out); + .emit_vcvtsi2ss_32(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out)?; } else { self.assembler - .emit_vcvtsi2ss_64(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out); + .emit_vcvtsi2ss_64(tmp_out, GPROrMemory::GPR(tmp_in), tmp_out)?; } - self.move_location(Size::S32, Location::SIMD(tmp_out), ret); - - self.release_gpr(tmp_in); - self.release_simd(tmp_out); + self.move_location(Size::S32, Location::SIMD(tmp_out), ret)?; } + self.release_gpr(tmp_in); + self.release_simd(tmp_out); + Ok(()) } - fn convert_i64_f64(&mut self, loc: Location, ret: Location, signed: bool, sat: bool) { + fn convert_i64_f64( + &mut self, + loc: Location, + ret: Location, + signed: bool, + sat: bool, + ) -> Result<(), CodegenError> { match (signed, sat) { (false, true) => self.convert_i64_f64_u_s(loc, ret), (false, false) => self.convert_i64_f64_u_u(loc, ret), @@ -6125,7 +6765,13 @@ impl Machine for MachineX86_64 { (true, false) => self.convert_i64_f64_s_u(loc, ret), } } - fn convert_i32_f64(&mut self, loc: Location, ret: Location, signed: bool, sat: bool) { + fn convert_i32_f64( + &mut self, + loc: Location, + ret: Location, + signed: bool, + sat: bool, + ) -> Result<(), CodegenError> { match (signed, sat) { (false, true) => 
self.convert_i32_f64_u_s(loc, ret), (false, false) => self.convert_i32_f64_u_u(loc, ret), @@ -6133,7 +6779,13 @@ impl Machine for MachineX86_64 { (true, false) => self.convert_i32_f64_s_u(loc, ret), } } - fn convert_i64_f32(&mut self, loc: Location, ret: Location, signed: bool, sat: bool) { + fn convert_i64_f32( + &mut self, + loc: Location, + ret: Location, + signed: bool, + sat: bool, + ) -> Result<(), CodegenError> { match (signed, sat) { (false, true) => self.convert_i64_f32_u_s(loc, ret), (false, false) => self.convert_i64_f32_u_u(loc, ret), @@ -6141,7 +6793,13 @@ impl Machine for MachineX86_64 { (true, false) => self.convert_i64_f32_s_u(loc, ret), } } - fn convert_i32_f32(&mut self, loc: Location, ret: Location, signed: bool, sat: bool) { + fn convert_i32_f32( + &mut self, + loc: Location, + ret: Location, + signed: bool, + sat: bool, + ) -> Result<(), CodegenError> { match (signed, sat) { (false, true) => self.convert_i32_f32_u_s(loc, ret), (false, false) => self.convert_i32_f32_u_u(loc, ret), @@ -6149,154 +6807,210 @@ impl Machine for MachineX86_64 { (true, false) => self.convert_i32_f32_s_u(loc, ret), } } - fn convert_f64_f32(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vcvtss2sd, loc, loc, ret); + fn convert_f64_f32(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vcvtss2sd, loc, loc, ret) } - fn convert_f32_f64(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vcvtsd2ss, loc, loc, ret); + fn convert_f32_f64(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vcvtsd2ss, loc, loc, ret) } - fn f64_neg(&mut self, loc: Location, ret: Location) { + fn f64_neg(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { if self.assembler.arch_has_fneg() { - let tmp = self.acquire_temp_simd().unwrap(); - self.emit_relaxed_mov(Size::S64, loc, 
Location::SIMD(tmp)); - self.assembler.arch_emit_f64_neg(tmp, tmp); - self.emit_relaxed_mov(Size::S64, Location::SIMD(tmp), ret); + let tmp = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + self.emit_relaxed_mov(Size::S64, loc, Location::SIMD(tmp))?; + self.assembler.arch_emit_f64_neg(tmp, tmp)?; + self.emit_relaxed_mov(Size::S64, Location::SIMD(tmp), ret)?; self.release_simd(tmp); } else { - let tmp = self.acquire_temp_gpr().unwrap(); - self.move_location(Size::S64, loc, Location::GPR(tmp)); - self.assembler.emit_btc_gpr_imm8_64(63, tmp); - self.move_location(Size::S64, Location::GPR(tmp), ret); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location(Size::S64, loc, Location::GPR(tmp))?; + self.assembler.emit_btc_gpr_imm8_64(63, tmp)?; + self.move_location(Size::S64, Location::GPR(tmp), ret)?; self.release_gpr(tmp); } + Ok(()) } - fn f64_abs(&mut self, loc: Location, ret: Location) { - let tmp = self.acquire_temp_gpr().unwrap(); - let c = self.acquire_temp_gpr().unwrap(); + fn f64_abs(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let c = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; - self.move_location(Size::S64, loc, Location::GPR(tmp)); + self.move_location(Size::S64, loc, Location::GPR(tmp))?; self.move_location( Size::S64, Location::Imm64(0x7fffffffffffffffu64), Location::GPR(c), - ); + )?; self.assembler - .emit_and(Size::S64, Location::GPR(c), Location::GPR(tmp)); - self.move_location(Size::S64, Location::GPR(tmp), ret); + .emit_and(Size::S64, Location::GPR(c), Location::GPR(tmp))?; + self.move_location(Size::S64, Location::GPR(tmp), ret)?; self.release_gpr(c); self.release_gpr(tmp); + 
Ok(()) } - fn emit_i64_copysign(&mut self, tmp1: GPR, tmp2: GPR) { - let c = self.acquire_temp_gpr().unwrap(); + fn emit_i64_copysign(&mut self, tmp1: GPR, tmp2: GPR) -> Result<(), CodegenError> { + let c = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; self.move_location( Size::S64, Location::Imm64(0x7fffffffffffffffu64), Location::GPR(c), - ); + )?; self.assembler - .emit_and(Size::S64, Location::GPR(c), Location::GPR(tmp1)); + .emit_and(Size::S64, Location::GPR(c), Location::GPR(tmp1))?; self.move_location( Size::S64, Location::Imm64(0x8000000000000000u64), Location::GPR(c), - ); + )?; self.assembler - .emit_and(Size::S64, Location::GPR(c), Location::GPR(tmp2)); + .emit_and(Size::S64, Location::GPR(c), Location::GPR(tmp2))?; self.assembler - .emit_or(Size::S64, Location::GPR(tmp2), Location::GPR(tmp1)); + .emit_or(Size::S64, Location::GPR(tmp2), Location::GPR(tmp1))?; self.release_gpr(c); + Ok(()) } - fn f64_sqrt(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vsqrtsd, loc, loc, ret); + fn f64_sqrt(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vsqrtsd, loc, loc, ret) } - fn f64_trunc(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vroundsd_trunc, loc, loc, ret); + fn f64_trunc(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vroundsd_trunc, loc, loc, ret) } - fn f64_ceil(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vroundsd_ceil, loc, loc, ret); + fn f64_ceil(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vroundsd_ceil, loc, loc, ret) } - fn f64_floor(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vroundsd_floor, loc, loc, ret); + fn f64_floor(&mut 
self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vroundsd_floor, loc, loc, ret) } - fn f64_nearest(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vroundsd_nearest, loc, loc, ret); + fn f64_nearest(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vroundsd_nearest, loc, loc, ret) } - fn f64_cmp_ge(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vcmpgesd, loc_a, loc_b, ret); - self.assembler.emit_and(Size::S32, Location::Imm32(1), ret); + fn f64_cmp_ge( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vcmpgesd, loc_a, loc_b, ret)?; + self.assembler.emit_and(Size::S32, Location::Imm32(1), ret) } - fn f64_cmp_gt(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vcmpgtsd, loc_a, loc_b, ret); - self.assembler.emit_and(Size::S32, Location::Imm32(1), ret); + fn f64_cmp_gt( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vcmpgtsd, loc_a, loc_b, ret)?; + self.assembler.emit_and(Size::S32, Location::Imm32(1), ret) } - fn f64_cmp_le(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vcmplesd, loc_a, loc_b, ret); - self.assembler.emit_and(Size::S32, Location::Imm32(1), ret); + fn f64_cmp_le( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vcmplesd, loc_a, loc_b, ret)?; + self.assembler.emit_and(Size::S32, Location::Imm32(1), ret) } - fn f64_cmp_lt(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vcmpltsd, loc_a, 
loc_b, ret); - self.assembler.emit_and(Size::S32, Location::Imm32(1), ret); + fn f64_cmp_lt( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vcmpltsd, loc_a, loc_b, ret)?; + self.assembler.emit_and(Size::S32, Location::Imm32(1), ret) } - fn f64_cmp_ne(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vcmpneqsd, loc_a, loc_b, ret); - self.assembler.emit_and(Size::S32, Location::Imm32(1), ret); + fn f64_cmp_ne( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vcmpneqsd, loc_a, loc_b, ret)?; + self.assembler.emit_and(Size::S32, Location::Imm32(1), ret) } - fn f64_cmp_eq(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vcmpeqsd, loc_a, loc_b, ret); - self.assembler.emit_and(Size::S32, Location::Imm32(1), ret); + fn f64_cmp_eq( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vcmpeqsd, loc_a, loc_b, ret)?; + self.assembler.emit_and(Size::S32, Location::Imm32(1), ret) } - fn f64_min(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f64_min( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { if !self.arch_supports_canonicalize_nan() { - self.emit_relaxed_avx(AssemblerX64::emit_vminsd, loc_a, loc_b, ret); + self.emit_relaxed_avx(AssemblerX64::emit_vminsd, loc_a, loc_b, ret) } else { - let tmp1 = self.acquire_temp_simd().unwrap(); - let tmp2 = self.acquire_temp_simd().unwrap(); - let tmpg1 = self.acquire_temp_gpr().unwrap(); - let tmpg2 = self.acquire_temp_gpr().unwrap(); + let tmp1 = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmp2 = 
self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmpg1 = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmpg2 = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; let src1 = match loc_a { Location::SIMD(x) => x, Location::GPR(_) | Location::Memory(_, _) => { - self.move_location(Size::S64, loc_a, Location::SIMD(tmp1)); + self.move_location(Size::S64, loc_a, Location::SIMD(tmp1))?; tmp1 } Location::Imm32(_) => { - self.move_location(Size::S32, loc_a, Location::GPR(tmpg1)); - self.move_location(Size::S32, Location::GPR(tmpg1), Location::SIMD(tmp1)); + self.move_location(Size::S32, loc_a, Location::GPR(tmpg1))?; + self.move_location(Size::S32, Location::GPR(tmpg1), Location::SIMD(tmp1))?; tmp1 } Location::Imm64(_) => { - self.move_location(Size::S64, loc_a, Location::GPR(tmpg1)); - self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp1)); + self.move_location(Size::S64, loc_a, Location::GPR(tmpg1))?; + self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp1))?; tmp1 } _ => { - unreachable!(); + codegen_error!("singlepass f64_min unreachable"); } }; let src2 = match loc_b { Location::SIMD(x) => x, Location::GPR(_) | Location::Memory(_, _) => { - self.move_location(Size::S64, loc_b, Location::SIMD(tmp2)); + self.move_location(Size::S64, loc_b, Location::SIMD(tmp2))?; tmp2 } Location::Imm32(_) => { - self.move_location(Size::S32, loc_b, Location::GPR(tmpg1)); - self.move_location(Size::S32, Location::GPR(tmpg1), Location::SIMD(tmp2)); + self.move_location(Size::S32, loc_b, Location::GPR(tmpg1))?; + self.move_location(Size::S32, Location::GPR(tmpg1), Location::SIMD(tmp2))?; tmp2 } Location::Imm64(_) => { - self.move_location(Size::S64, loc_b, Location::GPR(tmpg1)); - self.move_location(Size::S64, Location::GPR(tmpg1), 
Location::SIMD(tmp2)); + self.move_location(Size::S64, loc_b, Location::GPR(tmpg1))?; + self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp2))?; tmp2 } _ => { - unreachable!(); + codegen_error!("singlepass f64_min unreachable"); } }; @@ -6304,52 +7018,56 @@ impl Machine for MachineX86_64 { let tmp_xmm2 = XMM::XMM9; let tmp_xmm3 = XMM::XMM10; - self.move_location(Size::S64, Location::SIMD(src1), Location::GPR(tmpg1)); - self.move_location(Size::S64, Location::SIMD(src2), Location::GPR(tmpg2)); + self.move_location(Size::S64, Location::SIMD(src1), Location::GPR(tmpg1))?; + self.move_location(Size::S64, Location::SIMD(src2), Location::GPR(tmpg2))?; self.assembler - .emit_cmp(Size::S64, Location::GPR(tmpg2), Location::GPR(tmpg1)); + .emit_cmp(Size::S64, Location::GPR(tmpg2), Location::GPR(tmpg1))?; self.assembler - .emit_vminsd(src1, XMMOrMemory::XMM(src2), tmp_xmm1); + .emit_vminsd(src1, XMMOrMemory::XMM(src2), tmp_xmm1)?; let label1 = self.assembler.get_label(); let label2 = self.assembler.get_label(); - self.assembler.emit_jmp(Condition::NotEqual, label1); + self.assembler.emit_jmp(Condition::NotEqual, label1)?; self.assembler - .emit_vmovapd(XMMOrMemory::XMM(tmp_xmm1), XMMOrMemory::XMM(tmp_xmm2)); - self.assembler.emit_jmp(Condition::None, label2); - self.emit_label(label1); + .emit_vmovapd(XMMOrMemory::XMM(tmp_xmm1), XMMOrMemory::XMM(tmp_xmm2))?; + self.assembler.emit_jmp(Condition::None, label2)?; + self.emit_label(label1)?; // load float -0.0 self.move_location( Size::S64, Location::Imm64(0x8000_0000_0000_0000), // Negative zero Location::GPR(tmpg1), - ); - self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp_xmm2)); - self.emit_label(label2); + )?; + self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp_xmm2))?; + self.emit_label(label2)?; self.assembler - .emit_vcmpeqsd(src1, XMMOrMemory::XMM(src2), tmp_xmm3); + .emit_vcmpeqsd(src1, XMMOrMemory::XMM(src2), tmp_xmm3)?; + self.assembler.emit_vblendvpd( + 
tmp_xmm3, + XMMOrMemory::XMM(tmp_xmm2), + tmp_xmm1, + tmp_xmm1, + )?; self.assembler - .emit_vblendvpd(tmp_xmm3, XMMOrMemory::XMM(tmp_xmm2), tmp_xmm1, tmp_xmm1); - self.assembler - .emit_vcmpunordsd(src1, XMMOrMemory::XMM(src2), src1); + .emit_vcmpunordsd(src1, XMMOrMemory::XMM(src2), src1)?; // load float canonical nan self.move_location( Size::S64, Location::Imm64(0x7FF8_0000_0000_0000), // Canonical NaN Location::GPR(tmpg1), - ); - self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(src2)); + )?; + self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(src2))?; self.assembler - .emit_vblendvpd(src1, XMMOrMemory::XMM(src2), tmp_xmm1, src1); + .emit_vblendvpd(src1, XMMOrMemory::XMM(src2), tmp_xmm1, src1)?; match ret { Location::SIMD(x) => { self.assembler - .emit_vmovaps(XMMOrMemory::XMM(src1), XMMOrMemory::XMM(x)); + .emit_vmovaps(XMMOrMemory::XMM(src1), XMMOrMemory::XMM(x))?; } Location::Memory(_, _) | Location::GPR(_) => { - self.move_location(Size::S64, Location::SIMD(src1), ret); + self.move_location(Size::S64, Location::SIMD(src1), ret)?; } _ => { - unreachable!(); + codegen_error!("singlepass f64_min unreachable"); } } @@ -6357,55 +7075,69 @@ impl Machine for MachineX86_64 { self.release_gpr(tmpg1); self.release_simd(tmp2); self.release_simd(tmp1); + Ok(()) } } - fn f64_max(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f64_max( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { if !self.arch_supports_canonicalize_nan() { - self.emit_relaxed_avx(AssemblerX64::emit_vmaxsd, loc_a, loc_b, ret); + self.emit_relaxed_avx(AssemblerX64::emit_vmaxsd, loc_a, loc_b, ret) } else { - let tmp1 = self.acquire_temp_simd().unwrap(); - let tmp2 = self.acquire_temp_simd().unwrap(); - let tmpg1 = self.acquire_temp_gpr().unwrap(); - let tmpg2 = self.acquire_temp_gpr().unwrap(); + let tmp1 = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp 
simd".to_string(), + })?; + let tmp2 = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmpg1 = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmpg2 = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; let src1 = match loc_a { Location::SIMD(x) => x, Location::GPR(_) | Location::Memory(_, _) => { - self.move_location(Size::S64, loc_a, Location::SIMD(tmp1)); + self.move_location(Size::S64, loc_a, Location::SIMD(tmp1))?; tmp1 } Location::Imm32(_) => { - self.move_location(Size::S32, loc_a, Location::GPR(tmpg1)); - self.move_location(Size::S32, Location::GPR(tmpg1), Location::SIMD(tmp1)); + self.move_location(Size::S32, loc_a, Location::GPR(tmpg1))?; + self.move_location(Size::S32, Location::GPR(tmpg1), Location::SIMD(tmp1))?; tmp1 } Location::Imm64(_) => { - self.move_location(Size::S64, loc_a, Location::GPR(tmpg1)); - self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp1)); + self.move_location(Size::S64, loc_a, Location::GPR(tmpg1))?; + self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp1))?; tmp1 } _ => { - unreachable!(); + codegen_error!("singlepass f64_max unreachable"); } }; let src2 = match loc_b { Location::SIMD(x) => x, Location::GPR(_) | Location::Memory(_, _) => { - self.move_location(Size::S64, loc_b, Location::SIMD(tmp2)); + self.move_location(Size::S64, loc_b, Location::SIMD(tmp2))?; tmp2 } Location::Imm32(_) => { - self.move_location(Size::S32, loc_b, Location::GPR(tmpg1)); - self.move_location(Size::S32, Location::GPR(tmpg1), Location::SIMD(tmp2)); + self.move_location(Size::S32, loc_b, Location::GPR(tmpg1))?; + self.move_location(Size::S32, Location::GPR(tmpg1), Location::SIMD(tmp2))?; tmp2 } Location::Imm64(_) => { - self.move_location(Size::S64, loc_b, Location::GPR(tmpg1)); - self.move_location(Size::S64, 
Location::GPR(tmpg1), Location::SIMD(tmp2)); + self.move_location(Size::S64, loc_b, Location::GPR(tmpg1))?; + self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp2))?; tmp2 } _ => { - unreachable!(); + codegen_error!("singlepass f64_max unreachable"); } }; @@ -6413,47 +7145,51 @@ impl Machine for MachineX86_64 { let tmp_xmm2 = XMM::XMM9; let tmp_xmm3 = XMM::XMM10; - self.move_location(Size::S64, Location::SIMD(src1), Location::GPR(tmpg1)); - self.move_location(Size::S64, Location::SIMD(src2), Location::GPR(tmpg2)); + self.move_location(Size::S64, Location::SIMD(src1), Location::GPR(tmpg1))?; + self.move_location(Size::S64, Location::SIMD(src2), Location::GPR(tmpg2))?; self.assembler - .emit_cmp(Size::S64, Location::GPR(tmpg2), Location::GPR(tmpg1)); + .emit_cmp(Size::S64, Location::GPR(tmpg2), Location::GPR(tmpg1))?; self.assembler - .emit_vmaxsd(src1, XMMOrMemory::XMM(src2), tmp_xmm1); + .emit_vmaxsd(src1, XMMOrMemory::XMM(src2), tmp_xmm1)?; let label1 = self.assembler.get_label(); let label2 = self.assembler.get_label(); - self.assembler.emit_jmp(Condition::NotEqual, label1); - self.assembler - .emit_vmovapd(XMMOrMemory::XMM(tmp_xmm1), XMMOrMemory::XMM(tmp_xmm2)); - self.assembler.emit_jmp(Condition::None, label2); - self.emit_label(label1); + self.assembler.emit_jmp(Condition::NotEqual, label1)?; self.assembler - .emit_vxorpd(tmp_xmm2, XMMOrMemory::XMM(tmp_xmm2), tmp_xmm2); - self.emit_label(label2); + .emit_vmovapd(XMMOrMemory::XMM(tmp_xmm1), XMMOrMemory::XMM(tmp_xmm2))?; + self.assembler.emit_jmp(Condition::None, label2)?; + self.emit_label(label1)?; self.assembler - .emit_vcmpeqsd(src1, XMMOrMemory::XMM(src2), tmp_xmm3); + .emit_vxorpd(tmp_xmm2, XMMOrMemory::XMM(tmp_xmm2), tmp_xmm2)?; + self.emit_label(label2)?; self.assembler - .emit_vblendvpd(tmp_xmm3, XMMOrMemory::XMM(tmp_xmm2), tmp_xmm1, tmp_xmm1); + .emit_vcmpeqsd(src1, XMMOrMemory::XMM(src2), tmp_xmm3)?; + self.assembler.emit_vblendvpd( + tmp_xmm3, + XMMOrMemory::XMM(tmp_xmm2), + 
tmp_xmm1, + tmp_xmm1, + )?; self.assembler - .emit_vcmpunordsd(src1, XMMOrMemory::XMM(src2), src1); + .emit_vcmpunordsd(src1, XMMOrMemory::XMM(src2), src1)?; // load float canonical nan self.move_location( Size::S64, Location::Imm64(0x7FF8_0000_0000_0000), // Canonical NaN Location::GPR(tmpg1), - ); - self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(src2)); + )?; + self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(src2))?; self.assembler - .emit_vblendvpd(src1, XMMOrMemory::XMM(src2), tmp_xmm1, src1); + .emit_vblendvpd(src1, XMMOrMemory::XMM(src2), tmp_xmm1, src1)?; match ret { Location::SIMD(x) => { self.assembler - .emit_vmovapd(XMMOrMemory::XMM(src1), XMMOrMemory::XMM(x)); + .emit_vmovapd(XMMOrMemory::XMM(src1), XMMOrMemory::XMM(x))?; } Location::Memory(_, _) | Location::GPR(_) => { - self.move_location(Size::S64, Location::SIMD(src1), ret); + self.move_location(Size::S64, Location::SIMD(src1), ret)?; } _ => { - unreachable!(); + codegen_error!("singlepass f64_max unreachable"); } } @@ -6461,146 +7197,218 @@ impl Machine for MachineX86_64 { self.release_gpr(tmpg1); self.release_simd(tmp2); self.release_simd(tmp1); + Ok(()) } } - fn f64_add(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vaddsd, loc_a, loc_b, ret); + fn f64_add( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vaddsd, loc_a, loc_b, ret) } - fn f64_sub(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vsubsd, loc_a, loc_b, ret); + fn f64_sub( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vsubsd, loc_a, loc_b, ret) } - fn f64_mul(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vmulsd, loc_a, loc_b, ret); + 
fn f64_mul( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vmulsd, loc_a, loc_b, ret) } - fn f64_div(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vdivsd, loc_a, loc_b, ret); + fn f64_div( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vdivsd, loc_a, loc_b, ret) } - fn f32_neg(&mut self, loc: Location, ret: Location) { + fn f32_neg(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { if self.assembler.arch_has_fneg() { - let tmp = self.acquire_temp_simd().unwrap(); - self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp)); - self.assembler.arch_emit_f32_neg(tmp, tmp); - self.emit_relaxed_mov(Size::S32, Location::SIMD(tmp), ret); + let tmp = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + self.emit_relaxed_mov(Size::S32, loc, Location::SIMD(tmp))?; + self.assembler.arch_emit_f32_neg(tmp, tmp)?; + self.emit_relaxed_mov(Size::S32, Location::SIMD(tmp), ret)?; self.release_simd(tmp); } else { - let tmp = self.acquire_temp_gpr().unwrap(); - self.move_location(Size::S32, loc, Location::GPR(tmp)); - self.assembler.emit_btc_gpr_imm8_32(31, tmp); - self.move_location(Size::S32, Location::GPR(tmp), ret); + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location(Size::S32, loc, Location::GPR(tmp))?; + self.assembler.emit_btc_gpr_imm8_32(31, tmp)?; + self.move_location(Size::S32, Location::GPR(tmp), ret)?; self.release_gpr(tmp); } + Ok(()) } - fn f32_abs(&mut self, loc: Location, ret: Location) { - let tmp = self.acquire_temp_gpr().unwrap(); - self.move_location(Size::S32, loc, Location::GPR(tmp)); + fn f32_abs(&mut self, loc: Location, ret: Location) 
-> Result<(), CodegenError> { + let tmp = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + self.move_location(Size::S32, loc, Location::GPR(tmp))?; self.assembler.emit_and( Size::S32, Location::Imm32(0x7fffffffu32), Location::GPR(tmp), - ); - self.move_location(Size::S32, Location::GPR(tmp), ret); + )?; + self.move_location(Size::S32, Location::GPR(tmp), ret)?; self.release_gpr(tmp); + Ok(()) } - fn emit_i32_copysign(&mut self, tmp1: GPR, tmp2: GPR) { + fn emit_i32_copysign(&mut self, tmp1: GPR, tmp2: GPR) -> Result<(), CodegenError> { self.assembler.emit_and( Size::S32, Location::Imm32(0x7fffffffu32), Location::GPR(tmp1), - ); + )?; self.assembler.emit_and( Size::S32, Location::Imm32(0x80000000u32), Location::GPR(tmp2), - ); + )?; self.assembler - .emit_or(Size::S32, Location::GPR(tmp2), Location::GPR(tmp1)); + .emit_or(Size::S32, Location::GPR(tmp2), Location::GPR(tmp1)) } - fn f32_sqrt(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vsqrtss, loc, loc, ret); + fn f32_sqrt(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vsqrtss, loc, loc, ret) } - fn f32_trunc(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vroundss_trunc, loc, loc, ret); + fn f32_trunc(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vroundss_trunc, loc, loc, ret) } - fn f32_ceil(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vroundss_ceil, loc, loc, ret); + fn f32_ceil(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vroundss_ceil, loc, loc, ret) } - fn f32_floor(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vroundss_floor, loc, loc, ret); + fn f32_floor(&mut self, loc: Location, ret: 
Location) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vroundss_floor, loc, loc, ret) } - fn f32_nearest(&mut self, loc: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vroundss_nearest, loc, loc, ret); + fn f32_nearest(&mut self, loc: Location, ret: Location) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vroundss_nearest, loc, loc, ret) } - fn f32_cmp_ge(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vcmpgess, loc_a, loc_b, ret); - self.assembler.emit_and(Size::S32, Location::Imm32(1), ret); + fn f32_cmp_ge( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vcmpgess, loc_a, loc_b, ret)?; + self.assembler.emit_and(Size::S32, Location::Imm32(1), ret) } - fn f32_cmp_gt(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vcmpgtss, loc_a, loc_b, ret); - self.assembler.emit_and(Size::S32, Location::Imm32(1), ret); + fn f32_cmp_gt( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vcmpgtss, loc_a, loc_b, ret)?; + self.assembler.emit_and(Size::S32, Location::Imm32(1), ret) } - fn f32_cmp_le(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vcmpless, loc_a, loc_b, ret); - self.assembler.emit_and(Size::S32, Location::Imm32(1), ret); + fn f32_cmp_le( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vcmpless, loc_a, loc_b, ret)?; + self.assembler.emit_and(Size::S32, Location::Imm32(1), ret) } - fn f32_cmp_lt(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vcmpltss, loc_a, loc_b, ret); - 
self.assembler.emit_and(Size::S32, Location::Imm32(1), ret); + fn f32_cmp_lt( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vcmpltss, loc_a, loc_b, ret)?; + self.assembler.emit_and(Size::S32, Location::Imm32(1), ret) } - fn f32_cmp_ne(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vcmpneqss, loc_a, loc_b, ret); - self.assembler.emit_and(Size::S32, Location::Imm32(1), ret); + fn f32_cmp_ne( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vcmpneqss, loc_a, loc_b, ret)?; + self.assembler.emit_and(Size::S32, Location::Imm32(1), ret) } - fn f32_cmp_eq(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vcmpeqss, loc_a, loc_b, ret); - self.assembler.emit_and(Size::S32, Location::Imm32(1), ret); + fn f32_cmp_eq( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vcmpeqss, loc_a, loc_b, ret)?; + self.assembler.emit_and(Size::S32, Location::Imm32(1), ret) } - fn f32_min(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f32_min( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { if !self.arch_supports_canonicalize_nan() { - self.emit_relaxed_avx(AssemblerX64::emit_vminss, loc_a, loc_b, ret); + self.emit_relaxed_avx(AssemblerX64::emit_vminss, loc_a, loc_b, ret) } else { - let tmp1 = self.acquire_temp_simd().unwrap(); - let tmp2 = self.acquire_temp_simd().unwrap(); - let tmpg1 = self.acquire_temp_gpr().unwrap(); - let tmpg2 = self.acquire_temp_gpr().unwrap(); + let tmp1 = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmp2 = 
self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmpg1 = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmpg2 = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; let src1 = match loc_a { Location::SIMD(x) => x, Location::GPR(_) | Location::Memory(_, _) => { - self.move_location(Size::S64, loc_a, Location::SIMD(tmp1)); + self.move_location(Size::S64, loc_a, Location::SIMD(tmp1))?; tmp1 } Location::Imm32(_) => { - self.move_location(Size::S32, loc_a, Location::GPR(tmpg1)); - self.move_location(Size::S32, Location::GPR(tmpg1), Location::SIMD(tmp1)); + self.move_location(Size::S32, loc_a, Location::GPR(tmpg1))?; + self.move_location(Size::S32, Location::GPR(tmpg1), Location::SIMD(tmp1))?; tmp1 } Location::Imm64(_) => { - self.move_location(Size::S64, loc_a, Location::GPR(tmpg1)); - self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp1)); + self.move_location(Size::S64, loc_a, Location::GPR(tmpg1))?; + self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp1))?; tmp1 } _ => { - unreachable!(); + codegen_error!("singlepass f32_min unreachable"); } }; let src2 = match loc_b { Location::SIMD(x) => x, Location::GPR(_) | Location::Memory(_, _) => { - self.move_location(Size::S64, loc_b, Location::SIMD(tmp2)); + self.move_location(Size::S64, loc_b, Location::SIMD(tmp2))?; tmp2 } Location::Imm32(_) => { - self.move_location(Size::S32, loc_b, Location::GPR(tmpg1)); - self.move_location(Size::S32, Location::GPR(tmpg1), Location::SIMD(tmp2)); + self.move_location(Size::S32, loc_b, Location::GPR(tmpg1))?; + self.move_location(Size::S32, Location::GPR(tmpg1), Location::SIMD(tmp2))?; tmp2 } Location::Imm64(_) => { - self.move_location(Size::S64, loc_b, Location::GPR(tmpg1)); - self.move_location(Size::S64, Location::GPR(tmpg1), 
Location::SIMD(tmp2)); + self.move_location(Size::S64, loc_b, Location::GPR(tmpg1))?; + self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp2))?; tmp2 } _ => { - unreachable!(); + codegen_error!("singlepass f32_min unreachable"); } }; @@ -6608,52 +7416,56 @@ impl Machine for MachineX86_64 { let tmp_xmm2 = XMM::XMM9; let tmp_xmm3 = XMM::XMM10; - self.move_location(Size::S32, Location::SIMD(src1), Location::GPR(tmpg1)); - self.move_location(Size::S32, Location::SIMD(src2), Location::GPR(tmpg2)); + self.move_location(Size::S32, Location::SIMD(src1), Location::GPR(tmpg1))?; + self.move_location(Size::S32, Location::SIMD(src2), Location::GPR(tmpg2))?; self.assembler - .emit_cmp(Size::S32, Location::GPR(tmpg2), Location::GPR(tmpg1)); + .emit_cmp(Size::S32, Location::GPR(tmpg2), Location::GPR(tmpg1))?; self.assembler - .emit_vminss(src1, XMMOrMemory::XMM(src2), tmp_xmm1); + .emit_vminss(src1, XMMOrMemory::XMM(src2), tmp_xmm1)?; let label1 = self.assembler.get_label(); let label2 = self.assembler.get_label(); - self.assembler.emit_jmp(Condition::NotEqual, label1); + self.assembler.emit_jmp(Condition::NotEqual, label1)?; self.assembler - .emit_vmovaps(XMMOrMemory::XMM(tmp_xmm1), XMMOrMemory::XMM(tmp_xmm2)); - self.assembler.emit_jmp(Condition::None, label2); - self.emit_label(label1); + .emit_vmovaps(XMMOrMemory::XMM(tmp_xmm1), XMMOrMemory::XMM(tmp_xmm2))?; + self.assembler.emit_jmp(Condition::None, label2)?; + self.emit_label(label1)?; // load float -0.0 self.move_location( Size::S64, Location::Imm32(0x8000_0000), // Negative zero Location::GPR(tmpg1), - ); - self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp_xmm2)); - self.emit_label(label2); + )?; + self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp_xmm2))?; + self.emit_label(label2)?; self.assembler - .emit_vcmpeqss(src1, XMMOrMemory::XMM(src2), tmp_xmm3); + .emit_vcmpeqss(src1, XMMOrMemory::XMM(src2), tmp_xmm3)?; + self.assembler.emit_vblendvps( + tmp_xmm3, + 
XMMOrMemory::XMM(tmp_xmm2), + tmp_xmm1, + tmp_xmm1, + )?; self.assembler - .emit_vblendvps(tmp_xmm3, XMMOrMemory::XMM(tmp_xmm2), tmp_xmm1, tmp_xmm1); - self.assembler - .emit_vcmpunordss(src1, XMMOrMemory::XMM(src2), src1); + .emit_vcmpunordss(src1, XMMOrMemory::XMM(src2), src1)?; // load float canonical nan self.move_location( Size::S64, Location::Imm32(0x7FC0_0000), // Canonical NaN Location::GPR(tmpg1), - ); - self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(src2)); + )?; + self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(src2))?; self.assembler - .emit_vblendvps(src1, XMMOrMemory::XMM(src2), tmp_xmm1, src1); + .emit_vblendvps(src1, XMMOrMemory::XMM(src2), tmp_xmm1, src1)?; match ret { Location::SIMD(x) => { self.assembler - .emit_vmovaps(XMMOrMemory::XMM(src1), XMMOrMemory::XMM(x)); + .emit_vmovaps(XMMOrMemory::XMM(src1), XMMOrMemory::XMM(x))?; } Location::Memory(_, _) | Location::GPR(_) => { - self.move_location(Size::S64, Location::SIMD(src1), ret); + self.move_location(Size::S64, Location::SIMD(src1), ret)?; } _ => { - unreachable!(); + codegen_error!("singlepass f32_min unreachable"); } } @@ -6661,55 +7473,69 @@ impl Machine for MachineX86_64 { self.release_gpr(tmpg1); self.release_simd(tmp2); self.release_simd(tmp1); + Ok(()) } } - fn f32_max(&mut self, loc_a: Location, loc_b: Location, ret: Location) { + fn f32_max( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { if !self.arch_supports_canonicalize_nan() { - self.emit_relaxed_avx(AssemblerX64::emit_vmaxss, loc_a, loc_b, ret); + self.emit_relaxed_avx(AssemblerX64::emit_vmaxss, loc_a, loc_b, ret) } else { - let tmp1 = self.acquire_temp_simd().unwrap(); - let tmp2 = self.acquire_temp_simd().unwrap(); - let tmpg1 = self.acquire_temp_gpr().unwrap(); - let tmpg2 = self.acquire_temp_gpr().unwrap(); + let tmp1 = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; 
+ let tmp2 = self.acquire_temp_simd().ok_or(CodegenError { + message: "singlepass cannot acquire temp simd".to_string(), + })?; + let tmpg1 = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; + let tmpg2 = self.acquire_temp_gpr().ok_or(CodegenError { + message: "singlepass cannot acquire temp gpr".to_string(), + })?; let src1 = match loc_a { Location::SIMD(x) => x, Location::GPR(_) | Location::Memory(_, _) => { - self.move_location(Size::S64, loc_a, Location::SIMD(tmp1)); + self.move_location(Size::S64, loc_a, Location::SIMD(tmp1))?; tmp1 } Location::Imm32(_) => { - self.move_location(Size::S32, loc_a, Location::GPR(tmpg1)); - self.move_location(Size::S32, Location::GPR(tmpg1), Location::SIMD(tmp1)); + self.move_location(Size::S32, loc_a, Location::GPR(tmpg1))?; + self.move_location(Size::S32, Location::GPR(tmpg1), Location::SIMD(tmp1))?; tmp1 } Location::Imm64(_) => { - self.move_location(Size::S64, loc_a, Location::GPR(tmpg1)); - self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp1)); + self.move_location(Size::S64, loc_a, Location::GPR(tmpg1))?; + self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp1))?; tmp1 } _ => { - unreachable!(); + codegen_error!("singlepass f32_max unreachable"); } }; let src2 = match loc_b { Location::SIMD(x) => x, Location::GPR(_) | Location::Memory(_, _) => { - self.move_location(Size::S64, loc_b, Location::SIMD(tmp2)); + self.move_location(Size::S64, loc_b, Location::SIMD(tmp2))?; tmp2 } Location::Imm32(_) => { - self.move_location(Size::S32, loc_b, Location::GPR(tmpg1)); - self.move_location(Size::S32, Location::GPR(tmpg1), Location::SIMD(tmp2)); + self.move_location(Size::S32, loc_b, Location::GPR(tmpg1))?; + self.move_location(Size::S32, Location::GPR(tmpg1), Location::SIMD(tmp2))?; tmp2 } Location::Imm64(_) => { - self.move_location(Size::S64, loc_b, Location::GPR(tmpg1)); - self.move_location(Size::S64, Location::GPR(tmpg1), 
Location::SIMD(tmp2)); + self.move_location(Size::S64, loc_b, Location::GPR(tmpg1))?; + self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(tmp2))?; tmp2 } _ => { - unreachable!(); + codegen_error!("singlepass f32_max unreachable"); } }; @@ -6717,47 +7543,51 @@ impl Machine for MachineX86_64 { let tmp_xmm2 = XMM::XMM9; let tmp_xmm3 = XMM::XMM10; - self.move_location(Size::S32, Location::SIMD(src1), Location::GPR(tmpg1)); - self.move_location(Size::S32, Location::SIMD(src2), Location::GPR(tmpg2)); + self.move_location(Size::S32, Location::SIMD(src1), Location::GPR(tmpg1))?; + self.move_location(Size::S32, Location::SIMD(src2), Location::GPR(tmpg2))?; self.assembler - .emit_cmp(Size::S32, Location::GPR(tmpg2), Location::GPR(tmpg1)); + .emit_cmp(Size::S32, Location::GPR(tmpg2), Location::GPR(tmpg1))?; self.assembler - .emit_vmaxss(src1, XMMOrMemory::XMM(src2), tmp_xmm1); + .emit_vmaxss(src1, XMMOrMemory::XMM(src2), tmp_xmm1)?; let label1 = self.assembler.get_label(); let label2 = self.assembler.get_label(); - self.assembler.emit_jmp(Condition::NotEqual, label1); - self.assembler - .emit_vmovaps(XMMOrMemory::XMM(tmp_xmm1), XMMOrMemory::XMM(tmp_xmm2)); - self.assembler.emit_jmp(Condition::None, label2); - self.emit_label(label1); + self.assembler.emit_jmp(Condition::NotEqual, label1)?; self.assembler - .emit_vxorps(tmp_xmm2, XMMOrMemory::XMM(tmp_xmm2), tmp_xmm2); - self.emit_label(label2); + .emit_vmovaps(XMMOrMemory::XMM(tmp_xmm1), XMMOrMemory::XMM(tmp_xmm2))?; + self.assembler.emit_jmp(Condition::None, label2)?; + self.emit_label(label1)?; self.assembler - .emit_vcmpeqss(src1, XMMOrMemory::XMM(src2), tmp_xmm3); + .emit_vxorps(tmp_xmm2, XMMOrMemory::XMM(tmp_xmm2), tmp_xmm2)?; + self.emit_label(label2)?; self.assembler - .emit_vblendvps(tmp_xmm3, XMMOrMemory::XMM(tmp_xmm2), tmp_xmm1, tmp_xmm1); + .emit_vcmpeqss(src1, XMMOrMemory::XMM(src2), tmp_xmm3)?; + self.assembler.emit_vblendvps( + tmp_xmm3, + XMMOrMemory::XMM(tmp_xmm2), + tmp_xmm1, + tmp_xmm1, + )?; 
self.assembler - .emit_vcmpunordss(src1, XMMOrMemory::XMM(src2), src1); + .emit_vcmpunordss(src1, XMMOrMemory::XMM(src2), src1)?; // load float canonical nan self.move_location( Size::S64, Location::Imm32(0x7FC0_0000), // Canonical NaN Location::GPR(tmpg1), - ); - self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(src2)); + )?; + self.move_location(Size::S64, Location::GPR(tmpg1), Location::SIMD(src2))?; self.assembler - .emit_vblendvps(src1, XMMOrMemory::XMM(src2), tmp_xmm1, src1); + .emit_vblendvps(src1, XMMOrMemory::XMM(src2), tmp_xmm1, src1)?; match ret { Location::SIMD(x) => { self.assembler - .emit_vmovaps(XMMOrMemory::XMM(src1), XMMOrMemory::XMM(x)); + .emit_vmovaps(XMMOrMemory::XMM(src1), XMMOrMemory::XMM(x))?; } Location::Memory(_, _) | Location::GPR(_) => { - self.move_location(Size::S64, Location::SIMD(src1), ret); + self.move_location(Size::S64, Location::SIMD(src1), ret)?; } _ => { - unreachable!(); + codegen_error!("singlepass f32_max unreachable"); } } @@ -6765,26 +7595,47 @@ impl Machine for MachineX86_64 { self.release_gpr(tmpg1); self.release_simd(tmp2); self.release_simd(tmp1); + Ok(()) } } - fn f32_add(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vaddss, loc_a, loc_b, ret); + fn f32_add( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vaddss, loc_a, loc_b, ret) } - fn f32_sub(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vsubss, loc_a, loc_b, ret); + fn f32_sub( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vsubss, loc_a, loc_b, ret) } - fn f32_mul(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vmulss, loc_a, loc_b, ret); + fn f32_mul( + &mut self, + loc_a: 
Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vmulss, loc_a, loc_b, ret) } - fn f32_div(&mut self, loc_a: Location, loc_b: Location, ret: Location) { - self.emit_relaxed_avx(AssemblerX64::emit_vdivss, loc_a, loc_b, ret); + fn f32_div( + &mut self, + loc_a: Location, + loc_b: Location, + ret: Location, + ) -> Result<(), CodegenError> { + self.emit_relaxed_avx(AssemblerX64::emit_vdivss, loc_a, loc_b, ret) } fn gen_std_trampoline( &self, sig: &FunctionType, calling_convention: CallingConvention, - ) -> FunctionBody { + ) -> Result { // the cpu feature here is irrelevant let mut a = AssemblerX64::new(0, None); @@ -6808,27 +7659,27 @@ impl Machine for MachineX86_64 { } // Used callee-saved registers - a.emit_push(Size::S64, Location::GPR(GPR::R15)); - a.emit_push(Size::S64, Location::GPR(GPR::R14)); + a.emit_push(Size::S64, Location::GPR(GPR::R15))?; + a.emit_push(Size::S64, Location::GPR(GPR::R14))?; // Prepare stack space. a.emit_sub( Size::S64, Location::Imm32(stack_offset + stack_padding), Location::GPR(GPR::RSP), - ); + )?; // Arguments a.emit_mov( Size::S64, self.get_simple_param_location(1, calling_convention), Location::GPR(GPR::R15), - ); // func_ptr + )?; // func_ptr a.emit_mov( Size::S64, self.get_simple_param_location(2, calling_convention), Location::GPR(GPR::R14), - ); // args_rets + )?; // args_rets // Move arguments to their locations. // `callee_vmctx` is already in the first argument register, so no need to move. @@ -6840,12 +7691,12 @@ impl Machine for MachineX86_64 { match dst_loc { Location::GPR(_) => { - a.emit_mov(Size::S64, src_loc, dst_loc); + a.emit_mov(Size::S64, src_loc, dst_loc)?; } Location::Memory(_, _) => { // This location is for reading arguments but we are writing arguments here. // So recalculate it. 
- a.emit_mov(Size::S64, src_loc, Location::GPR(GPR::RAX)); + a.emit_mov(Size::S64, src_loc, Location::GPR(GPR::RAX))?; a.emit_mov( Size::S64, Location::GPR(GPR::RAX), @@ -6853,23 +7704,23 @@ impl Machine for MachineX86_64 { GPR::RSP, (stack_padding as usize + n_stack_args * 8) as _, ), - ); + )?; n_stack_args += 1; } - _ => unreachable!(), + _ => codegen_error!("singlepass gen_std_trampoline unreachable"), } } } // Call. - a.emit_call_location(Location::GPR(GPR::R15)); + a.emit_call_location(Location::GPR(GPR::R15))?; // Restore stack. a.emit_add( Size::S64, Location::Imm32(stack_offset + stack_padding), Location::GPR(GPR::RSP), - ); + )?; // Write return value. if !sig.results().is_empty() { @@ -6877,19 +7728,19 @@ impl Machine for MachineX86_64 { Size::S64, Location::GPR(GPR::RAX), Location::Memory(GPR::R14, 0), - ); + )?; } // Restore callee-saved registers. - a.emit_pop(Size::S64, Location::GPR(GPR::R14)); - a.emit_pop(Size::S64, Location::GPR(GPR::R15)); + a.emit_pop(Size::S64, Location::GPR(GPR::R14))?; + a.emit_pop(Size::S64, Location::GPR(GPR::R15))?; - a.emit_ret(); + a.emit_ret()?; - FunctionBody { + Ok(FunctionBody { body: a.finalize().unwrap().to_vec(), unwind_info: None, - } + }) } // Generates dynamic import function call trampoline for a function type. fn gen_std_dynamic_import_trampoline( @@ -6897,7 +7748,7 @@ impl Machine for MachineX86_64 { vmoffsets: &VMOffsets, sig: &FunctionType, calling_convention: CallingConvention, - ) -> FunctionBody { + ) -> Result { // the cpu feature here is irrelevant let mut a = AssemblerX64::new(0, None); @@ -6911,7 +7762,7 @@ impl Machine for MachineX86_64 { Size::S64, Location::Imm32((stack_offset + stack_padding) as _), Location::GPR(GPR::RSP), - ); + )?; // Copy arguments. 
if !sig.params().is_empty() { @@ -6932,7 +7783,7 @@ impl Machine for MachineX86_64 { (stack_padding * 2 + stack_offset + 8 + stack_param_count * 8) as _, ), Location::GPR(GPR::RAX), - ); + )?; stack_param_count += 1; Location::GPR(GPR::RAX) } @@ -6941,14 +7792,14 @@ impl Machine for MachineX86_64 { Size::S64, source_loc, Location::Memory(GPR::RSP, (stack_padding + i * 16) as _), - ); + )?; // Zero upper 64 bits. a.emit_mov( Size::S64, Location::Imm32(0), Location::Memory(GPR::RSP, (stack_padding + i * 16 + 8) as _), - ); + )?; } } @@ -6962,13 +7813,13 @@ impl Machine for MachineX86_64 { vmoffsets.vmdynamicfunction_import_context_address() as i32, ), Location::GPR(GPR::RAX), - ); + )?; // Load values array. a.emit_lea( Size::S64, Location::Memory(GPR::RSP, stack_padding as i32), Location::GPR(GPR::RDX), - ); + )?; } _ => { // Load target address. @@ -6979,14 +7830,14 @@ impl Machine for MachineX86_64 { vmoffsets.vmdynamicfunction_import_context_address() as i32, ), Location::GPR(GPR::RAX), - ); + )?; // Load values array. - a.emit_mov(Size::S64, Location::GPR(GPR::RSP), Location::GPR(GPR::RSI)); + a.emit_mov(Size::S64, Location::GPR(GPR::RSP), Location::GPR(GPR::RSI))?; } }; // Call target. - a.emit_call_location(Location::GPR(GPR::RAX)); + a.emit_call_location(Location::GPR(GPR::RAX))?; // Fetch return value. if !sig.results().is_empty() { @@ -6995,7 +7846,7 @@ impl Machine for MachineX86_64 { Size::S64, Location::Memory(GPR::RSP, stack_padding as i32), Location::GPR(GPR::RAX), - ); + )?; } // Release values array. @@ -7003,15 +7854,15 @@ impl Machine for MachineX86_64 { Size::S64, Location::Imm32((stack_offset + stack_padding) as _), Location::GPR(GPR::RSP), - ); + )?; // Return. - a.emit_ret(); + a.emit_ret()?; - FunctionBody { + Ok(FunctionBody { body: a.finalize().unwrap().to_vec(), unwind_info: None, - } + }) } // Singlepass calls import functions through a trampoline. 
fn gen_import_call_trampoline( @@ -7020,7 +7871,7 @@ impl Machine for MachineX86_64 { index: FunctionIndex, sig: &FunctionType, calling_convention: CallingConvention, - ) -> CustomSection { + ) -> Result { // the cpu feature here is irrelevant let mut a = AssemblerX64::new(0, None); @@ -7058,7 +7909,7 @@ impl Machine for MachineX86_64 { match argalloc.next(*ty, calling_convention) { Some(X64Register::GPR(_gpr)) => continue, Some(X64Register::XMM(xmm)) => { - a.emit_mov(Size::S64, prev_loc, Location::SIMD(xmm)) + a.emit_mov(Size::S64, prev_loc, Location::SIMD(xmm))? } None => continue, }; @@ -7078,7 +7929,7 @@ impl Machine for MachineX86_64 { Size::S64, Location::Imm32(stack_offset as u32), Location::GPR(GPR::RSP), - ); + )?; } // Store all arguments to the stack to prevent overwrite. @@ -7088,7 +7939,7 @@ impl Machine for MachineX86_64 { let loc = match i { 0..=4 => { let loc = Location::Memory(GPR::RSP, (i * 8) as i32); - a.emit_mov(Size::S64, Location::GPR(PARAM_REGS[i]), loc); + a.emit_mov(Size::S64, Location::GPR(PARAM_REGS[i]), loc)?; loc } _ => { @@ -7113,7 +7964,7 @@ impl Machine for MachineX86_64 { // Since here we never use fewer registers than by the original call, on the caller's frame // we always have enough space to store the rearranged arguments, and the copy "backward" between different // slots in the caller argument region will always work. - a.emit_mov(Size::S64, prev_loc, Location::GPR(GPR::RAX)); + a.emit_mov(Size::S64, prev_loc, Location::GPR(GPR::RAX))?; a.emit_mov( Size::S64, Location::GPR(GPR::RAX), @@ -7121,12 +7972,12 @@ impl Machine for MachineX86_64 { GPR::RSP, stack_offset + 8 + caller_stack_offset, ), - ); + )?; caller_stack_offset += 8; continue; } }; - a.emit_mov(Size::S64, prev_loc, targ); + a.emit_mov(Size::S64, prev_loc, targ)?; } // Restore stack pointer. 
@@ -7135,7 +7986,7 @@ impl Machine for MachineX86_64 { Size::S64, Location::Imm32(stack_offset as u32), Location::GPR(GPR::RSP), - ); + )?; } } } @@ -7152,35 +8003,35 @@ impl Machine for MachineX86_64 { Size::S64, Location::Memory(GPR::RCX, offset as i32), // function pointer Location::GPR(GPR::RAX), - ); + )?; a.emit_mov( Size::S64, Location::Memory(GPR::RCX, offset as i32 + 8), // target vmctx Location::GPR(GPR::RCX), - ); + )?; } _ => { a.emit_mov( Size::S64, Location::Memory(GPR::RDI, offset as i32), // function pointer Location::GPR(GPR::RAX), - ); + )?; a.emit_mov( Size::S64, Location::Memory(GPR::RDI, offset as i32 + 8), // target vmctx Location::GPR(GPR::RDI), - ); + )?; } } - a.emit_host_redirection(GPR::RAX); + a.emit_host_redirection(GPR::RAX)?; let section_body = SectionBody::new_with_vec(a.finalize().unwrap().to_vec()); - CustomSection { + Ok(CustomSection { protection: CustomSectionProtection::ReadExecute, bytes: section_body, relocations: vec![], - } + }) } #[cfg(feature = "unwind")] fn gen_dwarf_unwind_info(&mut self, code_len: usize) -> Option {