From f7079749620aec94ae36bd86cbe4cff505cf6af9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sa=C3=BAl=20Cabrera?=
Date: Mon, 11 Sep 2023 18:33:41 -0400
Subject: [PATCH] winch: Use `Reg` where appropriate in the Masm

This change is a small refactoring of some of the MacroAssembler
functions to use `Reg` instead of `RegImm` where appropriate (e.g. when
the operand is a destination). @elliottt pointed this out while working
on https://github.com/bytecodealliance/wasmtime/pull/6982

It also simplifies the signatures of `float_abs` and `float_neg` to
take a single register, since for these unary operations the
destination and the source are always the same.
---
 winch/codegen/src/codegen/context.rs  | 32 +++++--------
 winch/codegen/src/isa/aarch64/masm.rs | 49 +++++++------------
 winch/codegen/src/isa/x64/masm.rs     | 69 +++++++++++++--------------
 winch/codegen/src/masm.rs             | 18 +++----
 winch/codegen/src/visitor.rs          | 12 ++---
 5 files changed, 75 insertions(+), 105 deletions(-)

diff --git a/winch/codegen/src/codegen/context.rs b/winch/codegen/src/codegen/context.rs
index 27b134c66b83..ad4bc40693ee 100644
--- a/winch/codegen/src/codegen/context.rs
+++ b/winch/codegen/src/codegen/context.rs
@@ -150,11 +150,11 @@ impl<'a> CodeGenContext<'a> {
     pub fn move_val_to_reg<M: MacroAssembler>(&self, src: &Val, dst: Reg, masm: &mut M) {
         let size: OperandSize = src.ty().into();
         match src {
-            Val::Reg(tr) => masm.mov(RegImm::reg(tr.reg), RegImm::reg(dst), size),
-            Val::I32(imm) => masm.mov(RegImm::i32(*imm), RegImm::reg(dst), size),
-            Val::I64(imm) => masm.mov(RegImm::i64(*imm), RegImm::reg(dst), size),
-            Val::F32(imm) => masm.mov(RegImm::f32(imm.bits()), RegImm::reg(dst), size),
-            Val::F64(imm) => masm.mov(RegImm::f64(imm.bits()), RegImm::reg(dst), size),
+            Val::Reg(tr) => masm.mov(RegImm::reg(tr.reg), dst, size),
+            Val::I32(imm) => masm.mov(RegImm::i32(*imm), dst, size),
+            Val::I64(imm) => masm.mov(RegImm::i64(*imm), dst, size),
+            Val::F32(imm) => masm.mov(RegImm::f32(imm.bits()), dst, size),
+            Val::F64(imm) => masm.mov(RegImm::f64(imm.bits()), dst, size),
             Val::Local(local) => {
                 let slot = self
                     .frame
@@ -184,7 +184,7 @@ impl<'a> CodeGenContext<'a> {
     /// Prepares arguments for emitting an i32 binary operation.
     pub fn i32_binop<F, M>(&mut self, masm: &mut M, mut emit: F)
     where
-        F: FnMut(&mut M, RegImm, RegImm, OperandSize),
+        F: FnMut(&mut M, Reg, RegImm, OperandSize),
         M: MacroAssembler,
     {
         let top = self.stack.peek().expect("value at stack top");
@@ -195,17 +195,12 @@ impl<'a> CodeGenContext<'a> {
                 .pop_i32_const()
                 .expect("i32 const value at stack top");
             let typed_reg = self.pop_to_reg(masm, None);
-            emit(
-                masm,
-                RegImm::reg(typed_reg.reg),
-                RegImm::i32(val),
-                OperandSize::S32,
-            );
+            emit(masm, typed_reg.reg, RegImm::i32(val), OperandSize::S32);
             self.stack.push(typed_reg.into());
         } else {
             let src = self.pop_to_reg(masm, None);
             let dst = self.pop_to_reg(masm, None);
-            emit(masm, dst.reg.into(), src.reg.into(), OperandSize::S32);
+            emit(masm, dst.reg, src.reg.into(), OperandSize::S32);
             self.free_reg(src);
             self.stack.push(dst.into());
         }
@@ -214,7 +209,7 @@ impl<'a> CodeGenContext<'a> {
     /// Prepares arguments for emitting an i64 binary operation.
     pub fn i64_binop<F, M>(&mut self, masm: &mut M, mut emit: F)
     where
-        F: FnMut(&mut M, RegImm, RegImm, OperandSize),
+        F: FnMut(&mut M, Reg, RegImm, OperandSize),
         M: MacroAssembler,
     {
         let top = self.stack.peek().expect("value at stack top");
@@ -224,17 +219,12 @@ impl<'a> CodeGenContext<'a> {
                 .pop_i64_const()
                 .expect("i64 const value at stack top");
             let typed_reg = self.pop_to_reg(masm, None);
-            emit(
-                masm,
-                RegImm::reg(typed_reg.reg),
-                RegImm::i64(val),
-                OperandSize::S64,
-            );
+            emit(masm, typed_reg.reg, RegImm::i64(val), OperandSize::S64);
             self.stack.push(typed_reg.into());
         } else {
             let src = self.pop_to_reg(masm, None);
             let dst = self.pop_to_reg(masm, None);
-            emit(masm, dst.reg.into(), src.reg.into(), OperandSize::S64);
+            emit(masm, dst.reg, src.reg.into(), OperandSize::S64);
             self.free_reg(src);
             self.stack.push(dst.into());
         }
diff --git a/winch/codegen/src/isa/aarch64/masm.rs b/winch/codegen/src/isa/aarch64/masm.rs
index 76f8157db18e..54ea9b4a4627 100644
--- a/winch/codegen/src/isa/aarch64/masm.rs
+++ b/winch/codegen/src/isa/aarch64/masm.rs
@@ -147,9 +147,9 @@ impl Masm for MacroAssembler {
         self.asm.finalize()
     }
 
-    fn mov(&mut self, src: RegImm, dst: RegImm, size: OperandSize) {
+    fn mov(&mut self, src: RegImm, dst: Reg, size: OperandSize) {
         match (src, dst) {
-            (RegImm::Imm(v), RegImm::Reg(rd)) => {
+            (RegImm::Imm(v), rd) => {
                 let imm = match v {
                     I::I32(v) => v as u64,
                     I::I64(v) => v,
@@ -160,10 +160,9 @@ impl Masm for MacroAssembler {
                 self.asm.load_constant(imm as u64, scratch);
                 self.asm.mov_rr(scratch, rd, size);
             }
-            (RegImm::Reg(rs), RegImm::Reg(rd)) => {
+            (RegImm::Reg(rs), rd) => {
                 self.asm.mov_rr(rs, rd, size);
             }
-            _ => Self::handle_invalid_two_form_operand_combination(src, dst),
         }
     }
 
@@ -171,9 +170,9 @@ impl Masm for MacroAssembler {
         todo!()
     }
 
-    fn add(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) {
+    fn add(&mut self, dst: Reg, lhs: Reg, rhs: RegImm, size: OperandSize) {
         match (rhs, lhs, dst) {
-            (RegImm::Imm(v), RegImm::Reg(rn), RegImm::Reg(rd)) => {
+            (RegImm::Imm(v), rn, rd) => {
                 let imm = match v {
                     I::I32(v) => v as u64,
                     I::I64(v) => v,
@@ -183,16 +182,15 @@ impl Masm for MacroAssembler {
                 self.asm.add_ir(imm, rn, rd, size);
             }
-            (RegImm::Reg(rm), RegImm::Reg(rn), RegImm::Reg(rd)) => {
+            (RegImm::Reg(rm), rn, rd) => {
                 self.asm.add_rrr(rm, rn, rd, size);
             }
-            _ => Self::handle_invalid_three_form_operand_combination(dst, lhs, rhs),
         }
     }
 
-    fn sub(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) {
+    fn sub(&mut self, dst: Reg, lhs: Reg, rhs: RegImm, size: OperandSize) {
         match (rhs, lhs, dst) {
-            (RegImm::Imm(v), RegImm::Reg(rn), RegImm::Reg(rd)) => {
+            (RegImm::Imm(v), rn, rd) => {
                 let imm = match v {
                     I::I32(v) => v as u64,
                     I::I64(v) => v,
@@ -202,16 +200,15 @@ impl Masm for MacroAssembler {
                 self.asm.sub_ir(imm, rn, rd, size);
             }
-            (RegImm::Reg(rm), RegImm::Reg(rn), RegImm::Reg(rd)) => {
+            (RegImm::Reg(rm), rn, rd) => {
                 self.asm.sub_rrr(rm, rn, rd, size);
             }
-            _ => Self::handle_invalid_three_form_operand_combination(dst, lhs, rhs),
         }
     }
 
-    fn mul(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) {
+    fn mul(&mut self, dst: Reg, lhs: Reg, rhs: RegImm, size: OperandSize) {
         match (rhs, lhs, dst) {
-            (RegImm::Imm(v), RegImm::Reg(rn), RegImm::Reg(rd)) => {
+            (RegImm::Imm(v), rn, rd) => {
                 let imm = match v {
                     I::I32(v) => v as u64,
                     I::I64(v) => v,
@@ -221,30 +218,29 @@ impl Masm for MacroAssembler {
                 self.asm.mul_ir(imm, rn, rd, size);
             }
-            (RegImm::Reg(rm), RegImm::Reg(rn), RegImm::Reg(rd)) => {
+            (RegImm::Reg(rm), rn, rd) => {
                 self.asm.mul_rrr(rm, rn, rd, size);
             }
-            _ => Self::handle_invalid_three_form_operand_combination(dst, lhs, rhs),
         }
     }
 
-    fn float_neg(&mut self, _dst: Reg, _src: RegImm, _size: OperandSize) {
+    fn float_neg(&mut self, _dst: Reg, _size: OperandSize) {
         todo!()
     }
 
-    fn float_abs(&mut self, _dst: Reg, _src: RegImm, _size: OperandSize) {
+    fn float_abs(&mut self, _dst: Reg, _size: OperandSize) {
         todo!()
     }
 
-    fn and(&mut self, _dst: RegImm, _lhs: RegImm, _rhs: RegImm, _size: OperandSize) {
+    fn and(&mut self, _dst: Reg, _lhs: Reg, _rhs: RegImm, _size: OperandSize) {
         todo!()
     }
 
-    fn or(&mut self, _dst: RegImm, _lhs: RegImm, _rhs: RegImm, _size: OperandSize) {
+    fn or(&mut self, _dst: Reg, _lhs: Reg, _rhs: RegImm, _size: OperandSize) {
         todo!()
     }
 
-    fn xor(&mut self, _dst: RegImm, _lhs: RegImm, _rhs: RegImm, _size: OperandSize) {
+    fn xor(&mut self, _dst: Reg, _lhs: Reg, _rhs: RegImm, _size: OperandSize) {
         todo!()
     }
 
@@ -351,15 +347,4 @@ impl MacroAssembler {
         let shadow_sp = regs::shadow_sp();
         self.asm.mov_rr(sp, shadow_sp, OperandSize::S64);
     }
-
-    fn handle_invalid_two_form_operand_combination(src: RegImm, dst: RegImm) {
-        panic!("Invalid operand combination; src={:?}, dst={:?}", src, dst);
-    }
-
-    fn handle_invalid_three_form_operand_combination(dst: RegImm, lhs: RegImm, rhs: RegImm) {
-        panic!(
-            "Invalid operand combination; dst={:?}, lhs={:?}, rhs={:?}",
-            dst, lhs, rhs
-        );
-    }
 }
diff --git a/winch/codegen/src/isa/x64/masm.rs b/winch/codegen/src/isa/x64/masm.rs
index ee83b6920cbd..b15253308f4f 100644
--- a/winch/codegen/src/isa/x64/masm.rs
+++ b/winch/codegen/src/isa/x64/masm.rs
@@ -179,14 +179,14 @@ impl Masm for MacroAssembler {
         self.asm.xor_rr(reg, reg, OperandSize::S32);
     }
 
-    fn mov(&mut self, src: RegImm, dst: RegImm, size: OperandSize) {
+    fn mov(&mut self, src: RegImm, dst: Reg, size: OperandSize) {
         match (src, dst) {
-            rr @ (RegImm::Reg(src), RegImm::Reg(dst)) => match (src.class(), dst.class()) {
+            rr @ (RegImm::Reg(src), dst) => match (src.class(), dst.class()) {
                 (RegClass::Int, RegClass::Int) => self.asm.mov_rr(src, dst, size),
                 (RegClass::Float, RegClass::Float) => self.asm.xmm_mov_rr(src, dst, size),
                 _ => Self::handle_invalid_operand_combination(rr.0, rr.1),
             },
-            (RegImm::Imm(imm), RegImm::Reg(dst)) => match imm {
+            (RegImm::Imm(imm), dst) => match imm {
                 I::I32(v) => self.asm.mov_ir(v as u64, dst, size),
                 I::I64(v) => self.asm.mov_ir(v as u64, dst, size),
                 I::F32(v) => {
@@ -198,7 +198,6 @@ impl Masm for MacroAssembler {
                     self.asm.xmm_mov_mr(&addr, dst, size);
                 }
             },
-            _ => Self::handle_invalid_operand_combination(src, dst),
         }
     }
 
@@ -206,14 +205,14 @@ impl Masm for MacroAssembler {
         match (src.class(), dst.class()) {
             (RegClass::Int, RegClass::Int) => self.asm.cmov(src, dst, cc, size),
             (RegClass::Float, RegClass::Float) => self.asm.xmm_cmov(src, dst, cc, size),
-            _ => Self::handle_invalid_operand_combination(src.into(), dst.into()),
+            _ => Self::handle_invalid_operand_combination(src, dst),
         }
     }
 
-    fn add(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) {
+    fn add(&mut self, dst: Reg, lhs: Reg, rhs: RegImm, size: OperandSize) {
         Self::ensure_two_argument_form(&dst, &lhs);
         match (rhs, dst) {
-            (RegImm::Imm(imm), RegImm::Reg(reg)) => {
+            (RegImm::Imm(imm), reg) => {
                 if let Some(v) = imm.to_i32() {
                     self.asm.add_ir(v, reg, size);
                 } else {
@@ -223,17 +222,16 @@ impl Masm for MacroAssembler {
                 }
             }
-            (RegImm::Reg(src), RegImm::Reg(dst)) => {
+            (RegImm::Reg(src), dst) => {
                 self.asm.add_rr(src, dst, size);
             }
-            _ => Self::handle_invalid_operand_combination(rhs, dst),
         }
     }
 
-    fn sub(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) {
+    fn sub(&mut self, dst: Reg, lhs: Reg, rhs: RegImm, size: OperandSize) {
         Self::ensure_two_argument_form(&dst, &lhs);
         match (rhs, dst) {
-            (RegImm::Imm(imm), RegImm::Reg(reg)) => {
+            (RegImm::Imm(imm), reg) => {
                 if let Some(v) = imm.to_i32() {
                     self.asm.sub_ir(v, reg, size);
                 } else {
@@ -243,17 +241,16 @@ impl Masm for MacroAssembler {
                 }
             }
-            (RegImm::Reg(src), RegImm::Reg(dst)) => {
+            (RegImm::Reg(src), dst) => {
                 self.asm.sub_rr(src, dst, size);
             }
-            _ => Self::handle_invalid_operand_combination(rhs, dst),
         }
     }
 
-    fn mul(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) {
+    fn mul(&mut self, dst: Reg, lhs: Reg, rhs: RegImm, size: OperandSize) {
         Self::ensure_two_argument_form(&dst, &lhs);
         match (rhs, dst) {
-            (RegImm::Imm(imm), RegImm::Reg(reg)) => {
+            (RegImm::Imm(imm), reg) => {
                 if let Some(v) = imm.to_i32() {
                     self.asm.mul_ir(v, reg, size);
                 } else {
@@ -263,15 +260,13 @@ impl Masm for MacroAssembler {
                 }
             }
-            (RegImm::Reg(src), RegImm::Reg(dst)) => {
+            (RegImm::Reg(src), dst) => {
                 self.asm.mul_rr(src, dst, size);
             }
-            _ => Self::handle_invalid_operand_combination(rhs, dst),
         }
     }
 
-    fn float_neg(&mut self, dst: Reg, src: RegImm, size: OperandSize) {
-        Self::ensure_two_argument_form(&dst.into(), &src);
+    fn float_neg(&mut self, dst: Reg, size: OperandSize) {
         assert_eq!(dst.class(), RegClass::Float);
         let mask = match size {
             OperandSize::S32 => I::I32(0x80000000),
@@ -285,8 +280,7 @@ impl Masm for MacroAssembler {
         self.asm.xor_rr(scratch_xmm, dst, size);
     }
 
-    fn float_abs(&mut self, dst: Reg, src: RegImm, size: OperandSize) {
-        Self::ensure_two_argument_form(&dst.into(), &src);
+    fn float_abs(&mut self, dst: Reg, size: OperandSize) {
         assert_eq!(dst.class(), RegClass::Float);
         let mask = match size {
             OperandSize::S32 => I::I32(0x7fffffff),
@@ -300,10 +294,10 @@ impl Masm for MacroAssembler {
         self.asm.and_rr(scratch_xmm, dst, size);
     }
 
-    fn and(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) {
+    fn and(&mut self, dst: Reg, lhs: Reg, rhs: RegImm, size: OperandSize) {
         Self::ensure_two_argument_form(&dst, &lhs);
         match (rhs, dst) {
-            (RegImm::Imm(imm), RegImm::Reg(reg)) => {
+            (RegImm::Imm(imm), reg) => {
                 if let Some(v) = imm.to_i32() {
                     self.asm.and_ir(v, reg, size);
                 } else {
@@ -313,17 +307,16 @@ impl Masm for MacroAssembler {
                 }
             }
-            (RegImm::Reg(src), RegImm::Reg(dst)) => {
+            (RegImm::Reg(src), dst) => {
                 self.asm.and_rr(src, dst, size);
             }
-            _ => Self::handle_invalid_operand_combination(rhs, dst),
         }
     }
 
-    fn or(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) {
+    fn or(&mut self, dst: Reg, lhs: Reg, rhs: RegImm, size: OperandSize) {
         Self::ensure_two_argument_form(&dst, &lhs);
         match (rhs, dst) {
-            (RegImm::Imm(imm), RegImm::Reg(reg)) => {
+            (RegImm::Imm(imm), reg) => {
                 if let Some(v) = imm.to_i32() {
                     self.asm.or_ir(v, reg, size);
                 } else {
@@ -333,17 +326,16 @@ impl Masm for MacroAssembler {
                 }
             }
-            (RegImm::Reg(src), RegImm::Reg(dst)) => {
+            (RegImm::Reg(src), dst) => {
                 self.asm.or_rr(src, dst, size);
             }
-            _ => Self::handle_invalid_operand_combination(rhs, dst),
         }
     }
 
-    fn xor(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) {
+    fn xor(&mut self, dst: Reg, lhs: Reg, rhs: RegImm, size: OperandSize) {
         Self::ensure_two_argument_form(&dst, &lhs);
         match (rhs, dst) {
-            (RegImm::Imm(imm), RegImm::Reg(reg)) => {
+            (RegImm::Imm(imm), reg) => {
                 if let Some(v) = imm.to_i32() {
                     self.asm.xor_ir(v, reg, size);
                 } else {
@@ -353,10 +345,9 @@ impl Masm for MacroAssembler {
                 }
             }
-            (RegImm::Reg(src), RegImm::Reg(dst)) => {
+            (RegImm::Reg(src), dst) => {
                 self.asm.xor_rr(src, dst, size);
             }
-            _ => Self::handle_invalid_operand_combination(rhs, dst),
         }
     }
 
@@ -590,7 +581,7 @@ impl Masm for MacroAssembler {
 
         // x -= (x >> 1) & m1;
         self.asm.shift_ir(1u8, dst.into(), ShiftKind::ShrU, size);
-        let lhs = dst.reg.into();
+        let lhs = dst.reg;
         self.and(lhs, lhs, RegImm::i64(masks[0]), size);
         self.asm.sub_rr(dst.into(), tmp, size);
@@ -679,11 +670,15 @@ impl MacroAssembler {
         }
     }
 
-    fn handle_invalid_operand_combination<T>(src: RegImm, dst: RegImm) -> T {
-        panic!("Invalid operand combination; src={:?}, dst={:?}", src, dst);
+    fn handle_invalid_operand_combination<T>(src: impl Into<RegImm>, dst: impl Into<RegImm>) -> T {
+        panic!(
+            "Invalid operand combination; src={:?}, dst={:?}",
+            src.into(),
+            dst.into()
+        );
     }
 
-    fn ensure_two_argument_form(dst: &RegImm, lhs: &RegImm) {
+    fn ensure_two_argument_form(dst: &Reg, lhs: &Reg) {
         assert!(
             dst == lhs,
             "the destination and first source argument must be the same, dst={:?}, lhs={:?}",
diff --git a/winch/codegen/src/masm.rs b/winch/codegen/src/masm.rs
index 0ad22b579cae..f5852ff6bfd2 100644
--- a/winch/codegen/src/masm.rs
+++ b/winch/codegen/src/masm.rs
@@ -303,34 +303,34 @@ pub(crate) trait MacroAssembler {
     fn pop(&mut self, dst: Reg, size: OperandSize);
 
     /// Perform a move.
-    fn mov(&mut self, src: RegImm, dst: RegImm, size: OperandSize);
+    fn mov(&mut self, src: RegImm, dst: Reg, size: OperandSize);
 
     /// Perform a conditional move.
     fn cmov(&mut self, src: Reg, dst: Reg, cc: CmpKind, size: OperandSize);
 
     /// Perform add operation.
-    fn add(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize);
+    fn add(&mut self, dst: Reg, lhs: Reg, rhs: RegImm, size: OperandSize);
 
     /// Perform subtraction operation.
-    fn sub(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize);
+    fn sub(&mut self, dst: Reg, lhs: Reg, rhs: RegImm, size: OperandSize);
 
     /// Perform multiplication operation.
-    fn mul(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize);
+    fn mul(&mut self, dst: Reg, lhs: Reg, rhs: RegImm, size: OperandSize);
 
     /// Perform a floating point abs operation.
-    fn float_abs(&mut self, dst: Reg, src: RegImm, size: OperandSize);
+    fn float_abs(&mut self, dst: Reg, size: OperandSize);
 
     /// Perform a floating point negation operation.
-    fn float_neg(&mut self, dst: Reg, src: RegImm, size: OperandSize);
+    fn float_neg(&mut self, dst: Reg, size: OperandSize);
 
     /// Perform logical and operation.
-    fn and(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize);
+    fn and(&mut self, dst: Reg, lhs: Reg, rhs: RegImm, size: OperandSize);
 
     /// Perform logical or operation.
-    fn or(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize);
+    fn or(&mut self, dst: Reg, lhs: Reg, rhs: RegImm, size: OperandSize);
 
     /// Perform logical exclusive or operation.
-    fn xor(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize);
+    fn xor(&mut self, dst: Reg, lhs: Reg, rhs: RegImm, size: OperandSize);
 
     /// Perform a shift operation.
     /// Shift is special in that some architectures have specific expectations
diff --git a/winch/codegen/src/visitor.rs b/winch/codegen/src/visitor.rs
index 097a2279a7f2..3b0a90fb6877 100644
--- a/winch/codegen/src/visitor.rs
+++ b/winch/codegen/src/visitor.rs
@@ -149,28 +149,28 @@ where
     fn visit_f32_abs(&mut self) {
         self.context
             .unop(self.masm, OperandSize::S32, &mut |masm, reg, size| {
-                masm.float_abs(reg, RegImm::Reg(reg), size);
+                masm.float_abs(reg, size);
             });
     }
 
     fn visit_f64_abs(&mut self) {
         self.context
             .unop(self.masm, OperandSize::S64, &mut |masm, reg, size| {
-                masm.float_abs(reg, RegImm::Reg(reg), size);
+                masm.float_abs(reg, size);
             });
     }
 
     fn visit_f32_neg(&mut self) {
         self.context
             .unop(self.masm, OperandSize::S32, &mut |masm, reg, size| {
-                masm.float_neg(reg, RegImm::Reg(reg), size);
+                masm.float_neg(reg, size);
             });
     }
 
     fn visit_f64_neg(&mut self) {
         self.context
             .unop(self.masm, OperandSize::S64, &mut |masm, reg, size| {
-                masm.float_neg(reg, RegImm::Reg(reg), size);
+                masm.float_neg(reg, size);
             });
     }
 
@@ -745,14 +745,14 @@ where
 {
     fn cmp_i32s(&mut self, kind: CmpKind) {
         self.context.i32_binop(self.masm, |masm, dst, src, size| {
-            masm.cmp_with_set(src, dst.get_reg().unwrap(), kind, size);
+            masm.cmp_with_set(src, dst, kind, size);
         });
     }
 
    fn cmp_i64s(&mut self, kind: CmpKind) {
         self.context
             .i64_binop(self.masm, move |masm, dst, src, size| {
-                masm.cmp_with_set(src, dst.get_reg().unwrap(), kind, size);
+                masm.cmp_with_set(src, dst, kind, size);
             });
     }
 }
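Reviewer note (not part of the patch): a minimal, self-contained sketch of why the new
signatures let the backends drop their invalid-operand fallback arms. `Reg` and `RegImm`
below are simplified stand-ins for winch's actual types, and `add_before`/`add_after` are
hypothetical helpers written for illustration, not functions from this codebase.

// A register identifier; destinations are always registers.
#[derive(Debug, Clone, Copy, PartialEq)]
struct Reg(u8);

// A register-or-immediate operand; only sources can be immediates.
#[derive(Debug, Clone, Copy)]
enum RegImm {
    Reg(Reg),
    Imm(i64),
}

impl From<Reg> for RegImm {
    fn from(r: Reg) -> Self {
        RegImm::Reg(r)
    }
}

// Before: the destination is a `RegImm`, so the match must handle the
// nonsensical "immediate destination" case with a panicking fallback arm.
fn add_before(dst: RegImm, _lhs: RegImm, rhs: RegImm) {
    match (rhs, dst) {
        (RegImm::Imm(v), RegImm::Reg(rd)) => println!("add_ir {v}, {rd:?}"),
        (RegImm::Reg(rs), RegImm::Reg(rd)) => println!("add_rr {rs:?}, {rd:?}"),
        _ => panic!("invalid operand combination"),
    }
}

// After: the destination is a `Reg` by construction, the invalid case is
// unrepresentable, and the match over the source is exhaustive.
fn add_after(dst: Reg, _lhs: Reg, rhs: RegImm) {
    match rhs {
        RegImm::Imm(v) => println!("add_ir {v}, {dst:?}"),
        RegImm::Reg(rs) => println!("add_rr {rs:?}, {dst:?}"),
    }
}

fn main() {
    let (r0, r1) = (Reg(0), Reg(1));
    add_before(r0.into(), r0.into(), RegImm::Imm(42));
    add_after(r0, r0, RegImm::Reg(r1));
}

The same idea applies to `float_abs` and `float_neg`: with destination and source known
to be the same register, the runtime `ensure_two_argument_form` check on that pair
becomes unnecessary and the check moves into the type system.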