diff --git a/cranelift/codegen/src/isa/aarch64/inst/args.rs b/cranelift/codegen/src/isa/aarch64/inst/args.rs index 2893c9e0d57c..28a1c3405bc8 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/args.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/args.rs @@ -145,20 +145,10 @@ impl AMode { extendop: op, } } - - pub(crate) fn with_allocs(&self, _allocs: &mut AllocationConsumer) -> Self { - self.clone() - } } pub use crate::isa::aarch64::lower::isle::generated_code::PairAMode; -impl PairAMode { - pub(crate) fn with_allocs(&self, _allocs: &mut AllocationConsumer) -> Self { - self.clone() - } -} - //============================================================================= // Instruction sub-components (conditions, branches and branch targets): // definitions diff --git a/cranelift/codegen/src/isa/aarch64/inst/emit.rs b/cranelift/codegen/src/isa/aarch64/inst/emit.rs index 66dd4242ea85..9dc389d111d9 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/emit.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/emit.rs @@ -997,7 +997,7 @@ impl MachInstEmit for Inst { | &Inst::FpuLoad64 { rd, ref mem, flags } | &Inst::FpuLoad128 { rd, ref mem, flags } => { let rd = allocs.next_writable(rd); - let mem = mem.with_allocs(&mut allocs); + let mem = mem.clone(); let access_ty = self.mem_type().unwrap(); let (mem_insts, mem) = mem_finalize(Some(sink), &mem, access_ty, state); @@ -1138,7 +1138,7 @@ impl MachInstEmit for Inst { | &Inst::FpuStore64 { rd, ref mem, flags } | &Inst::FpuStore128 { rd, ref mem, flags } => { let rd = allocs.next(rd); - let mem = mem.with_allocs(&mut allocs); + let mem = mem.clone(); let access_ty = self.mem_type().unwrap(); let (mem_insts, mem) = mem_finalize(Some(sink), &mem, access_ty, state); @@ -1233,7 +1233,7 @@ impl MachInstEmit for Inst { } => { let rt = allocs.next(rt); let rt2 = allocs.next(rt2); - let mem = mem.with_allocs(&mut allocs); + let mem = mem.clone(); if let Some(trap_code) = flags.trap_code() { // Register the offset at which the actual store instruction starts. sink.add_trap(trap_code); @@ -1264,7 +1264,7 @@ impl MachInstEmit for Inst { } => { let rt = allocs.next(rt.to_reg()); let rt2 = allocs.next(rt2.to_reg()); - let mem = mem.with_allocs(&mut allocs); + let mem = mem.clone(); if let Some(trap_code) = flags.trap_code() { // Register the offset at which the actual load instruction starts. sink.add_trap(trap_code); @@ -1302,7 +1302,7 @@ impl MachInstEmit for Inst { } => { let rt = allocs.next(rt.to_reg()); let rt2 = allocs.next(rt2.to_reg()); - let mem = mem.with_allocs(&mut allocs); + let mem = mem.clone(); if let Some(trap_code) = flags.trap_code() { // Register the offset at which the actual load instruction starts. @@ -1347,7 +1347,7 @@ impl MachInstEmit for Inst { } => { let rt = allocs.next(rt); let rt2 = allocs.next(rt2); - let mem = mem.with_allocs(&mut allocs); + let mem = mem.clone(); if let Some(trap_code) = flags.trap_code() { // Register the offset at which the actual store instruction starts. 
@@ -1416,7 +1416,6 @@ impl MachInstEmit for Inst { } &Inst::MovFromPReg { rd, rm } => { let rd = allocs.next_writable(rd); - allocs.next_fixed_nonallocatable(rm); let rm: Reg = rm.into(); debug_assert!([ regs::fp_reg(), @@ -1431,7 +1430,6 @@ impl MachInstEmit for Inst { Inst::Mov { size, rd, rm }.emit(&[], sink, emit_info, state); } &Inst::MovToPReg { rd, rm } => { - allocs.next_fixed_nonallocatable(rd); let rd: Writable = Writable::from_reg(rd.into()); let rm = allocs.next(rm); debug_assert!([ @@ -3454,7 +3452,7 @@ impl MachInstEmit for Inst { } &Inst::LoadAddr { rd, ref mem } => { let rd = allocs.next_writable(rd); - let mem = mem.with_allocs(&mut allocs); + let mem = mem.clone(); let (mem_insts, mem) = mem_finalize(Some(sink), &mem, I8, state); for inst in mem_insts.into_iter() { inst.emit(&[], sink, emit_info, state); diff --git a/cranelift/codegen/src/isa/aarch64/inst/mod.rs b/cranelift/codegen/src/isa/aarch64/inst/mod.rs index 7cffbce30eea..7bed4c982b53 100644 --- a/cranelift/codegen/src/isa/aarch64/inst/mod.rs +++ b/cranelift/codegen/src/isa/aarch64/inst/mod.rs @@ -397,7 +397,6 @@ impl Inst { // Instructions: get_regs fn memarg_operands(memarg: &mut AMode, collector: &mut impl OperandVisitor) { - // This should match `AMode::with_allocs()`. match memarg { AMode::Unscaled { rn, .. } | AMode::UnsignedOffset { rn, .. } => { collector.reg_use(rn); @@ -421,7 +420,6 @@ fn memarg_operands(memarg: &mut AMode, collector: &mut impl OperandVisitor) { } fn pairmemarg_operands(pairmemarg: &mut PairAMode, collector: &mut impl OperandVisitor) { - // This should match `PairAMode::with_allocs()`. match pairmemarg { PairAMode::SignedOffset { reg, .. } => { collector.reg_use(reg); @@ -1265,9 +1263,6 @@ impl Inst { } } - // N.B.: order of `allocs` consumption (via register - // pretty-printing or memarg.with_allocs()) needs to match the - // order in `aarch64_get_operands` above. 
match self { &Inst::Nop0 => "nop-zero-len".to_string(), &Inst::Nop4 => "nop".to_string(), @@ -1416,7 +1411,7 @@ impl Inst { }; let rd = pretty_print_ireg(rd.to_reg(), size, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let access_ty = self.mem_type().unwrap(); let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state); @@ -1443,7 +1438,7 @@ impl Inst { }; let rd = pretty_print_ireg(rd, size, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let access_ty = self.mem_type().unwrap(); let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state); @@ -1454,7 +1449,7 @@ impl Inst { } => { let rt = pretty_print_ireg(rt, OperandSize::Size64, allocs); let rt2 = pretty_print_ireg(rt2, OperandSize::Size64, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let mem = mem.pretty_print_default(); format!("stp {}, {}, {}", rt, rt2, mem) } @@ -1463,7 +1458,7 @@ impl Inst { } => { let rt = pretty_print_ireg(rt.to_reg(), OperandSize::Size64, allocs); let rt2 = pretty_print_ireg(rt2.to_reg(), OperandSize::Size64, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let mem = mem.pretty_print_default(); format!("ldp {}, {}, {}", rt, rt2, mem) } @@ -1474,12 +1469,10 @@ impl Inst { } &Inst::MovFromPReg { rd, rm } => { let rd = pretty_print_ireg(rd.to_reg(), OperandSize::Size64, allocs); - allocs.next_fixed_nonallocatable(rm); let rm = show_ireg_sized(rm.into(), OperandSize::Size64); format!("mov {}, {}", rd, rm) } &Inst::MovToPReg { rd, rm } => { - allocs.next_fixed_nonallocatable(rd); let rd = show_ireg_sized(rd.into(), OperandSize::Size64); let rm = pretty_print_ireg(rm, OperandSize::Size64, allocs); format!("mov {}, {}", rd, rm) @@ -1832,14 +1825,14 @@ impl Inst { } &Inst::FpuLoad32 { rd, ref mem, .. } => { let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size32, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let access_ty = self.mem_type().unwrap(); let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state); format!("{}ldr {}, {}", mem_str, rd, mem) } &Inst::FpuLoad64 { rd, ref mem, .. } => { let rd = pretty_print_vreg_scalar(rd.to_reg(), ScalarSize::Size64, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let access_ty = self.mem_type().unwrap(); let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state); format!("{}ldr {}, {}", mem_str, rd, mem) @@ -1847,21 +1840,21 @@ impl Inst { &Inst::FpuLoad128 { rd, ref mem, .. } => { let rd = pretty_print_reg(rd.to_reg(), allocs); let rd = "q".to_string() + &rd[1..]; - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let access_ty = self.mem_type().unwrap(); let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state); format!("{}ldr {}, {}", mem_str, rd, mem) } &Inst::FpuStore32 { rd, ref mem, .. } => { let rd = pretty_print_vreg_scalar(rd, ScalarSize::Size32, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let access_ty = self.mem_type().unwrap(); let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state); format!("{}str {}, {}", mem_str, rd, mem) } &Inst::FpuStore64 { rd, ref mem, .. } => { let rd = pretty_print_vreg_scalar(rd, ScalarSize::Size64, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let access_ty = self.mem_type().unwrap(); let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state); format!("{}str {}, {}", mem_str, rd, mem) @@ -1869,7 +1862,7 @@ impl Inst { &Inst::FpuStore128 { rd, ref mem, .. 
} => { let rd = pretty_print_reg(rd, allocs); let rd = "q".to_string() + &rd[1..]; - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let access_ty = self.mem_type().unwrap(); let (mem_str, mem) = mem_finalize_for_show(&mem, access_ty, state); format!("{}str {}, {}", mem_str, rd, mem) @@ -1879,7 +1872,7 @@ impl Inst { } => { let rt = pretty_print_vreg_scalar(rt.to_reg(), ScalarSize::Size64, allocs); let rt2 = pretty_print_vreg_scalar(rt2.to_reg(), ScalarSize::Size64, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let mem = mem.pretty_print_default(); format!("ldp {}, {}, {}", rt, rt2, mem) @@ -1889,7 +1882,7 @@ impl Inst { } => { let rt = pretty_print_vreg_scalar(rt, ScalarSize::Size64, allocs); let rt2 = pretty_print_vreg_scalar(rt2, ScalarSize::Size64, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let mem = mem.pretty_print_default(); format!("stp {}, {}, {}", rt, rt2, mem) @@ -1899,7 +1892,7 @@ impl Inst { } => { let rt = pretty_print_vreg_scalar(rt.to_reg(), ScalarSize::Size128, allocs); let rt2 = pretty_print_vreg_scalar(rt2.to_reg(), ScalarSize::Size128, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let mem = mem.pretty_print_default(); format!("ldp {}, {}, {}", rt, rt2, mem) @@ -1909,7 +1902,7 @@ impl Inst { } => { let rt = pretty_print_vreg_scalar(rt, ScalarSize::Size128, allocs); let rt2 = pretty_print_vreg_scalar(rt2, ScalarSize::Size128, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let mem = mem.pretty_print_default(); format!("stp {}, {}, {}", rt, rt2, mem) @@ -2781,7 +2774,7 @@ impl Inst { // expansion stage (i.e., legalization, but without the slow edit-in-place // of the existing legalization framework). let rd = allocs.next_writable(rd); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (mem_insts, mem) = mem_finalize(None, &mem, I8, state); let mut ret = String::new(); for inst in mem_insts.into_iter() { diff --git a/cranelift/codegen/src/isa/riscv64/inst/args.rs b/cranelift/codegen/src/isa/riscv64/inst/args.rs index 15bb09f0a7c2..561d18da8434 100644 --- a/cranelift/codegen/src/isa/riscv64/inst/args.rs +++ b/cranelift/codegen/src/isa/riscv64/inst/args.rs @@ -113,10 +113,6 @@ pub enum AMode { } impl AMode { - pub(crate) fn with_allocs(self, _allocs: &mut AllocationConsumer) -> Self { - self - } - /// Add the registers referenced by this AMode to `collector`. pub(crate) fn get_operands(&mut self, collector: &mut impl OperandVisitor) { match self { @@ -176,10 +172,6 @@ impl AMode { | &AMode::NominalSPOffset(..) 
=> None, } } - - pub(crate) fn to_string_with_alloc(&self, allocs: &mut AllocationConsumer) -> String { - format!("{}", self.clone().with_allocs(allocs)) - } } impl Display for AMode { diff --git a/cranelift/codegen/src/isa/riscv64/inst/emit.rs b/cranelift/codegen/src/isa/riscv64/inst/emit.rs index e0ceb87a2841..f7318af99998 100644 --- a/cranelift/codegen/src/isa/riscv64/inst/emit.rs +++ b/cranelift/codegen/src/isa/riscv64/inst/emit.rs @@ -223,17 +223,13 @@ impl MachInstEmit for Inst { fn emit( &self, - allocs: &[Allocation], + _allocs: &[Allocation], sink: &mut MachBuffer, emit_info: &Self::Info, state: &mut EmitState, ) { - // Transform this into a instruction with all the physical regs - let mut allocs = AllocationConsumer::new(allocs); - let inst = self.clone().allocate(&mut allocs); - // Check if we need to update the vector state before emitting this instruction - if let Some(expected) = inst.expected_vstate() { + if let Some(expected) = self.expected_vstate() { if state.vstate != EmitVState::Known(expected.clone()) { // Update the vector state. Inst::VecSetState { @@ -252,10 +248,10 @@ impl MachInstEmit for Inst { let mut start_off = sink.cur_offset(); // First try to emit this as a compressed instruction - let res = inst.try_emit_compressed(sink, emit_info, state, &mut start_off); + let res = self.try_emit_compressed(sink, emit_info, state, &mut start_off); if res.is_none() { // If we can't lets emit it as a normal instruction - inst.emit_uncompressed(sink, emit_info, state, &mut start_off); + self.emit_uncompressed(sink, emit_info, state, &mut start_off); } let end_off = sink.cur_offset(); @@ -2605,10 +2601,6 @@ impl Inst { } }; } - - fn allocate(self, _allocs: &mut AllocationConsumer) -> Self { - self - } } fn emit_return_call_common_sequence( diff --git a/cranelift/codegen/src/isa/riscv64/inst/mod.rs b/cranelift/codegen/src/isa/riscv64/inst/mod.rs index 376a1cb5a303..33fd8c04c32a 100644 --- a/cranelift/codegen/src/isa/riscv64/inst/mod.rs +++ b/cranelift/codegen/src/isa/riscv64/inst/mod.rs @@ -934,9 +934,9 @@ impl Inst { reg_name(reg) }; - let format_vec_amode = |amode: &VecAMode, allocs: &mut AllocationConsumer| -> String { + let format_vec_amode = |amode: &VecAMode, _allocs: &mut AllocationConsumer| -> String { match amode { - VecAMode::UnitStride { base } => base.to_string_with_alloc(allocs), + VecAMode::UnitStride { base } => base.to_string(), } }; @@ -1346,7 +1346,7 @@ impl Inst { from, flags: _flags, } => { - let base = from.to_string_with_alloc(allocs); + let base = from.to_string(); let rd = format_reg(rd.to_reg(), allocs); format!("{} {},{}", op.op_name(), rd, base,) } @@ -1356,7 +1356,7 @@ impl Inst { op, flags: _flags, } => { - let base = to.to_string_with_alloc(allocs); + let base = to.to_string(); let src = format_reg(src, allocs); format!("{} {},{}", op.op_name(), src, base,) } @@ -1497,7 +1497,7 @@ impl Inst { format!("elf_tls_get_addr {rd},{}", name.display(None)) } &MInst::LoadAddr { ref rd, ref mem } => { - let rs = mem.to_string_with_alloc(allocs); + let rs = mem.to_string(); let rd = format_reg(rd.to_reg(), allocs); format!("load_addr {},{}", rd, rs) } diff --git a/cranelift/codegen/src/isa/s390x/inst/args.rs b/cranelift/codegen/src/isa/s390x/inst/args.rs index f14333bfb741..294ac21135de 100644 --- a/cranelift/codegen/src/isa/s390x/inst/args.rs +++ b/cranelift/codegen/src/isa/s390x/inst/args.rs @@ -101,11 +101,6 @@ impl MemArg { MemArg::NominalSPOffset { .. } => MemFlags::trusted(), } } - - /// Edit registers with allocations. 
- pub fn with_allocs(&self, _allocs: &mut AllocationConsumer) -> Self { - self.clone() - } } /// A memory argument for an instruction with two memory operands. @@ -153,11 +148,6 @@ impl MemArgPair { _ => None, } } - - /// Edit registers with allocations. - pub fn with_allocs(&self, _allocs: &mut AllocationConsumer) -> Self { - self.clone() - } } //============================================================================= diff --git a/cranelift/codegen/src/isa/s390x/inst/emit.rs b/cranelift/codegen/src/isa/s390x/inst/emit.rs index bf43ebd9e51e..cee096d92596 100644 --- a/cranelift/codegen/src/isa/s390x/inst/emit.rs +++ b/cranelift/codegen/src/isa/s390x/inst/emit.rs @@ -1533,7 +1533,7 @@ impl Inst { let rd = allocs.next_writable(rd); let ri = allocs.next(ri); debug_assert_eq!(rd.to_reg(), ri); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (opcode_rx, opcode_rxy) = match alu_op { ALUOp::Add32 => (Some(0x5a), Some(0xe35a)), // A(Y) @@ -1952,7 +1952,7 @@ impl Inst { } &Inst::CmpRX { op, rn, ref mem } => { let rn = allocs.next(rn); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (opcode_rx, opcode_rxy, opcode_ril) = match op { CmpOp::CmpS32 => (Some(0x59), Some(0xe359), Some(0xc6d)), // C(Y), CRL @@ -2068,7 +2068,7 @@ impl Inst { } => { let rd = allocs.next_writable(rd); let rn = allocs.next(rn); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let opcode = match alu_op { ALUOp::Add32 => 0xebf8, // LAA @@ -2146,7 +2146,7 @@ impl Inst { let ri = allocs.next(ri); debug_assert_eq!(rd.to_reg(), ri); let rn = allocs.next(rn); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (opcode_rs, opcode_rsy) = match self { &Inst::AtomicCas32 { .. } => (Some(0xba), Some(0xeb14)), // CS(Y) @@ -2179,7 +2179,7 @@ impl Inst { | &Inst::LoadRev32 { rd, ref mem } | &Inst::LoadRev64 { rd, ref mem } => { let rd = allocs.next_writable(rd); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (opcode_rx, opcode_rxy, opcode_ril) = match self { &Inst::Load32 { .. } => (Some(0x58), Some(0xe358), Some(0xc4d)), // L(Y), LRL @@ -2213,7 +2213,7 @@ impl Inst { | &Inst::StoreRev32 { rd, ref mem } | &Inst::StoreRev64 { rd, ref mem } => { let rd = allocs.next(rd); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (opcode_rx, opcode_rxy, opcode_ril) = match self { &Inst::Store8 { .. } => (Some(0x42), Some(0xe372), None), // STC(Y) @@ -2230,7 +2230,7 @@ impl Inst { ); } &Inst::StoreImm8 { imm, ref mem } => { - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let opcode_si = 0x92; // MVI let opcode_siy = 0xeb52; // MVIY @@ -2241,7 +2241,7 @@ impl Inst { &Inst::StoreImm16 { imm, ref mem } | &Inst::StoreImm32SExt16 { imm, ref mem } | &Inst::StoreImm64SExt16 { imm, ref mem } => { - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let opcode = match self { &Inst::StoreImm16 { .. 
} => 0xe544, // MVHHI @@ -2256,14 +2256,14 @@ impl Inst { ref src, len_minus_one, } => { - let dst = dst.with_allocs(allocs); - let src = src.with_allocs(allocs); + let dst = dst.clone(); + let src = src.clone(); let opcode = 0xd2; // MVC mem_mem_emit(&dst, &src, len_minus_one, opcode, true, sink, state); } &Inst::LoadMultiple64 { rt, rt2, ref mem } => { - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let opcode = 0xeb04; // LMG let rt = rt.to_reg(); @@ -2281,7 +2281,7 @@ impl Inst { ); } &Inst::StoreMultiple64 { rt, rt2, ref mem } => { - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let opcode = 0xeb24; // STMG mem_rs_emit( @@ -2299,7 +2299,7 @@ impl Inst { &Inst::LoadAddr { rd, ref mem } => { let rd = allocs.next_writable(rd); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let opcode_rx = Some(0x41); // LA let opcode_rxy = Some(0xe371); // LAY @@ -3045,7 +3045,7 @@ impl Inst { | &Inst::VecLoadElt32Rev { rd, ref mem } | &Inst::VecLoadElt64Rev { rd, ref mem } => { let rd = allocs.next_writable(rd); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (opcode, m3) = match self { &Inst::VecLoad { .. } => (0xe706, 0), // VL @@ -3069,7 +3069,7 @@ impl Inst { | &Inst::VecStoreElt32Rev { rd, ref mem } | &Inst::VecStoreElt64Rev { rd, ref mem } => { let rd = allocs.next(rd); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (opcode, m3) = match self { &Inst::VecStore { .. } => (0xe70e, 0), // VST @@ -3087,7 +3087,7 @@ impl Inst { &Inst::VecLoadReplicate { size, rd, ref mem } | &Inst::VecLoadReplicateRev { size, rd, ref mem } => { let rd = allocs.next_writable(rd); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (opcode, m3) = match (self, size) { (&Inst::VecLoadReplicate { .. }, 8) => (0xe705, 0), // VLREPB @@ -3215,7 +3215,7 @@ impl Inst { let rd = allocs.next_writable(rd); let ri = allocs.next(ri); debug_assert_eq!(rd.to_reg(), ri); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let opcode_vrx = match (self, size) { (&Inst::VecLoadLane { .. }, 8) => 0xe700, // VLEB @@ -3253,7 +3253,7 @@ impl Inst { lane_imm, } => { let rd = allocs.next_writable(rd); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (opcode_vrx, opcode_rx, opcode_rxy) = match (self, size) { (&Inst::VecLoadLaneUndef { .. }, 8) => (0xe700, None, None), // VLEB @@ -3297,7 +3297,7 @@ impl Inst { lane_imm, } => { let rd = allocs.next(rd); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (opcode_vrx, opcode_rx, opcode_rxy) = match (self, size) { (&Inst::VecStoreLane { .. 
}, 8) => (0xe708, None, None), // VSTEB diff --git a/cranelift/codegen/src/isa/s390x/inst/mod.rs b/cranelift/codegen/src/isa/s390x/inst/mod.rs index 834be5eb0a62..8fb823cc8b40 100644 --- a/cranelift/codegen/src/isa/s390x/inst/mod.rs +++ b/cranelift/codegen/src/isa/s390x/inst/mod.rs @@ -1333,7 +1333,7 @@ impl Inst { }; let rd = pretty_print_reg_mod(rd, ri, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (mem_str, mem) = mem_finalize_for_show( &mem, state, @@ -1615,7 +1615,7 @@ impl Inst { }; let rn = pretty_print_reg(rn, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (mem_str, mem) = mem_finalize_for_show( &mem, state, @@ -1725,7 +1725,7 @@ impl Inst { let rd = pretty_print_reg(rd.to_reg(), allocs); let rn = pretty_print_reg(rn, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (mem_str, mem) = mem_finalize_for_show( &mem, state, @@ -1760,7 +1760,7 @@ impl Inst { let rd = pretty_print_reg_mod(rd, ri, allocs); let rn = pretty_print_reg(rn, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (mem_str, mem) = mem_finalize_for_show( &mem, state, @@ -1817,7 +1817,7 @@ impl Inst { }; let rd = pretty_print_reg(rd.to_reg(), allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (mem_str, mem) = mem_finalize_for_show( &mem, state, @@ -1857,7 +1857,7 @@ impl Inst { }; let rd = pretty_print_reg(rd, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (mem_str, mem) = mem_finalize_for_show( &mem, state, @@ -1880,7 +1880,7 @@ impl Inst { format!("{}{} {}, {}", mem_str, op.unwrap(), rd, mem) } &Inst::StoreImm8 { imm, ref mem } => { - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (mem_str, mem) = mem_finalize_for_show( &mem, state, @@ -1904,7 +1904,7 @@ impl Inst { &Inst::StoreImm16 { imm, ref mem } | &Inst::StoreImm32SExt16 { imm, ref mem } | &Inst::StoreImm64SExt16 { imm, ref mem } => { - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (mem_str, mem) = mem_finalize_for_show( &mem, state, @@ -1931,8 +1931,8 @@ impl Inst { ref src, len_minus_one, } => { - let dst = dst.with_allocs(allocs); - let src = src.with_allocs(allocs); + let dst = dst.clone(); + let src = src.clone(); format!( "mvc {}({},{}), {}({})", dst.disp.pretty_print_default(), @@ -1943,7 +1943,7 @@ impl Inst { ) } &Inst::LoadMultiple64 { rt, rt2, ref mem } => { - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (mem_str, mem) = mem_finalize_for_show( &mem, state, @@ -1961,7 +1961,7 @@ impl Inst { format!("{}lmg {}, {}, {}", mem_str, rt, rt2, mem) } &Inst::StoreMultiple64 { rt, rt2, ref mem } => { - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (mem_str, mem) = mem_finalize_for_show( &mem, state, @@ -2671,7 +2671,7 @@ impl Inst { }; let rd = pretty_print_reg(rd.to_reg(), allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (mem_str, mem) = mem_finalize_for_show( &mem, state, @@ -2707,7 +2707,7 @@ impl Inst { }; let rd = pretty_print_reg(rd, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (mem_str, mem) = mem_finalize_for_show( &mem, state, @@ -2736,7 +2736,7 @@ impl Inst { }; let rd = pretty_print_reg(rd.to_reg(), allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (mem_str, mem) = mem_finalize_for_show( &mem, state, @@ -2857,7 +2857,7 @@ impl Inst { let (rd, _) = pretty_print_fpr(rd.to_reg(), allocs); let _ri = allocs.next(ri); - let mem = 
mem.with_allocs(allocs); + let mem = mem.clone(); let (mem_str, mem) = mem_finalize_for_show( &mem, state, @@ -2896,7 +2896,7 @@ impl Inst { }; let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg(), allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); if lane_imm == 0 && rd_fpr.is_some() && opcode_rx.is_some() { let (mem_str, mem) = mem_finalize_for_show( &mem, @@ -2956,7 +2956,7 @@ impl Inst { }; let (rd, rd_fpr) = pretty_print_fpr(rd, allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); if lane_imm == 0 && rd_fpr.is_some() && opcode_rx.is_some() { let (mem_str, mem) = mem_finalize_for_show( &mem, @@ -3246,7 +3246,7 @@ impl Inst { } &Inst::LoadAddr { rd, ref mem } => { let rd = pretty_print_reg(rd.to_reg(), allocs); - let mem = mem.with_allocs(allocs); + let mem = mem.clone(); let (mem_str, mem) = mem_finalize_for_show( &mem, state, diff --git a/cranelift/codegen/src/isa/x64/inst/args.rs b/cranelift/codegen/src/isa/x64/inst/args.rs index 975a33e76464..72acd7171557 100644 --- a/cranelift/codegen/src/isa/x64/inst/args.rs +++ b/cranelift/codegen/src/isa/x64/inst/args.rs @@ -407,10 +407,6 @@ impl Amode { } } - pub(crate) fn with_allocs(&self, _allocs: &mut AllocationConsumer) -> Self { - self.clone() - } - /// Offset the amode by a fixed offset. pub(crate) fn offset(&self, offset: i32) -> Self { let mut ret = self.clone(); @@ -539,10 +535,6 @@ impl SyntheticAmode { } } - pub(crate) fn with_allocs(&self, _allocs: &mut AllocationConsumer) -> Self { - self.clone() - } - pub(crate) fn aligned(&self) -> bool { match self { SyntheticAmode::Real(addr) => addr.aligned(), @@ -636,10 +628,6 @@ impl RegMemImm { Self::Imm { .. } => {} } } - - pub(crate) fn with_allocs(&self, _allocs: &mut AllocationConsumer) -> Self { - self.clone() - } } impl From for RegMemImm { @@ -734,10 +722,6 @@ impl RegMem { RegMem::Mem { addr, .. 
} => addr.get_operands(collector), } } - - pub(crate) fn with_allocs(&self, _allocs: &mut AllocationConsumer) -> Self { - self.clone() - } } impl From for RegMem { diff --git a/cranelift/codegen/src/isa/x64/inst/emit.rs b/cranelift/codegen/src/isa/x64/inst/emit.rs index d978caefcbe6..3305e231fb18 100644 --- a/cranelift/codegen/src/isa/x64/inst/emit.rs +++ b/cranelift/codegen/src/isa/x64/inst/emit.rs @@ -156,7 +156,7 @@ pub(crate) fn emit( let src1 = allocs.next(src1.to_reg()); let reg_g = allocs.next(reg_g.to_reg().to_reg()); debug_assert_eq!(src1, reg_g); - let src2 = src2.clone().to_reg_mem_imm().with_allocs(allocs); + let src2 = src2.clone().to_reg_mem_imm().clone(); let prefix = if *size == OperandSize::Size16 { LegacyPrefixes::_66 @@ -261,7 +261,7 @@ pub(crate) fn emit( op, } => { let src2 = allocs.next(src2.to_reg()); - let src1_dst = src1_dst.finalize(state, sink).with_allocs(allocs); + let src1_dst = src1_dst.finalize(state, sink).clone(); let opcode = match op { AluRmiROpcode::Add => 0x01, @@ -305,7 +305,7 @@ pub(crate) fn emit( let dst = allocs.next(dst.to_reg().to_reg()); let src1 = allocs.next(src1.to_reg()); - let src2 = match src2.clone().to_reg_mem().with_allocs(allocs) { + let src2 = match src2.clone().to_reg_mem().clone() { RegMem::Reg { reg } => { RegisterOrAmode::Register(reg.to_real_reg().unwrap().hw_enc().into()) } @@ -369,7 +369,7 @@ pub(crate) fn emit( emit_std_reg_reg(sink, prefix, opcode, num_opcodes, dst, src, rex_flags); } RegMem::Mem { addr: src } => { - let amode = src.finalize(state, sink).with_allocs(allocs); + let amode = src.finalize(state, sink).clone(); emit_std_reg_mem(sink, prefix, opcode, num_opcodes, dst, &amode, rex_flags, 0); } } @@ -377,7 +377,7 @@ pub(crate) fn emit( Inst::UnaryRmRVex { size, op, src, dst } => { let dst = allocs.next(dst.to_reg().to_reg()); - let src = match src.clone().to_reg_mem().with_allocs(allocs) { + let src = match src.clone().to_reg_mem().clone() { RegMem::Reg { reg } => { RegisterOrAmode::Register(reg.to_real_reg().unwrap().hw_enc().into()) } @@ -408,7 +408,7 @@ pub(crate) fn emit( imm, } => { let dst = allocs.next(dst.to_reg().to_reg()); - let src = match src.clone().to_reg_mem().with_allocs(allocs) { + let src = match src.clone().to_reg_mem().clone() { RegMem::Reg { reg } => { RegisterOrAmode::Register(reg.to_real_reg().unwrap().hw_enc().into()) } @@ -476,7 +476,7 @@ pub(crate) fn emit( divisor, .. 
} => { - let divisor = divisor.clone().to_reg_mem().with_allocs(allocs); + let divisor = divisor.clone().to_reg_mem().clone(); let size = match inst { Inst::Div { size, @@ -562,7 +562,7 @@ pub(crate) fn emit( debug_assert_eq!(src1, regs::rax()); debug_assert_eq!(dst_lo, regs::rax()); debug_assert_eq!(dst_hi, regs::rdx()); - let src2 = src2.clone().to_reg_mem().with_allocs(allocs); + let src2 = src2.clone().to_reg_mem().clone(); let rex_flags = RexFlags::from(*size); let prefix = match size { @@ -594,7 +594,7 @@ pub(crate) fn emit( let dst = allocs.next(dst.to_reg().to_reg()); debug_assert_eq!(src1, regs::rax()); debug_assert_eq!(dst, regs::rax()); - let src2 = src2.clone().to_reg_mem().with_allocs(allocs); + let src2 = src2.clone().to_reg_mem().clone(); let mut rex_flags = RexFlags::from(OperandSize::Size8); let prefix = LegacyPrefixes::None; @@ -633,7 +633,7 @@ pub(crate) fn emit( let src1 = allocs.next(src1.to_reg()); let dst = allocs.next(dst.to_reg().to_reg()); debug_assert_eq!(src1, dst); - let src2 = src2.clone().to_reg_mem().with_allocs(allocs); + let src2 = src2.clone().to_reg_mem().clone(); let rex = RexFlags::from(*size); let prefix = LegacyPrefixes::None; @@ -656,7 +656,7 @@ pub(crate) fn emit( dst, } => { let dst = allocs.next(dst.to_reg().to_reg()); - let src1 = src1.clone().to_reg_mem().with_allocs(allocs); + let src1 = src1.clone().to_reg_mem().clone(); let rex = RexFlags::from(*size); let prefix = match size { @@ -847,7 +847,7 @@ pub(crate) fn emit( } Inst::MovImmM { size, simm32, dst } => { - let dst = &dst.finalize(state, sink).with_allocs(allocs); + let dst = &dst.finalize(state, sink).clone(); let default_rex = RexFlags::clear_w(); let default_opcode = 0xC7; let bytes = size.to_bytes(); @@ -887,7 +887,6 @@ pub(crate) fn emit( } Inst::MovFromPReg { src, dst } => { - allocs.next_fixed_nonallocatable(*src); let src: Reg = (*src).into(); debug_assert!([regs::rsp(), regs::rbp(), regs::pinned_reg()].contains(&src)); let src = Gpr::new(src).unwrap(); @@ -900,7 +899,6 @@ pub(crate) fn emit( Inst::MovToPReg { src, dst } => { let src = allocs.next(src.to_reg()); let src = Gpr::new(src).unwrap(); - allocs.next_fixed_nonallocatable(*dst); let dst: Reg = (*dst).into(); debug_assert!([regs::rsp(), regs::rbp(), regs::pinned_reg()].contains(&dst)); let dst = WritableGpr::from_writable_reg(Writable::from_reg(dst)).unwrap(); @@ -963,7 +961,7 @@ pub(crate) fn emit( } RegMem::Mem { addr: src } => { - let src = &src.finalize(state, sink).with_allocs(allocs); + let src = &src.finalize(state, sink).clone(); emit_std_reg_mem( sink, @@ -981,7 +979,7 @@ pub(crate) fn emit( Inst::Mov64MR { src, dst } => { let dst = allocs.next(dst.to_reg().to_reg()); - let src = &src.finalize(state, sink).with_allocs(allocs); + let src = &src.finalize(state, sink).clone(); emit_std_reg_mem( sink, @@ -997,7 +995,7 @@ pub(crate) fn emit( Inst::LoadEffectiveAddress { addr, dst, size } => { let dst = allocs.next(dst.to_reg().to_reg()); - let amode = addr.finalize(state, sink).with_allocs(allocs); + let amode = addr.finalize(state, sink).clone(); // If this `lea` can actually get encoded as an `add` then do that // instead. 
Currently all candidate `iadd`s become an `lea` @@ -1122,7 +1120,7 @@ pub(crate) fn emit( } RegMem::Mem { addr: src } => { - let src = &src.finalize(state, sink).with_allocs(allocs); + let src = &src.finalize(state, sink).clone(); emit_std_reg_mem( sink, @@ -1140,7 +1138,7 @@ pub(crate) fn emit( Inst::MovRM { size, src, dst } => { let src = allocs.next(src.to_reg()); - let dst = &dst.finalize(state, sink).with_allocs(allocs); + let dst = &dst.finalize(state, sink).clone(); let prefix = match size { OperandSize::Size16 => LegacyPrefixes::_66, @@ -1270,7 +1268,7 @@ pub(crate) fn emit( emit_std_reg_reg(sink, prefix, opcode_bytes, 2, dst, reg, rex); } RegMemImm::Mem { addr } => { - let addr = &addr.finalize(state, sink).with_allocs(allocs); + let addr = &addr.finalize(state, sink).clone(); emit_std_reg_mem(sink, prefix, opcode_bytes, 2, dst, addr, rex, 0); } RegMemImm::Imm { .. } => unreachable!(), @@ -1318,7 +1316,7 @@ pub(crate) fn emit( } RegMemImm::Mem { addr } => { - let addr = &addr.finalize(state, sink).with_allocs(allocs); + let addr = &addr.finalize(state, sink).clone(); // Whereas here we revert to the "normal" G-E ordering for CMP. let opcode = match (*size, is_cmp) { (OperandSize::Size8, true) => 0x3A, @@ -1414,7 +1412,7 @@ pub(crate) fn emit( emit_std_reg_reg(sink, prefix, opcode, 2, dst, reg, rex_flags); } RegMem::Mem { addr } => { - let addr = &addr.finalize(state, sink).with_allocs(allocs); + let addr = &addr.finalize(state, sink).clone(); emit_std_reg_mem(sink, prefix, opcode, 2, dst, addr, rex_flags, 0); } } @@ -1456,7 +1454,7 @@ pub(crate) fn emit( } Inst::Push64 { src } => { - let src = src.clone().to_reg_mem_imm().with_allocs(allocs); + let src = src.clone().to_reg_mem_imm().clone(); match src { RegMemImm::Reg { reg } => { @@ -1661,7 +1659,7 @@ pub(crate) fn emit( } Inst::CallUnknown { dest, opcode, info } => { - let dest = dest.with_allocs(allocs); + let dest = dest.clone(); let start_offset = sink.cur_offset(); match dest { @@ -1776,7 +1774,7 @@ pub(crate) fn emit( } Inst::JmpUnknown { target } => { - let target = target.with_allocs(allocs); + let target = target.clone(); match target { RegMem::Reg { reg } => { @@ -1938,7 +1936,7 @@ pub(crate) fn emit( dst: reg_g, } => { let reg_g = allocs.next(reg_g.to_reg().to_reg()); - let src_e = src_e.clone().to_reg_mem().with_allocs(allocs); + let src_e = src_e.clone().to_reg_mem().clone(); let rex = RexFlags::clear_w(); @@ -1991,7 +1989,7 @@ pub(crate) fn emit( Inst::XmmUnaryRmRImm { op, src, dst, imm } => { let dst = allocs.next(dst.to_reg().to_reg()); - let src = src.clone().to_reg_mem().with_allocs(allocs); + let src = src.clone().to_reg_mem().clone(); let rex = RexFlags::clear_w(); let (prefix, opcode, len) = match op { @@ -2019,7 +2017,7 @@ pub(crate) fn emit( Inst::XmmUnaryRmREvex { op, src, dst } => { let dst = allocs.next(dst.to_reg().to_reg()); - let src = match src.clone().to_reg_mem().with_allocs(allocs) { + let src = match src.clone().to_reg_mem().clone() { RegMem::Reg { reg } => { RegisterOrAmode::Register(reg.to_real_reg().unwrap().hw_enc().into()) } @@ -2046,7 +2044,7 @@ pub(crate) fn emit( Inst::XmmUnaryRmRImmEvex { op, src, dst, imm } => { let dst = allocs.next(dst.to_reg().to_reg()); - let src = match src.clone().to_reg_mem().with_allocs(allocs) { + let src = match src.clone().to_reg_mem().clone() { RegMem::Reg { reg } => { RegisterOrAmode::Register(reg.to_real_reg().unwrap().hw_enc().into()) } @@ -2097,7 +2095,7 @@ pub(crate) fn emit( } => { let src1 = allocs.next(src1.to_reg()); let reg_g = 
allocs.next(reg_g.to_reg().to_reg()); - let src_e = src_e.clone().to_reg_mem().with_allocs(allocs); + let src_e = src_e.clone().to_reg_mem().clone(); debug_assert_eq!(src1, reg_g); let rex = RexFlags::clear_w(); @@ -2236,7 +2234,7 @@ pub(crate) fn emit( debug_assert_eq!(mask, regs::xmm0()); let reg_g = allocs.next(dst.to_reg().to_reg()); debug_assert_eq!(src1, reg_g); - let src_e = src2.clone().to_reg_mem().with_allocs(allocs); + let src_e = src2.clone().to_reg_mem().clone(); let rex = RexFlags::clear_w(); let (prefix, opcode, length) = match op { @@ -2268,7 +2266,7 @@ pub(crate) fn emit( let dst = allocs.next(dst.to_reg().to_reg()); let src1 = allocs.next(src1.to_reg()); - let src2 = src2.clone().to_reg_mem_imm().with_allocs(allocs); + let src2 = src2.clone().to_reg_mem_imm().clone(); // When the opcode is commutative, src1 is xmm{0..7}, and src2 is // xmm{8..15}, then we can swap the operands to save one byte on the @@ -2458,7 +2456,7 @@ pub(crate) fn emit( } => { let dst = allocs.next(dst.to_reg().to_reg()); let src1 = allocs.next(src1.to_reg()); - let src2 = match src2.clone().to_reg_mem().with_allocs(allocs) { + let src2 = match src2.clone().to_reg_mem().clone() { RegMem::Reg { reg } => { RegisterOrAmode::Register(reg.to_real_reg().unwrap().hw_enc().into()) } @@ -2497,7 +2495,7 @@ pub(crate) fn emit( } => { let dst = allocs.next(dst.to_reg().to_reg()); let src1 = allocs.next(src1.to_reg()); - let src2 = match src2.clone().to_reg_mem().with_allocs(allocs) { + let src2 = match src2.clone().to_reg_mem().clone() { RegMem::Reg { reg } => { RegisterOrAmode::Register(reg.to_real_reg().unwrap().hw_enc().into()) } @@ -2536,7 +2534,7 @@ pub(crate) fn emit( let dst = allocs.next(dst.to_reg().to_reg()); debug_assert_eq!(src1, dst); let src2 = allocs.next(src2.to_reg()); - let src3 = match src3.clone().to_reg_mem().with_allocs(allocs) { + let src3 = match src3.clone().to_reg_mem().clone() { RegMem::Reg { reg } => { RegisterOrAmode::Register(reg.to_real_reg().unwrap().hw_enc().into()) } @@ -2587,7 +2585,7 @@ pub(crate) fn emit( } => { let dst = allocs.next(dst.to_reg().to_reg()); let src1 = allocs.next(src1.to_reg()); - let src2 = match src2.clone().to_reg_mem().with_allocs(allocs) { + let src2 = match src2.clone().to_reg_mem().clone() { RegMem::Reg { reg } => { RegisterOrAmode::Register(reg.to_real_reg().unwrap().hw_enc().into()) } @@ -2616,7 +2614,7 @@ pub(crate) fn emit( Inst::XmmUnaryRmRVex { op, src, dst } => { let dst = allocs.next(dst.to_reg().to_reg()); - let src = match src.clone().to_reg_mem().with_allocs(allocs) { + let src = match src.clone().to_reg_mem().clone() { RegMem::Reg { reg } => { RegisterOrAmode::Register(reg.to_real_reg().unwrap().hw_enc().into()) } @@ -2680,7 +2678,7 @@ pub(crate) fn emit( Inst::XmmUnaryRmRImmVex { op, src, dst, imm } => { let dst = allocs.next(dst.to_reg().to_reg()); - let src = match src.clone().to_reg_mem().with_allocs(allocs) { + let src = match src.clone().to_reg_mem().clone() { RegMem::Reg { reg } => { RegisterOrAmode::Register(reg.to_real_reg().unwrap().hw_enc().into()) } @@ -2720,7 +2718,7 @@ pub(crate) fn emit( Inst::XmmMovRMVex { op, src, dst } => { let src = allocs.next(src.to_reg()); - let dst = dst.with_allocs(allocs).finalize(state, sink); + let dst = dst.clone().finalize(state, sink); let (prefix, map, opcode) = match op { AvxOpcode::Vmovdqu => (LegacyPrefixes::_F3, OpcodeMap::_0F, 0x7F), @@ -2742,7 +2740,7 @@ pub(crate) fn emit( Inst::XmmMovRMImmVex { op, src, dst, imm } => { let src = allocs.next(src.to_reg()); - let dst = 
dst.with_allocs(allocs).finalize(state, sink); + let dst = dst.clone().finalize(state, sink); let (w, prefix, map, opcode) = match op { AvxOpcode::Vpextrb => (false, LegacyPrefixes::_66, OpcodeMap::_0F3A, 0x14), @@ -2833,7 +2831,7 @@ pub(crate) fn emit( src_size, } => { let dst = allocs.next(dst.to_reg().to_reg()); - let src = match src.clone().to_reg_mem().with_allocs(allocs) { + let src = match src.clone().to_reg_mem().clone() { RegMem::Reg { reg } => { RegisterOrAmode::Register(reg.to_real_reg().unwrap().hw_enc().into()) } @@ -2862,7 +2860,7 @@ pub(crate) fn emit( Inst::XmmCmpRmRVex { op, src1, src2 } => { let src1 = allocs.next(src1.to_reg()); - let src2 = match src2.clone().to_reg_mem().with_allocs(allocs) { + let src2 = match src2.clone().to_reg_mem().clone() { RegMem::Reg { reg } => { RegisterOrAmode::Register(reg.to_real_reg().unwrap().hw_enc().into()) } @@ -2904,7 +2902,7 @@ pub(crate) fn emit( _ => None, }; let src1 = allocs.next(src1.to_reg()); - let src2 = match src2.clone().to_reg_mem().with_allocs(allocs) { + let src2 = match src2.clone().to_reg_mem().clone() { RegMem::Reg { reg } => { RegisterOrAmode::Register(reg.to_real_reg().unwrap().hw_enc().into()) } @@ -3038,7 +3036,7 @@ pub(crate) fn emit( } => { let src1 = allocs.next(*src1); let dst = allocs.next(dst.to_reg()); - let src2 = src2.with_allocs(allocs); + let src2 = src2.clone(); debug_assert_eq!(src1, dst); let (prefix, opcode, len) = match op { @@ -3092,7 +3090,7 @@ pub(crate) fn emit( Inst::XmmMovRM { op, src, dst } => { let src = allocs.next(src.to_reg()); - let dst = dst.with_allocs(allocs); + let dst = dst.clone(); let (prefix, opcode) = match op { SseOpcode::Movaps => (LegacyPrefixes::None, 0x0F29), @@ -3110,7 +3108,7 @@ pub(crate) fn emit( Inst::XmmMovRMImm { op, src, dst, imm } => { let src = allocs.next(src.to_reg()); - let dst = dst.with_allocs(allocs); + let dst = dst.clone(); let (w, prefix, opcode) = match op { SseOpcode::Pextrb => (false, LegacyPrefixes::_66, 0x0F3A14), @@ -3182,7 +3180,7 @@ pub(crate) fn emit( src_size, } => { let reg_g = allocs.next(reg_g.to_reg().to_reg()); - let src_e = src_e.clone().to_reg_mem().with_allocs(allocs); + let src_e = src_e.clone().to_reg_mem().clone(); let (prefix, opcode) = match op { // Movd and movq use the same opcode; the presence of the REX prefix (set below) @@ -3204,7 +3202,7 @@ pub(crate) fn emit( Inst::XmmCmpRmR { op, src1, src2 } => { let src1 = allocs.next(src1.to_reg()); - let src2 = src2.clone().to_reg_mem().with_allocs(allocs); + let src2 = src2.clone().to_reg_mem().clone(); let rex = RexFlags::clear_w(); let (prefix, opcode, len) = match op { @@ -3235,7 +3233,7 @@ pub(crate) fn emit( let src1 = allocs.next(src1.to_reg()); let dst = allocs.next(dst.to_reg().to_reg()); assert_eq!(src1, dst); - let src2 = src2.clone().to_reg_mem().with_allocs(allocs); + let src2 = src2.clone().to_reg_mem().clone(); let (prefix, opcode) = match op { SseOpcode::Cvtsi2ss => (LegacyPrefixes::_F3, 0x0F2A), @@ -3263,7 +3261,7 @@ pub(crate) fn emit( } => { let dst = allocs.next(dst.to_reg().to_reg()); let src1 = allocs.next(src1.to_reg()); - let src2 = match src2.clone().to_reg_mem().with_allocs(allocs) { + let src2 = match src2.clone().to_reg_mem().clone() { RegMem::Reg { reg } => { RegisterOrAmode::Register(reg.to_real_reg().unwrap().hw_enc().into()) } @@ -3859,7 +3857,7 @@ pub(crate) fn emit( let replacement = allocs.next(*replacement); let expected = allocs.next(*expected); let dst_old = allocs.next(dst_old.to_reg()); - let mem = mem.with_allocs(allocs); + let mem = 
mem.clone(); debug_assert_eq!(expected, regs::rax()); debug_assert_eq!(dst_old, regs::rax()); @@ -3890,7 +3888,7 @@ pub(crate) fn emit( let temp = allocs.next_writable(*temp); let dst_old = allocs.next_writable(*dst_old); debug_assert_eq!(dst_old.to_reg(), regs::rax()); - let mem = mem.finalize(state, sink).with_allocs(allocs); + let mem = mem.finalize(state, sink).clone(); // Emit this: // mov{zbq,zwq,zlq,q} (%r_address), %rax // rax = old value diff --git a/cranelift/codegen/src/isa/x64/inst/mod.rs b/cranelift/codegen/src/isa/x64/inst/mod.rs index 45790ad5b454..593f1620c6f6 100644 --- a/cranelift/codegen/src/isa/x64/inst/mod.rs +++ b/cranelift/codegen/src/isa/x64/inst/mod.rs @@ -1465,7 +1465,6 @@ impl PrettyPrint for Inst { } Inst::MovFromPReg { src, dst } => { - allocs.next_fixed_nonallocatable(*src); let src: Reg = (*src).into(); let src = regs::show_ireg_sized(src, 8); let dst = pretty_print_reg(dst.to_reg().to_reg(), 8, allocs); @@ -1475,7 +1474,6 @@ impl PrettyPrint for Inst { Inst::MovToPReg { src, dst } => { let src = pretty_print_reg(src.to_reg(), 8, allocs); - allocs.next_fixed_nonallocatable(*dst); let dst: Reg = (*dst).into(); let dst = regs::show_ireg_sized(dst, 8); let op = ljustify("movq".to_string()); diff --git a/cranelift/codegen/src/machinst/reg.rs b/cranelift/codegen/src/machinst/reg.rs index 8dbcd32fa684..269c95c8a220 100644 --- a/cranelift/codegen/src/machinst/reg.rs +++ b/cranelift/codegen/src/machinst/reg.rs @@ -504,8 +504,6 @@ impl AllocationConsumer { Self } - pub fn next_fixed_nonallocatable(&mut self, _preg: PReg) {} - pub fn next(&mut self, pre_regalloc_reg: Reg) -> Reg { pre_regalloc_reg }
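
Note for readers skimming the patch: every helper deleted above (`AMode::with_allocs`, `PairAMode::with_allocs`, `MemArg::with_allocs`, `RegMem::with_allocs`, riscv64's `Inst::allocate`, `AllocationConsumer::next_fixed_nonallocatable`, and friends across the aarch64, riscv64, s390x, and x64 backends) had already degenerated into an identity or no-op — compare the `AllocationConsumer` methods removed from `cranelift/codegen/src/machinst/reg.rs` at the end of the diff, which simply return their argument. That is why each call site can switch to a plain `clone()` (or drop the call entirely) without changing what gets emitted or printed. Below is a minimal, self-contained sketch of the before/after shape at a call site; `Amode` and `AllocationConsumer` here are simplified stand-ins for illustration, not the real Cranelift definitions.

    // Simplified stand-ins for illustration only; the real types live in
    // cranelift-codegen and carry much more state.
    #[derive(Clone, Debug, PartialEq)]
    struct Amode {
        base: u8,
        offset: i32,
    }

    struct AllocationConsumer;

    impl Amode {
        // Shape of the helper this patch removes: it ignores the consumer
        // and just clones the addressing mode.
        fn with_allocs(&self, _allocs: &mut AllocationConsumer) -> Self {
            self.clone()
        }
    }

    fn main() {
        let mem = Amode { base: 3, offset: 16 };
        let mut allocs = AllocationConsumer;

        // Before this patch: thread the allocation consumer through the call.
        let before = mem.with_allocs(&mut allocs);
        // After this patch: the helper was a no-op, so a plain clone is
        // equivalent at every call site.
        let after = mem.clone();

        assert_eq!(before, after);
        println!("addressing mode survives unchanged: {after:?}");
    }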