diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 282cf5d681685..3d5a55c631301 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -95,7 +95,8 @@ class RISCVInstructionSelector : public InstructionSelector {
   void addVectorLoadStoreOperands(MachineInstr &I,
                                   SmallVectorImpl<SrcOp> &SrcOps,
                                   unsigned &CurOp, bool IsMasked,
-                                  bool IsStrided) const;
+                                  bool IsStridedOrIndexed,
+                                  LLT *IndexVT = nullptr) const;
   bool selectIntrinsicWithSideEffects(MachineInstr &I,
                                       MachineIRBuilder &MIB) const;
 
@@ -722,15 +723,17 @@ static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
 
 void RISCVInstructionSelector::addVectorLoadStoreOperands(
     MachineInstr &I, SmallVectorImpl<SrcOp> &SrcOps, unsigned &CurOp,
-    bool IsMasked, bool IsStrided) const {
+    bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {
   // Base Pointer
   auto PtrReg = I.getOperand(CurOp++).getReg();
   SrcOps.push_back(PtrReg);
 
-  // Stride
-  if (IsStrided) {
+  // Stride or Index
+  if (IsStridedOrIndexed) {
     auto StrideReg = I.getOperand(CurOp++).getReg();
     SrcOps.push_back(StrideReg);
+    if (IndexVT)
+      *IndexVT = MRI->getType(StrideReg);
   }
 
   // Mask
@@ -805,6 +808,70 @@ bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
     I.eraseFromParent();
     return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
   }
+  case Intrinsic::riscv_vloxei:
+  case Intrinsic::riscv_vloxei_mask:
+  case Intrinsic::riscv_vluxei:
+  case Intrinsic::riscv_vluxei_mask: {
+    bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
+                    IntrinID == Intrinsic::riscv_vluxei_mask;
+    bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
+                     IntrinID == Intrinsic::riscv_vloxei_mask;
+    LLT VT = MRI->getType(I.getOperand(0).getReg());
+    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+
+    // Result vector
+    const Register DstReg = I.getOperand(0).getReg();
+
+    // Sources
+    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
+    unsigned CurOp = 2;
+    SmallVector<SrcOp, 4> SrcOps; // Source registers.
+
+    // Passthru
+    if (HasPassthruOperand) {
+      auto PassthruReg = I.getOperand(CurOp++).getReg();
+      SrcOps.push_back(PassthruReg);
+    } else {
+      // Use NoRegister if there is no specified passthru.
+      SrcOps.push_back(Register());
+    }
+    LLT IndexVT;
+    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);
+
+    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
+    RISCVVType::VLMUL IndexLMUL =
+        RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
+    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+      reportFatalUsageError("The V extension does not support EEW=64 for index "
+                            "values when XLEN=32");
+    }
+    const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
+        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
+        static_cast<unsigned>(IndexLMUL));
+
+    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);
+
+    // Select VL
+    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
+    for (auto &RenderFn : *VLOpFn)
+      RenderFn(PseudoMI);
+
+    // SEW
+    PseudoMI.addImm(Log2SEW);
+
+    // Policy
+    uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
+    if (IsMasked)
+      Policy = I.getOperand(CurOp++).getImm();
+    PseudoMI.addImm(Policy);
+
+    // Memref
+    PseudoMI.cloneMemRefs(I);
+
+    I.eraseFromParent();
+    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
+  }
   case Intrinsic::riscv_vsm:
   case Intrinsic::riscv_vse:
   case Intrinsic::riscv_vse_mask:
@@ -847,6 +914,56 @@ bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
     I.eraseFromParent();
     return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
   }
+  case Intrinsic::riscv_vsoxei:
+  case Intrinsic::riscv_vsoxei_mask:
+  case Intrinsic::riscv_vsuxei:
+  case Intrinsic::riscv_vsuxei_mask: {
+    bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
+                    IntrinID == Intrinsic::riscv_vsuxei_mask;
+    bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
+                     IntrinID == Intrinsic::riscv_vsoxei_mask;
+    LLT VT = MRI->getType(I.getOperand(1).getReg());
+    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+
+    // Sources
+    unsigned CurOp = 1;
+    SmallVector<SrcOp, 4> SrcOps; // Source registers.
+
+    // Store value
+    auto PassthruReg = I.getOperand(CurOp++).getReg();
+    SrcOps.push_back(PassthruReg);
+
+    LLT IndexVT;
+    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);
+
+    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
+    RISCVVType::VLMUL IndexLMUL =
+        RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
+    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+      reportFatalUsageError("The V extension does not support EEW=64 for index "
+                            "values when XLEN=32");
+    }
+    const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
+        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
+        static_cast<unsigned>(IndexLMUL));
+
+    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);
+
+    // Select VL
+    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
+    for (auto &RenderFn : *VLOpFn)
+      RenderFn(PseudoMI);
+
+    // SEW
+    PseudoMI.addImm(Log2SEW);
+
+    // Memref
+    PseudoMI.cloneMemRefs(I);
+
+    I.eraseFromParent();
+    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
+  }
   }
 }
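The tests below exercise these selection paths end to end. As a minimal usage sketch (the value names %base, %offsets, and %vl are placeholders, not names from the tests; the intrinsic signature mirrors the declarations in the test files), an unmasked ordered indexed load of i32 elements using 64-bit byte offsets looks like this in IR:

  %v = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
      <vscale x 2 x i32> poison,    ; passthru (poison: no merge value)
      ptr %base,                    ; scalar base pointer
      <vscale x 2 x i64> %offsets,  ; vector of byte offsets
      i64 %vl)                      ; explicit vector length

The .mask variants additionally take a <vscale x 2 x i1> mask after the offset vector and a trailing policy immediate; the selection code above consumes that immediate as the pseudo's policy operand.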
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll
new file mode 100644
index 0000000000000..5cb55f15c7c8c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll
@@ -0,0 +1,1341 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin -global-isel -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s
+
+; The intrinsics are not supported with RV32.
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  ptr,
+  <vscale x 1 x i64>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vloxei64.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> poison,
+    ptr %0,
+    <vscale x 1 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  ptr,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4, i64 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  ptr,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vloxei64.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> poison,
+    ptr %0,
+    <vscale x 2 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  ptr,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i8.nxv4i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i8.nxv8i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxei64.v v16, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i16.nxv1i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i16.nxv2i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64( + , + ptr, 
+ , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i16.nxv4i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxei64.v v12, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i16.nxv8i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxei64.v v16, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i32.nxv1i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i32.nxv2i64( + , + ptr, + , + i64); + +define 
@intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxei64.v v10, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i32.nxv4i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxei64.v v12, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i32.nxv8i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxei64.v v16, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i64.nxv1i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxei64.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) 
nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i64.nxv2i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxei64.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i64.nxv4i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxei64.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i64.nxv8i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vloxei64.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f16.nxv1i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: 
vloxei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f16.nxv2i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f16.nxv4i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxei64.v v12, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f16.nxv8i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxei64.v v16, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t 
+; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f32.nxv1i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f32.nxv2i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxei64.v v10, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f32.nxv4i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxei64.v v12, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f32.nxv8i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxei64.v v16, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vloxei.nxv8f32.nxv8i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f64.nxv1i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxei64.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f64.nxv2i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxei64.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f64.nxv4i64( + , + ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxei64.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f64.nxv8i64( + , + 
ptr, + , + i64); + +define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vloxei64.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll new file mode 100644 index 0000000000000..fafd45b7579e8 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll @@ -0,0 +1,5100 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN: -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vloxei.nxv1i8.nxv1i32( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i8.nxv2i32( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, 
iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i8.nxv4i32( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i8.nxv8i32( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxei32.v v12, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i8.nxv16i32( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxei32.v v16, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i16.nxv1i32( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.vloxei.mask.nxv1i16.nxv1i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i16.nxv2i32( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxei32.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i16.nxv4i32( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxei32.v v10, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i16.nxv8i32( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxei32.v v12, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a 
+} + +declare @llvm.riscv.vloxei.nxv16i16.nxv16i32( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxei32.v v16, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i32.nxv1i32( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxei32.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i32.nxv2i32( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxei32.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i32.nxv4i32( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxei32.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32( + , + ptr, + , + , + iXLen, + 
iXLen); + +define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i32.nxv8i32( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxei32.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i32.nxv16i32( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vloxei32.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i64.nxv1i32( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxei32.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i64.nxv2i32( + , + ptr, + , + iXLen); + +define 
@intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vloxei32.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32(<vscale x 2 x i64> poison, ptr %0, <vscale x 2 x i32> %1, iXLen %2)
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(<vscale x 2 x i64>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, iXLen, iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(<vscale x 4 x i64>, ptr, <vscale x 4 x i32>, iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vloxei32.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(<vscale x 4 x i64> poison, ptr %0, <vscale x 4 x i32> %1, iXLen %2)
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(<vscale x 4 x i64>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, iXLen, iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(<vscale x 8 x i64>, ptr, <vscale x 8 x i32>, iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vloxei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(<vscale x 8 x i64> poison, ptr %0, <vscale x 8 x i32> %1, iXLen %2)
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(<vscale x 8 x i64>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, iXLen, iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(<vscale x 1 x half>, ptr, <vscale x 1 x i32>, iXLen);
+
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vloxei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(<vscale x 1 x half> poison, ptr %0, <vscale x 1 x i32> %1, iXLen %2)
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(<vscale x 1 x half>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
+
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(<vscale x 2 x half>, ptr, <vscale x 2 x i32>, iXLen);
+
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vloxei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(<vscale x 2 x half> poison, ptr %0, <vscale x 2 x i32> %1, iXLen %2)
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(<vscale x 2 x half>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, iXLen, iXLen);
+
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(<vscale x 4 x half>, ptr, <vscale x 4 x i32>, iXLen);
+
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vloxei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(<vscale x 4 x half> poison, ptr %0, <vscale x 4 x i32> %1, iXLen %2)
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(<vscale x 4 x half>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, iXLen, iXLen);
+
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(<vscale x 8 x half>, ptr, <vscale x 8 x i32>, iXLen);
+
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vloxei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(<vscale x 8 x half> poison, ptr %0, <vscale x 8 x i32> %1, iXLen %2)
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(<vscale x 8 x half>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, iXLen, iXLen);
+
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(<vscale x 16 x half>, ptr, <vscale x 16 x i32>, iXLen);
+
+define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vloxei32.v v16, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(<vscale x 16 x half> poison, ptr %0, <vscale x 16 x i32> %1, iXLen %2)
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(<vscale x 16 x half>, ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, iXLen, iXLen);
+
+define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(<vscale x 1 x float>, ptr, <vscale x 1 x i32>, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vloxei32.v v8, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(<vscale x 1 x float> poison, ptr %0, <vscale x 1 x i32> %1, iXLen %2)
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(<vscale x 1 x float>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(<vscale x 2 x float>, ptr, <vscale x 2 x i32>, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vloxei32.v v8, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(<vscale x 2 x float> poison, ptr %0, <vscale x 2 x i32> %1, iXLen %2)
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(<vscale x 2 x float>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(<vscale x 4 x float>, ptr, <vscale x 4 x i32>, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vloxei32.v v8, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(<vscale x 4 x float> poison, ptr %0, <vscale x 4 x i32> %1, iXLen %2)
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(<vscale x 4 x float>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(<vscale x 8 x float>, ptr, <vscale x 8 x i32>, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vloxei32.v v8, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(<vscale x 8 x float> poison, ptr %0, <vscale x 8 x i32> %1, iXLen %2)
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(<vscale x 8 x float>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(<vscale x 16 x float>, ptr, <vscale x 16 x i32>, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    vloxei32.v v8, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(<vscale x 16 x float> poison, ptr %0, <vscale x 16 x i32> %1, iXLen %2)
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(<vscale x 16 x float>, ptr, <vscale x 16 x i32>, <vscale x 16 x i1>, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(<vscale x 1 x double>, ptr, <vscale x 1 x i32>, iXLen);
+
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vloxei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(<vscale x 1 x double> poison, ptr %0, <vscale x 1 x i32> %1, iXLen %2)
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(<vscale x 1 x double>, ptr, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen);
+
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(<vscale x 2 x double>, ptr, <vscale x 2 x i32>, iXLen);
+
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vloxei32.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(<vscale x 2 x double> poison, ptr %0, <vscale x 2 x i32> %1, iXLen %2)
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(<vscale x 2 x double>, ptr, <vscale x 2 x i32>, <vscale x 2 x i1>, iXLen, iXLen);
+
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(<vscale x 4 x double>, ptr, <vscale x 4 x i32>, iXLen);
+
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vloxei32.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(<vscale x 4 x double> poison, ptr %0, <vscale x 4 x i32> %1, iXLen %2)
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(<vscale x 4 x double>, ptr, <vscale x 4 x i32>, <vscale x 4 x i1>, iXLen, iXLen);
+
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(<vscale x 8 x double>, ptr, <vscale x 8 x i32>, iXLen);
+
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vloxei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(<vscale x 8 x double> poison, ptr %0, <vscale x 8 x i32> %1, iXLen %2)
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(<vscale x 8 x double>, ptr, <vscale x 8 x i32>, <vscale x 8 x i1>, iXLen, iXLen);
+
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vloxei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(<vscale x 1 x i8> poison, ptr %0, <vscale x 1 x i16> %1, iXLen %2)
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vloxei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(<vscale x 2 x i8> poison, ptr %0, <vscale x 2 x i16> %1, iXLen %2)
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vloxei16.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(<vscale x 4 x i8> poison, ptr %0, <vscale x 4 x i16> %1, iXLen %2)
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vloxei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(<vscale x 8 x i8> poison, ptr %0, <vscale x 8 x i16> %1, iXLen %2)
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(<vscale x 16 x i8>, ptr, <vscale x 16 x i16>, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vloxei16.v v12, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(<vscale x 16 x i8> poison, ptr %0, <vscale x 16 x i16> %1, iXLen %2)
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>, ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(<vscale x 32 x i8>, ptr, <vscale x 32 x i16>, iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    vloxei16.v v16, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(<vscale x 32 x i8> poison, ptr %0, <vscale x 32 x i16> %1, iXLen %2)
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(<vscale x 32 x i8>, ptr, <vscale x 32 x i16>, <vscale x 32 x i1>, iXLen, iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vloxei16.v v8, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(<vscale x 1 x i16> poison, ptr %0, <vscale x 1 x i16> %1, iXLen %2)
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>, ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, iXLen, iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vloxei16.v v8, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(<vscale x 2 x i16> poison, ptr %0, <vscale x 2 x i16> %1, iXLen %2)
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>, ptr, <vscale x 2 x i16>, <vscale x 2 x i1>, iXLen, iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vloxei16.v v8, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(<vscale x 4 x i16> poison, ptr %0, <vscale x 4 x i16> %1, iXLen %2)
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>, ptr, <vscale x 4 x i16>, <vscale x 4 x i1>, iXLen, iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(<vscale x 8 x i16>, ptr, <vscale x 8 x i16>, iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vloxei16.v v8, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(<vscale x 8 x i16> poison, ptr %0, <vscale x 8 x i16> %1, iXLen %2)
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>, ptr, <vscale x 8 x i16>, <vscale x 8 x i1>, iXLen, iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(<vscale x 16 x i16>, ptr, <vscale x 16 x i16>, iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vloxei16.v v8, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(<vscale x 16 x i16> poison, ptr %0, <vscale x 16 x i16> %1, iXLen %2)
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>, ptr, <vscale x 16 x i16>, <vscale x 16 x i1>, iXLen, iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4, iXLen 1)
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16>
@llvm.riscv.vloxei.nxv32i16.nxv32i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv32i16.nxv32i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i32.nxv1i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i32.nxv2i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxei16.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i32.nxv4i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i16( + poison, + ptr %0, + %1, + iXLen %2) + + 
ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i32.nxv8i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i32.nxv16i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i64.nxv1i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxei16.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t +; 
CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i64.nxv2i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i64.nxv4i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i64.nxv8i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f16.nxv1i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f16.nxv2i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f16.nxv4i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f16.nxv8i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vloxei16.v 
v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16f16.nxv16i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv32f16.nxv32i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv32f16.nxv32i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f32.nxv1i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f32.nxv2i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxei16.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; 
CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f32.nxv4i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f32.nxv8i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16f32.nxv16i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f64.nxv1i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxei16.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f64.nxv2i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f64.nxv4i16( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f64.nxv8i16( + , + ptr, + , + iXLen); + +define 
@intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vloxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i8.nxv1i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i8.nxv2i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i8.nxv4i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; 
CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i8.nxv8i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i8.nxv16i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv32i8.nxv32i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv32i8.nxv32i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv64i8.nxv64i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: 
vloxei8.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv64i8.nxv64i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i16.nxv1i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i16.nxv2i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i16.nxv4i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t +; 
CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i16.nxv8i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i16.nxv16i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv32i16.nxv32i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv32i16.nxv32i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i32.nxv1i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i32.nxv2i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i32.nxv4i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i32.nxv8i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8( + , + ptr, + , + , + iXLen, + iXLen); + +define 
@intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16i32.nxv16i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i64.nxv1i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i64.nxv2i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + 
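+; A note on the register moves in the checks above (explanatory comment, not
+; produced by update_llc_test_checks.py): when the index EEW differs from the
+; data SEW, the RVV overlap rules forbid the destination group from
+; overlapping the index source, so the selected pseudo either loads into a
+; scratch group and copies back, e.g.
+;   vloxei8.v v9, (a0), v8
+;   vmv.v.v   v8, v9
+; or first moves the index out of v8 with vmv1r/vmv2r/vmv4r. When the widths
+; match (e8 data with i8 indices), "vloxei8.v v8, (a0), v8" is legal as-is.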
+declare @llvm.riscv.vloxei.nxv4i64.nxv4i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i64.nxv8i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f16.nxv1i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vloxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f16.nxv2i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vloxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i8( + 
poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f16.nxv4i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vloxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f16.nxv8i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16f16.nxv16i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t 
+; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv32f16.nxv32i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv32f16.nxv32i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f32.nxv1i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vloxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f32.nxv2i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vloxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f32.nxv4i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; 
CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f32.nxv8i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv16f32.nxv16i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f64.nxv1i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vloxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f64.nxv2i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f64.nxv4i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f64.nxv8i8( + , + ptr, + , + iXLen); + +define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vloxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll 
b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll new file mode 100644 index 0000000000000..916af2556c6a8 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll @@ -0,0 +1,1341 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin -global-isel -verify-machineinstrs \ +; RUN: < %s | FileCheck %s + +; The intrinsics are not supported with RV32. + +declare @llvm.riscv.vluxei.nxv1i8.nxv1i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i8.nxv2i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i8.nxv4i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxei64.v v12, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i8.nxv8i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, %1, i64 
%2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxei64.v v16, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i16.nxv1i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i16.nxv2i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i16.nxv4i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxei64.v v12, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i16.nxv8i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxei64.v v16, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i32.nxv1i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i32.nxv2i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxei64.v v10, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i32.nxv4i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxei64.v v12, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i32.nxv8i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxei64.v v16, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i64.nxv1i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxei64.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i64.nxv2i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxei64.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t 
+; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i64.nxv4i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxei64.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i64.nxv8i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vluxei64.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f16.nxv1i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f16.nxv2i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxei64.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i64( + poison, + ptr %0, + %1, + i64 %2) + + 
ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f16.nxv4i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxei64.v v12, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f16.nxv8i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxei64.v v16, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f32.nxv1i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxei64.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare 
@llvm.riscv.vluxei.nxv2f32.nxv2i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxei64.v v10, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f32.nxv4i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxei64.v v12, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f32.nxv8i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxei64.v v16, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f64.nxv1i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxei64.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64( + , + ptr, + , + , + i64, + i64); + +define 
@intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f64.nxv2i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxei64.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f64.nxv4i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxei64.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f64.nxv8i64( + , + ptr, + , + i64); + +define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(ptr %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vluxei64.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i64( + poison, + ptr %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64( + , + ptr, + , + , + i64, + i64); + +define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4, i64 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll new file mode 100644 index 0000000000000..8dd32a1d640dc --- /dev/null +++ 
b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll
@@ -0,0 +1,5100 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN:   -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN:   -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
+  <vscale x 1 x i8>,
+  ptr,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vluxei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
+    <vscale x 1 x i8> poison,
+    ptr %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2)
+
+  ret %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
+  <vscale x 1 x i8>,
+  ptr,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
+    <vscale x 1 x i8> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
+  <vscale x 2 x i8>,
+  ptr,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vluxei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
+    <vscale x 2 x i8> poison,
+    ptr %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2)
+
+  ret %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
+  <vscale x 2 x i8>,
+  ptr,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
+    <vscale x 2 x i8> %0,
+    ptr %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
+  <vscale x 4 x i8>,
+  ptr,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vluxei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
+    <vscale x 4 x i8> poison,
+    ptr %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2)
+
+  ret %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
+  <vscale x 4 x i8>,
+  ptr,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
+    <vscale x 4 x i8> %0,
+    ptr %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
+  <vscale x 8 x i8>,
+  ptr,
+  <vscale x 8 x i32>,
+  iXLen);
+
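+; A note on the trailing policy operand of the masked calls (explanatory
+; comment, not produced by update_llc_test_checks.py): iXLen 1 encodes
+; TAIL_AGNOSTIC (bit 0) without MASK_AGNOSTIC (bit 1), which is why the masked
+; checks expect a "ta, mu" vsetvli while the unmasked forms, whose poison
+; passthru makes the tail and mask policies free, expect "ta, ma".
+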
+define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vluxei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
+    <vscale x 8 x i8> poison,
+    ptr %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2)
+
+  ret %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
+  <vscale x 8 x i8>,
+  ptr,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
+    <vscale x 8 x i8> %0,
+    ptr %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
+  <vscale x 16 x i8>,
+  ptr,
+  <vscale x 16 x i32>,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vluxei32.v v16, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
+    <vscale x 16 x i8> poison,
+    ptr %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2)
+
+  ret %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
+  <vscale x 16 x i8>,
+  ptr,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
+    <vscale x 16 x i8> %0,
+    ptr %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
+  <vscale x 1 x i16>,
+  ptr,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vluxei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
+    <vscale x 1 x i16> poison,
+    ptr %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2)
+
+  ret %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
+  <vscale x 1 x i16>,
+  ptr,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
+    <vscale x 1 x i16> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
+  <vscale x 2 x i16>,
+  ptr,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vluxei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
+    <vscale x 2 x i16> poison,
+    ptr %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2)
+
+  ret %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
+  <vscale x 2 x i16>,
+  ptr,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
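+; A note on index register allocation (explanatory comment, not produced by
+; update_llc_test_checks.py): the index operand carries its own LMUL, scaled
+; by (index EEW / data SEW). Above, nxv8i8 data (e8, m1) indexed by nxv8i32
+; needs an m4 index group, so the result is allocated past it:
+;   vluxei32.v v12, (a0), v8
+; The same ratio explains the v16 destinations for the m2-data/m8-index pairs.
+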
+define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
+    <vscale x 2 x i16> %0,
+    ptr %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
+  <vscale x 4 x i16>,
+  ptr,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vluxei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
+    <vscale x 4 x i16> poison,
+    ptr %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
+  <vscale x 4 x i16>,
+  ptr,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
+    <vscale x 4 x i16> %0,
+    ptr %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
+  <vscale x 8 x i16>,
+  ptr,
+  <vscale x 8 x i32>,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vluxei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
+    <vscale x 8 x i16> poison,
+    ptr %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
+  <vscale x 8 x i16>,
+  ptr,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
+    <vscale x 8 x i16> %0,
+    ptr %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
+  <vscale x 16 x i16>,
+  ptr,
+  <vscale x 16 x i32>,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vluxei32.v v16, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
+    <vscale x 16 x i16> poison,
+    ptr %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
+  <vscale x 16 x i16>,
+  ptr,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
+    <vscale x 16 x i16> %0,
+    ptr %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  ptr,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vluxei32.v v8, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> poison,
+    ptr %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  ptr,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  ptr,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vluxei32.v v8, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> poison,
+    ptr %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  ptr,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    ptr %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  ptr,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vluxei32.v v8, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> poison,
+    ptr %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  ptr,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    ptr %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  ptr,
+  <vscale x 8 x i32>,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vluxei32.v v8, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> poison,
+    ptr %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  ptr,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    ptr %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  ptr,
+  <vscale x 16 x i32>,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    vluxei32.v v8, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> poison,
+    ptr %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  ptr,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    ptr %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
+  <vscale x 1 x i64>,
+  ptr,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vluxei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
+    <vscale x 1 x i64> poison,
+    ptr %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
+  <vscale x 1 x i64>,
+  ptr,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
+    <vscale x 1 x i64> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
+  <vscale x 2 x i64>,
+  ptr,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vluxei32.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
+    <vscale x 2 x i64> poison,
+    ptr %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
+  <vscale x 2 x i64>,
+  ptr,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
+    <vscale x 2 x i64> %0,
+    ptr %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
+  <vscale x 4 x i64>,
+  ptr,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vluxei32.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
+    <vscale x 4 x i64> poison,
+    ptr %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
+  <vscale x 4 x i64>,
+  ptr,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
+    <vscale x 4 x i64> %0,
+    ptr %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
+  <vscale x 8 x i64>,
+  ptr,
+  <vscale x 8 x i32>,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vluxei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> poison,
+    ptr %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
+  <vscale x 8 x i64>,
+  ptr,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> %0,
+    ptr %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
+  <vscale x 1 x half>,
+  ptr,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vluxei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
+    <vscale x 1 x half> poison,
+    ptr %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
+  <vscale x 1 x half>,
+  ptr,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
+    <vscale x 1 x half> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
+  <vscale x 2 x half>,
+  ptr,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vluxei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
+    <vscale x 2 x half> poison,
+    ptr %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
+  <vscale x 2 x half>,
+  ptr,
+ , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f16.nxv4i32( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxei32.v v10, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f16.nxv8i32( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxei32.v v12, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16f16.nxv16i32( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxei32.v v16, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.vluxei.nxv1f32.nxv1i32( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxei32.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f32.nxv2i32( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxei32.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f32.nxv4i32( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxei32.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f32.nxv8i32( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxei32.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32( 
%0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16f32.nxv16i32( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vluxei32.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f64.nxv1i32( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxei32.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f64.nxv2i32( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f64.nxv4i32( + , + ptr, + , + iXLen); + +define 
@intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f64.nxv8i32( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vluxei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i32( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i8.nxv1i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i8.nxv2i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.vluxei.mask.nxv2i8.nxv2i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i8.nxv4i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i8.nxv8i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxei16.v v10, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i8.nxv16i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxei16.v v12, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.vluxei.nxv32i8.nxv32i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vluxei16.v v16, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv32i8.nxv32i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i16.nxv1i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i16.nxv2i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i16.nxv4i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16( + , + ptr, + , + , + iXLen, + iXLen); + +define 
@intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i16.nxv8i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i16.nxv16i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32i16.nxv32i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv32i16.nxv32i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i32.nxv1i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, %1, 
iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i32.nxv2i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxei16.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i32.nxv4i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i32.nxv8i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16( + , + 
ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i32.nxv16i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i64.nxv1i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxei16.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i64.nxv2i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vluxei.mask.nxv2i64.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i64.nxv4i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i64.nxv8i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f16.nxv1i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f16.nxv2i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v8 
+; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f16.nxv4i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f16.nxv8i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16f16.nxv16i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vluxei.mask.nxv16f16.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32f16.nxv32i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv32f16.nxv32i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f32.nxv1i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxei16.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f32.nxv2i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxei16.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f32.nxv4i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v10 
+; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f32.nxv8i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16f32.nxv16i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f64.nxv1i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxei16.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f64.nxv2i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f64.nxv4i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f64.nxv8i16( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vluxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i16( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i8.nxv1i8( + , + ptr, + , + iXLen); + +define 
@intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i8.nxv2i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i8.nxv4i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i8.nxv8i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i8.nxv16i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32i8.nxv32i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv32i8.nxv32i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv64i8.nxv64i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv64i8.nxv64i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i16.nxv1i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vluxei.nxv1i16.nxv1i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i16.nxv2i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i16.nxv4i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i16.nxv8i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + 
%a = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i16.nxv16i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32i16.nxv32i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv32i16.nxv32i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i32.nxv1i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i32.nxv2i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, 
ta, ma +; CHECK-NEXT: vluxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i32.nxv4i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i32.nxv8i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16i32.nxv16i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) 
nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i64.nxv1i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i64.nxv2i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i64.nxv4i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i64.nxv8i8( + , + ptr, + , + iXLen); + +define 
@intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f16.nxv1i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vluxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f16.nxv2i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vluxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f16.nxv4i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vluxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8( + , + ptr, + , + , + iXLen, + 
iXLen); + +define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f16.nxv8i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16f16.nxv16i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv32f16.nxv32i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv32f16.nxv32i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vluxei.mask.nxv32f16.nxv32i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f32.nxv1i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vluxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f32.nxv2i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vluxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f32.nxv4i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f32.nxv8i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: 
vluxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv16f32.nxv16i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f64.nxv1i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vluxei8.v v9, (a0), v8 +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f64.nxv2i8( + , + ptr, + , + iXLen); + +define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vluxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i8( + poison, + ptr %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8( + , + ptr, + , + , + iXLen, + iXLen); + +define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # 
%entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
+    <vscale x 2 x double> %0,
+    ptr %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
+  <vscale x 4 x double>,
+  ptr,
+  <vscale x 4 x i8>,
+  iXLen);
+
+define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vluxei8.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
+    <vscale x 4 x double> poison,
+    ptr %0,
+    <vscale x 4 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
+  <vscale x 4 x double>,
+  ptr,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
+    <vscale x 4 x double> %0,
+    ptr %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
+  <vscale x 8 x double>,
+  ptr,
+  <vscale x 8 x i8>,
+  iXLen);
+
+define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vluxei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
+    <vscale x 8 x double> poison,
+    ptr %0,
+    <vscale x 8 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
+  <vscale x 8 x double>,
+  ptr,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
+    <vscale x 8 x double> %0,
+    ptr %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll
new file mode 100644
index 0000000000000..4963d91a14988
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll
@@ -0,0 +1,1293 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin -global-isel -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s
+
+; The intrinsics are not supported with RV32.
+
+declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  ptr,
+  <vscale x 1 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  ptr,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  ptr,
+  <vscale x 2 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  ptr,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>,
+  ptr,
+  <vscale x 4 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
+    <vscale x 4 x i8> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>,
+  ptr,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
+    <vscale x 4 x i8> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
+  <vscale x 8 x i8>,
+  ptr,
+  <vscale x 8 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
+    <vscale x 8 x i8> %0,
+    ptr %1,
+    <vscale x 8 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
+  <vscale x 8 x i8>,
+  ptr,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+;
CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i64( + , + ptr, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i64( + , + ptr, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i64( + , + ptr, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i64( + , + ptr, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, 
m2, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i64( + , + ptr, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i64( + , + ptr, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( + , + ptr, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void 
@llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i64( + , + ptr, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i64( + , + ptr, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i64( + , + ptr, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i64( + , + ptr, + , + i64); + +define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void 
@llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i64( + , + ptr, + , + i64); + +define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i64( + , + ptr, + , + i64); + +define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i64( + , + ptr, + , + i64); + +define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i64( + , + ptr, + , + i64); + +define void 
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
+    <vscale x 4 x half> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
+  <vscale x 4 x half>,
+  ptr,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
+    <vscale x 4 x half> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
+  <vscale x 8 x half>,
+  ptr,
+  <vscale x 8 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
+    <vscale x 8 x half> %0,
+    ptr %1,
+    <vscale x 8 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
+  <vscale x 8 x half>,
+  ptr,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
+    <vscale x 8 x half> %0,
+    ptr %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
+  <vscale x 1 x float>,
+  ptr,
+  <vscale x 1 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
+    <vscale x 1 x float> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
+  <vscale x 1 x float>,
+  ptr,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
+    <vscale x 1 x float> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
+  <vscale x 2 x float>,
+  ptr,
+  <vscale x 2 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
+    <vscale x 2 x float> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
+  <vscale x 2 x float>,
+  ptr,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
+    <vscale x 2 x float> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
+  <vscale x 4 x float>,
+  ptr,
+  <vscale x 4 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
+    <vscale x 4 x float> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
+  <vscale x 4 x float>,
+  ptr,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
+    <vscale x 4 x float> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
+  <vscale x 8 x float>,
+  ptr,
+  <vscale x 8 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
+    <vscale x 8 x float> %0,
+    ptr %1,
+    <vscale x 8 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
+  <vscale x 8 x float>,
+  ptr,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
+    <vscale x 8 x float> %0,
+    ptr %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
+  <vscale x 1 x double>,
+  ptr,
+  <vscale x 1 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
+    <vscale x 1 x double> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
+  <vscale x 1 x double>,
+  ptr,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
+    <vscale x 1 x double> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
+  <vscale x 2 x double>,
+  ptr,
+  <vscale x 2 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
+    <vscale x 2 x double> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
+  <vscale x 2 x double>,
+  ptr,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
+    <vscale x 2 x double> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
+  <vscale x 4 x double>,
+  ptr,
+  <vscale x 4 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
+    <vscale x 4 x double> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
+  <vscale x 4 x double>,
+  ptr,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
+    <vscale x 4 x double> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
+  <vscale x 8 x double>,
+  ptr,
+  <vscale x 8 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
+    <vscale x 8 x double> %0,
+    ptr %1,
+    <vscale x 8 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
+  <vscale x 8 x double>,
+  ptr,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
+    <vscale x 8 x double> %0,
+    ptr %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll
new file mode 100644
index 0000000000000..7ea2e1734e5a2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll
@@ -0,0 +1,4881 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN:   -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN:   -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
+  <vscale x 1 x i8>,
+  ptr,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
+    <vscale x 1 x i8> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
+  <vscale x 1 x i8>,
+  ptr,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
+    <vscale x 1 x i8> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
+  <vscale x 2 x i8>,
+  ptr,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
+    <vscale x 2 x i8> %0,
+    ptr %1,
+    <vscale x 2 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
+  <vscale x 2 x i8>,
+  ptr,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
+    <vscale x 2 x i8> %0,
+    ptr %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
+  <vscale x 4 x i8>,
+  ptr,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
+    <vscale x 4 x i8> %0,
+    ptr %1,
+    <vscale x 4 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
+  <vscale x 4 x i8>,
+  ptr,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
+    <vscale x 4 x i8> %0,
+    ptr %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
+  <vscale x 8 x i8>,
+  ptr,
+  <vscale x 8 x i32>,
+  iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
+    <vscale x 8 x i8> %0,
+    ptr %1,
+    <vscale x 8 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
+  <vscale x 8 x i8>,
+  ptr,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
+    <vscale x 8 x i8> %0,
+    ptr %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
+  <vscale x 16 x i8>,
+  ptr,
+  <vscale x 16 x i32>,
+  iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
+    <vscale x 16 x i8> %0,
+    ptr %1,
+    <vscale x 16 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
+  <vscale x 16 x i8>,
+  ptr,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
+    <vscale x 16 x i8> %0,
+    ptr %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
+  <vscale x 1 x i16>,
+  ptr,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
+    <vscale x 1 x i16> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
+  <vscale x 1 x i16>,
+  ptr,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
+    <vscale x 1 x i16> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
+  <vscale x 2 x i16>,
+  ptr,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
+    <vscale x 2 x i16> %0,
+    ptr %1,
+    <vscale x 2 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
+  <vscale x 2 x i16>,
+  ptr,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
+    <vscale x 2 x i16> %0,
+    ptr %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
+  <vscale x 4 x i16>,
+  ptr,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
+    <vscale x 4 x i16> %0,
+    ptr %1,
+    <vscale x 4 x i32> %2,
+    iXLen %3)
+
+  ret
void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void 
@llvm.riscv.vsoxei.nxv2i32.nxv2i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32( + , + ptr, + , + , + iXLen); + +define void 
@intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, 
iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, 
ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void 
@llvm.riscv.vsoxei.nxv8f64.nxv8i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void 
@llvm.riscv.vsoxei.nxv8i8.nxv8i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16( + , + ptr, + , + , + iXLen); + +define void 
@intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen 
%3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: 
vsoxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void 
@llvm.riscv.vsoxei.nxv8f16.nxv8i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void 
@llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void 
+} + +declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i16( 
+ , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv64i8.nxv64i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma 
+; CHECK-NEXT: vsoxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void 
@llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + 
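+; NOTE: A reading aid for the checks in this file (an informal sketch, not
+; part of the autogenerated assertions): the vsetvli SEW/LMUL are derived
+; from the *data* type, while the vsoxeiN.v mnemonic encodes the *index*
+; element width. For instance, in the nxv4i32/nxv4i8 tests below, a store of
+; <vscale x 4 x i32> data through <vscale x 4 x i8> offsets,
+;
+;   call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
+;     <vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %idx, iXLen %vl)
+;
+; is checked as "vsetvli zero, a1, e32, m2, ta, ma" (SEW=32, LMUL = 4*32/64
+; = 2) followed by "vsoxei8.v" (index EEW=8). The names %val, %base, %idx,
+; and %vl here are illustrative placeholders.
+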
+declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i8( + , + ptr, + , + iXLen); + +define void 
@intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, 
ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: 
+ call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void 
@llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i8( + , + ptr, + , + iXLen); + +define void 
@intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
+    <vscale x 4 x double> %0,
+    ptr %1,
+    <vscale x 4 x i8> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
+  <vscale x 4 x double>,
+  ptr,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
+    <vscale x 4 x double> %0,
+    ptr %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
+  <vscale x 8 x double>,
+  ptr,
+  <vscale x 8 x i8>,
+  iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
+    <vscale x 8 x double> %0,
+    ptr %1,
+    <vscale x 8 x i8> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
+  <vscale x 8 x double>,
+  ptr,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
+    <vscale x 8 x double> %0,
+    ptr %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll
new file mode 100644
index 0000000000000..9bd272a368d20
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll
@@ -0,0 +1,1310 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin -global-isel -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s
+
+; The intrinsics are not supported with RV32.
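+
+; All index vectors in this file are <vscale x N x i64>, i.e. index EEW=64,
+; which, per the note above, is not supported on RV32; hence the single
+; riscv64 RUN line. The "allonesmask" variant below additionally checks that
+; passing "splat (i1 true)" as the mask operand selects the unmasked
+; vsuxei64.v form.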
+
+declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  ptr,
+  <vscale x 1 x i64>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  ptr,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+define void @intrinsic_vsuxei_allonesmask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_allonesmask_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> splat (i1 true),
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  ptr,
+  <vscale x 2 x i64>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  ptr,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>,
+  ptr,
+  <vscale x 4 x i64>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64(
+    <vscale x 4 x i8> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>,
+  ptr,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64(
+    <vscale x 4 x i8> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i64(
+  <vscale x 8 x i8>,
+  ptr,
+  <vscale x 8 x i64>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; 
CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( + %0, + ptr %1, 
+ %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i64( + , + ptr, + , + i64); 
+ +define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma 
+; CHECK-NEXT: vsuxei64.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64( + %0, + ptr %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i64( + , + ptr, + , + i64); + +define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64( + %0, + ptr %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64( + , + ptr, + , + , + i64); + +define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void 
@llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64(
+    <vscale x 1 x double> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i64(
+  <vscale x 2 x double>,
+  ptr,
+  <vscale x 2 x i64>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64(
+    <vscale x 2 x double> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64(
+  <vscale x 2 x double>,
+  ptr,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64(
+    <vscale x 2 x double> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
+  <vscale x 4 x double>,
+  ptr,
+  <vscale x 4 x i64>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
+    <vscale x 4 x double> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
+  <vscale x 4 x double>,
+  ptr,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
+    <vscale x 4 x double> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
+  <vscale x 8 x double>,
+  ptr,
+  <vscale x 8 x i64>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
+    <vscale x 8 x double> %0,
+    ptr %1,
+    <vscale x 8 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
+  <vscale x 8 x double>,
+  ptr,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
+    <vscale x 8 x double> %0,
+    ptr %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll
new file mode 100644
index 0000000000000..7cd15454d40b9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll
@@ -0,0 +1,4881 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN:   -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN:   -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
+  <vscale x 1 x i8>,
+  ptr,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
+    <vscale x 1 x i8> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
+  <vscale x 1 x i8>,
+  ptr,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
+    <vscale x 1 x i8> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
+  <vscale x 2 x i8>,
+  ptr,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
+    <vscale x 2 x i8> %0,
+    ptr %1,
+    <vscale x 2 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
+  <vscale x 2 x i8>,
+  ptr,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
+    <vscale x 2 x i8> %0,
+    ptr %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
+  <vscale x 4 x i8>,
+  ptr,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
+    <vscale x 4 x i8> %0,
+    ptr %1,
+    <vscale x 4 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
+  <vscale x 4 x i8>,
+  ptr,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
+    <vscale x 4 x i8> %0,
+    ptr %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
+  <vscale x 8 x i8>,
+  ptr,
+  <vscale x 8 x i32>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
+    <vscale x 8 x i8> %0,
+    ptr %1,
+    <vscale x 8 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+ +declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i32( + , + ptr, 
+ , + iXLen); + +define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32( + , + ptr, + , + , + iXLen); + +define void 
@intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen 
%3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; 
CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void 
@llvm.riscv.vsuxei.nxv16f32.nxv16i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32( + %0, + ptr %1, + %2, 
+ %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16( + , + ptr, + , + , + iXLen); + +define void 
@intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind 
{ +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, 
e32, m1, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t 
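; The intrinsic's mangled suffix fixes every operand type in these tests:
; ".nxv1i64.nxv1i16" means the stored data is <vscale x 1 x i64> and the
; index vector is <vscale x 1 x i16>; ".mask." variants take one extra
; <vscale x N x i1> mask operand before the iXLen VL. A minimal fully-typed
; instance of the recurring pattern (a sketch of the shape, not one of the
; generated tests verbatim; iXLen is i32 or i64 depending on XLEN):
;
;   declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
;     <vscale x 2 x i32>,
;     ptr,
;     <vscale x 2 x i8>,
;     iXLen);
;
;   define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
;   entry:
;     call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
;       <vscale x 2 x i32> %0,
;       ptr %1,
;       <vscale x 2 x i8> %2,
;       iXLen %3)
;     ret void
;   }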
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    ptr %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
+  <vscale x 1 x i64>,
+  ptr,
+  <vscale x 1 x i16>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
+    <vscale x 1 x i64> %0,
+    ptr %1,
+    <vscale x 1 x i16> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
+  <vscale x 1 x i64>,
+  ptr,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
+    <vscale x 1 x i64> %0,
+    ptr %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
+  <vscale x 2 x i64>,
+  ptr,
+  <vscale x 2 x i16>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
+    <vscale x 2 x i64> %0,
+    ptr %1,
+    <vscale x 2 x i16> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
+  <vscale x 2 x i64>,
+  ptr,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
+    <vscale x 2 x i64> %0,
+    ptr %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
+  <vscale x 4 x i64>,
+  ptr,
+  <vscale x 4 x i16>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
+    <vscale x 4 x i64> %0,
+    ptr %1,
+    <vscale x 4 x i16> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
+  <vscale x 4 x i64>,
+  ptr,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
+    <vscale x 4 x i64> %0,
+    ptr %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
+  <vscale x 8 x i64>,
+  ptr,
+  <vscale x 8 x i16>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
+    <vscale x 8 x i64> %0,
+    ptr %1,
+ %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void 
@llvm.riscv.vsuxei.nxv8f16.nxv8i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16( + , + ptr, + , + , + 
iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16( 
%0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: 
vsuxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8( + %0, + ptr %1, + 
%2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv64i8.nxv64i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8( + , + ptr, + , + , + iXLen); + +define 
void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, 
iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v12 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v10 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v12 +; CHECK-NEXT: 
ret +entry: + call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v16 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8( + , + ptr, + , + , + iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, ptr %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8( + %0, + ptr %1, + %2, + %3, + iXLen %4) + + 
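; In every CHECK pair here, the vsetvli operands are derived from the *data*
; type (SEW = data element width, LMUL = data register-group size), while the
; mnemonic carries the *index* element width and the index operand follows
; the data group. For nxv4i64 data indexed by nxv4i8, as in the checks above,
; that yields:
;
;   vsetvli zero, a1, e64, m4, ta, ma
;   vsuxei8.v v8, (a0), v12, v0.t
;
; where v8..v11 hold the m4 data group and v12 is the index vector.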
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
+  <vscale x 4 x half>,
+  ptr,
+  <vscale x 4 x i8>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
+    <vscale x 4 x half> %0,
+    ptr %1,
+    <vscale x 4 x i8> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
+  <vscale x 4 x half>,
+  ptr,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
+    <vscale x 4 x half> %0,
+    ptr %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
+  <vscale x 8 x half>,
+  ptr,
+  <vscale x 8 x i8>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
+    <vscale x 8 x half> %0,
+    ptr %1,
+    <vscale x 8 x i8> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
+  <vscale x 8 x half>,
+  ptr,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
+    <vscale x 8 x half> %0,
+    ptr %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
+  <vscale x 16 x half>,
+  ptr,
+  <vscale x 16 x i8>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
+    <vscale x 16 x half> %0,
+    ptr %1,
+    <vscale x 16 x i8> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
+  <vscale x 16 x half>,
+  ptr,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
+    <vscale x 16 x half> %0,
+    ptr %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
+  <vscale x 32 x half>,
+  ptr,
+  <vscale x 32 x i8>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
+    <vscale x 32 x half> %0,
+    ptr %1,
+    <vscale x 32 x i8> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
+  <vscale x 32 x half>,
+  ptr,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
+    <vscale x 32 x half> %0,
+    ptr %1,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
+  <vscale x 1 x float>,
+  ptr,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
+    <vscale x 1 x float> %0,
+    ptr %1,
+    <vscale x 1 x i8> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
+  <vscale x 1 x float>,
+  ptr,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
+    <vscale x 1 x float> %0,
+    ptr %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
+  <vscale x 2 x float>,
+  ptr,
+  <vscale x 2 x i8>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
+    <vscale x 2 x float> %0,
+    ptr %1,
+    <vscale x 2 x i8> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
+  <vscale x 2 x float>,
+  ptr,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
+    <vscale x 2 x float> %0,
+    ptr %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
+  <vscale x 4 x float>,
+  ptr,
+  <vscale x 4 x i8>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
+    <vscale x 4 x float> %0,
+    ptr %1,
+    <vscale x 4 x i8> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
+  <vscale x 4 x float>,
+  ptr,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
+    <vscale x 4 x float> %0,
+    ptr %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
+  <vscale x 8 x float>,
+  ptr,
+  <vscale x 8 x i8>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
+    <vscale x 8 x float> %0,
+    ptr %1,
+    <vscale x 8 x i8> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
+  <vscale x 8 x float>,
+  ptr,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
+    <vscale x 8 x float> %0,
+    ptr %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
+  <vscale x 16 x float>,
+  ptr,
+  <vscale x 16 x i8>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
+    <vscale x 16 x float> %0,
+    ptr %1,
+    <vscale x 16 x i8> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
+  <vscale x 16 x float>,
+  ptr,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
+    <vscale x 16 x float> %0,
+    ptr %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
+  <vscale x 1 x double>,
+  ptr,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
+    <vscale x 1 x double> %0,
+    ptr %1,
+    <vscale x 1 x i8> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
+  <vscale x 1 x double>,
+  ptr,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
+    <vscale x 1 x double> %0,
+    ptr %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
+  <vscale x 2 x double>,
+  ptr,
+  <vscale x 2 x i8>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
+    <vscale x 2 x double> %0,
+    ptr %1,
+    <vscale x 2 x i8> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
+  <vscale x 2 x double>,
+  ptr,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
+    <vscale x 2 x double> %0,
+    ptr %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
+  <vscale x 4 x double>,
+  ptr,
+  <vscale x 4 x i8>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
+    <vscale x 4 x double> %0,
+    ptr %1,
+    <vscale x 4 x i8> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
+  <vscale x 4 x double>,
+  ptr,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
+    <vscale x 4 x double> %0,
+    ptr %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
+  <vscale x 8 x double>,
+  ptr,
+  <vscale x 8 x i8>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
+    <vscale x 8 x double> %0,
+    ptr %1,
+    <vscale x 8 x i8> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
+  <vscale x 8 x double>,
+  ptr,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
+    <vscale x 8 x double> %0,
+    ptr %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret void
+}