
Conversation


@preames preames commented Jul 17, 2025

We're going to end up repeating the operand extraction four times once all of the routines have been updated to support both plain load/store and vp.load/vp.store. I plan to add masked.load/masked.store in the near future, and we'd need to add that to each of the four cases. Instead, factor out a single copy of the operand normalization.

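
For reference, here is a rough sketch of how the planned masked.load/masked.store cases might slot into the new helper, between the plain load/store cases and the vp.* handling. This is purely illustrative and not part of this PR; the operand indices assume the masked intrinsic forms that carry an explicit i32 alignment argument, i.e. llvm.masked.load(ptr, align, mask, passthru) and llvm.masked.store(value, ptr, align, mask).

  // Hypothetical sketch, not in this PR: masked.load/masked.store handling
  // for getMemOperands, assuming the mask has already been extracted by the
  // caller (as with the vp.* path) and that the intrinsics still take an
  // explicit i32 alignment operand.
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    if (II->getIntrinsicID() == Intrinsic::masked_load) {
      Ptr = II->getArgOperand(0);
      Alignment =
          Align(cast<ConstantInt>(II->getArgOperand(1))->getZExtValue());
      // A segment load can't honor a meaningful passthru; bail out unless
      // the passthru is undef/poison.
      if (!isa<UndefValue>(II->getArgOperand(3)))
        return false;
      assert(Mask && "masked.load needs a mask!");
      VL = isa<FixedVectorType>(VTy)
               ? Builder.CreateElementCount(XLenTy, VTy->getElementCount())
               : Constant::getAllOnesValue(XLenTy);
      return true;
    }
    if (II->getIntrinsicID() == Intrinsic::masked_store) {
      Ptr = II->getArgOperand(1);
      Alignment =
          Align(cast<ConstantInt>(II->getArgOperand(2))->getZExtValue());
      assert(Mask && "masked.store needs a mask!");
      VL = isa<FixedVectorType>(VTy)
               ? Builder.CreateElementCount(XLenTy, VTy->getElementCount())
               : Constant::getAllOnesValue(XLenTy);
      return true;
    }
  }

Note that a VPIntrinsic is also an IntrinsicInst, so neither inner check fires for vp.load/vp.store and control falls through to the existing vp.* handling.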

llvmbot commented Jul 17, 2025

@llvm/pr-subscribers-backend-risc-v

Author: Philip Reames (preames)

Changes

We're going to end up repeating the operand extraction four times once all of the routines have been updated to support both plain load/store and vp.load/vp.store. I plan to add masked.load/masked.store in the near future, and we'd need to add that to each of the four cases. Instead, factor out a single copy of the operand normalization.


Full diff: https://github.com/llvm/llvm-project/pull/149344.diff

1 file affected:

  • (modified) llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp (+52-56)
diff --git a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
index 0d4f24172b574..1336145db53db 100644
--- a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
@@ -102,6 +102,52 @@ static bool isMultipleOfN(const Value *V, const DataLayout &DL, unsigned N) {
   return false;
 }
 
+static bool getMemOperands(IRBuilderBase &Builder, unsigned Factor,
+                           VectorType *VTy, const DataLayout &DL, Type *XLenTy,
+                           Instruction *I, Value *&Ptr, Value *&Mask,
+                           Value *&VL, Align &Alignment) {
+  ElementCount EC = VTy->getElementCount();
+  if (auto *LI = dyn_cast<LoadInst>(I)) {
+    assert(LI->isSimple());
+    Ptr = LI->getPointerOperand();
+    Alignment = LI->getAlign();
+    assert(!Mask && "Unexpected mask on a load\n");
+    Mask = Builder.getAllOnesMask(EC);
+    VL = isa<FixedVectorType>(VTy) ? Builder.CreateElementCount(XLenTy, EC)
+                                   : Constant::getAllOnesValue(XLenTy);
+    return true;
+  }
+  if (auto *SI = dyn_cast<StoreInst>(I)) {
+    assert(SI->isSimple());
+    Ptr = SI->getPointerOperand();
+    Alignment = SI->getAlign();
+    assert(!Mask && "Unexpected mask on a store");
+    Mask = Builder.getAllOnesMask(EC);
+    VL = isa<FixedVectorType>(VTy) ? Builder.CreateElementCount(XLenTy, EC)
+                                   : Constant::getAllOnesValue(XLenTy);
+    return true;
+  }
+  auto *VPLdSt = cast<VPIntrinsic>(I);
+  assert((VPLdSt->getIntrinsicID() == Intrinsic::vp_load ||
+          VPLdSt->getIntrinsicID() == Intrinsic::vp_store) &&
+         "Unexpected intrinsic");
+  Ptr = VPLdSt->getMemoryPointerParam();
+  Alignment = VPLdSt->getPointerAlignment().value_or(
+      DL.getABITypeAlign(VTy->getElementType()));
+
+  assert(Mask && "vp.load needs a mask!");
+
+  Value *WideEVL = VPLdSt->getVectorLengthParam();
+  // Conservatively check if EVL is a multiple of factor, otherwise some
+  // (trailing) elements might be lost after the transformation.
+  if (!isMultipleOfN(WideEVL, I->getDataLayout(), Factor))
+    return false;
+
+  auto *FactorC = ConstantInt::get(WideEVL->getType(), Factor);
+  VL = Builder.CreateZExt(Builder.CreateExactUDiv(WideEVL, FactorC), XLenTy);
+  return true;
+}
+
 /// Lower an interleaved load into a vlsegN intrinsic.
 ///
 /// E.g. Lower an interleaved load (Factor = 2):
@@ -271,34 +317,9 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
 
   Value *Ptr, *VL;
   Align Alignment;
-  if (auto *LI = dyn_cast<LoadInst>(Load)) {
-    assert(LI->isSimple());
-    Ptr = LI->getPointerOperand();
-    Alignment = LI->getAlign();
-    assert(!Mask && "Unexpected mask on a load\n");
-    Mask = Builder.getAllOnesMask(ResVTy->getElementCount());
-    VL = isa<FixedVectorType>(ResVTy)
-             ? Builder.CreateElementCount(XLenTy, ResVTy->getElementCount())
-             : Constant::getAllOnesValue(XLenTy);
-  } else {
-    auto *VPLoad = cast<VPIntrinsic>(Load);
-    assert(VPLoad->getIntrinsicID() == Intrinsic::vp_load &&
-           "Unexpected intrinsic");
-    Ptr = VPLoad->getMemoryPointerParam();
-    Alignment = VPLoad->getPointerAlignment().value_or(
-        DL.getABITypeAlign(ResVTy->getElementType()));
-
-    assert(Mask && "vp.load needs a mask!");
-
-    Value *WideEVL = VPLoad->getVectorLengthParam();
-    // Conservatively check if EVL is a multiple of factor, otherwise some
-    // (trailing) elements might be lost after the transformation.
-    if (!isMultipleOfN(WideEVL, Load->getDataLayout(), Factor))
-      return false;
-
-    auto *FactorC = ConstantInt::get(WideEVL->getType(), Factor);
-    VL = Builder.CreateZExt(Builder.CreateExactUDiv(WideEVL, FactorC), XLenTy);
-  }
+  if (!getMemOperands(Builder, Factor, ResVTy, DL, XLenTy, Load, Ptr, Mask, VL,
+                      Alignment))
+    return false;
 
   Type *PtrTy = Ptr->getType();
   unsigned AS = PtrTy->getPointerAddressSpace();
@@ -360,34 +381,9 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
 
   Value *Ptr, *VL;
   Align Alignment;
-  if (auto *SI = dyn_cast<StoreInst>(Store)) {
-    assert(SI->isSimple());
-    Ptr = SI->getPointerOperand();
-    Alignment = SI->getAlign();
-    assert(!Mask && "Unexpected mask on a store");
-    Mask = Builder.getAllOnesMask(InVTy->getElementCount());
-    VL = isa<FixedVectorType>(InVTy)
-             ? Builder.CreateElementCount(XLenTy, InVTy->getElementCount())
-             : Constant::getAllOnesValue(XLenTy);
-  } else {
-    auto *VPStore = cast<VPIntrinsic>(Store);
-    assert(VPStore->getIntrinsicID() == Intrinsic::vp_store &&
-           "Unexpected intrinsic");
-    Ptr = VPStore->getMemoryPointerParam();
-    Alignment = VPStore->getPointerAlignment().value_or(
-        DL.getABITypeAlign(InVTy->getElementType()));
-
-    assert(Mask && "vp.store needs a mask!");
-
-    Value *WideEVL = VPStore->getVectorLengthParam();
-    // Conservatively check if EVL is a multiple of factor, otherwise some
-    // (trailing) elements might be lost after the transformation.
-    if (!isMultipleOfN(WideEVL, DL, Factor))
-      return false;
-
-    auto *FactorC = ConstantInt::get(WideEVL->getType(), Factor);
-    VL = Builder.CreateZExt(Builder.CreateExactUDiv(WideEVL, FactorC), XLenTy);
-  }
+  if (!getMemOperands(Builder, Factor, InVTy, DL, XLenTy, Store, Ptr, Mask, VL,
+                      Alignment))
+    return false;
   Type *PtrTy = Ptr->getType();
   unsigned AS = Ptr->getType()->getPointerAddressSpace();
   if (!isLegalInterleavedAccessType(InVTy, Factor, Alignment, AS, DL))

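For context on the vp.* path: only the closing return false; of isMultipleOfN shows in the hunk above. The conservative "EVL is a multiple of factor" comment refers to a divisibility check of roughly the following shape, an illustrative sketch rather than the exact in-tree helper:

// Sketch of an EVL divisibility check in the spirit of isMultipleOfN
// (illustrative; the in-tree helper may recognize different patterns).
// Assumes llvm/IR/PatternMatch.h, llvm/Analysis/ValueTracking.h,
// llvm/Support/KnownBits.h, and llvm/Support/MathExtras.h.
static bool isMultipleOfNSketch(const Value *V, const DataLayout &DL,
                                unsigned N) {
  assert(N && "factor must be nonzero");
  if (N == 1)
    return true;

  using namespace PatternMatch;
  // A constant EVL, or an EVL scaled by a constant, that N evenly divides.
  uint64_t C;
  if (match(V, m_CombineOr(m_ConstantInt(C),
                           m_c_Mul(m_Value(), m_ConstantInt(C)))) &&
      C && C % N == 0)
    return true;

  // For a power-of-two factor, enough known-zero low bits also prove it.
  if (isPowerOf2_32(N)) {
    KnownBits Known = computeKnownBits(V, DL);
    return Known.countMinTrailingZeros() >= Log2_32(N);
  }
  return false;
}

With divisibility established, the CreateExactUDiv above is sound: a wide EVL of 16 with Factor = 4 yields a per-segment VL of 4, whereas an unprovable EVL such as 10 would silently drop trailing lanes, which is why the lowering bails out instead.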

@mshockwave mshockwave left a comment


LGTM w/ minor comments

@preames preames merged commit f6641e2 into llvm:main Jul 18, 2025
7 of 9 checks passed
@preames preames deleted the pr-riscv-ia-common-operand-code branch July 18, 2025 18:04
mdfazlay pushed a commit to mdfazlay/llvm-project that referenced this pull request Jul 18, 2025
…fc] (llvm#149344)

