[VPlan] Introduce recipes for VP loads and stores. (llvm#87816)
Introduce new subclasses of VPWidenMemoryRecipe for VP
(vector-predicated) loads and stores to address multiple TODOs from
llvm#76172.

Note that the introduction of the new recipes also improves code-gen for
VP gathers/scatters by removing the redundant header mask. With the new
approach, it is no longer sufficient to look at users of the widened
canonical IV to find all uses of the header mask.

In some cases, a widened IV is used instead of separately widening the
canonical IV. To handle that, first collect all VPValues representing header
masks (by looking at users of both the canonical IV and of widened inductions
that are canonical) and then check all users (recursively) of those header
masks; a sketch of the collection step follows.
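The header-mask collection described above corresponds roughly to the sketch
below. This is illustrative only: the helper name collectAllHeaderMasks and
the way a compare against the backedge-taken count is recognized are
assumptions for exposition, not necessarily the code added by this commit.

#include "VPlan.h"
#include "llvm/ADT/SmallVector.h"

using namespace llvm;

// Sketch: collect every VPValue acting as a header mask. Start from all wide
// canonical IVs (an explicit VPWidenCanonicalIVRecipe and/or widened
// inductions that are themselves canonical), then take their compares against
// the backedge-taken count.
static SmallVector<VPValue *> collectAllHeaderMasks(VPlan &Plan) {
  SmallVector<VPValue *> WideCanonicalIVs;
  for (VPUser *U : Plan.getCanonicalIV()->users())
    if (auto *WidenCanIV = dyn_cast<VPWidenCanonicalIVRecipe>(U))
      WideCanonicalIVs.push_back(WidenCanIV);

  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    auto *WidenIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (WidenIV && WidenIV->isCanonical())
      WideCanonicalIVs.push_back(WidenIV);
  }

  // A header mask compares a wide canonical IV against the backedge-taken
  // count; collect all such users of the IVs found above.
  SmallVector<VPValue *> HeaderMasks;
  VPValue *BTC = Plan.getOrCreateBackedgeTakenCount();
  for (VPValue *WideIV : WideCanonicalIVs)
    for (VPUser *U : WideIV->users()) {
      auto *Cmp = dyn_cast<VPInstruction>(U);
      if (Cmp && Cmp->getNumOperands() == 2 && Cmp->getOperand(1) == BTC)
        HeaderMasks.push_back(Cmp);
    }
  return HeaderMasks;
}

The users of the returned masks (including the widened memory recipes) are
then what the transform visits, recursively, to introduce the EVL-based
recipes.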

Depends on llvm#87411.

PR: llvm#87816
fhahn authored and aniplcc committed Apr 21, 2024
1 parent 07171c3 commit 9aca852
Showing 8 changed files with 283 additions and 181 deletions.
207 changes: 96 additions & 111 deletions llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -9324,52 +9324,6 @@ void VPReplicateRecipe::execute(VPTransformState &State) {
State.ILV->scalarizeInstruction(UI, this, VPIteration(Part, Lane), State);
}

/// Creates either vp_store or vp_scatter intrinsics calls to represent
/// predicated store/scatter.
static Instruction *
lowerStoreUsingVectorIntrinsics(IRBuilderBase &Builder, Value *Addr,
Value *StoredVal, bool IsScatter, Value *Mask,
Value *EVL, const Align &Alignment) {
CallInst *Call;
if (IsScatter) {
Call = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
Intrinsic::vp_scatter,
{StoredVal, Addr, Mask, EVL});
} else {
VectorBuilder VBuilder(Builder);
VBuilder.setEVL(EVL).setMask(Mask);
Call = cast<CallInst>(VBuilder.createVectorInstruction(
Instruction::Store, Type::getVoidTy(EVL->getContext()),
{StoredVal, Addr}));
}
Call->addParamAttr(
1, Attribute::getWithAlignment(Call->getContext(), Alignment));
return Call;
}

/// Creates either vp_load or vp_gather intrinsics calls to represent
/// predicated load/gather.
static Instruction *lowerLoadUsingVectorIntrinsics(IRBuilderBase &Builder,
VectorType *DataTy,
Value *Addr, bool IsGather,
Value *Mask, Value *EVL,
const Align &Alignment) {
CallInst *Call;
if (IsGather) {
Call =
Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL},
nullptr, "wide.masked.gather");
} else {
VectorBuilder VBuilder(Builder);
VBuilder.setEVL(EVL).setMask(Mask);
Call = cast<CallInst>(VBuilder.createVectorInstruction(
Instruction::Load, DataTy, Addr, "vp.op.load"));
}
Call->addParamAttr(
0, Attribute::getWithAlignment(Call->getContext(), Alignment));
return Call;
}

void VPWidenLoadRecipe::execute(VPTransformState &State) {
auto *LI = cast<LoadInst>(&Ingredient);

@@ -9391,48 +9345,62 @@ void VPWidenLoadRecipe::execute(VPTransformState &State) {
Mask = Builder.CreateVectorReverse(Mask, "reverse");
}

// TODO: split this into several classes for better design.
if (State.EVL) {
assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
"explicit vector length.");
assert(cast<VPInstruction>(State.EVL)->getOpcode() ==
VPInstruction::ExplicitVectorLength &&
"EVL must be VPInstruction::ExplicitVectorLength.");
Value *EVL = State.get(State.EVL, VPIteration(0, 0));
// If EVL is not nullptr, then EVL must be a valid value set during plan
// creation, possibly default value = whole vector register length. EVL
// is created only if TTI prefers predicated vectorization, thus if EVL
// is not nullptr it also implies preference for predicated
// vectorization.
// FIXME: Support reverse loading after vp_reverse is added.
NewLI = lowerLoadUsingVectorIntrinsics(
Builder, DataTy, State.get(getAddr(), Part, !CreateGather),
CreateGather, Mask, EVL, Alignment);
} else if (CreateGather) {
Value *VectorGep = State.get(getAddr(), Part);
NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, Mask,
nullptr, "wide.masked.gather");
State.addMetadata(NewLI, LI);
Value *Addr = State.get(getAddr(), Part, /*IsScalar*/ !CreateGather);
if (CreateGather) {
NewLI = Builder.CreateMaskedGather(DataTy, Addr, Alignment, Mask, nullptr,
"wide.masked.gather");
} else if (Mask) {
NewLI = Builder.CreateMaskedLoad(DataTy, Addr, Alignment, Mask,
PoisonValue::get(DataTy),
"wide.masked.load");
} else {
auto *VecPtr = State.get(getAddr(), Part, /*IsScalar*/ true);
if (Mask)
NewLI = Builder.CreateMaskedLoad(DataTy, VecPtr, Alignment, Mask,
PoisonValue::get(DataTy),
"wide.masked.load");
else
NewLI =
Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");

// Add metadata to the load, but setVectorValue to the reverse shuffle.
State.addMetadata(NewLI, LI);
if (Reverse)
NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
}

// Add metadata to the load, but setVectorValue to the reverse shuffle.
State.addMetadata(NewLI, LI);
if (Reverse)
NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
State.set(this, NewLI, Part);
}
}

void VPWidenLoadEVLRecipe::execute(VPTransformState &State) {
assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
"explicit vector length.");
// FIXME: Support reverse loading after vp_reverse is added.
assert(!isReverse() && "Reverse loads are not implemented yet.");

auto *LI = cast<LoadInst>(&Ingredient);

Type *ScalarDataTy = getLoadStoreType(&Ingredient);
auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
const Align Alignment = getLoadStoreAlignment(&Ingredient);
bool CreateGather = !isConsecutive();

auto &Builder = State.Builder;
State.setDebugLocFrom(getDebugLoc());
CallInst *NewLI;
Value *EVL = State.get(getEVL(), VPIteration(0, 0));
Value *Addr = State.get(getAddr(), 0, !CreateGather);
Value *Mask =
    getMask() ? State.get(getMask(), 0)
              : Builder.CreateVectorSplat(State.VF, Builder.getTrue());
if (CreateGather) {
NewLI =
Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL},
nullptr, "wide.masked.gather");
} else {
VectorBuilder VBuilder(Builder);
VBuilder.setEVL(EVL).setMask(Mask);
NewLI = cast<CallInst>(VBuilder.createVectorInstruction(
Instruction::Load, DataTy, Addr, "vp.op.load"));
}
NewLI->addParamAttr(
0, Attribute::getWithAlignment(NewLI->getContext(), Alignment));
State.addMetadata(NewLI, LI);
State.set(this, NewLI, 0);
}

void VPWidenStoreRecipe::execute(VPTransformState &State) {
auto *SI = cast<StoreInst>(&Ingredient);

@@ -9456,45 +9424,62 @@ void VPWidenStoreRecipe::execute(VPTransformState &State) {

Value *StoredVal = State.get(StoredVPValue, Part);
if (isReverse()) {
assert(!State.EVL && "reversing not yet implemented with EVL");
// If we store to reverse consecutive memory locations, then we need
// to reverse the order of elements in the stored value.
StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
// We don't want to update the value in the map as it might be used in
// another expression. So don't call resetVectorValue(StoredVal).
}
// TODO: split this into several classes for better design.
if (State.EVL) {
assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
"explicit vector length.");
assert(cast<VPInstruction>(State.EVL)->getOpcode() ==
VPInstruction::ExplicitVectorLength &&
"EVL must be VPInstruction::ExplicitVectorLength.");
Value *EVL = State.get(State.EVL, VPIteration(0, 0));
// If EVL is not nullptr, then EVL must be a valid value set during plan
// creation, possibly default value = whole vector register length. EVL
// is created only if TTI prefers predicated vectorization, thus if EVL
// is not nullptr it also implies preference for predicated
// vectorization.
// FIXME: Support reverse store after vp_reverse is added.
NewSI = lowerStoreUsingVectorIntrinsics(
Builder, State.get(getAddr(), Part, !CreateScatter), StoredVal,
CreateScatter, Mask, EVL, Alignment);
} else if (CreateScatter) {
Value *VectorGep = State.get(getAddr(), Part);
NewSI =
Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, Mask);
} else {
auto *VecPtr = State.get(getAddr(), Part, /*IsScalar*/ true);
if (Mask)
NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, Mask);
else
NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
}
Value *Addr = State.get(getAddr(), Part, /*IsScalar*/ !CreateScatter);
if (CreateScatter)
NewSI = Builder.CreateMaskedScatter(StoredVal, Addr, Alignment, Mask);
else if (Mask)
NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
else
NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);
State.addMetadata(NewSI, SI);
}
}

void VPWidenStoreEVLRecipe::execute(VPTransformState &State) {
assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
"explicit vector length.");
// FIXME: Support reverse stores after vp_reverse is added.
assert(!isReverse() && "Reverse stores are not implemented yet.");

auto *SI = cast<StoreInst>(&Ingredient);

VPValue *StoredValue = getStoredValue();
bool CreateScatter = !isConsecutive();
const Align Alignment = getLoadStoreAlignment(&Ingredient);

auto &Builder = State.Builder;
State.setDebugLocFrom(getDebugLoc());

CallInst *NewSI = nullptr;
Value *StoredVal = State.get(StoredValue, 0);
Value *EVL = State.get(getEVL(), VPIteration(0, 0));
// FIXME: Support reverse store after vp_reverse is added.
Value *Mask =
    getMask() ? State.get(getMask(), 0)
              : Builder.CreateVectorSplat(State.VF, Builder.getTrue());
Value *Addr = State.get(getAddr(), 0, !CreateScatter);
if (CreateScatter) {
NewSI = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
Intrinsic::vp_scatter,
{StoredVal, Addr, Mask, EVL});
} else {
VectorBuilder VBuilder(Builder);
VBuilder.setEVL(EVL).setMask(Mask);
NewSI = cast<CallInst>(VBuilder.createVectorInstruction(
Instruction::Store, Type::getVoidTy(EVL->getContext()),
{StoredVal, Addr}));
}
NewSI->addParamAttr(
1, Attribute::getWithAlignment(NewSI->getContext(), Alignment));
State.addMetadata(NewSI, SI);
}

// Determine how to lower the scalar epilogue, which depends on 1) optimising
// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
// predication, and 4) a TTI hook that analyses whether the loop is suitable
102 changes: 89 additions & 13 deletions llvm/lib/Transforms/Vectorize/VPlan.h
@@ -242,15 +242,6 @@ struct VPTransformState {
ElementCount VF;
unsigned UF;

/// If EVL (Explicit Vector Length) is not nullptr, then EVL must be a valid
/// value set during plan transformation, possibly a default value = whole
/// vector register length. EVL is created only if TTI prefers predicated
/// vectorization, thus if EVL is not nullptr it also implies preference for
/// predicated vectorization.
/// TODO: this is a temporarily solution, the EVL must be explicitly used by
/// the recipes and must be removed here.
VPValue *EVL = nullptr;

/// Hold the indices to generate specific scalar instructions. Null indicates
/// that all instances are to be generated, using either scalar or vector
/// instructions.
@@ -875,7 +866,9 @@ class VPSingleDefRecipe : public VPRecipeBase, public VPValue {
return true;
case VPRecipeBase::VPInterleaveSC:
case VPRecipeBase::VPBranchOnMaskSC:
case VPRecipeBase::VPWidenLoadEVLSC:
case VPRecipeBase::VPWidenLoadSC:
case VPRecipeBase::VPWidenStoreEVLSC:
case VPRecipeBase::VPWidenStoreSC:
// TODO: Widened stores don't define a value, but widened loads do. Split
// the recipes to be able to make widened loads VPSingleDefRecipes.
@@ -2318,11 +2311,15 @@ class VPWidenMemoryRecipe : public VPRecipeBase {
}

public:
VPWidenMemoryRecipe *clone() override = 0;
VPWidenMemoryRecipe *clone() override {
llvm_unreachable("cloning not supported");
}

static inline bool classof(const VPRecipeBase *R) {
return R->getVPDefID() == VPDef::VPWidenLoadSC ||
R->getVPDefID() == VPDef::VPWidenStoreSC;
return R->getVPDefID() == VPRecipeBase::VPWidenLoadSC ||
R->getVPDefID() == VPRecipeBase::VPWidenStoreSC ||
R->getVPDefID() == VPRecipeBase::VPWidenLoadEVLSC ||
R->getVPDefID() == VPRecipeBase::VPWidenStoreEVLSC;
}

static inline bool classof(const VPUser *U) {
@@ -2390,13 +2387,48 @@ struct VPWidenLoadRecipe final : public VPWidenMemoryRecipe, public VPValue {
bool onlyFirstLaneUsed(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");

// Widened, consecutive load operations only demand the first lane of
// their address.
return Op == getAddr() && isConsecutive();
}
};

/// A recipe for widening load operations with vector-predication intrinsics,
/// using the address to load from, the explicit vector length and an optional
/// mask.
struct VPWidenLoadEVLRecipe final : public VPWidenMemoryRecipe, public VPValue {
VPWidenLoadEVLRecipe(VPWidenLoadRecipe *L, VPValue *EVL, VPValue *Mask)
: VPWidenMemoryRecipe(VPDef::VPWidenLoadEVLSC, L->getIngredient(),
{L->getAddr(), EVL}, L->isConsecutive(), false,
L->getDebugLoc()),
VPValue(this, &getIngredient()) {
setMask(Mask);
}

VP_CLASSOF_IMPL(VPDef::VPWidenLoadEVLSC)

/// Return the EVL operand.
VPValue *getEVL() const { return getOperand(1); }

/// Generate the wide load or gather.
void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif

/// Returns true if the recipe only uses the first lane of operand \p Op.
bool onlyFirstLaneUsed(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
// Widened loads only demand the first lane of EVL and consecutive loads
// only demand the first lane of their address.
return Op == getEVL() || (Op == getAddr() && isConsecutive());
}
};

/// A recipe for widening store operations, using the stored value, the address
/// to store to and an optional mask.
struct VPWidenStoreRecipe final : public VPWidenMemoryRecipe {
@@ -2436,6 +2468,50 @@ struct VPWidenStoreRecipe final : public VPWidenMemoryRecipe {
return Op == getAddr() && isConsecutive() && Op != getStoredValue();
}
};

/// A recipe for widening store operations with vector-predication intrinsics,
/// using the value to store, the address to store to, the explicit vector
/// length and an optional mask.
struct VPWidenStoreEVLRecipe final : public VPWidenMemoryRecipe {
VPWidenStoreEVLRecipe(VPWidenStoreRecipe *S, VPValue *EVL, VPValue *Mask)
: VPWidenMemoryRecipe(VPDef::VPWidenStoreEVLSC, S->getIngredient(),
{S->getAddr(), S->getStoredValue(), EVL},
S->isConsecutive(), false, S->getDebugLoc()) {
setMask(Mask);
}

VP_CLASSOF_IMPL(VPDef::VPWidenStoreEVLSC)

/// Return the value stored by this recipe.
VPValue *getStoredValue() const { return getOperand(1); }

/// Return the EVL operand.
VPValue *getEVL() const { return getOperand(2); }

/// Generate the wide store or scatter.
void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif

/// Returns true if the recipe only uses the first lane of operand \p Op.
bool onlyFirstLaneUsed(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
if (Op == getEVL()) {
assert(getStoredValue() != Op && "unexpected store of EVL");
return true;
}
// Widened, consecutive memory operations only demand the first lane of
// their address, unless the same operand is also stored. That latter can
// happen with opaque pointers.
return Op == getAddr() && isConsecutive() && Op != getStoredValue();
}
};

/// Recipe to expand a SCEV expression.
class VPExpandSCEVRecipe : public VPSingleDefRecipe {
const SCEV *Expr;
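For reference, the new EVL recipes above are expected to be created from the
existing widened memory recipes by a VPlan-to-VPlan transform along the lines
of the sketch below. The function name replaceWithEVLRecipe and the way EVL
and NewMask are obtained are assumptions for illustration; only the
constructors and the basic recipe API used here come from this patch and the
surrounding VPlan code.

#include "VPlan.h"

using namespace llvm;

// Sketch: swap a widened memory recipe for its EVL-based variant. EVL is the
// VPValue produced by a VPInstruction::ExplicitVectorLength, and NewMask is
// the recipe's mask with a redundant header mask stripped (may be nullptr).
static void replaceWithEVLRecipe(VPWidenMemoryRecipe &MemR, VPValue *EVL,
                                 VPValue *NewMask) {
  VPRecipeBase *NewR = nullptr;
  if (auto *L = dyn_cast<VPWidenLoadRecipe>(&MemR)) {
    auto *N = new VPWidenLoadEVLRecipe(L, EVL, NewMask);
    // Loads define a value, so forward existing users to the new recipe.
    L->replaceAllUsesWith(N);
    NewR = N;
  } else if (auto *S = dyn_cast<VPWidenStoreRecipe>(&MemR)) {
    NewR = new VPWidenStoreEVLRecipe(S, EVL, NewMask);
  }
  if (!NewR)
    return; // Already an EVL recipe or another memory recipe: nothing to do.
  NewR->insertBefore(&MemR);
  MemR.eraseFromParent();
}

The resulting VPWidenLoadEVLRecipe/VPWidenStoreEVLRecipe then lower to
llvm.vp.* intrinsics in their execute() implementations shown in the
LoopVectorize.cpp hunks above.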
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
@@ -109,7 +109,7 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenCallRecipe *R) {
}

Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenMemoryRecipe *R) {
assert(isa<VPWidenLoadRecipe>(R) &&
assert((isa<VPWidenLoadRecipe>(R) || isa<VPWidenLoadEVLRecipe>(R)) &&
"Store recipes should not define any values");
return cast<LoadInst>(&R->getIngredient())->getType();
}
