[VPlan] Introduce recipes for VP loads and stores. #87816

Merged · 7 commits · Apr 19, 2024
Changes from 3 commits
201 changes: 95 additions & 106 deletions llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -9317,52 +9317,6 @@ void VPReplicateRecipe::execute(VPTransformState &State) {
State.ILV->scalarizeInstruction(UI, this, VPIteration(Part, Lane), State);
}

/// Creates either vp_store or vp_scatter intrinsic calls to represent
/// predicated store/scatter.
static Instruction *
lowerStoreUsingVectorIntrinsics(IRBuilderBase &Builder, Value *Addr,
Value *StoredVal, bool IsScatter, Value *Mask,
Value *EVL, const Align &Alignment) {
CallInst *Call;
if (IsScatter) {
Call = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
Intrinsic::vp_scatter,
{StoredVal, Addr, Mask, EVL});
} else {
VectorBuilder VBuilder(Builder);
VBuilder.setEVL(EVL).setMask(Mask);
Call = cast<CallInst>(VBuilder.createVectorInstruction(
Instruction::Store, Type::getVoidTy(EVL->getContext()),
{StoredVal, Addr}));
}
Call->addParamAttr(
1, Attribute::getWithAlignment(Call->getContext(), Alignment));
return Call;
}

/// Creates either vp_load or vp_gather intrinsic calls to represent
/// predicated load/gather.
static Instruction *lowerLoadUsingVectorIntrinsics(IRBuilderBase &Builder,
VectorType *DataTy,
Value *Addr, bool IsGather,
Value *Mask, Value *EVL,
const Align &Alignment) {
CallInst *Call;
if (IsGather) {
Call =
Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL},
nullptr, "wide.masked.gather");
} else {
VectorBuilder VBuilder(Builder);
VBuilder.setEVL(EVL).setMask(Mask);
Call = cast<CallInst>(VBuilder.createVectorInstruction(
Instruction::Load, DataTy, Addr, "vp.op.load"));
}
Call->addParamAttr(
0, Attribute::getWithAlignment(Call->getContext(), Alignment));
return Call;
}

void VPWidenLoadRecipe::execute(VPTransformState &State) {
auto *LI = cast<LoadInst>(&Ingredient);

@@ -9384,48 +9338,64 @@ void VPWidenLoadRecipe::execute(VPTransformState &State) {
Mask = Builder.CreateVectorReverse(Mask, "reverse");
}

// TODO: split this into several classes for better design.
if (State.EVL) {
assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
"explicit vector length.");
assert(cast<VPInstruction>(State.EVL)->getOpcode() ==
VPInstruction::ExplicitVectorLength &&
"EVL must be VPInstruction::ExplicitVectorLength.");
Value *EVL = State.get(State.EVL, VPIteration(0, 0));
// If EVL is not nullptr, then EVL must be a valid value set during plan
// creation, possibly default value = whole vector register length. EVL
// is created only if TTI prefers predicated vectorization, thus if EVL
// is not nullptr it also implies preference for predicated
// vectorization.
// FIXME: Support reverse loading after vp_reverse is added.
NewLI = lowerLoadUsingVectorIntrinsics(
Builder, DataTy, State.get(getAddr(), Part, !CreateGather),
CreateGather, Mask, EVL, Alignment);
} else if (CreateGather) {
Value *VectorGep = State.get(getAddr(), Part);
NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, Mask,
nullptr, "wide.masked.gather");
Value *Addr = State.get(getAddr(), Part, /*IsScalar*/ !CreateGather);
if (CreateGather) {
Collaborator:

Suggested change:

-      if (CreateGather) {
+      Value *Addr = State.get(getAddr(), Part, !CreateGather);
+      if (CreateGather)
+        NewLI = Builder.CreateMaskedGather(DataTy, Addr, Alignment, Mask,
+                                           nullptr, "wide.masked.gather");
+      else if (Mask)
+        NewLI = Builder.CreateMaskedLoad(DataTy, Addr, Alignment, Mask,
+                                         PoisonValue::get(DataTy),
+                                         "wide.masked.load");
+      else
+        NewLI =
+            Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
+      // Add metadata to the load, but setVectorValue to possibly reversed shuffle.
+      State.addMetadata(NewLI, LI);
+      if (Reverse)
+        NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
+      State.set(this, NewLI, Part);

nit: could be simplified a bit, consistent with VPWidenVPLoadRecipe::execute()?

Contributor Author: Updated, thanks!

Collaborator: Thanks! The brackets could be removed.

Contributor Author: Kept the braces as there are multi-line statements (for which I think it is recommended to use braces).

Collaborator: ok, but let the store case below be consistent with the load case here?

NewLI = Builder.CreateMaskedGather(DataTy, Addr, Alignment, Mask, nullptr,
"wide.masked.gather");
State.addMetadata(NewLI, LI);
} else if (Mask) {
NewLI = Builder.CreateMaskedLoad(DataTy, Addr, Alignment, Mask,
PoisonValue::get(DataTy),
"wide.masked.load");
} else {
auto *VecPtr = State.get(getAddr(), Part, /*IsScalar*/ true);
if (Mask)
NewLI = Builder.CreateMaskedLoad(DataTy, VecPtr, Alignment, Mask,
PoisonValue::get(DataTy),
"wide.masked.load");
else
NewLI =
Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");

// Add metadata to the load, but setVectorValue to the reverse shuffle.
State.addMetadata(NewLI, LI);
if (Reverse)
NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
}

// Add metadata to the load, but setVectorValue to the reverse shuffle.
State.addMetadata(NewLI, LI);
if (Reverse)
NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
State.set(this, NewLI, Part);
}
}

void VPWidenEVLLoadRecipe::execute(VPTransformState &State) {
assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
"explicit vector length.");
// FIXME: Support reverse loading after vp_reverse is added.
assert(!isReverse() && "Reverse loads are not implemented yet.");

auto *LI = cast<LoadInst>(&Ingredient);

Type *ScalarDataTy = getLoadStoreType(&Ingredient);
auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
const Align Alignment = getLoadStoreAlignment(&Ingredient);
bool CreateGather = !isConsecutive();

auto &Builder = State.Builder;
State.setDebugLocFrom(getDebugLoc());
CallInst *NewLI;
Value *EVL = State.get(getEVL(), VPIteration(0, 0));
Value *Addr = State.get(getAddr(), 0, !CreateGather);
Value *Mask =
getMask() ? State.get(getMask(), 0)
: Builder.CreateVectorSplat(State.VF, Builder.getTrue());
if (CreateGather) {
NewLI =
Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL},
nullptr, "wide.masked.gather");
} else {
VectorBuilder VBuilder(Builder);
VBuilder.setEVL(EVL).setMask(Mask);
NewLI = cast<CallInst>(VBuilder.createVectorInstruction(
Instruction::Load, DataTy, Addr, "vp.op.load"));
}
NewLI->addParamAttr(
0, Attribute::getWithAlignment(NewLI->getContext(), Alignment));

State.addMetadata(NewLI, LI);
State.set(this, NewLI, 0);
}
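
On the consecutive (non-gather) path above, VectorBuilder is a thin convenience layer that materializes the matching VP intrinsic. A rough hand-written equivalent of that path, as a sketch only (it assumes llvm.vp.load is overloaded on its data and pointer types, and is not code from this patch):

  // Sketch: direct IRBuilder equivalent of the VectorBuilder load path.
  // Emits: %vp.op.load = call <VF x Ty> @llvm.vp.load(ptr %addr, mask, evl)
  CallInst *Load = Builder.CreateIntrinsic(
      Intrinsic::vp_load, {DataTy, Addr->getType()}, {Addr, Mask, EVL},
      /*FMFSource=*/nullptr, "vp.op.load");
  Load->addParamAttr(
      0, Attribute::getWithAlignment(Load->getContext(), Alignment));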

void VPWidenStoreRecipe::execute(VPTransformState &State) {
auto *SI = cast<StoreInst>(&Ingredient);

@@ -9449,45 +9419,64 @@ void VPWidenStoreRecipe::execute(VPTransformState &State) {

Value *StoredVal = State.get(StoredVPValue, Part);
if (isReverse()) {
assert(!State.EVL && "reversing not yet implemented with EVL");
// If we store to reverse consecutive memory locations, then we need
// to reverse the order of elements in the stored value.
StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
// We don't want to update the value in the map as it might be used in
// another expression. So don't call resetVectorValue(StoredVal).
}
// TODO: split this into several classes for better design.
if (State.EVL) {
assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
"explicit vector length.");
assert(cast<VPInstruction>(State.EVL)->getOpcode() ==
VPInstruction::ExplicitVectorLength &&
"EVL must be VPInstruction::ExplicitVectorLength.");
Value *EVL = State.get(State.EVL, VPIteration(0, 0));
// If EVL is not nullptr, then EVL must be a valid value set during plan
// creation, possibly default value = whole vector register length. EVL
// is created only if TTI prefers predicated vectorization, thus if EVL
// is not nullptr it also implies preference for predicated
// vectorization.
// FIXME: Support reverse store after vp_reverse is added.
NewSI = lowerStoreUsingVectorIntrinsics(
Builder, State.get(getAddr(), Part, !CreateScatter), StoredVal,
CreateScatter, Mask, EVL, Alignment);
} else if (CreateScatter) {
Value *VectorGep = State.get(getAddr(), Part);
NewSI =
Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, Mask);
Value *Addr = State.get(getAddr(), Part, /*IsScalar*/ !CreateScatter);
if (CreateScatter) {
Collaborator:

Suggested change:

-      if (CreateScatter) {
+      Value *Addr = State.get(getAddr(), Part, !CreateScatter);
+      if (CreateScatter)
+        NewSI =
+            Builder.CreateMaskedScatter(StoredVal, Addr, Alignment, Mask);
+      else if (Mask)
+        NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
+      else
+        NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);

nit: can be simplified a bit, consistent with VPWidenVPStoreRecipe::execute()?

Contributor Author: Done, thanks!

Collaborator: Thanks! The brackets can be removed, along with inlining else if.

Contributor Author: Done, thanks!

NewSI = Builder.CreateMaskedScatter(StoredVal, Addr, Alignment, Mask);
} else {
auto *VecPtr = State.get(getAddr(), Part, /*IsScalar*/ true);
if (Mask)
NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, Mask);
NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
else
NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);
}
State.addMetadata(NewSI, SI);
}
}

void VPWidenEVLStoreRecipe::execute(VPTransformState &State) {
assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with "
"explicit vector length.");
// FIXME: Support reverse store after vp_reverse is added.
assert(!isReverse() && "Reverse stores are not implemented yet.");

auto *SI = cast<StoreInst>(&Ingredient);

VPValue *StoredValue = getStoredValue();
bool CreateScatter = !isConsecutive();
const Align Alignment = getLoadStoreAlignment(&Ingredient);

auto &Builder = State.Builder;
State.setDebugLocFrom(getDebugLoc());

CallInst *NewSI = nullptr;
Value *StoredVal = State.get(StoredValue, 0);
Value *EVL = State.get(getEVL(), VPIteration(0, 0));
// FIXME: Support reverse store after vp_reverse is added.
Value *Mask =
getMask() ? State.get(getMask(), 0)
: Builder.CreateVectorSplat(State.VF, Builder.getTrue());
Value *Addr = State.get(getAddr(), 0, !CreateScatter);
if (CreateScatter) {
NewSI = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
Intrinsic::vp_scatter,
{StoredVal, Addr, Mask, EVL});
} else {
VectorBuilder VBuilder(Builder);
VBuilder.setEVL(EVL).setMask(Mask);
NewSI = cast<CallInst>(VBuilder.createVectorInstruction(
Instruction::Store, Type::getVoidTy(EVL->getContext()),
{StoredVal, Addr}));
}
NewSI->addParamAttr(
1, Attribute::getWithAlignment(NewSI->getContext(), Alignment));

State.addMetadata(NewSI, SI);
}
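
Here the scatter path already calls CreateIntrinsic directly, while the consecutive path goes through VectorBuilder; the latter corresponds to roughly this direct form, sketched under the assumption that llvm.vp.store is overloaded on its value and pointer types (not code from this patch):

  // Sketch: direct IRBuilder equivalent of the VectorBuilder store path.
  // Emits: call void @llvm.vp.store(<VF x Ty> %val, ptr %addr, mask, evl)
  CallInst *Store = Builder.CreateIntrinsic(
      Intrinsic::vp_store, {StoredVal->getType(), Addr->getType()},
      {StoredVal, Addr, Mask, EVL});
  Store->addParamAttr(
      1, Attribute::getWithAlignment(Store->getContext(), Alignment));
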
// Determine how to lower the scalar epilogue, which depends on 1) optimising
// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
// predication, and 4) a TTI hook that analyses whether the loop is suitable
103 changes: 90 additions & 13 deletions llvm/lib/Transforms/Vectorize/VPlan.h
@@ -242,15 +242,6 @@ struct VPTransformState {
ElementCount VF;
unsigned UF;

/// If EVL (Explicit Vector Length) is not nullptr, then EVL must be a valid
/// value set during plan transformation, possibly a default value = whole
/// vector register length. EVL is created only if TTI prefers predicated
/// vectorization, thus if EVL is not nullptr it also implies preference for
/// predicated vectorization.
/// TODO: this is a temporary solution; the EVL must be explicitly used by
/// the recipes and must be removed here.
VPValue *EVL = nullptr;

/// Hold the indices to generate specific scalar instructions. Null indicates
/// that all instances are to be generated, using either scalar or vector
/// instructions.
@@ -877,6 +868,8 @@ class VPSingleDefRecipe : public VPRecipeBase, public VPValue {
case VPRecipeBase::VPBranchOnMaskSC:
case VPRecipeBase::VPWidenLoadSC:
case VPRecipeBase::VPWidenStoreSC:
Collaborator: Add the two VP variants?

Contributor Author: Added, thanks!

case VPRecipeBase::VPWidenEVLLoadSC:
case VPRecipeBase::VPWidenEVLStoreSC:
// TODO: Widened stores don't define a value, but widened loads do. Split
// the recipes to be able to make widened loads VPSingleDefRecipes.
return false;
@@ -2318,11 +2311,15 @@ class VPWidenMemoryRecipe : public VPRecipeBase {
}

public:
VPWidenMemoryRecipe *clone() override = 0;
VPWidenMemoryRecipe *clone() override {
llvm_unreachable("cloning not supported");
}

static inline bool classof(const VPRecipeBase *R) {
return R->getVPDefID() == VPDef::VPWidenLoadSC ||
R->getVPDefID() == VPDef::VPWidenStoreSC;
return R->getVPDefID() == VPRecipeBase::VPWidenLoadSC ||
R->getVPDefID() == VPRecipeBase::VPWidenStoreSC ||
R->getVPDefID() == VPRecipeBase::VPWidenEVLLoadSC ||
R->getVPDefID() == VPRecipeBase::VPWidenEVLStoreSC;
}

static inline bool classof(const VPUser *U) {
@@ -2390,13 +2387,48 @@ struct VPWidenLoadRecipe final : public VPWidenMemoryRecipe, public VPValue {
bool onlyFirstLaneUsed(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");

// Widened, consecutive load operations only demand the first lane of
// their address.
return Op == getAddr() && isConsecutive();
}
};

/// A recipe for widening load operations with vector-predication intrinsics,
/// using the address to load from, the explicit vector length and an optional
/// mask.
struct VPWidenEVLLoadRecipe final : public VPWidenMemoryRecipe, public VPValue {
VPWidenEVLLoadRecipe(VPWidenLoadRecipe *L, VPValue *EVL, VPValue *Mask)
: VPWidenMemoryRecipe(
VPDef::VPWidenEVLLoadSC, *cast<LoadInst>(&L->getIngredient()),
Collaborator: nit: is the cast to LoadInst needed?

Contributor Author: Ah no, removed, thanks!

{L->getAddr(), EVL}, L->isConsecutive(), false, L->getDebugLoc()),
VPValue(this, &getIngredient()) {
setMask(Mask);
}

VP_CLASSOF_IMPL(VPDef::VPWidenEVLLoadSC)

/// Return the EVL operand.
VPValue *getEVL() const { return getOperand(1); }

/// Generate the wide load or gather.
void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif

/// Returns true if the recipe only uses the first lane of operand \p Op.
bool onlyFirstLaneUsed(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
// Widened loads only demand the first lane of EVL and consecutive loads
// only demand the first lane of their address.
return Op == getEVL() || (Op == getAddr() && isConsecutive());
}
};
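
For illustration, a minimal sketch of how a VPlan transform could swap a plain widened load for this EVL variant. The enclosing traversal, the CurRecipe reference, and the EVL value (assumed to be the VPValue defined by a VPInstruction::ExplicitVectorLength) are hypothetical context, not part of this diff:

  // Hypothetical transform step: replace a widened load with its EVL form.
  if (auto *LoadR = dyn_cast<VPWidenLoadRecipe>(&CurRecipe)) {
    auto *EVLLoad = new VPWidenEVLLoadRecipe(LoadR, EVL, LoadR->getMask());
    EVLLoad->insertBefore(LoadR);
    // The load defines a VPValue, so redirect its users before erasing it.
    LoadR->replaceAllUsesWith(EVLLoad);
    LoadR->eraseFromParent();
  }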

/// A recipe for widening store operations, using the stored value, the address
/// to store to and an optional mask.
struct VPWidenStoreRecipe final : public VPWidenMemoryRecipe {
@@ -2436,6 +2468,51 @@ struct VPWidenStoreRecipe final : public VPWidenMemoryRecipe {
return Op == getAddr() && isConsecutive() && Op != getStoredValue();
}
};

/// A recipe for widening store operations with vector-predication intrinsics,
/// using the value to store, the address to store to, the explicit vector
/// length and an optional mask.
struct VPWidenEVLStoreRecipe final : public VPWidenMemoryRecipe {
VPWidenEVLStoreRecipe(VPWidenStoreRecipe *S, VPValue *EVL, VPValue *Mask)
: VPWidenMemoryRecipe(VPDef::VPWidenEVLStoreSC,
*cast<StoreInst>(&S->getIngredient()),
Collaborator: nit: is the cast to StoreInst needed?

Contributor Author: Not needed, removed, thanks!

{S->getAddr(), S->getStoredValue(), EVL},
S->isConsecutive(), false, S->getDebugLoc()) {
setMask(Mask);
}

VP_CLASSOF_IMPL(VPDef::VPWidenEVLStoreSC)

/// Return the value stored by this recipe.
VPValue *getStoredValue() const { return getOperand(1); }

/// Return the EVL operand.
VPValue *getEVL() const { return getOperand(2); }

/// Generate the wide store or scatter.
void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Print the recipe.
void print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const override;
#endif

/// Returns true if the recipe only uses the first lane of operand \p Op.
bool onlyFirstLaneUsed(const VPValue *Op) const override {
assert(is_contained(operands(), Op) &&
"Op must be an operand of the recipe");
if (Op == getEVL()) {
assert(getStoredValue() != Op && "unexpected store of EVL");
return true;
}
// Widened, consecutive memory operations only demand the first lane of
// their address, unless the same operand is also stored. The latter can
// happen with opaque pointers.
return Op == getAddr() && isConsecutive() && Op != getStoredValue();
Collaborator: Note: unlike the address operand, the EVL operand (scalar by nature) cannot also be stored (vector by nature). Worth an assert?

Contributor Author: Done, thanks!

}
};
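
The store counterpart of the same hypothetical transform step; unlike the load case, this recipe defines no VPValue, so no uses need rewriting (again a sketch, with CurRecipe and EVL assumed from the surrounding traversal):

  // Hypothetical transform step: replace a widened store with its EVL form.
  if (auto *StoreR = dyn_cast<VPWidenStoreRecipe>(&CurRecipe)) {
    auto *EVLStore = new VPWidenEVLStoreRecipe(StoreR, EVL, StoreR->getMask());
    EVLStore->insertBefore(StoreR);
    StoreR->eraseFromParent();
  }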

/// Recipe to expand a SCEV expression.
class VPExpandSCEVRecipe : public VPSingleDefRecipe {
const SCEV *Expr;
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp
@@ -109,7 +109,7 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenCallRecipe *R) {
}

Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPWidenMemoryRecipe *R) {
assert(isa<VPWidenLoadRecipe>(R) &&
assert((isa<VPWidenLoadRecipe>(R) || isa<VPWidenEVLLoadRecipe>(R)) &&
"Store recipes should not define any values");
return cast<LoadInst>(&R->getIngredient())->getType();
}