Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
236 changes: 236 additions & 0 deletions llvm/lib/Transforms/Vectorize/VectorCombine.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -143,6 +143,7 @@ class VectorCombine {
bool foldShufflesOfLengthChangingShuffles(Instruction &I);
bool foldShuffleOfIntrinsics(Instruction &I);
bool foldShuffleToIdentity(Instruction &I);
bool compactShuffleOperands(Instruction &I);
bool foldShuffleFromReductions(Instruction &I);
bool foldShuffleChainsToReduce(Instruction &I);
bool foldCastFromReductions(Instruction &I);
Expand Down Expand Up @@ -2762,6 +2763,239 @@ bool VectorCombine::foldShuffleOfCastops(Instruction &I) {
return true;
}

/// Describes whether and how a shuffle operand can be compacted, i.e. narrowed
/// to just the elements that the user's shuffle mask actually references.
struct ShuffleOperandCompaction {
  /// The cost difference between compacted and original operand. Used to avoid
  /// compactions that increase cost. Zero if compaction cannot be applied, but
  /// note that valid compactions may also have zero cost.
  InstructionCost Cost;
  /// The minimal width required for the compacted vector.
  unsigned CompactedWidth;
  /// Function to create the compacted operand, or nullptr if no compaction can
  /// be applied. Invoked with the final (padded) vector width and the builder
  /// used to emit any replacement values.
  std::function<Value *(unsigned, IRBuilder<InstSimplifyFolder> &)> Apply;
};

/// Attempt to narrow/compact a constant vector used in a shuffle by removing
/// elements that are not referenced by the shuffle mask.
static ShuffleOperandCompaction
compactShuffleOperand(Constant *ShuffleInput,
                      MutableArrayRef<int> UserShuffleMask, int IndexStart) {
  auto *VecTy = cast<FixedVectorType>(ShuffleInput->getType());
  unsigned NumElts = VecTy->getNumElements();
  int IndexEnd = IndexStart + (int)NumElts;

  // Constant elements the mask actually references, in first-use order.
  SmallVector<Constant *, 16> UsedElts;
  // Maps an original element index to its remapped mask index (-1 = unseen).
  SmallVector<int, 16> Remap(NumElts, -1);

  // Whether every referenced element already sits at its compacted position.
  // Even then the operand may still shrink, since trailing poison elements
  // are not re-added when materializing.
  bool IsIdentityCompaction = true;

  // This rewrites UserShuffleMask in place, so once started we cannot back out
  // of transforming this operand while proceeding with compactShuffleOperands
  // on the instruction.
  for (int &MaskElt : UserShuffleMask) {
    if (MaskElt < IndexStart || MaskElt >= IndexEnd)
      continue;
    int Rel = MaskElt - IndexStart;
    if (Remap[Rel] < 0) {
      // First reference to this element: assign it the next compacted slot.
      Remap[Rel] = UsedElts.size() + IndexStart;
      UsedElts.push_back(ShuffleInput->getAggregateElement(Rel));
    }
    if (Remap[Rel] != MaskElt) {
      IsIdentityCompaction = false;
      MaskElt = Remap[Rel];
    }
  }

  unsigned CompactedWidth = UsedElts.size();

  // The eventual width (between CompactedWidth and NumElts) also depends on
  // the other shuffle operand, so materialization is deferred into a functor.
  return {0, CompactedWidth,
          [ShuffleInput, IsIdentityCompaction, NumElts, VecTy,
           UsedElts = std::move(UsedElts)](
              unsigned PaddedWidth,
              IRBuilder<InstSimplifyFolder> &Builder) -> Value * {
            // Hand back the original when nothing changed; this guarantees
            // the optimization fixpoint loop terminates.
            if (IsIdentityCompaction && NumElts == PaddedWidth)
              return ShuffleInput;

            // Extend with poison elements up to the requested width.
            SmallVector<Constant *, 16> PaddedElts(UsedElts);
            for (unsigned Idx = PaddedElts.size(); Idx < PaddedWidth; ++Idx)
              PaddedElts.push_back(PoisonValue::get(VecTy->getElementType()));

            return ConstantVector::get(PaddedElts);
          }};
}

/// Attempt to narrow/compact a shuffle instruction used in a shuffle by
/// removing elements that are not referenced by the shuffle mask.
/// Rewrites \p UserShuffleMask in place to point at compacted positions and
/// returns the relative TTI cost of the narrowed shuffle together with a
/// deferred functor that materializes it at a caller-chosen padded width.
static ShuffleOperandCompaction
compactShuffleOperand(ShuffleVectorInst *ShuffleInput,
                      MutableArrayRef<int> UserShuffleMask, int IndexStart,
                      const TargetTransformInfo &TTI,
                      TTI::TargetCostKind CostKind) {
  auto *VecTy = cast<FixedVectorType>(ShuffleInput->getType());
  unsigned Width = VecTy->getNumElements();

  // Collect only the shuffle mask elements that are actually used.
  SmallVector<int, 16> CompactedMask;
  // Map from original element index to compacted index.
  SmallVector<int, 16> IndexRemap(Width, -1);

  // Track whether used elements are already compacted at the front. Even if
  // true, we may still shrink this operand by not re-adding trailing poison.
  bool AlreadyCompacted = true;

  // This modifies UserShuffleMask, so we cannot back out of transforming the
  // operand while proceeding with compactShuffleOperands on the instruction.
  for (int &MaskElt : UserShuffleMask) {
    // Only mask entries selecting from this operand's index range matter.
    if (MaskElt >= IndexStart && MaskElt < IndexStart + (int)Width) {
      int RelMaskElt = MaskElt - IndexStart;
      if (IndexRemap[RelMaskElt] < 0) {
        // First use of this element: assign it the next compacted slot.
        IndexRemap[RelMaskElt] = CompactedMask.size() + IndexStart;
        CompactedMask.push_back(ShuffleInput->getMaskValue(RelMaskElt));
      }
      if (IndexRemap[RelMaskElt] != MaskElt) {
        AlreadyCompacted = false;
        MaskElt = IndexRemap[RelMaskElt];
      }
    }
  }

  unsigned CompactedWidth = CompactedMask.size();

  // Check if the compacted shuffle would be more expensive than the original.
  InstructionCost CompactionCost(0);
  if (!AlreadyCompacted) {
    ArrayRef<int> OriginalMask = ShuffleInput->getShuffleMask();
    auto *OriginalSrcTy =
        cast<FixedVectorType>(ShuffleInput->getOperand(0)->getType());

    InstructionCost OriginalCost =
        TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, VecTy,
                           OriginalSrcTy, OriginalMask, CostKind);

    // Create a type for the compacted shuffle result.
    auto *CompactedDstTy =
        FixedVectorType::get(VecTy->getElementType(), CompactedWidth);

    InstructionCost CompactedCost = TTI.getShuffleCost(
        TargetTransformInfo::SK_PermuteTwoSrc, CompactedDstTy, OriginalSrcTy,
        CompactedMask, CostKind);

    // Positive means the narrowed shuffle is more expensive than the original.
    CompactionCost = CompactedCost - OriginalCost;
  }

  // To determine the eventual width (between CompactedWidth and Width), we have
  // to consider the other operand. Hence, we return a functor here to delay.
  return {CompactionCost, CompactedWidth,
          [ShuffleInput, AlreadyCompacted, Width,
           CompactedMask = std::move(CompactedMask)](
              unsigned PaddedWidth,
              IRBuilder<InstSimplifyFolder> &Builder) -> Value * {
            // Return original if unchanged to guarantee fixpoint termination.
            if (AlreadyCompacted && Width == PaddedWidth)
              return ShuffleInput;

            // Pad with poison mask elements to reach the requested width.
            SmallVector<int, 16> PaddedMask(CompactedMask);
            while (PaddedMask.size() < PaddedWidth)
              PaddedMask.push_back(PoisonMaskElem);

            // Emit a narrowed shuffle of the original shuffle's two sources.
            return Builder.CreateShuffleVector(ShuffleInput->getOperand(0),
                                               ShuffleInput->getOperand(1),
                                               PaddedMask);
          }};
}

/// Try to narrow/compact a shuffle operand by eliminating elements that are
/// not used by the shuffle mask. This updates the shuffle mask in-place to
/// reflect the compaction. Returns information about whether compaction is
/// possible and a lambda to apply the compaction if beneficial.
static ShuffleOperandCompaction
compactShuffleOperand(Value *ShuffleInput, MutableArrayRef<int> ShuffleMask,
                      int IndexStart, const TargetTransformInfo &TTI,
                      TTI::TargetCostKind CostKind) {
  unsigned Width =
      cast<FixedVectorType>(ShuffleInput->getType())->getNumElements();

  // Only single-use operands are candidates; dispatch on the operand kind.
  if (!(ShuffleInput->getNumUses() > 1)) {
    if (auto *C = dyn_cast<Constant>(ShuffleInput))
      return compactShuffleOperand(C, ShuffleMask, IndexStart);
    if (auto *Shuf = dyn_cast<ShuffleVectorInst>(ShuffleInput))
      return compactShuffleOperand(Shuf, ShuffleMask, IndexStart, TTI,
                                   CostKind);
  }

  // Multi-use or unsupported operand kind: no compaction is possible.
  return {0, Width, nullptr};
}

/// Try to narrow the shuffle by eliminating unused elements from the operands.
/// Matches "shuffle LHS, RHS, Mask", compacts each compactable operand down to
/// the elements the mask references, and rewrites the mask (including the RHS
/// index offsets) to match the narrowed operand widths.
bool VectorCombine::compactShuffleOperands(Instruction &I) {
  Value *LHS, *RHS;
  ArrayRef<int> Mask;
  if (!match(&I, m_Shuffle(m_Value(LHS), m_Value(RHS), m_Mask(Mask))))
    return false;

  // Require at least one constant operand to ensure profitability.
  if (!isa<Constant>(LHS) && !isa<Constant>(RHS))
    return false;

  // Only fixed-width vectors are handled.
  auto *LHSTy = dyn_cast<FixedVectorType>(LHS->getType());
  if (!LHSTy)
    return false;

  // Analyze both operands. This updates NewMask in-place to reflect compaction.
  // RHS mask indices start at LHSWidth, hence the IndexStart offset below.
  unsigned LHSWidth = LHSTy->getNumElements();
  SmallVector<int, 16> NewMask(Mask.begin(), Mask.end());
  ShuffleOperandCompaction LHSCompact =
      compactShuffleOperand(LHS, NewMask, 0, TTI, CostKind);
  ShuffleOperandCompaction RHSCompact =
      compactShuffleOperand(RHS, NewMask, LHSWidth, TTI, CostKind);

  // Both compacted operands are padded out to a single common width.
  unsigned CompactedWidth =
      std::max(LHSCompact.CompactedWidth, RHSCompact.CompactedWidth);

  // Check total cost: compacting operands + change to outer shuffle.
  if (LHSCompact.Apply || RHSCompact.Apply) {
    auto *ShuffleDstTy = cast<FixedVectorType>(I.getType());
    InstructionCost CostBefore =
        TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, ShuffleDstTy,
                           LHSTy, Mask, CostKind, 0, nullptr, {LHS, RHS}, &I);

    // NOTE: NewMask is still relative to the original LHS width at this point
    // (the RHS re-offsetting happens below), so the narrowed shuffle's cost is
    // approximated using the original source type.
    InstructionCost CostAfter =
        TTI.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, ShuffleDstTy,
                           LHSTy, NewMask, CostKind);

    InstructionCost OuterCost = CostAfter - CostBefore;

    // Bail out if the transform would be a net cost increase.
    if (OuterCost + LHSCompact.Cost + RHSCompact.Cost > 0)
      return false;
  } else if (CompactedWidth == LHSWidth)
    // Neither operand is compactable and the width would not shrink.
    return false;

  Value *NewLHS =
      LHSCompact.Apply ? LHSCompact.Apply(CompactedWidth, Builder) : LHS;
  Value *NewRHS =
      RHSCompact.Apply ? RHSCompact.Apply(CompactedWidth, Builder) : RHS;

  // Ensure we terminate from the optimization fixpoint loop eventually.
  if (LHS == NewLHS && RHS == NewRHS)
    return false;

  // Adjust RHS indices in the mask to account for the new LHS width.
  // (Poison mask elements are negative and therefore left untouched.)
  for (int &MaskElt : NewMask)
    if (MaskElt >= (int)LHSWidth)
      MaskElt = MaskElt - LHSWidth + CompactedWidth;

  Value *NewShuf = Builder.CreateShuffleVector(NewLHS, NewRHS, NewMask);
  replaceValue(I, *NewShuf);
  return true;
}

/// Try to convert any of:
/// "shuffle (shuffle x, y), (shuffle y, x)"
/// "shuffle (shuffle x, undef), (shuffle y, undef)"
Expand Down Expand Up @@ -5034,6 +5268,8 @@ bool VectorCombine::run() {
return true;
if (foldShuffleToIdentity(I))
return true;
if (compactShuffleOperands(I))
return true;
break;
case Instruction::Load:
if (shrinkLoadForShuffles(I))
Expand Down
15 changes: 6 additions & 9 deletions llvm/test/Transforms/PhaseOrdering/X86/addsub.ll
Original file line number Diff line number Diff line change
Expand Up @@ -334,8 +334,7 @@ define <4 x float> @test_addsub_v4f32_partial_23(<4 x float> %A, <4 x float> %B)
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[B:%.*]], <4 x float> poison, <2 x i32> <i32 2, i32 3>
; CHECK-NEXT: [[TMP3:%.*]] = fsub <2 x float> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x float> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> [[TMP4]], <4 x i32> <i32 0, i32 3, i32 poison, i32 poison>
; CHECK-NEXT: [[VECINSERT21:%.*]] = shufflevector <4 x float> [[TMP5]], <4 x float> <float undef, float undef, float poison, float poison>, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
; CHECK-NEXT: [[VECINSERT21:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> [[TMP4]], <4 x i32> <i32 poison, i32 poison, i32 0, i32 3>
; CHECK-NEXT: ret <4 x float> [[VECINSERT21]]
;
%1 = extractelement <4 x float> %A, i32 2
Expand All @@ -344,7 +343,7 @@ define <4 x float> @test_addsub_v4f32_partial_23(<4 x float> %A, <4 x float> %B)
%3 = extractelement <4 x float> %A, i32 3
%4 = extractelement <4 x float> %B, i32 3
%add2 = fadd float %3, %4
%vecinsert1 = insertelement <4 x float> undef, float %sub2, i32 2
%vecinsert1 = insertelement <4 x float> poison, float %sub2, i32 2
%vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
ret <4 x float> %vecinsert2
}
Expand All @@ -353,8 +352,7 @@ define <4 x float> @test_addsub_v4f32_partial_03(<4 x float> %A, <4 x float> %B)
; CHECK-LABEL: @test_addsub_v4f32_partial_03(
; CHECK-NEXT: [[FOLDEXTEXTBINOP:%.*]] = fsub <4 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[FOLDEXTEXTBINOP2:%.*]] = fadd <4 x float> [[A]], [[B]]
; CHECK-NEXT: [[VECINSERT1:%.*]] = shufflevector <4 x float> [[FOLDEXTEXTBINOP]], <4 x float> <float poison, float undef, float undef, float poison>, <4 x i32> <i32 0, i32 5, i32 6, i32 poison>
; CHECK-NEXT: [[VECINSERT2:%.*]] = shufflevector <4 x float> [[VECINSERT1]], <4 x float> [[FOLDEXTEXTBINOP2]], <4 x i32> <i32 0, i32 1, i32 2, i32 7>
; CHECK-NEXT: [[VECINSERT2:%.*]] = shufflevector <4 x float> [[FOLDEXTEXTBINOP]], <4 x float> [[FOLDEXTEXTBINOP2]], <4 x i32> <i32 0, i32 poison, i32 poison, i32 7>
; CHECK-NEXT: ret <4 x float> [[VECINSERT2]]
;
%1 = extractelement <4 x float> %A, i32 0
Expand All @@ -363,7 +361,7 @@ define <4 x float> @test_addsub_v4f32_partial_03(<4 x float> %A, <4 x float> %B)
%3 = extractelement <4 x float> %A, i32 3
%4 = extractelement <4 x float> %B, i32 3
%add = fadd float %4, %3
%vecinsert1 = insertelement <4 x float> undef, float %sub, i32 0
%vecinsert1 = insertelement <4 x float> poison, float %sub, i32 0
%vecinsert2 = insertelement <4 x float> %vecinsert1, float %add, i32 3
ret <4 x float> %vecinsert2
}
Expand All @@ -374,8 +372,7 @@ define <4 x float> @test_addsub_v4f32_partial_12(<4 x float> %A, <4 x float> %B)
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[B:%.*]], <4 x float> poison, <2 x i32> <i32 1, i32 2>
; CHECK-NEXT: [[TMP3:%.*]] = fadd <2 x float> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = fsub <2 x float> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> [[TMP4]], <4 x i32> <i32 0, i32 3, i32 poison, i32 poison>
; CHECK-NEXT: [[VECINSERT21:%.*]] = shufflevector <4 x float> [[TMP5]], <4 x float> <float undef, float poison, float poison, float undef>, <4 x i32> <i32 4, i32 0, i32 1, i32 7>
; CHECK-NEXT: [[VECINSERT21:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> [[TMP4]], <4 x i32> <i32 poison, i32 0, i32 3, i32 poison>
; CHECK-NEXT: ret <4 x float> [[VECINSERT21]]
;
%1 = extractelement <4 x float> %A, i32 2
Expand All @@ -384,7 +381,7 @@ define <4 x float> @test_addsub_v4f32_partial_12(<4 x float> %A, <4 x float> %B)
%3 = extractelement <4 x float> %A, i32 1
%4 = extractelement <4 x float> %B, i32 1
%add = fadd float %3, %4
%vecinsert1 = insertelement <4 x float> undef, float %sub, i32 2
%vecinsert1 = insertelement <4 x float> poison, float %sub, i32 2
%vecinsert2 = insertelement <4 x float> %vecinsert1, float %add, i32 1
ret <4 x float> %vecinsert2
}
Expand Down
8 changes: 4 additions & 4 deletions llvm/test/Transforms/PhaseOrdering/X86/fmaddsub.ll
Original file line number Diff line number Diff line change
Expand Up @@ -419,11 +419,11 @@ define <8 x double> @buildvector_mul_addsub_pd512_partial(<8 x double> %C, <8 x
; SSE-NEXT: [[TMP4:%.*]] = shufflevector <8 x double> [[TMP3]], <8 x double> poison, <2 x i32> <i32 1, i32 3>
; SSE-NEXT: [[TMP5:%.*]] = shufflevector <4 x double> [[TMP2]], <4 x double> poison, <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison>
; SSE-NEXT: [[TMP6:%.*]] = shufflevector <2 x double> [[TMP4]], <2 x double> poison, <6 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison>
; SSE-NEXT: [[TMP7:%.*]] = shufflevector <6 x double> [[TMP5]], <6 x double> [[TMP6]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 6, i32 7>
; SSE-NEXT: [[A7:%.*]] = extractelement <8 x double> [[A]], i64 7
; SSE-NEXT: [[B7:%.*]] = extractelement <8 x double> [[B]], i64 7
; SSE-NEXT: [[ADD7:%.*]] = fadd double [[A7]], [[B7]]
; SSE-NEXT: [[TMP8:%.*]] = shufflevector <6 x double> [[TMP7]], <6 x double> <double undef, double poison, double poison, double poison, double poison, double poison>, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 poison>
; SSE-NEXT: [[TMP7:%.*]] = shufflevector <6 x double> [[TMP5]], <6 x double> [[TMP6]], <6 x i32> <i32 0, i32 6, i32 1, i32 7, i32 2, i32 3>
; SSE-NEXT: [[TMP8:%.*]] = shufflevector <6 x double> [[TMP7]], <6 x double> <double undef, double poison, double poison, double poison, double poison, double poison>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 6, i32 5, i32 poison>
; SSE-NEXT: [[VECINSERT8:%.*]] = insertelement <8 x double> [[TMP8]], double [[ADD7]], i64 7
; SSE-NEXT: ret <8 x double> [[VECINSERT8]]
;
Expand Down Expand Up @@ -934,11 +934,11 @@ define <8 x double> @buildvector_mul_subadd_pd512_partial(<8 x double> %C, <8 x
; SSE-NEXT: [[TMP4:%.*]] = shufflevector <8 x double> [[TMP3]], <8 x double> poison, <2 x i32> <i32 1, i32 3>
; SSE-NEXT: [[TMP5:%.*]] = shufflevector <4 x double> [[TMP2]], <4 x double> poison, <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison>
; SSE-NEXT: [[TMP6:%.*]] = shufflevector <2 x double> [[TMP4]], <2 x double> poison, <6 x i32> <i32 0, i32 1, i32 poison, i32 poison, i32 poison, i32 poison>
; SSE-NEXT: [[TMP7:%.*]] = shufflevector <6 x double> [[TMP5]], <6 x double> [[TMP6]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 6, i32 7>
; SSE-NEXT: [[A7:%.*]] = extractelement <8 x double> [[A]], i64 7
; SSE-NEXT: [[B7:%.*]] = extractelement <8 x double> [[B]], i64 7
; SSE-NEXT: [[ADD7:%.*]] = fsub double [[A7]], [[B7]]
; SSE-NEXT: [[TMP8:%.*]] = shufflevector <6 x double> [[TMP7]], <6 x double> <double undef, double poison, double poison, double poison, double poison, double poison>, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 poison>
; SSE-NEXT: [[TMP7:%.*]] = shufflevector <6 x double> [[TMP5]], <6 x double> [[TMP6]], <6 x i32> <i32 0, i32 6, i32 1, i32 7, i32 2, i32 3>
; SSE-NEXT: [[TMP8:%.*]] = shufflevector <6 x double> [[TMP7]], <6 x double> <double undef, double poison, double poison, double poison, double poison, double poison>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 6, i32 5, i32 poison>
; SSE-NEXT: [[VECINSERT8:%.*]] = insertelement <8 x double> [[TMP8]], double [[ADD7]], i64 7
; SSE-NEXT: ret <8 x double> [[VECINSERT8]]
;
Expand Down
Loading