Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
104 changes: 77 additions & 27 deletions llvm/lib/Target/X86/X86ISelLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -53502,6 +53502,26 @@ static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
return SDValue();
}

// Attempt to convert a (vXi1 bitcast(iX Mask)) mask before it might get split
// by legalization.
static SDValue canonicalizeBoolMask(unsigned Opcode, EVT VT, SDValue Mask,
                                    const SDLoc &DL, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget &Subtarget) {
  // Only act on a vXi1 mask produced by a bitcast, and only before op
  // legalization so the wider mask form survives type legalization intact.
  if (!DCI.isBeforeLegalizeOps())
    return SDValue();
  if (Mask.getOpcode() != ISD::BITCAST || Mask.getScalarValueSizeInBits() != 1)
    return SDValue();
  // AVX512 targets have native k-register masks; also bail out if the masked
  // operation itself isn't supported for this value type.
  if (Subtarget.hasAVX512() ||
      !DAG.getTargetLoweringInfo().isOperationLegalOrCustom(Opcode, VT))
    return SDValue();

  EVT BoolVT = Mask.getValueType();
  // Widen the boolean mask to the masked op's integer element width.
  EVT WideVT = VT.changeVectorElementTypeToInteger();
  assert(WideVT.bitsGT(BoolVT) && "Unexpected extension type");
  SDValue Extended = combineToExtendBoolVectorInReg(ISD::SIGN_EXTEND, DL,
                                                    WideVT, Mask, DAG, DCI,
                                                    Subtarget);
  if (!Extended)
    return SDValue();
  // Truncate back to the original vXi1 type; the sign-extended form is what
  // later combines/legalization can consume without splitting.
  return DAG.getNode(ISD::TRUNCATE, DL, BoolVT, Extended);
}

/// If V is a build vector of boolean constants and exactly one of those
/// constants is true, return the operand index of that true element.
/// Otherwise, return -1.
Expand Down Expand Up @@ -53678,12 +53698,23 @@ static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
return Blend;
}

EVT VT = Mld->getValueType(0);
SDValue Mask = Mld->getMask();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDLoc DL(N);

// Attempt to convert a (vXi1 bitcast(iX Mask)) mask before it might get split
// by legalization.
if (SDValue NewMask =
canonicalizeBoolMask(ISD::MLOAD, VT, Mask, DL, DAG, DCI, Subtarget))
return DAG.getMaskedLoad(VT, DL, Mld->getChain(), Mld->getBasePtr(),
Mld->getOffset(), NewMask, Mld->getPassThru(),
Mld->getMemoryVT(), Mld->getMemOperand(),
Mld->getAddressingMode(), Mld->getExtensionType());

// If the mask value has been legalized to a non-boolean vector, try to
// simplify ops leading up to it. We only demand the MSB of each lane.
SDValue Mask = Mld->getMask();
if (Mask.getScalarValueSizeInBits() != 1) {
EVT VT = Mld->getValueType(0);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
if (N->getOpcode() != ISD::DELETED_NODE)
Expand All @@ -53693,8 +53724,8 @@ static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
if (SDValue NewMask =
TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
return DAG.getMaskedLoad(
VT, SDLoc(N), Mld->getChain(), Mld->getBasePtr(), Mld->getOffset(),
NewMask, Mld->getPassThru(), Mld->getMemoryVT(), Mld->getMemOperand(),
VT, DL, Mld->getChain(), Mld->getBasePtr(), Mld->getOffset(), NewMask,
Mld->getPassThru(), Mld->getMemoryVT(), Mld->getMemOperand(),
Mld->getAddressingMode(), Mld->getExtensionType());
}

Expand Down Expand Up @@ -53784,6 +53815,15 @@ static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
return SDValue();
}

// Attempt to convert a (vXi1 bitcast(iX Mask)) mask before it might get split
// by legalization.
if (SDValue NewMask =
canonicalizeBoolMask(ISD::MSTORE, VT, Mask, DL, DAG, DCI, Subtarget))
return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Mst->getValue(),
Mst->getBasePtr(), Mst->getOffset(), NewMask,
Mst->getMemoryVT(), Mst->getMemOperand(),
Mst->getAddressingMode());

// If the mask value has been legalized to a non-boolean vector, try to
// simplify ops leading up to it. We only demand the MSB of each lane.
if (Mask.getScalarValueSizeInBits() != 1) {
Expand Down Expand Up @@ -57399,35 +57439,35 @@ static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
}

static SDValue rebuildGatherScatter(MaskedGatherScatterSDNode *GorS,
SDValue Index, SDValue Base, SDValue Scale,
SelectionDAG &DAG) {
SDValue Index, SDValue Base, SDValue Mask,
SDValue Scale, SelectionDAG &DAG) {
SDLoc DL(GorS);

if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
SDValue Ops[] = { Gather->getChain(), Gather->getPassThru(),
Gather->getMask(), Base, Index, Scale } ;
return DAG.getMaskedGather(Gather->getVTList(),
Gather->getMemoryVT(), DL, Ops,
Gather->getMemOperand(),
SDValue Ops[] = {
Gather->getChain(), Gather->getPassThru(), Mask, Base, Index, Scale};
return DAG.getMaskedGather(Gather->getVTList(), Gather->getMemoryVT(), DL,
Ops, Gather->getMemOperand(),
Gather->getIndexType(),
Gather->getExtensionType());
}
auto *Scatter = cast<MaskedScatterSDNode>(GorS);
SDValue Ops[] = { Scatter->getChain(), Scatter->getValue(),
Scatter->getMask(), Base, Index, Scale };
return DAG.getMaskedScatter(Scatter->getVTList(),
Scatter->getMemoryVT(), DL,
SDValue Ops[] = {
Scatter->getChain(), Scatter->getValue(), Mask, Base, Index, Scale};
return DAG.getMaskedScatter(Scatter->getVTList(), Scatter->getMemoryVT(), DL,
Ops, Scatter->getMemOperand(),
Scatter->getIndexType(),
Scatter->isTruncatingStore());
}

static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDLoc DL(N);
auto *GorS = cast<MaskedGatherScatterSDNode>(N);
SDValue Index = GorS->getIndex();
SDValue Base = GorS->getBasePtr();
SDValue Mask = GorS->getMask();
SDValue Scale = GorS->getScale();
EVT IndexVT = Index.getValueType();
EVT IndexSVT = IndexVT.getVectorElementType();
Expand Down Expand Up @@ -57461,7 +57501,8 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
Index.getOperand(0), NewShAmt);
SDValue NewScale =
DAG.getConstant(ScaleAmt * 2, DL, Scale.getValueType());
return rebuildGatherScatter(GorS, NewIndex, Base, NewScale, DAG);
return rebuildGatherScatter(GorS, NewIndex, Base, Mask, NewScale,
DAG);
}
}
}
Expand All @@ -57479,7 +57520,7 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
// a split.
if (SDValue TruncIndex =
DAG.FoldConstantArithmetic(ISD::TRUNCATE, DL, NewVT, Index))
return rebuildGatherScatter(GorS, TruncIndex, Base, Scale, DAG);
return rebuildGatherScatter(GorS, TruncIndex, Base, Mask, Scale, DAG);

// Shrink any sign/zero extends from 32 or smaller to larger than 32 if
// there are sufficient sign bits. Only do this before legalize types to
Expand All @@ -57488,13 +57529,13 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
Index.getOpcode() == ISD::ZERO_EXTEND) &&
Index.getOperand(0).getScalarValueSizeInBits() <= 32) {
Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
return rebuildGatherScatter(GorS, Index, Base, Mask, Scale, DAG);
}

// Shrink if we remove an illegal type.
if (!TLI.isTypeLegal(Index.getValueType()) && TLI.isTypeLegal(NewVT)) {
Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
return rebuildGatherScatter(GorS, Index, Base, Mask, Scale, DAG);
}
}
}
Expand All @@ -57519,13 +57560,15 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
SDValue NewBase = DAG.getNode(ISD::ADD, DL, PtrVT, Base,
DAG.getConstant(Adder, DL, PtrVT));
SDValue NewIndex = Index.getOperand(1 - I);
return rebuildGatherScatter(GorS, NewIndex, NewBase, Scale, DAG);
return rebuildGatherScatter(GorS, NewIndex, NewBase, Mask, Scale,
DAG);
}
// For non-constant cases, limit this to non-scaled cases.
if (ScaleAmt == 1) {
SDValue NewBase = DAG.getNode(ISD::ADD, DL, PtrVT, Base, Splat);
SDValue NewIndex = Index.getOperand(1 - I);
return rebuildGatherScatter(GorS, NewIndex, NewBase, Scale, DAG);
return rebuildGatherScatter(GorS, NewIndex, NewBase, Mask, Scale,
DAG);
}
}
}
Expand All @@ -57540,7 +57583,8 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
SDValue NewIndex = DAG.getNode(ISD::ADD, DL, IndexVT,
Index.getOperand(1 - I), Splat);
SDValue NewBase = DAG.getConstant(0, DL, PtrVT);
return rebuildGatherScatter(GorS, NewIndex, NewBase, Scale, DAG);
return rebuildGatherScatter(GorS, NewIndex, NewBase, Mask, Scale,
DAG);
}
}
}
Expand All @@ -57551,12 +57595,18 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
IndexVT = IndexVT.changeVectorElementType(*DAG.getContext(), EltVT);
Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
return rebuildGatherScatter(GorS, Index, Base, Mask, Scale, DAG);
}

// Attempt to convert a (vXi1 bitcast(iX Mask)) mask before it might get
// split by legalization.
if (SDValue NewMask =
canonicalizeBoolMask(GorS->getOpcode(), N->getValueType(0), Mask,
DL, DAG, DCI, Subtarget))
return rebuildGatherScatter(GorS, Index, Base, NewMask, Scale, DAG);
}

// With vector masks we only demand the upper bit of the mask.
SDValue Mask = GorS->getMask();
if (Mask.getScalarValueSizeInBits() != 1) {
APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
Expand Down Expand Up @@ -61701,7 +61751,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::MGATHER:
case X86ISD::MSCATTER: return combineX86GatherScatter(N, DAG, DCI);
case ISD::MGATHER:
case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI);
case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI, Subtarget);
case X86ISD::PCMPEQ:
case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
case X86ISD::PMULDQ:
Expand Down
76 changes: 10 additions & 66 deletions llvm/test/CodeGen/X86/masked_gather.ll
Original file line number Diff line number Diff line change
Expand Up @@ -312,27 +312,11 @@ define <4 x float> @masked_gather_v4f32_ptr_v4i32(<4 x ptr> %ptr, i32 %trigger,
;
; AVX2-GATHER-LABEL: masked_gather_v4f32_ptr_v4i32:
; AVX2-GATHER: # %bb.0:
; AVX2-GATHER-NEXT: movl %edi, %eax
; AVX2-GATHER-NEXT: andl $1, %eax
; AVX2-GATHER-NEXT: negl %eax
; AVX2-GATHER-NEXT: vmovd %eax, %xmm2
; AVX2-GATHER-NEXT: movl %edi, %eax
; AVX2-GATHER-NEXT: shrb %al
; AVX2-GATHER-NEXT: movzbl %al, %eax
; AVX2-GATHER-NEXT: andl $1, %eax
; AVX2-GATHER-NEXT: negl %eax
; AVX2-GATHER-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
; AVX2-GATHER-NEXT: movl %edi, %eax
; AVX2-GATHER-NEXT: shrb $2, %al
; AVX2-GATHER-NEXT: movzbl %al, %eax
; AVX2-GATHER-NEXT: andl $1, %eax
; AVX2-GATHER-NEXT: negl %eax
; AVX2-GATHER-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
; AVX2-GATHER-NEXT: andb $8, %dil
; AVX2-GATHER-NEXT: shrb $3, %dil
; AVX2-GATHER-NEXT: movzbl %dil, %eax
; AVX2-GATHER-NEXT: negl %eax
; AVX2-GATHER-NEXT: vpinsrd $3, %eax, %xmm2, %xmm2
; AVX2-GATHER-NEXT: vmovd %edi, %xmm2
; AVX2-GATHER-NEXT: vpbroadcastd %xmm2, %xmm2
; AVX2-GATHER-NEXT: vpmovsxbd {{.*#+}} xmm3 = [1,2,4,8]
; AVX2-GATHER-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX2-GATHER-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX2-GATHER-NEXT: vgatherqps %xmm2, (,%ymm0), %xmm1
; AVX2-GATHER-NEXT: vmovaps %xmm1, %xmm0
; AVX2-GATHER-NEXT: vzeroupper
Expand Down Expand Up @@ -2575,51 +2559,11 @@ define <8 x i32> @masked_gather_v8i32_v8i32(i8 %trigger) {
;
; AVX2-GATHER-LABEL: masked_gather_v8i32_v8i32:
; AVX2-GATHER: # %bb.0:
; AVX2-GATHER-NEXT: movl %edi, %eax
; AVX2-GATHER-NEXT: shrb $5, %al
; AVX2-GATHER-NEXT: movzbl %al, %eax
; AVX2-GATHER-NEXT: andl $1, %eax
; AVX2-GATHER-NEXT: negl %eax
; AVX2-GATHER-NEXT: movl %edi, %ecx
; AVX2-GATHER-NEXT: shrb $4, %cl
; AVX2-GATHER-NEXT: movzbl %cl, %ecx
; AVX2-GATHER-NEXT: andl $1, %ecx
; AVX2-GATHER-NEXT: negl %ecx
; AVX2-GATHER-NEXT: vmovd %ecx, %xmm0
; AVX2-GATHER-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
; AVX2-GATHER-NEXT: movl %edi, %eax
; AVX2-GATHER-NEXT: shrb $6, %al
; AVX2-GATHER-NEXT: movzbl %al, %eax
; AVX2-GATHER-NEXT: andl $1, %eax
; AVX2-GATHER-NEXT: negl %eax
; AVX2-GATHER-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
; AVX2-GATHER-NEXT: movl %edi, %eax
; AVX2-GATHER-NEXT: shrb $7, %al
; AVX2-GATHER-NEXT: movzbl %al, %eax
; AVX2-GATHER-NEXT: negl %eax
; AVX2-GATHER-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
; AVX2-GATHER-NEXT: movl %edi, %eax
; AVX2-GATHER-NEXT: andl $1, %eax
; AVX2-GATHER-NEXT: negl %eax
; AVX2-GATHER-NEXT: vmovd %eax, %xmm1
; AVX2-GATHER-NEXT: movl %edi, %eax
; AVX2-GATHER-NEXT: shrb %al
; AVX2-GATHER-NEXT: movzbl %al, %eax
; AVX2-GATHER-NEXT: andl $1, %eax
; AVX2-GATHER-NEXT: negl %eax
; AVX2-GATHER-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
; AVX2-GATHER-NEXT: movl %edi, %eax
; AVX2-GATHER-NEXT: shrb $2, %al
; AVX2-GATHER-NEXT: movzbl %al, %eax
; AVX2-GATHER-NEXT: andl $1, %eax
; AVX2-GATHER-NEXT: negl %eax
; AVX2-GATHER-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
; AVX2-GATHER-NEXT: shrb $3, %dil
; AVX2-GATHER-NEXT: movzbl %dil, %eax
; AVX2-GATHER-NEXT: andl $1, %eax
; AVX2-GATHER-NEXT: negl %eax
; AVX2-GATHER-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
; AVX2-GATHER-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-GATHER-NEXT: vmovd %edi, %xmm0
; AVX2-GATHER-NEXT: vpbroadcastb %xmm0, %ymm0
; AVX2-GATHER-NEXT: vpmovzxbd {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128]
; AVX2-GATHER-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-GATHER-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
; AVX2-GATHER-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-GATHER-NEXT: vmovdqa %ymm0, %ymm2
; AVX2-GATHER-NEXT: vpxor %xmm3, %xmm3, %xmm3
Expand Down
Loading
Loading