36 changes: 28 additions & 8 deletions llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -568,6 +568,13 @@ static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
 
   const TypeSize PartSize = PartTy.getSizeInBits();
 
+  if (PartSize == SrcTy.getSizeInBits() && DstRegs.size() == 1) {
+    // TODO: Handle int<->ptr casts. It just happens the ABI lowering
+    // assignments are not pointer aware.
+    B.buildBitcast(DstRegs[0], SrcReg);
+    return;
+  }
+
   if (PartTy.isVector() == SrcTy.isVector() &&
       PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
     assert(DstRegs.size() == 1);
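Note: the early-out added above covers the case where the ABI assigned the whole source to a single part of the same total width but a different type, e.g. a <2 x i16> value assigned to one i32 register, which can now be handled by a single G_BITCAST. A minimal compilable sketch of the decision, modeling only total bit widths (hypothetical helper, not the GlobalISel API):

// Models the new early-out in buildCopyToRegs: one destination register
// whose part type spans exactly the source's bits means a plain bitcast.
#include <cassert>

bool singleBitcastSuffices(unsigned SrcBits, unsigned PartBits,
                           unsigned NumDstRegs) {
  return PartBits == SrcBits && NumDstRegs == 1;
}

int main() {
  assert(singleBitcastSuffices(32, 32, 1));  // <2 x i16> -> one i32 part
  assert(!singleBitcastSuffices(48, 32, 2)); // <3 x i16> -> two i32 parts
}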
@@ -576,7 +583,8 @@ static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
   }
 
   if (SrcTy.isVector() && !PartTy.isVector() &&
-      TypeSize::isKnownGT(PartSize, SrcTy.getElementType().getSizeInBits())) {
+      TypeSize::isKnownGT(PartSize, SrcTy.getElementType().getSizeInBits()) &&
+      SrcTy.getElementCount() == ElementCount::getFixed(DstRegs.size())) {
     // Vector was scalarized, and the elements extended.
     auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
     for (int i = 0, e = DstRegs.size(); i != e; ++i)
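Note: the new element-count clause restricts the scalarized-and-extended path to exactly one destination register per source element; something like <4 x i16> packed into two i32 parts now falls through to the covering-vector logic below instead. A plain-integer sketch of the tightened predicate (fixed-width vectors assumed; the real code compares an ElementCount against DstRegs.size()):

#include <cassert>

bool takesScalarizedPath(bool SrcIsVector, unsigned SrcNumElts,
                         unsigned SrcEltBits, bool PartIsVector,
                         unsigned PartBits, unsigned NumDstRegs) {
  return SrcIsVector && !PartIsVector && PartBits > SrcEltBits &&
         SrcNumElts == NumDstRegs; // new requirement: one part per element
}

int main() {
  // <3 x i16> extended into three i32 parts: still scalarized.
  assert(takesScalarizedPath(true, 3, 16, false, 32, 3));
  // <4 x i16> packed into two i32 parts: no longer scalarized here.
  assert(!takesScalarizedPath(true, 4, 16, false, 32, 2));
}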
@@ -614,21 +622,33 @@ static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
 
   MachineRegisterInfo &MRI = *B.getMRI();
   LLT DstTy = MRI.getType(DstRegs[0]);
-  LLT LCMTy = getCoverTy(SrcTy, PartTy);
+  LLT CoverTy = getCoverTy(SrcTy, PartTy);
+  if (SrcTy.isVector() && DstRegs.size() > 1) {
+    TypeSize FullCoverSize =
+        DstTy.getSizeInBits().multiplyCoefficientBy(DstRegs.size());
+
+    LLT EltTy = SrcTy.getElementType();
+    TypeSize EltSize = EltTy.getSizeInBits();
+    if (FullCoverSize.isKnownMultipleOf(EltSize)) {
+      TypeSize VecSize = FullCoverSize.divideCoefficientBy(EltSize);
+      CoverTy =
+          LLT::vector(ElementCount::get(VecSize, VecSize.isScalable()), EltTy);
+    }
+  }
 
-  if (PartTy.isVector() && LCMTy == PartTy) {
+  if (PartTy.isVector() && CoverTy == PartTy) {
     assert(DstRegs.size() == 1);
     B.buildPadVectorWithUndefElements(DstRegs[0], SrcReg);
     return;
   }
 
   const unsigned DstSize = DstTy.getSizeInBits();
   const unsigned SrcSize = SrcTy.getSizeInBits();
-  unsigned CoveringSize = LCMTy.getSizeInBits();
+  unsigned CoveringSize = CoverTy.getSizeInBits();
 
   Register UnmergeSrc = SrcReg;
 
-  if (!LCMTy.isVector() && CoveringSize != SrcSize) {
+  if (!CoverTy.isVector() && CoveringSize != SrcSize) {
     // For scalars, it's common to be able to use a simple extension.
     if (SrcTy.isScalar() && DstTy.isScalar()) {
       CoveringSize = alignTo(SrcSize, DstSize);
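Note: the covering-type computation can now widen to a longer vector of the source's element type instead of an LCM type. For example, under the new AMDGPU lowering a <3 x i16> source assigned two i32 registers gives FullCoverSize = 2 x 32 = 64 bits with EltSize = 16, so CoverTy becomes <4 x i16>: the source is padded with one undef element, then unmerged into the two registers. A fixed-size model of just the element arithmetic (hypothetical helper; scalable vectors ignored):

#include <cassert>

// Element count of the covering vector, or 0 when the destination
// registers do not cover a whole number of source elements.
unsigned coverVectorNumElts(unsigned DstRegBits, unsigned NumDstRegs,
                            unsigned SrcEltBits) {
  unsigned FullCoverBits = DstRegBits * NumDstRegs;
  return FullCoverBits % SrcEltBits == 0 ? FullCoverBits / SrcEltBits : 0;
}

int main() {
  // <3 x i16> in two i32 registers: cover with <4 x i16>.
  assert(coverVectorNumElts(32, 2, 16) == 4);
  // <3 x i32> in three i32 registers: cover is <3 x i32> itself.
  assert(coverVectorNumElts(32, 3, 32) == 3);
}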
@@ -641,12 +661,12 @@ static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
       SmallVector<Register, 8> MergeParts(1, SrcReg);
       for (unsigned Size = SrcSize; Size != CoveringSize; Size += SrcSize)
         MergeParts.push_back(Undef);
-      UnmergeSrc = B.buildMergeLikeInstr(LCMTy, MergeParts).getReg(0);
+      UnmergeSrc = B.buildMergeLikeInstr(CoverTy, MergeParts).getReg(0);
     }
   }
 
-  if (LCMTy.isVector() && CoveringSize != SrcSize)
-    UnmergeSrc = B.buildPadVectorWithUndefElements(LCMTy, SrcReg).getReg(0);
+  if (CoverTy.isVector() && CoveringSize != SrcSize)
+    UnmergeSrc = B.buildPadVectorWithUndefElements(CoverTy, SrcReg).getReg(0);
 
   B.buildUnmerge(DstRegs, UnmergeSrc);
 }
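Note: in the remaining scalar-cover case, the merge loop pads the source with CoveringSize / SrcSize - 1 undef values of the source type before the final unmerge, which relies on the covering size being a whole multiple of the source size. A sketch of that count (hypothetical helper):

#include <cassert>

unsigned numUndefFillParts(unsigned SrcSize, unsigned CoveringSize) {
  return CoveringSize / SrcSize - 1; // parts appended after SrcReg itself
}

int main() {
  assert(numUndefFillParts(32, 96) == 2); // i32 widened to a 96-bit merge
  assert(numUndefFillParts(48, 48) == 0); // already covered: no filler
}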
18 changes: 10 additions & 8 deletions llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1111,9 +1111,9 @@ MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
     EVT ScalarVT = VT.getScalarType();
     unsigned Size = ScalarVT.getSizeInBits();
     if (Size == 16) {
-      if (Subtarget->has16BitInsts())
-        return MVT::getVectorVT(ScalarVT.getSimpleVT(), 2);
-      return VT.isInteger() ? MVT::i32 : MVT::f32;
+      return Subtarget->has16BitInsts()
+                 ? MVT::getVectorVT(ScalarVT.getSimpleVT(), 2)
+                 : MVT::i32;
     }
 
     if (Size < 16)
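Note: with this change, 16-bit vector elements map to i32 on subtargets without 16-bit instructions even when they are floating point; previously f16 elements mapped to f32, which did not line up with the i32-based packing the GlobalISel lowering in this PR performs. A string-typed sketch of the new mapping (MVTs modeled as strings for brevity):

#include <cassert>
#include <cstring>

const char *regTypeFor16BitElt(bool Has16BitInsts, bool IsFloat) {
  if (Has16BitInsts)
    return IsFloat ? "v2f16" : "v2i16"; // packed pair of the scalar type
  return "i32"; // now also for FP elements; previously "f32"
}

int main() {
  assert(std::strcmp(regTypeFor16BitElt(false, /*IsFloat=*/true), "i32") == 0);
}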
@@ -1139,7 +1139,7 @@ unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
     unsigned Size = ScalarVT.getSizeInBits();
 
     // FIXME: Should probably promote 8-bit vectors to i16.
-    if (Size == 16 && Subtarget->has16BitInsts())
+    if (Size == 16)
       return (NumElts + 1) / 2;
 
     if (Size <= 32)
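Note: dropping the has16BitInsts() condition means every subtarget now reports two 16-bit elements per 32-bit register, rounded up. A worked sketch of the count:

#include <cassert>

// Registers used for a vector of 16-bit elements under the calling
// convention after this change, on all subtargets.
unsigned numRegsForV16(unsigned NumElts) { return (NumElts + 1) / 2; }

int main() {
  assert(numRegsForV16(2) == 1); // <2 x i16> in one register
  assert(numRegsForV16(3) == 2); // <3 x i16> padded into two
  assert(numRegsForV16(5) == 3);
}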
@@ -1163,11 +1163,13 @@ unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
     // FIXME: We should fix the ABI to be the same on targets without 16-bit
     // support, but unless we can properly handle 3-vectors, it will be still be
     // inconsistent.
-    if (Size == 16 && Subtarget->has16BitInsts()) {
-      RegisterVT = MVT::getVectorVT(ScalarVT.getSimpleVT(), 2);
-      IntermediateVT = RegisterVT;
+    if (Size == 16) {
+      MVT SimpleIntermediateVT =
+          MVT::getVectorVT(ScalarVT.getSimpleVT(), ElementCount::getFixed(2));
+      IntermediateVT = SimpleIntermediateVT;
+      RegisterVT = Subtarget->has16BitInsts() ? SimpleIntermediateVT : MVT::i32;
       NumIntermediates = (NumElts + 1) / 2;
-      return NumIntermediates;
+      return (NumElts + 1) / 2;
     }
 
     if (Size == 32) {
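Note: the breakdown now always uses the packed pair as the intermediate type and only varies the register type on 16-bit instruction support, so a <3 x i16> argument becomes two v2i16 intermediates living in two i32 registers on a target like GFX7. A string-typed sketch (MVTs modeled as strings):

#include <cassert>
#include <cstring>

struct Breakdown {
  const char *IntermediateVT;
  const char *RegisterVT;
  unsigned NumIntermediates;
};

Breakdown breakdownV16(unsigned NumElts, bool Has16BitInsts) {
  return {"v2i16", Has16BitInsts ? "v2i16" : "i32", (NumElts + 1) / 2};
}

int main() {
  Breakdown B = breakdownV16(3, /*Has16BitInsts=*/false); // e.g. GFX7
  assert(std::strcmp(B.RegisterVT, "i32") == 0 && B.NumIntermediates == 2);
}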
23 changes: 17 additions & 6 deletions llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll
@@ -200,10 +200,15 @@ define <2 x i16> @s_add_v2i16(<2 x i16> inreg %a, <2 x i16> inreg %b) {
 ; GFX7-LABEL: s_add_v2i16:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT:    s_add_i32 s16, s16, s18
-; GFX7-NEXT:    s_add_i32 s17, s17, s19
-; GFX7-NEXT:    v_mov_b32_e32 v0, s16
-; GFX7-NEXT:    v_mov_b32_e32 v1, s17
+; GFX7-NEXT:    s_lshr_b32 s4, s16, 16
+; GFX7-NEXT:    s_lshr_b32 s5, s17, 16
+; GFX7-NEXT:    s_add_i32 s4, s4, s5
+; GFX7-NEXT:    s_add_i32 s16, s16, s17
+; GFX7-NEXT:    s_and_b32 s4, s4, 0xffff
+; GFX7-NEXT:    s_and_b32 s5, s16, 0xffff
+; GFX7-NEXT:    s_lshl_b32 s4, s4, 16
+; GFX7-NEXT:    s_or_b32 s4, s5, s4
+; GFX7-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX7-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: s_add_v2i16:
@@ -278,8 +283,14 @@ define <2 x i16> @v_add_v2i16(<2 x i16> %a, <2 x i16> %b) {
 ; GFX7-LABEL: v_add_v2i16:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
+; GFX7-NEXT:    v_lshrrev_b32_e32 v3, 16, v1
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v2, v3
+; GFX7-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT:    v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX7-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_add_v2i16:
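Note: both updated GFX7 checks reflect the new ABI: a <2 x i16> now arrives packed in a single 32-bit register instead of two pre-split registers, so the code shifts out the high halves, adds low and high halves independently, then masks and repacks. A C++ model of the emitted sequence (assuming the packed-i32 representation):

#include <cassert>
#include <cstdint>

// Packed <2 x i16> add as GFX7 now lowers it: both halves in one
// 32-bit value, matching the new i32 calling-convention type.
uint32_t addV2I16Packed(uint32_t A, uint32_t B) {
  uint32_t Hi = (A >> 16) + (B >> 16);          // lshr + add on high halves
  uint32_t Lo = A + B;                          // add on low halves
  return (Lo & 0xffff) | ((Hi & 0xffff) << 16); // and, lshl, or to repack
}

int main() {
  // {1, 2} + {3, 4} == {4, 6}
  uint32_t A = (2u << 16) | 1u, B = (4u << 16) | 3u;
  assert(addV2I16Packed(A, B) == ((6u << 16) | 4u));
}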