AMDGPU: Change ABI of 16-bit element vectors on gfx6/7 #175781
Conversation
@llvm/pr-subscribers-llvm-globalisel @llvm/pr-subscribers-backend-amdgpu

Author: Matt Arsenault (arsenm)

Changes

Fix ABI on old subtargets to match new subtargets, packing 16-bit element subvectors into 32-bit registers. Previously this would be scalarized and promoted to i32/float.

Note this only changes the vector cases. Scalar i16/half are still promoted to i32/float for now. I've unsuccessfully tried to make that switch in the past, so leave that for later.

This will help with removal of softPromoteHalfType.

Patch is 21.22 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/175781.diff

157 Files Affected:
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index ed5988ee6efc3..49f5d514071e2 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1113,7 +1113,7 @@ MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
if (Size == 16) {
if (Subtarget->has16BitInsts())
return MVT::getVectorVT(ScalarVT.getSimpleVT(), 2);
- return VT.isInteger() ? MVT::i32 : MVT::f32;
+ return ScalarVT == MVT::f32 ? MVT::f32 : MVT::i32;
}
if (Size < 16)
@@ -1139,7 +1139,7 @@ unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
unsigned Size = ScalarVT.getSizeInBits();
// FIXME: Should probably promote 8-bit vectors to i16.
- if (Size == 16 && Subtarget->has16BitInsts())
+ if (Size == 16)
return (NumElts + 1) / 2;
if (Size <= 32)
@@ -1163,11 +1163,13 @@ unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
// FIXME: We should fix the ABI to be the same on targets without 16-bit
// support, but unless we can properly handle 3-vectors, it will be still be
// inconsistent.
- if (Size == 16 && Subtarget->has16BitInsts()) {
- RegisterVT = MVT::getVectorVT(ScalarVT.getSimpleVT(), 2);
- IntermediateVT = RegisterVT;
+ if (Size == 16) {
+ MVT SimpleIntermediateVT =
+ MVT::getVectorVT(ScalarVT.getSimpleVT(), ElementCount::getFixed(2));
+ IntermediateVT = SimpleIntermediateVT;
+ RegisterVT = Subtarget->has16BitInsts() ? SimpleIntermediateVT : MVT::i32;
NumIntermediates = (NumElts + 1) / 2;
- return NumIntermediates;
+ return (NumElts + 1) / 2;
}
if (Size == 32) {
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll
index d6f1b142b36e0..5c60eb696f6b2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll
@@ -200,10 +200,15 @@ define <2 x i16> @s_add_v2i16(<2 x i16> inreg %a, <2 x i16> inreg %b) {
; GFX7-LABEL: s_add_v2i16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: s_add_i32 s16, s16, s18
-; GFX7-NEXT: s_add_i32 s17, s17, s19
-; GFX7-NEXT: v_mov_b32_e32 v0, s16
-; GFX7-NEXT: v_mov_b32_e32 v1, s17
+; GFX7-NEXT: s_lshr_b32 s4, s16, 16
+; GFX7-NEXT: s_lshr_b32 s5, s17, 16
+; GFX7-NEXT: s_add_i32 s4, s4, s5
+; GFX7-NEXT: s_add_i32 s16, s16, s17
+; GFX7-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX7-NEXT: s_and_b32 s5, s16, 0xffff
+; GFX7-NEXT: s_lshl_b32 s4, s4, 16
+; GFX7-NEXT: s_or_b32 s4, s5, s4
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: s_add_v2i16:
@@ -278,8 +283,14 @@ define <2 x i16> @v_add_v2i16(<2 x i16> %a, <2 x i16> %b) {
; GFX7-LABEL: v_add_v2i16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
-; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v2, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_add_v2i16:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/add.v2i16.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.v2i16.ll
index 814acc3be1fc0..244d006844a09 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/add.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.v2i16.ll
@@ -9,8 +9,14 @@ define <2 x i16> @v_add_v2i16(<2 x i16> %a, <2 x i16> %b) {
; GFX7-LABEL: v_add_v2i16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
-; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v2, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_add_v2i16:
@@ -40,13 +46,15 @@ define <2 x i16> @v_add_v2i16_fneg_lhs(<2 x half> %a, <2 x i16> %b) {
; GFX7-LABEL: v_add_v2i16_fneg_lhs:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX7-NEXT: v_or_b32_e32 v0, v1, v0
; GFX7-NEXT: v_xor_b32_e32 v0, 0x80008000, v0
-; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
-; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v2, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_add_v2i16_fneg_lhs:
@@ -79,13 +87,15 @@ define <2 x i16> @v_add_v2i16_fneg_rhs(<2 x i16> %a, <2 x half> %b) {
; GFX7-LABEL: v_add_v2i16_fneg_rhs:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: v_xor_b32_e32 v2, 0x80008000, v2
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
-; GFX7-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT: v_xor_b32_e32 v1, 0x80008000, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT: v_add_i32_e32 v1, vcc, v2, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_add_v2i16_fneg_rhs:
@@ -118,18 +128,16 @@ define <2 x i16> @v_add_v2i16_fneg_lhs_fneg_rhs(<2 x half> %a, <2 x half> %b) {
; GFX7-LABEL: v_add_v2i16_fneg_lhs_fneg_rhs:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX7-NEXT: v_or_b32_e32 v0, v1, v0
-; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v3
-; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
; GFX7-NEXT: v_xor_b32_e32 v0, 0x80008000, v0
; GFX7-NEXT: v_xor_b32_e32 v1, 0x80008000, v1
; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v1
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v1
; GFX7-NEXT: v_add_i32_e32 v1, vcc, v2, v3
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_add_v2i16_fneg_lhs_fneg_rhs:
@@ -165,8 +173,13 @@ define <2 x i16> @v_add_v2i16_neg_inline_imm_splat(<2 x i16> %a) {
; GFX7-LABEL: v_add_v2i16_neg_inline_imm_splat:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 0xffffffc0, v0
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; GFX7-NEXT: v_add_i32_e32 v1, vcc, 0xffffffc0, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 0xffffffc0, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_add_v2i16_neg_inline_imm_splat:
@@ -197,8 +210,13 @@ define <2 x i16> @v_add_v2i16_neg_inline_imm_lo(<2 x i16> %a) {
; GFX7-LABEL: v_add_v2i16_neg_inline_imm_lo:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 0xffffffc0, v0
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; GFX7-NEXT: v_add_i32_e32 v1, vcc, 4, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 0xffffffc0, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_add_v2i16_neg_inline_imm_lo:
@@ -230,8 +248,13 @@ define <2 x i16> @v_add_v2i16_neg_inline_imm_hi(<2 x i16> %a) {
; GFX7-LABEL: v_add_v2i16_neg_inline_imm_hi:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_add_i32_e32 v0, vcc, 4, v0
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; GFX7-NEXT: v_add_i32_e32 v1, vcc, 0xffffffc0, v1
+; GFX7-NEXT: v_add_i32_e32 v0, vcc, 4, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_add_v2i16_neg_inline_imm_hi:
@@ -262,6 +285,7 @@ define <2 x i16> @v_add_v2i16_neg_inline_imm_hi(<2 x i16> %a) {
define amdgpu_ps i32 @s_add_v2i16_neg_inline_imm_splat(<2 x i16> inreg %a) {
; GFX7-LABEL: s_add_v2i16_neg_inline_imm_splat:
; GFX7: ; %bb.0:
+; GFX7-NEXT: s_lshr_b32 s1, s0, 16
; GFX7-NEXT: s_sub_i32 s1, s1, 64
; GFX7-NEXT: s_sub_i32 s0, s0, 64
; GFX7-NEXT: s_and_b32 s1, s1, 0xffff
@@ -304,6 +328,7 @@ define amdgpu_ps i32 @s_add_v2i16_neg_inline_imm_splat(<2 x i16> inreg %a) {
define amdgpu_ps i32 @s_add_v2i16_neg_inline_imm_lo(<2 x i16> inreg %a) {
; GFX7-LABEL: s_add_v2i16_neg_inline_imm_lo:
; GFX7: ; %bb.0:
+; GFX7-NEXT: s_lshr_b32 s1, s0, 16
; GFX7-NEXT: s_add_i32 s1, s1, 4
; GFX7-NEXT: s_sub_i32 s0, s0, 64
; GFX7-NEXT: s_and_b32 s1, s1, 0xffff
@@ -346,6 +371,7 @@ define amdgpu_ps i32 @s_add_v2i16_neg_inline_imm_lo(<2 x i16> inreg %a) {
define amdgpu_ps i32 @s_add_v2i16_neg_inline_imm_hi(<2 x i16> inreg %a) {
; GFX7-LABEL: s_add_v2i16_neg_inline_imm_hi:
; GFX7: ; %bb.0:
+; GFX7-NEXT: s_lshr_b32 s1, s0, 16
; GFX7-NEXT: s_sub_i32 s1, s1, 64
; GFX7-NEXT: s_add_i32 s0, s0, 4
; GFX7-NEXT: s_and_b32 s1, s1, 0xffff
@@ -388,9 +414,11 @@ define amdgpu_ps i32 @s_add_v2i16_neg_inline_imm_hi(<2 x i16> inreg %a) {
define amdgpu_ps i32 @s_add_v2i16(<2 x i16> inreg %a, <2 x i16> inreg %b) {
; GFX7-LABEL: s_add_v2i16:
; GFX7: ; %bb.0:
-; GFX7-NEXT: s_add_i32 s1, s1, s3
-; GFX7-NEXT: s_add_i32 s0, s0, s2
-; GFX7-NEXT: s_and_b32 s1, s1, 0xffff
+; GFX7-NEXT: s_lshr_b32 s2, s0, 16
+; GFX7-NEXT: s_lshr_b32 s3, s1, 16
+; GFX7-NEXT: s_add_i32 s2, s2, s3
+; GFX7-NEXT: s_add_i32 s0, s0, s1
+; GFX7-NEXT: s_and_b32 s1, s2, 0xffff
; GFX7-NEXT: s_and_b32 s0, s0, 0xffff
; GFX7-NEXT: s_lshl_b32 s1, s1, 16
; GFX7-NEXT: s_or_b32 s0, s0, s1
@@ -433,14 +461,12 @@ define amdgpu_ps i32 @s_add_v2i16(<2 x i16> inreg %a, <2 x i16> inreg %b) {
define amdgpu_ps i32 @s_add_v2i16_fneg_lhs(<2 x half> inreg %a, <2 x i16> inreg %b) {
; GFX7-LABEL: s_add_v2i16_fneg_lhs:
; GFX7: ; %bb.0:
-; GFX7-NEXT: s_lshl_b32 s1, s1, 16
-; GFX7-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX7-NEXT: s_or_b32 s0, s1, s0
; GFX7-NEXT: s_xor_b32 s0, s0, 0x80008000
-; GFX7-NEXT: s_lshr_b32 s1, s0, 16
-; GFX7-NEXT: s_add_i32 s1, s1, s3
-; GFX7-NEXT: s_add_i32 s0, s0, s2
-; GFX7-NEXT: s_and_b32 s1, s1, 0xffff
+; GFX7-NEXT: s_lshr_b32 s2, s0, 16
+; GFX7-NEXT: s_lshr_b32 s3, s1, 16
+; GFX7-NEXT: s_add_i32 s2, s2, s3
+; GFX7-NEXT: s_add_i32 s0, s0, s1
+; GFX7-NEXT: s_and_b32 s1, s2, 0xffff
; GFX7-NEXT: s_and_b32 s0, s0, 0xffff
; GFX7-NEXT: s_lshl_b32 s1, s1, 16
; GFX7-NEXT: s_or_b32 s0, s0, s1
@@ -488,14 +514,12 @@ define amdgpu_ps i32 @s_add_v2i16_fneg_lhs(<2 x half> inreg %a, <2 x i16> inreg
define amdgpu_ps i32 @s_add_v2i16_fneg_rhs(<2 x i16> inreg %a, <2 x half> inreg %b) {
; GFX7-LABEL: s_add_v2i16_fneg_rhs:
; GFX7: ; %bb.0:
-; GFX7-NEXT: s_lshl_b32 s3, s3, 16
-; GFX7-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX7-NEXT: s_or_b32 s2, s3, s2
-; GFX7-NEXT: s_xor_b32 s2, s2, 0x80008000
-; GFX7-NEXT: s_lshr_b32 s3, s2, 16
-; GFX7-NEXT: s_add_i32 s1, s1, s3
-; GFX7-NEXT: s_add_i32 s0, s0, s2
-; GFX7-NEXT: s_and_b32 s1, s1, 0xffff
+; GFX7-NEXT: s_xor_b32 s1, s1, 0x80008000
+; GFX7-NEXT: s_lshr_b32 s2, s0, 16
+; GFX7-NEXT: s_lshr_b32 s3, s1, 16
+; GFX7-NEXT: s_add_i32 s2, s2, s3
+; GFX7-NEXT: s_add_i32 s0, s0, s1
+; GFX7-NEXT: s_and_b32 s1, s2, 0xffff
; GFX7-NEXT: s_and_b32 s0, s0, 0xffff
; GFX7-NEXT: s_lshl_b32 s1, s1, 16
; GFX7-NEXT: s_or_b32 s0, s0, s1
@@ -543,12 +567,6 @@ define amdgpu_ps i32 @s_add_v2i16_fneg_rhs(<2 x i16> inreg %a, <2 x half> inreg
define amdgpu_ps i32 @s_add_v2i16_fneg_lhs_fneg_rhs(<2 x half> inreg %a, <2 x half> inreg %b) {
; GFX7-LABEL: s_add_v2i16_fneg_lhs_fneg_rhs:
; GFX7: ; %bb.0:
-; GFX7-NEXT: s_lshl_b32 s1, s1, 16
-; GFX7-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX7-NEXT: s_or_b32 s0, s1, s0
-; GFX7-NEXT: s_lshl_b32 s1, s3, 16
-; GFX7-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX7-NEXT: s_or_b32 s1, s1, s2
; GFX7-NEXT: s_xor_b32 s0, s0, 0x80008000
; GFX7-NEXT: s_xor_b32 s1, s1, 0x80008000
; GFX7-NEXT: s_lshr_b32 s2, s0, 16
@@ -609,7 +627,11 @@ define <2 x i16> @add_inline_imm_neg1_0(<2 x i16> %x) {
; GFX7-LABEL: add_inline_imm_neg1_0:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; GFX7-NEXT: v_add_i32_e32 v0, vcc, -1, v0
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: add_inline_imm_neg1_0:
@@ -640,7 +662,11 @@ define <2 x i16> @add_inline_imm_1_0(<2 x i16> %x) {
; GFX7-LABEL: add_inline_imm_1_0:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; GFX7-NEXT: v_add_i32_e32 v0, vcc, 1, v0
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: add_inline_imm_1_0:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/andn2.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/andn2.ll
index 22b63a7de5f89..29a688ccf280d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/andn2.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/andn2.ll
@@ -513,14 +513,8 @@ define amdgpu_ps float @v_andn2_i16_vs(i16 %src0, i16 inreg %src1) {
define amdgpu_ps i32 @s_andn2_v2i16(<2 x i16> inreg %src0, <2 x i16> inreg %src1) {
; GFX6-LABEL: s_andn2_v2i16:
; GFX6: ; %bb.0:
-; GFX6-NEXT: s_lshl_b32 s0, s3, 16
-; GFX6-NEXT: s_and_b32 s1, s2, 0xffff
-; GFX6-NEXT: s_or_b32 s0, s0, s1
-; GFX6-NEXT: s_lshl_b32 s1, s5, 16
-; GFX6-NEXT: s_and_b32 s2, s4, 0xffff
-; GFX6-NEXT: s_or_b32 s1, s1, s2
-; GFX6-NEXT: s_xor_b32 s1, s1, -1
-; GFX6-NEXT: s_and_b32 s0, s0, s1
+; GFX6-NEXT: s_xor_b32 s0, s3, -1
+; GFX6-NEXT: s_and_b32 s0, s2, s0
; GFX6-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_andn2_v2i16:
@@ -546,14 +540,8 @@ define amdgpu_ps i32 @s_andn2_v2i16(<2 x i16> inreg %src0, <2 x i16> inreg %src1
define amdgpu_ps i32 @s_andn2_v2i16_commute(<2 x i16> inreg %src0, <2 x i16> inreg %src1) {
; GFX6-LABEL: s_andn2_v2i16_commute:
; GFX6: ; %bb.0:
-; GFX6-NEXT: s_lshl_b32 s0, s3, 16
-; GFX6-NEXT: s_and_b32 s1, s2, 0xffff
-; GFX6-NEXT: s_or_b32 s0, s0, s1
-; GFX6-NEXT: s_lshl_b32 s1, s5, 16
-; GFX6-NEXT: s_and_b32 s2, s4, 0xffff
-; GFX6-NEXT: s_or_b32 s1, s1, s2
-; GFX6-NEXT: s_xor_b32 s1, s1, -1
-; GFX6-NEXT: s_and_b32 s0, s1, s0
+; GFX6-NEXT: s_xor_b32 s0, s3, -1
+; GFX6-NEXT: s_and_b32 s0, s0, s2
; GFX6-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_andn2_v2i16_commute:
@@ -579,14 +567,8 @@ define amdgpu_ps i32 @s_andn2_v2i16_commute(<2 x i16> inreg %src0, <2 x i16> inr
define amdgpu_ps { i32, i32 } @s_andn2_v2i16_multi_use(<2 x i16> inreg %src0, <2 x i16> inreg %src1) {
; GFX6-LABEL: s_andn2_v2i16_multi_use:
; GFX6: ; %bb.0:
-; GFX6-NEXT: s_lshl_b32 s0, s3, 16
-; GFX6-NEXT: s_and_b32 s1, s2, 0xffff
-; GFX6-NEXT: s_or_b32 s0, s0, s1
-; GFX6-NEXT: s_lshl_b32 s1, s5, 16
-; GFX6-NEXT: s_and_b32 s2, s4, 0xffff
-; GFX6-NEXT: s_or_b32 s1, s1, s2
-; GFX6-NEXT: s_xor_b32 s1, s1, -1
-; GFX6-NEXT: s_and_b32 s0, s0, s1
+; GFX6-NEXT: s_xor_b32 s1, s3, -1
+; GFX6-NEXT: s_and_b32 s0, s2, s1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_andn2_v2i16_multi_use:
@@ -619,18 +601,9 @@ define amdgpu_ps { i32, i32 } @s_andn2_v2i16_multi_use(<2 x i16> inreg %src0, <2
define amdgpu_ps { i32, i32 } @s_andn2_v2i16_multi_foldable_use(<2 x i16> inreg %src0, <2 x i16> inreg %src1, <2 x i16> inreg %src2) {
; GFX6-LABEL: s_andn2_v2i16_multi_foldable_use:
; GFX6: ; %bb.0:
-; GFX6-NEXT: s_lshl_b32 s0, s3, 16
-; GFX6-NEXT: s_and_b32 s1, s2, 0xffff
-; GFX6-NEXT: s_or_b32 s0, s0, s1
-; GFX6-NEXT: s_lshl_b32 s1, s5, 16
-; GFX6-NEXT: s_and_b32 s2, s4, 0xffff
-; GFX6-NEXT: s_or_b32 s1, s1, s2
-; GFX6-NEXT: s_lshl_b32 s2, s7, 16
-; GFX6-NEXT: s_and_b32 s3, s6, 0xffff
-; GFX6-NEXT: s_or_b32 s2, s2, s3
-; GFX6-NEXT: s_xor_b32 s2, s2, -1
-; GFX6-NEXT: s_and_b32 s0, s0, s2
-; GFX6-NEXT: s_and_b32 s1, s1, s2
+; GFX6-NEXT: s_xor_b32 s1, s4, -1
+; GFX6-NEXT: s_and_b32 s0, s2, s1
+; GFX6-NEXT: s_and_b32 s1, s3, s1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_andn2_v2i16_multi_foldable_use:
@@ -662,26 +635,12 @@ define amdgpu_ps { i32, i32 } @s_andn2_v2i16_multi_foldable_use(<2 x i16> inreg
}
define <2 x i16> @v_andn2_v2i16(<2 x i16> %src0, <2 x i16> %src1) {
-; GFX6-LABEL: v_andn2_v2i16:
-; GFX6: ; %bb.0:
-; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX6-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX6-NEXT: v_or_b32_e32 v0, v1, v0
-; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v3
-; GFX6-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
-; GFX6-NEXT: v_xor_b32_e32 v1, -1, v1
-; GFX6-NEXT: v_and_b32_e32 v0, v0, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX6-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-LABEL: v_andn2_v2i16:
-; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_xor_b32_e32 v1, -1, v1
-; GFX9-NEXT: v_and_b32_e32 v0, v0, v1
-; GFX9-NEXT: s_setpc_b64 s[30:31]
+; GCN-LABEL: v_andn2_v2i16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_xor_b32_e32 v1, -1, v1
+; GCN-NEXT: v_and_b32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX10PLUS-LABEL: v_andn2_v2i16:
; GFX10PLUS: ; %bb.0:
@@ -698,19 +657,19 @@ define <2 x i16> @v_andn2_v2i16(<2 x i16> %src0, <2 x i16> %src1) {
define amdgpu_ps i48 @s_andn2_v3i16(<3 x i16> inreg %src0, <3 x i16> inreg %src1) {
; GFX6-LABEL: s_andn2_v3i16:
; GFX6: ; %bb.0:
-; GFX6-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX6-NEXT: s_lshr_b32 s7, s4, 16
; GFX6-NEXT: s_mov_b32 s0, -1
-; GFX6-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX6-NEXT: s_lshl_b32 s6, s6, 16
-; GFX6-NEXT: s_and_b32 s3, s3, 0xffff
+; GFX6-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX6-NEXT: s_lshl_b32 s7, s7, 16
+; GFX6-NEXT: s_lshr_b32 s6, s2, 16
; GFX6-NEXT: s_mov_b32 s1, 0xffff
-; GFX6-NEXT: s_or_b32 s6, s5, s6
-; GFX6-NEXT: s_and_b32 s7, s7, 0xffff
+; GFX6-NEXT: s_or_b32 s4, s4, s7
+; GFX6-NEXT: s_and_b32 s5, s5, 0xffff
+; GFX6-NEXT: s_xor_b64 s[0:1], s[4:5], s[0:1]
; GFX6-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX6-NEXT: s_lshl_b32 s3, s3, 16
-; GFX6-NEXT: s_xor_b...
[truncated]
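Summing up the test churn above in one hedged sketch (the function below is hypothetical, but the register behavior matches the GFX7 diffs shown): before this patch, a <2 x i16> argument on gfx7 was scalarized and each element promoted to its own 32-bit register, so %a occupied v0-v1 and %b occupied v2-v3; after it, each <2 x i16> is packed into a single 32-bit register, so %a arrives in v0 and %b in v1, matching gfx9+.

; Hypothetical demo function, compiled with e.g.:
;   llc -mtriple=amdgcn -mcpu=hawaii demo.ll
; Old gfx7 ABI: a = {v0, v1}, b = {v2, v3} (one promoted element per VGPR).
; New gfx7 ABI: a = v0, b = v1 (two 16-bit elements packed per VGPR).
define <2 x i16> @demo_add_v2i16(<2 x i16> %a, <2 x i16> %b) {
  %add = add <2 x i16> %a, %b
  ret <2 x i16> %add
}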
     if (Subtarget->has16BitInsts())
       return MVT::getVectorVT(ScalarVT.getSimpleVT(), 2);
-    return VT.isInteger() ? MVT::i32 : MVT::f32;
+    return ScalarVT == MVT::f32 ? MVT::f32 : MVT::i32;
How can ScalarVT be f32 if its size is 16?
This fixes 2 cases when the AMDGPU ABI is fixed to pass <2 x i16> values as packed on gfx6/gfx7. The ABI does not pack values currently; this is a pre-fix for that change.

Insert a bitcast if there is a single part with a different size. Previously this would miscompile by going through the scalarization and extend path, dropping the high element.

Also fix assertions in odd cases, like <3 x i16> -> i32. This needs to unmerge with excess elements from the widened source vector.

All of this code is in need of a cleanup; this should look more like the DAG version using getVectorTypeBreakdown.
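A hedged sketch of the odd <3 x i16> -> i32 case mentioned above (the function is hypothetical, not taken from the patch): the three elements widen to <4 x i16>, which breaks down into two packed 32-bit parts, so rebuilding the value has to unmerge from the widened vector and discard the excess fourth element.

; Hypothetical reproducer: <3 x i16> widens to <4 x i16>, i.e. two
; packed 32-bit parts; the high half of the second part is padding
; that the unmerge must drop.
define <3 x i16> @demo_v3i16(<3 x i16> %a, <3 x i16> %b) {
  %add = add <3 x i16> %a, %b
  ret <3 x i16> %add
}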
Fix ABI on old subtargets to match new subtargets, packing
16-bit element subvectors into 32-bit registers. Previously
this would be scalarized and promoted to i32/float.
Note this only changes the vector cases. Scalar i16/half are
still promoted to i32/float for now. I've unsuccessfully tried
to make that switch in the past, so leave that for later.
This will help with removal of softPromoteHalfType.
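To make the vector/scalar split concrete, a hedged sketch (hypothetical functions, not from the test suite): after this change the <2 x i16> argument below is packed into one 32-bit register on gfx6/7, while the lone i16 argument is still promoted to a full 32-bit register, exactly as before.

; Hypothetical illustration of the gfx6/7 ABI after this change.
define i16 @demo_scalar_i16(i16 %x) {
  ; %x still arrives promoted in one 32-bit register (unchanged).
  ret i16 %x
}

define <2 x i16> @demo_vector_v2i16(<2 x i16> %v) {
  ; %v now arrives packed, both elements in one 32-bit register.
  ret <2 x i16> %v
}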