Skip to content
Merged
11 changes: 11 additions & 0 deletions llvm/lib/Target/AMDGPU/SIInstructions.td
Original file line number Diff line number Diff line change
Expand Up @@ -791,6 +791,17 @@ def : GCNPat<
(SI_CALL_ISEL $src0, (i64 0))
>;

// Funnel shift right (fshr) patterns for uniform inputs.
// fshr(a, b, c) is lowered to scalar instructions by constructing the 64-bit
// value {a (high 32 bits), b (low 32 bits)} and performing a single 64-bit
// right shift, then taking the low 32 bits of the result.
// fshr(a, b, c) with a variable (register) shift amount.
// REG_SEQUENCE builds the 64-bit pair with $src1 (b) in sub0 (low half) and
// $src0 (a) in sub1 (high half). The shift amount is masked with `& 31`
// because i32 fshr takes the amount modulo 32, while S_LSHR_B64 would
// otherwise honor a 6-bit amount (NOTE(review): 6-bit behavior per ISA —
// confirm against the target's S_LSHR_B64 definition). EXTRACT_SUBREG sub0
// then keeps the low 32 bits, which is exactly the fshr result.
// UniformTernaryFrag restricts the pattern to uniform (scalar) operands.
def : GCNPat<(UniformTernaryFrag<fshr> i32:$src0, i32:$src1, i32:$src2),
(i32 (EXTRACT_SUBREG (S_LSHR_B64 (REG_SEQUENCE SReg_64, $src1, sub0, $src0, sub1), (S_AND_B32 $src2, (i32 31))), sub0))
>;

// fshr(a, b, c) with an immediate shift amount matched by ShiftAmt32Imm.
// Same {a:hi, b:lo} pair construction as the variable-amount pattern above,
// but the shift amount is used directly with no `& 31` mask — presumably
// ShiftAmt32Imm only matches immediates already valid for a 32-bit shift
// (TODO(review): confirm ShiftAmt32Imm's definition guarantees amt < 32).
def : GCNPat<(UniformTernaryFrag<fshr> i32:$src0, i32:$src1, (i32 ShiftAmt32Imm:$src2)),
(i32 (EXTRACT_SUBREG (S_LSHR_B64 (REG_SEQUENCE SReg_64, $src1, sub0, $src0, sub1), $src2), sub0))
>;

// Wrapper around s_swappc_b64 with extra $callee parameter to track
// the called function after regalloc.
def SI_CALL : SPseudoInstSI <
Expand Down
24,690 changes: 13,018 additions & 11,672 deletions llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll

Large diffs are not rendered by default.

1,336 changes: 681 additions & 655 deletions llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll

Large diffs are not rendered by default.

2,627 changes: 1,345 additions & 1,282 deletions llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll

Large diffs are not rendered by default.

216 changes: 109 additions & 107 deletions llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll

Large diffs are not rendered by default.

92 changes: 46 additions & 46 deletions llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll
Original file line number Diff line number Diff line change
Expand Up @@ -290,34 +290,34 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; VI-NEXT: s_cbranch_execnz .LBB1_4
; VI-NEXT: .LBB1_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
; VI-NEXT: v_bfe_u32 v2, v1, 16, 1
; VI-NEXT: v_add_u32_e32 v2, vcc, v2, v1
; VI-NEXT: v_mov_b32_e32 v1, 0x40c00000
; VI-NEXT: v_add_f32_e32 v0, s4, v1
; VI-NEXT: v_bfe_u32 v2, v0, 16, 1
; VI-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; VI-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
; VI-NEXT: v_or_b32_e32 v3, 0x400000, v1
; VI-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
; VI-NEXT: v_add_f32_e32 v2, s4, v0
; VI-NEXT: v_bfe_u32 v3, v2, 16, 1
; VI-NEXT: v_add_u32_e32 v3, vcc, v3, v2
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
; VI-NEXT: v_or_b32_e32 v4, 0x400000, v2
; VI-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; VI-NEXT: v_add_f32_e32 v0, s4, v0
; VI-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; VI-NEXT: v_or_b32_e32 v3, 0x400000, v0
; VI-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; VI-NEXT: v_add_f32_e32 v0, s4, v1
; VI-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
; VI-NEXT: v_bfe_u32 v3, v0, 16, 1
; VI-NEXT: v_add_u32_e32 v3, vcc, v3, v0
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
; VI-NEXT: v_or_b32_e32 v4, 0x400000, v0
; VI-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; VI-NEXT: v_add_f32_e32 v1, s4, v1
; VI-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v2, 16
; VI-NEXT: v_mov_b32_e32 v2, 0x7fc00000
; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_bfe_u32 v3, v1, 16, 1
; VI-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
; VI-NEXT: v_or_b32_e32 v4, 0x400000, v1
; VI-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; VI-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; VI-NEXT: v_lshrrev_b64 v[0:1], 16, v[0:1]
; VI-NEXT: v_mov_b32_e32 v1, 0x7fc00000
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB1_3:
; VI-NEXT: s_branch .LBB1_2
Expand Down Expand Up @@ -964,16 +964,16 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; SI-NEXT: s_cbranch_execnz .LBB5_3
; SI-NEXT: .LBB5_2: ; %cmp.true
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v5
; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v1
; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v2
; SI-NEXT: v_lshr_b64 v[0:1], v[0:1], 16
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v2
; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v4
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v5
; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v1
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
; SI-NEXT: v_lshr_b64 v[3:4], v[1:2], 16
; SI-NEXT: v_alignbit_b32 v0, v5, v0, 16
; SI-NEXT: .LBB5_3: ; %end
; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_setpc_b64 s[30:31]
Expand All @@ -992,34 +992,34 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; VI-NEXT: s_cbranch_execnz .LBB5_4
; VI-NEXT: .LBB5_2: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
; VI-NEXT: v_bfe_u32 v2, v1, 16, 1
; VI-NEXT: v_add_u32_e32 v2, vcc, v2, v1
; VI-NEXT: v_mov_b32_e32 v1, 0x40c00000
; VI-NEXT: v_add_f32_e32 v0, s4, v1
; VI-NEXT: v_bfe_u32 v2, v0, 16, 1
; VI-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; VI-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
; VI-NEXT: v_or_b32_e32 v3, 0x400000, v1
; VI-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
; VI-NEXT: v_add_f32_e32 v2, s4, v0
; VI-NEXT: v_bfe_u32 v3, v2, 16, 1
; VI-NEXT: v_add_u32_e32 v3, vcc, v3, v2
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
; VI-NEXT: v_or_b32_e32 v4, 0x400000, v2
; VI-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; VI-NEXT: v_add_f32_e32 v0, s4, v0
; VI-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; VI-NEXT: v_or_b32_e32 v3, 0x400000, v0
; VI-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; VI-NEXT: v_add_f32_e32 v0, s4, v1
; VI-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
; VI-NEXT: v_bfe_u32 v3, v0, 16, 1
; VI-NEXT: v_add_u32_e32 v3, vcc, v3, v0
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
; VI-NEXT: v_or_b32_e32 v4, 0x400000, v0
; VI-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; VI-NEXT: v_add_f32_e32 v1, s4, v1
; VI-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v2, 16
; VI-NEXT: v_mov_b32_e32 v2, 0x7fc00000
; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_bfe_u32 v3, v1, 16, 1
; VI-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
; VI-NEXT: v_or_b32_e32 v4, 0x400000, v1
; VI-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; VI-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; VI-NEXT: v_lshrrev_b64 v[0:1], 16, v[0:1]
; VI-NEXT: v_mov_b32_e32 v1, 0x7fc00000
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB5_3:
; VI-NEXT: s_branch .LBB5_2
Expand Down
Loading
Loading