diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def index 74dfd1d214e849..d40fbed473bcdc 100644 --- a/clang/include/clang/Basic/BuiltinsAMDGPU.def +++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def @@ -293,20 +293,20 @@ TARGET_BUILTIN(__builtin_amdgcn_s_wait_event_export_ready, "v", "n", "gfx11-inst // Postfix w64 indicates the builtin requires wavefront size of 64. //===----------------------------------------------------------------------===// TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_f16_w32, "V8fV16hV16hV8f", "nc", "gfx11-insts") -TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32, "V8fV16sV16sV8f", "nc", "gfx11-insts") +TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32, "V8fV16yV16yV8f", "nc", "gfx11-insts") TARGET_BUILTIN(__builtin_amdgcn_wmma_f16_16x16x16_f16_w32, "V16hV16hV16hV16hIb", "nc", "gfx11-insts") -TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32, "V16sV16sV16sV16sIb", "nc", "gfx11-insts") +TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32, "V16yV16yV16yV16yIb", "nc", "gfx11-insts") TARGET_BUILTIN(__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w32, "V16hV16hV16hV16hIb", "nc", "gfx11-insts") -TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32, "V16sV16sV16sV16sIb", "nc", "gfx11-insts") +TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32, "V16yV16yV16yV16yIb", "nc", "gfx11-insts") TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32, "V8iIbV4iIbV4iV8iIb", "nc", "gfx11-insts") TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32, "V8iIbV2iIbV2iV8iIb", "nc", "gfx11-insts") TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_f16_w64, "V4fV16hV16hV4f", "nc", "gfx11-insts") -TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64, "V4fV16sV16sV4f", "nc", "gfx11-insts") +TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64, "V4fV16yV16yV4f", "nc", "gfx11-insts") TARGET_BUILTIN(__builtin_amdgcn_wmma_f16_16x16x16_f16_w64, "V8hV16hV16hV8hIb", "nc", "gfx11-insts") -TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64, "V8sV16sV16sV8sIb", "nc", "gfx11-insts") +TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64, "V8yV16yV16yV8yIb", "nc", "gfx11-insts") TARGET_BUILTIN(__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w64, "V8hV16hV16hV8hIb", "nc", "gfx11-insts") -TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64, "V8sV16sV16sV8sIb", "nc", "gfx11-insts") +TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64, "V8yV16yV16yV8yIb", "nc", "gfx11-insts") TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64, "V4iIbV4iIbV4iV4iIb", "nc", "gfx11-insts") TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64, "V4iIbV2iIbV2iV4iIb", "nc", "gfx11-insts") @@ -447,9 +447,9 @@ TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_v4f16, "V4hV4h*1", "nc", "gfx12-i // builtins. 
//===----------------------------------------------------------------------===// TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_f16_w32_gfx12, "V8fV8hV8hV8f", "nc", "gfx12-insts,wavefrontsize32") -TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32_gfx12, "V8fV8sV8sV8f", "nc", "gfx12-insts,wavefrontsize32") +TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32_gfx12, "V8fV8yV8yV8f", "nc", "gfx12-insts,wavefrontsize32") TARGET_BUILTIN(__builtin_amdgcn_wmma_f16_16x16x16_f16_w32_gfx12, "V8hV8hV8hV8h", "nc", "gfx12-insts,wavefrontsize32") -TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32_gfx12, "V8sV8sV8sV8s", "nc", "gfx12-insts,wavefrontsize32") +TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32_gfx12, "V8yV8yV8yV8y", "nc", "gfx12-insts,wavefrontsize32") TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32_gfx12, "V8iIbV2iIbV2iV8iIb", "nc", "gfx12-insts,wavefrontsize32") TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32_gfx12, "V8iIbiIbiV8iIb", "nc", "gfx12-insts,wavefrontsize32") // These are gfx12-only, but for consistency with the other WMMA variants we're @@ -461,9 +461,9 @@ TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf8_bf8_w32_gfx12, "V8fV2iV2iV TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x32_iu4_w32_gfx12, "V8iIbV2iIbV2iV8iIb", "nc", "gfx12-insts,wavefrontsize32") TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_f16_w64_gfx12, "V4fV4hV4hV4f", "nc", "gfx12-insts,wavefrontsize64") -TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64_gfx12, "V4fV4sV4sV4f", "nc", "gfx12-insts,wavefrontsize64") +TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64_gfx12, "V4fV4yV4yV4f", "nc", "gfx12-insts,wavefrontsize64") TARGET_BUILTIN(__builtin_amdgcn_wmma_f16_16x16x16_f16_w64_gfx12, "V4hV4hV4hV4h", "nc", "gfx12-insts,wavefrontsize64") -TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64_gfx12, "V4sV4sV4sV4s", "nc", "gfx12-insts,wavefrontsize64") +TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64_gfx12, "V4yV4yV4yV4y", "nc", "gfx12-insts,wavefrontsize64") TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64_gfx12, "V4iIbiIbiV4iIb", "nc", "gfx12-insts,wavefrontsize64") TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64_gfx12, "V4iIbiIbiV4iIb", "nc", "gfx12-insts,wavefrontsize64") // These are gfx12-only, but for consistency with the other WMMA variants we're @@ -475,9 +475,9 @@ TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf8_bf8_w64_gfx12, "V4fiiV4f", TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x32_iu4_w64_gfx12, "V4iIbiIbiV4iIb", "nc", "gfx12-insts,wavefrontsize64") TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_f16_w32, "V8fV8hV16hV8fs", "nc", "gfx12-insts,wavefrontsize32") -TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w32, "V8fV8sV16sV8fs", "nc", "gfx12-insts,wavefrontsize32") +TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w32, "V8fV8yV16yV8fs", "nc", "gfx12-insts,wavefrontsize32") TARGET_BUILTIN(__builtin_amdgcn_swmmac_f16_16x16x32_f16_w32, "V8hV8hV16hV8hs", "nc", "gfx12-insts,wavefrontsize32") -TARGET_BUILTIN(__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w32, "V8sV8sV16sV8ss", "nc", "gfx12-insts,wavefrontsize32") +TARGET_BUILTIN(__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w32, "V8yV8yV16yV8ys", "nc", "gfx12-insts,wavefrontsize32") TARGET_BUILTIN(__builtin_amdgcn_swmmac_i32_16x16x32_iu8_w32, "V8iIbV2iIbV4iV8isIb", "nc", "gfx12-insts,wavefrontsize32") TARGET_BUILTIN(__builtin_amdgcn_swmmac_i32_16x16x32_iu4_w32, "V8iIbiIbV2iV8isIb", "nc", "gfx12-insts,wavefrontsize32") 
TARGET_BUILTIN(__builtin_amdgcn_swmmac_i32_16x16x64_iu4_w32, "V8iIbV2iIbV4iV8isIb", "nc", "gfx12-insts,wavefrontsize32") @@ -487,9 +487,9 @@ TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_bf8_fp8_w32, "V8fV2iV4iV8fs" TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_bf8_bf8_w32, "V8fV2iV4iV8fs", "nc", "gfx12-insts,wavefrontsize32") TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_f16_w64, "V4fV4hV8hV4fs", "nc", "gfx12-insts,wavefrontsize64") -TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w64, "V4fV4sV8sV4fs", "nc", "gfx12-insts,wavefrontsize64") +TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w64, "V4fV4yV8yV4fs", "nc", "gfx12-insts,wavefrontsize64") TARGET_BUILTIN(__builtin_amdgcn_swmmac_f16_16x16x32_f16_w64, "V4hV4hV8hV4hs", "nc", "gfx12-insts,wavefrontsize64") -TARGET_BUILTIN(__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w64, "V4sV4sV8sV4ss", "nc", "gfx12-insts,wavefrontsize64") +TARGET_BUILTIN(__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w64, "V4yV4yV8yV4ys", "nc", "gfx12-insts,wavefrontsize64") TARGET_BUILTIN(__builtin_amdgcn_swmmac_i32_16x16x32_iu8_w64, "V4iIbiIbV2iV4isIb", "nc", "gfx12-insts,wavefrontsize64") TARGET_BUILTIN(__builtin_amdgcn_swmmac_i32_16x16x32_iu4_w64, "V4iIbiIbiV4isIb", "nc", "gfx12-insts,wavefrontsize64") TARGET_BUILTIN(__builtin_amdgcn_swmmac_i32_16x16x64_iu4_w64, "V4iIbiIbV2iV4isIb", "nc", "gfx12-insts,wavefrontsize64") diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12-wmma-w32.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12-wmma-w32.cl index a5d8bb34a7842d..6606ba77708c50 100644 --- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12-wmma-w32.cl +++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12-wmma-w32.cl @@ -5,7 +5,7 @@ typedef int v2i __attribute__((ext_vector_type(2))); typedef float v8f __attribute__((ext_vector_type(8))); typedef half v8h __attribute__((ext_vector_type(8))); -typedef short v8s __attribute__((ext_vector_type(8))); +typedef __bf16 v8bf __attribute__((ext_vector_type(8))); typedef int v8i __attribute__((ext_vector_type(8))); // Wave32 @@ -31,11 +31,11 @@ void test_amdgcn_wmma_f32_16x16x16_f16_w32(global v8f* out, v8h a, v8h b, v8f c) // CHECK-GFX1200-LABEL: @test_amdgcn_wmma_f32_16x16x16_bf16_w32( // CHECK-GFX1200-NEXT: entry: -// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8i16.v8f32(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x float> [[C:%.*]]) +// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8bf16.v8f32(<8 x bfloat> [[A:%.*]], <8 x bfloat> [[B:%.*]], <8 x float> [[C:%.*]]) // CHECK-GFX1200-NEXT: store <8 x float> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 32, !tbaa [[TBAA4]] // CHECK-GFX1200-NEXT: ret void // -void test_amdgcn_wmma_f32_16x16x16_bf16_w32(global v8f* out, v8s a, v8s b, v8f c) +void test_amdgcn_wmma_f32_16x16x16_bf16_w32(global v8f* out, v8bf a, v8bf b, v8f c) { *out = __builtin_amdgcn_wmma_f32_16x16x16_bf16_w32_gfx12(a, b, c); } @@ -61,11 +61,11 @@ void test_amdgcn_wmma_f16_16x16x16_f16_w32(global v8h* out, v8h a, v8h b, v8h c) // CHECK-GFX1200-LABEL: @test_amdgcn_wmma_bf16_16x16x16_bf16_w32( // CHECK-GFX1200-NEXT: entry: -// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8i16.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], i1 false) -// CHECK-GFX1200-NEXT: store <8 x i16> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 16, !tbaa [[TBAA4]] +// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <8 x bfloat> 
@llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8bf16.v8bf16(<8 x bfloat> [[A:%.*]], <8 x bfloat> [[B:%.*]], <8 x bfloat> [[C:%.*]], i1 false) +// CHECK-GFX1200-NEXT: store <8 x bfloat> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 16, !tbaa [[TBAA4]] // CHECK-GFX1200-NEXT: ret void // -void test_amdgcn_wmma_bf16_16x16x16_bf16_w32(global v8s* out, v8s a, v8s b, v8s c) +void test_amdgcn_wmma_bf16_16x16x16_bf16_w32(global v8bf* out, v8bf a, v8bf b, v8bf c) { *out = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32_gfx12(a, b, c); } diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12-wmma-w64.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12-wmma-w64.cl index 33be83bff03d57..2586761ffb00b4 100644 --- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12-wmma-w64.cl +++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx12-wmma-w64.cl @@ -4,7 +4,7 @@ typedef float v4f __attribute__((ext_vector_type(4))); typedef half v4h __attribute__((ext_vector_type(4))); -typedef short v4s __attribute__((ext_vector_type(4))); +typedef __bf16 v4bf __attribute__((ext_vector_type(4))); typedef int v4i __attribute__((ext_vector_type(4))); // Wave64 @@ -30,11 +30,11 @@ void test_amdgcn_wmma_f32_16x16x16_f16_w64(global v4f* out, v4h a, v4h b, v4f c) // CHECK-GFX1200-LABEL: @test_amdgcn_wmma_f32_16x16x16_bf16_w64( // CHECK-GFX1200-NEXT: entry: -// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4i16.v4f32(<4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x float> [[C:%.*]]) +// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4bf16.v4f32(<4 x bfloat> [[A:%.*]], <4 x bfloat> [[B:%.*]], <4 x float> [[C:%.*]]) // CHECK-GFX1200-NEXT: store <4 x float> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 16, !tbaa [[TBAA4]] // CHECK-GFX1200-NEXT: ret void // -void test_amdgcn_wmma_f32_16x16x16_bf16_w64(global v4f* out, v4s a, v4s b, v4f c) +void test_amdgcn_wmma_f32_16x16x16_bf16_w64(global v4f* out, v4bf a, v4bf b, v4f c) { *out = __builtin_amdgcn_wmma_f32_16x16x16_bf16_w64_gfx12(a, b, c); } @@ -60,11 +60,11 @@ void test_amdgcn_wmma_f16_16x16x16_f16_w64(global v4h* out, v4h a, v4h b, v4h c) // CHECK-GFX1200-LABEL: @test_amdgcn_wmma_bf16_16x16x16_bf16_w64( // CHECK-GFX1200-NEXT: entry: -// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <4 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4i16.v4i16(<4 x i16> [[A:%.*]], <4 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], i1 false) -// CHECK-GFX1200-NEXT: store <4 x i16> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 8, !tbaa [[TBAA4]] +// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <4 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4bf16.v4bf16(<4 x bfloat> [[A:%.*]], <4 x bfloat> [[B:%.*]], <4 x bfloat> [[C:%.*]], i1 false) +// CHECK-GFX1200-NEXT: store <4 x bfloat> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 8, !tbaa [[TBAA4]] // CHECK-GFX1200-NEXT: ret void // -void test_amdgcn_wmma_bf16_16x16x16_bf16_w64(global v4s* out, v4s a, v4s b, v4s c) +void test_amdgcn_wmma_bf16_16x16x16_bf16_w64(global v4bf* out, v4bf a, v4bf b, v4bf c) { *out = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64_gfx12(a, b, c); } diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-swmmac-w32.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-swmmac-w32.cl index f6dadb964db867..9cdda319a245b6 100644 --- a/clang/test/CodeGenOpenCL/builtins-amdgcn-swmmac-w32.cl +++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-swmmac-w32.cl @@ -6,10 +6,10 @@ typedef int v2i __attribute__((ext_vector_type(2))); typedef int v4i __attribute__((ext_vector_type(4))); typedef 
float v8f __attribute__((ext_vector_type(8))); typedef half v8h __attribute__((ext_vector_type(8))); -typedef short v8s __attribute__((ext_vector_type(8))); +typedef __bf16 v8bf __attribute__((ext_vector_type(8))); typedef int v8i __attribute__((ext_vector_type(8))); typedef half v16h __attribute__((ext_vector_type(16))); -typedef short v16s __attribute__((ext_vector_type(16))); +typedef __bf16 v16bf __attribute__((ext_vector_type(16))); // Wave32 @@ -26,11 +26,11 @@ void test_amdgcn_swmmac_f32_16x16x32_f16_w32(global v8f* out, v8h a, v16h b, v8f // CHECK-GFX1200-LABEL: @test_amdgcn_swmmac_f32_16x16x32_bf16_w32( // CHECK-GFX1200-NEXT: entry: -// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8i16.v16i16.v8f32.i16(<8 x i16> [[A:%.*]], <16 x i16> [[B:%.*]], <8 x float> [[C:%.*]], i16 [[INDEX:%.*]]) +// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8bf16.v16bf16.v8f32.i16(<8 x bfloat> [[A:%.*]], <16 x bfloat> [[B:%.*]], <8 x float> [[C:%.*]], i16 [[INDEX:%.*]]) // CHECK-GFX1200-NEXT: store <8 x float> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 32, !tbaa [[TBAA4]] // CHECK-GFX1200-NEXT: ret void // -void test_amdgcn_swmmac_f32_16x16x32_bf16_w32(global v8f* out, v8s a, v16s b, v8f c, short index) +void test_amdgcn_swmmac_f32_16x16x32_bf16_w32(global v8f* out, v8bf a, v16bf b, v8f c, short index) { *out = __builtin_amdgcn_swmmac_f32_16x16x32_bf16_w32(a, b, c, index); } @@ -48,11 +48,11 @@ void test_amdgcn_swmmac_f16_16x16x32_f16_w32(global v8h* out, v8h a, v16h b, v8h // CHECK-GFX1200-LABEL: @test_amdgcn_swmmac_bf16_16x16x32_bf16_w32( // CHECK-GFX1200-NEXT: entry: -// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8i16.v16i16.v8i16.i16(<8 x i16> [[A:%.*]], <16 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], i16 [[INDEX:%.*]]) -// CHECK-GFX1200-NEXT: store <8 x i16> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 16, !tbaa [[TBAA4]] +// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8bf16.v16bf16.v8bf16.i16(<8 x bfloat> [[A:%.*]], <16 x bfloat> [[B:%.*]], <8 x bfloat> [[C:%.*]], i16 [[INDEX:%.*]]) +// CHECK-GFX1200-NEXT: store <8 x bfloat> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 16, !tbaa [[TBAA4]] // CHECK-GFX1200-NEXT: ret void // -void test_amdgcn_swmmac_bf16_16x16x32_bf16_w32(global v8s* out, v8s a, v16s b, v8s c, short index) +void test_amdgcn_swmmac_bf16_16x16x32_bf16_w32(global v8bf* out, v8bf a, v16bf b, v8bf c, short index) { *out = __builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w32(a, b, c, index); } diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-swmmac-w64.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-swmmac-w64.cl index 90f8cd7a022c9b..d77751eccdfdab 100644 --- a/clang/test/CodeGenOpenCL/builtins-amdgcn-swmmac-w64.cl +++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-swmmac-w64.cl @@ -6,9 +6,9 @@ typedef int v2i __attribute__((ext_vector_type(2))); typedef int v4i __attribute__((ext_vector_type(4))); typedef float v4f __attribute__((ext_vector_type(4))); typedef half v4h __attribute__((ext_vector_type(4))); -typedef short v4s __attribute__((ext_vector_type(4))); +typedef __bf16 v4bf __attribute__((ext_vector_type(4))); typedef half v8h __attribute__((ext_vector_type(8))); -typedef short v8s __attribute__((ext_vector_type(8))); +typedef __bf16 v8bf __attribute__((ext_vector_type(8))); // Wave64 @@ -25,11 +25,11 @@ void test_amdgcn_swmmac_f32_16x16x32_f16_w64(global v4f* out, v4h a, v8h b, v4f 
// CHECK-GFX1200-LABEL: @test_amdgcn_swmmac_f32_16x16x32_bf16_w64( // CHECK-GFX1200-NEXT: entry: -// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4i16.v8i16.v4f32.i16(<4 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <4 x float> [[C:%.*]], i16 [[INDEX:%.*]]) +// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4bf16.v8bf16.v4f32.i16(<4 x bfloat> [[A:%.*]], <8 x bfloat> [[B:%.*]], <4 x float> [[C:%.*]], i16 [[INDEX:%.*]]) // CHECK-GFX1200-NEXT: store <4 x float> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 16, !tbaa [[TBAA4]] // CHECK-GFX1200-NEXT: ret void // -void test_amdgcn_swmmac_f32_16x16x32_bf16_w64(global v4f* out, v4s a, v8s b, v4f c, short index) +void test_amdgcn_swmmac_f32_16x16x32_bf16_w64(global v4f* out, v4bf a, v8bf b, v4f c, short index) { *out = __builtin_amdgcn_swmmac_f32_16x16x32_bf16_w64(a, b, c, index); } @@ -47,11 +47,11 @@ void test_amdgcn_swmmac_f16_16x16x32_f16_w64(global v4h* out, v4h a, v8h b, v4h // CHECK-GFX1200-LABEL: @test_amdgcn_swmmac_bf16_16x16x32_bf16_w64( // CHECK-GFX1200-NEXT: entry: -// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <4 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4i16.v8i16.v4i16.i16(<4 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <4 x i16> [[C:%.*]], i16 [[INDEX:%.*]]) -// CHECK-GFX1200-NEXT: store <4 x i16> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 8, !tbaa [[TBAA4]] +// CHECK-GFX1200-NEXT: [[TMP0:%.*]] = tail call <4 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4bf16.v8bf16.v4bf16.i16(<4 x bfloat> [[A:%.*]], <8 x bfloat> [[B:%.*]], <4 x bfloat> [[C:%.*]], i16 [[INDEX:%.*]]) +// CHECK-GFX1200-NEXT: store <4 x bfloat> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 8, !tbaa [[TBAA4]] // CHECK-GFX1200-NEXT: ret void // -void test_amdgcn_swmmac_bf16_16x16x32_bf16_w64(global v4s* out, v4s a, v8s b, v4s c, short index) +void test_amdgcn_swmmac_bf16_16x16x32_bf16_w64(global v4bf* out, v4bf a, v8bf b, v4bf c, short index) { *out = __builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w64(a, b, c, index); } diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w32-gfx10-err.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w32-gfx10-err.cl index 41a78ae268be57..6f0146430703f2 100644 --- a/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w32-gfx10-err.cl +++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w32-gfx10-err.cl @@ -10,23 +10,23 @@ typedef half v16h __attribute__((ext_vector_type(16))); typedef int v2i __attribute__((ext_vector_type(2))); typedef int v4i __attribute__((ext_vector_type(4))); typedef int v8i __attribute__((ext_vector_type(8))); -typedef short v16s __attribute__((ext_vector_type(16))); +typedef __bf16 v16bf __attribute__((ext_vector_type(16))); #ifdef WMMA_GFX1100_TESTS // Wave32 -void test_amdgcn_wmma_f32_16x16x16_bf16_w32(global v8f* out8f, v16s a16s, v16s b16s, v8f c8f, +void test_amdgcn_wmma_f32_16x16x16_bf16_w32(global v8f* out8f, v16bf a16bf, v16bf b16bf, v8f c8f, global v16h* out16h, v16h a16h, v16h b16h, v16h c16h, - global v16s* out16s, v2i a2i, v2i b2i, v16s c16s, + global v16bf* out16bf, v2i a2i, v2i b2i, v16bf c16bf, global v8i* out8i, v4i a4i, v4i b4i, v8i c8i) { *out8f = __builtin_amdgcn_wmma_f32_16x16x16_f16_w32(a16h, b16h, c8f); // expected-error{{'__builtin_amdgcn_wmma_f32_16x16x16_f16_w32' needs target feature gfx11-insts}} - *out8f = __builtin_amdgcn_wmma_f32_16x16x16_bf16_w32(a16s, b16s, c8f); // expected-error{{'__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32' needs target feature gfx11-insts}} + *out8f = 
__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32(a16bf, b16bf, c8f); // expected-error{{'__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32' needs target feature gfx11-insts}} *out16h = __builtin_amdgcn_wmma_f16_16x16x16_f16_w32(a16h, b16h, c16h, true); // expected-error{{'__builtin_amdgcn_wmma_f16_16x16x16_f16_w32' needs target feature gfx11-insts}} - *out16s = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32(a16s, b16s, c16s, true); // expected-error{{'__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32' needs target feature gfx11-insts}} + *out16bf = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32(a16bf, b16bf, c16bf, true); // expected-error{{'__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32' needs target feature gfx11-insts}} *out16h = __builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w32(a16h, b16h, c16h, true); // expected-error{{'__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w32' needs target feature gfx11-insts}} - *out16s = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32(a16s, b16s, c16s, true); // expected-error{{'__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32' needs target feature gfx11-insts}} + *out16bf = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32(a16bf, b16bf, c16bf, true); // expected-error{{'__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32' needs target feature gfx11-insts}} *out8i = __builtin_amdgcn_wmma_i32_16x16x16_iu8_w32(true, a4i, true, b4i, c8i, false); // expected-error{{'__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32' needs target feature gfx11-insts}} *out8i = __builtin_amdgcn_wmma_i32_16x16x16_iu4_w32(true, a2i, true, b2i, c8i, false); // expected-error{{'__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32' needs target feature gfx11-insts}} } diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w32-param-err.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w32-param-err.cl index b821ed0feef623..db8d482697b06a 100644 --- a/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w32-param-err.cl +++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w32-param-err.cl @@ -10,21 +10,21 @@ typedef half v16h __attribute__((ext_vector_type(16))); typedef int v2i __attribute__((ext_vector_type(2))); typedef int v4i __attribute__((ext_vector_type(4))); typedef int v8i __attribute__((ext_vector_type(8))); -typedef short v16s __attribute__((ext_vector_type(16))); +typedef __bf16 v16bf __attribute__((ext_vector_type(16))); #ifdef WMMA_GFX1100_TESTS // Wave32 -void test_amdgcn_wmma_f32_16x16x16_bf16_w32(v16s a16s, v16s b16s, unsigned int i, +void test_amdgcn_wmma_f32_16x16x16_bf16_w32(v16bf a16bf, v16bf b16bf, unsigned int i, global v16h* out16h, v16h a16h, v16h b16h, v16h c16h, - global v16s* out16s, v2i a2i, v2i b2i, v16s c16s, + global v16bf* out16bf, v2i a2i, v2i b2i, v16bf c16bf, global v8i* out8i, v4i a4i, v4i b4i, v8i c8i) { *out16h = __builtin_amdgcn_wmma_f16_16x16x16_f16_w32(a16h, b16h, c16h, i); // expected-error{{argument to '__builtin_amdgcn_wmma_f16_16x16x16_f16_w32' must be a constant integer}} - *out16s = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32(a16s, b16s, c16s, i); // expected-error{{argument to '__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32' must be a constant integer}} + *out16bf = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32(a16bf, b16bf, c16bf, i); // expected-error{{argument to '__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32' must be a constant integer}} *out16h = __builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w32(a16h, b16h, c16h, i); // expected-error{{argument to '__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w32' must be a constant integer}} - *out16s = 
__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32(a16s, b16s, c16s, i); // expected-error{{argument to '__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32' must be a constant integer}} + *out16bf = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32(a16bf, b16bf, c16bf, i); // expected-error{{argument to '__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32' must be a constant integer}} *out8i = __builtin_amdgcn_wmma_i32_16x16x16_iu8_w32(i, a4i, true, b4i, c8i, false); // expected-error{{argument to '__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32' must be a constant integer}} *out8i = __builtin_amdgcn_wmma_i32_16x16x16_iu8_w32(true, a4i, i, b4i, c8i, false); // expected-error{{argument to '__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32' must be a constant integer}} *out8i = __builtin_amdgcn_wmma_i32_16x16x16_iu4_w32(i, a2i, true, b2i, c8i, false); // expected-error{{argument to '__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32' must be a constant integer}} diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w32.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w32.cl index 6cb283e5891901..7af4aaf5469b8a 100644 --- a/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w32.cl +++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w32.cl @@ -8,6 +8,7 @@ typedef int v2i __attribute__((ext_vector_type(2))); typedef int v4i __attribute__((ext_vector_type(4))); typedef int v8i __attribute__((ext_vector_type(8))); typedef short v16s __attribute__((ext_vector_type(16))); +typedef __bf16 v16bf __attribute__((ext_vector_type(16))); #ifdef WMMA_GFX1100_TESTS @@ -34,11 +35,11 @@ void test_amdgcn_wmma_f32_16x16x16_f16_w32(global v8f* out, v16h a, v16h b, v8f // CHECK-GFX1100-LABEL: @test_amdgcn_wmma_f32_16x16x16_bf16_w32( // CHECK-GFX1100-NEXT: entry: -// CHECK-GFX1100-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v16i16.v8f32(<16 x i16> [[A:%.*]], <16 x i16> [[B:%.*]], <8 x float> [[C:%.*]]) +// CHECK-GFX1100-NEXT: [[TMP0:%.*]] = tail call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v16bf16.v8f32(<16 x bfloat> [[A:%.*]], <16 x bfloat> [[B:%.*]], <8 x float> [[C:%.*]]) // CHECK-GFX1100-NEXT: store <8 x float> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 32, !tbaa [[TBAA4]] // CHECK-GFX1100-NEXT: ret void // -void test_amdgcn_wmma_f32_16x16x16_bf16_w32(global v8f* out, v16s a, v16s b, v8f c) +void test_amdgcn_wmma_f32_16x16x16_bf16_w32(global v8f* out, v16bf a, v16bf b, v8f c) { *out = __builtin_amdgcn_wmma_f32_16x16x16_bf16_w32(a, b, c); } @@ -64,11 +65,11 @@ void test_amdgcn_wmma_f16_16x16x16_f16_w32(global v16h* out, v16h a, v16h b, v16 // CHECK-GFX1100-LABEL: @test_amdgcn_wmma_bf16_16x16x16_bf16_w32( // CHECK-GFX1100-NEXT: entry: -// CHECK-GFX1100-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v16i16.v16i16(<16 x i16> [[A:%.*]], <16 x i16> [[B:%.*]], <16 x i16> [[C:%.*]], i1 true) -// CHECK-GFX1100-NEXT: store <16 x i16> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 32, !tbaa [[TBAA4]] +// CHECK-GFX1100-NEXT: [[TMP0:%.*]] = tail call <16 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v16bf16.v16bf16(<16 x bfloat> [[A:%.*]], <16 x bfloat> [[B:%.*]], <16 x bfloat> [[C:%.*]], i1 true) +// CHECK-GFX1100-NEXT: store <16 x bfloat> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 32, !tbaa [[TBAA4]] // CHECK-GFX1100-NEXT: ret void // -void test_amdgcn_wmma_bf16_16x16x16_bf16_w32(global v16s* out, v16s a, v16s b, v16s c) +void test_amdgcn_wmma_bf16_16x16x16_bf16_w32(global v16bf* out, v16bf a, v16bf b, v16bf c) { *out = 
__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32(a, b, c, true); } @@ -94,11 +95,11 @@ void test_amdgcn_wmma_f16_16x16x16_f16_tied_w32(global v16h* out, v16h a, v16h b // CHECK-GFX1100-LABEL: @test_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32( // CHECK-GFX1100-NEXT: entry: -// CHECK-GFX1100-NEXT: [[TMP0:%.*]] = tail call <16 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.tied.v16i16.v16i16(<16 x i16> [[A:%.*]], <16 x i16> [[B:%.*]], <16 x i16> [[C:%.*]], i1 true) -// CHECK-GFX1100-NEXT: store <16 x i16> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 32, !tbaa [[TBAA4]] +// CHECK-GFX1100-NEXT: [[TMP0:%.*]] = tail call <16 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.tied.v16bf16.v16bf16(<16 x bfloat> [[A:%.*]], <16 x bfloat> [[B:%.*]], <16 x bfloat> [[C:%.*]], i1 true) +// CHECK-GFX1100-NEXT: store <16 x bfloat> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 32, !tbaa [[TBAA4]] // CHECK-GFX1100-NEXT: ret void // -void test_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32(global v16s* out, v16s a, v16s b, v16s c) +void test_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32(global v16bf* out, v16bf a, v16bf b, v16bf c) { *out = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32(a, b, c, true); } diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w64-gfx10-err.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w64-gfx10-err.cl index d5d9d973eb3004..64efe7293ac93d 100644 --- a/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w64-gfx10-err.cl +++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w64-gfx10-err.cl @@ -9,24 +9,24 @@ typedef half v8h __attribute__((ext_vector_type(8))); typedef half v16h __attribute__((ext_vector_type(16))); typedef int v2i __attribute__((ext_vector_type(2))); typedef int v4i __attribute__((ext_vector_type(4))); -typedef short v8s __attribute__((ext_vector_type(8))); -typedef short v16s __attribute__((ext_vector_type(16))); +typedef __bf16 v8bf __attribute__((ext_vector_type(8))); +typedef __bf16 v16bf __attribute__((ext_vector_type(16))); #ifdef WMMA_GFX1100_TESTS // Wave64 void test_amdgcn_wmma_f32_16x16x16_bf16_w64(global v4f* out4f, v16h a16h, v16h b16h, v4f c4f, - global v8h* out8h, v16s a16s, v16s b16s, v8h c8h, - global v8s* out8s, v4i a4i, v4i b4i, v8s c8s, + global v8h* out8h, v16bf a16bf, v16bf b16bf, v8h c8h, + global v8bf* out8bf, v4i a4i, v4i b4i, v8bf c8bf, global v4i* out4i, v2i a2i, v2i b2i, v4i c4i) { *out4f = __builtin_amdgcn_wmma_f32_16x16x16_f16_w64(a16h, b16h, c4f); // expected-error{{'__builtin_amdgcn_wmma_f32_16x16x16_f16_w64' needs target feature gfx11-insts}} - *out4f = __builtin_amdgcn_wmma_f32_16x16x16_bf16_w64(a16s, b16s, c4f); // expected-error{{'__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64' needs target feature gfx11-insts}} + *out4f = __builtin_amdgcn_wmma_f32_16x16x16_bf16_w64(a16bf, b16bf, c4f); // expected-error{{'__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64' needs target feature gfx11-insts}} *out8h = __builtin_amdgcn_wmma_f16_16x16x16_f16_w64(a16h, b16h, c8h, true); // expected-error{{'__builtin_amdgcn_wmma_f16_16x16x16_f16_w64' needs target feature gfx11-insts}} - *out8s = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64(a16s, b16s, c8s, true); // expected-error{{'__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64' needs target feature gfx11-insts}} + *out8bf = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64(a16bf, b16bf, c8bf, true); // expected-error{{'__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64' needs target feature gfx11-insts}} *out8h = __builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w64(a16h, b16h, c8h, true); // 
expected-error{{'__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w64' needs target feature gfx11-insts}} - *out8s = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64(a16s, b16s, c8s, true); // expected-error{{'__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64' needs target feature gfx11-insts}} + *out8bf = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64(a16bf, b16bf, c8bf, true); // expected-error{{'__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64' needs target feature gfx11-insts}} *out4i = __builtin_amdgcn_wmma_i32_16x16x16_iu8_w64(true, a4i, true, b4i, c4i, false); // expected-error{{'__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64' needs target feature gfx11-insts}} *out4i = __builtin_amdgcn_wmma_i32_16x16x16_iu4_w64(true, a2i, true, b2i, c4i, false); // expected-error{{'__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64' needs target feature gfx11-insts}} } diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w64-param-err.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w64-param-err.cl index a579ac62d3c3fa..53a777268c9b72 100644 --- a/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w64-param-err.cl +++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w64-param-err.cl @@ -9,22 +9,22 @@ typedef half v8h __attribute__((ext_vector_type(8))); typedef half v16h __attribute__((ext_vector_type(16))); typedef int v2i __attribute__((ext_vector_type(2))); typedef int v4i __attribute__((ext_vector_type(4))); -typedef short v8s __attribute__((ext_vector_type(8))); -typedef short v16s __attribute__((ext_vector_type(16))); +typedef __bf16 v8bf __attribute__((ext_vector_type(8))); +typedef __bf16 v16bf __attribute__((ext_vector_type(16))); #ifdef WMMA_GFX1100_TESTS // Wave64 void test_amdgcn_wmma_f32_16x16x16_bf16_w64(v16h a16h, v16h b16h, unsigned int i, - global v8h* out8h, v16s a16s, v16s b16s, v8h c8h, - global v8s* out8s, v4i a4i, v4i b4i, v8s c8s, + global v8h* out8h, v16bf a16bf, v16bf b16bf, v8h c8h, + global v8bf* out8bf, v4i a4i, v4i b4i, v8bf c8bf, global v4i* out4i, v2i a2i, v2i b2i, v4i c4i) { *out8h = __builtin_amdgcn_wmma_f16_16x16x16_f16_w64(a16h, b16h, c8h, i); // expected-error{{argument to '__builtin_amdgcn_wmma_f16_16x16x16_f16_w64' must be a constant integer}} - *out8s = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64(a16s, b16s, c8s, i); // expected-error{{argument to '__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64' must be a constant integer}} + *out8bf = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64(a16bf, b16bf, c8bf, i); // expected-error{{argument to '__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64' must be a constant integer}} *out8h = __builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w64(a16h, b16h, c8h, i); // expected-error{{argument to '__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w64' must be a constant integer}} - *out8s = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64(a16s, b16s, c8s, i); // expected-error{{argument to '__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64' must be a constant integer}} + *out8bf = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64(a16bf, b16bf, c8bf, i); // expected-error{{argument to '__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64' must be a constant integer}} *out4i = __builtin_amdgcn_wmma_i32_16x16x16_iu8_w64(i, a4i, true, b4i, c4i, false); // expected-error{{argument to '__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64' must be a constant integer}} *out4i = __builtin_amdgcn_wmma_i32_16x16x16_iu8_w64(true, a4i, i, b4i, c4i, false); // expected-error{{argument to '__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64' must be a constant integer}} *out4i = 
__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64(i, a2i, true, b2i, c4i, false); // expected-error{{argument to '__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64' must be a constant integer}} diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w64.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w64.cl index 833c662d98189f..7cddf98eecc10e 100644 --- a/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w64.cl +++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-wmma-w64.cl @@ -7,8 +7,8 @@ typedef half v8h __attribute__((ext_vector_type(8))); typedef half v16h __attribute__((ext_vector_type(16))); typedef int v2i __attribute__((ext_vector_type(2))); typedef int v4i __attribute__((ext_vector_type(4))); -typedef short v8s __attribute__((ext_vector_type(8))); -typedef short v16s __attribute__((ext_vector_type(16))); +typedef __bf16 v8bf __attribute__((ext_vector_type(8))); +typedef __bf16 v16bf __attribute__((ext_vector_type(16))); #ifdef WMMA_GFX1100_TESTS @@ -35,11 +35,11 @@ void test_amdgcn_wmma_f32_16x16x16_f16_w64(global v4f* out, v16h a, v16h b, v4f // CHECK-GFX1100-LABEL: @test_amdgcn_wmma_f32_16x16x16_bf16_w64( // CHECK-GFX1100-NEXT: entry: -// CHECK-GFX1100-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v16i16.v4f32(<16 x i16> [[A:%.*]], <16 x i16> [[B:%.*]], <4 x float> [[C:%.*]]) +// CHECK-GFX1100-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v16bf16.v4f32(<16 x bfloat> [[A:%.*]], <16 x bfloat> [[B:%.*]], <4 x float> [[C:%.*]]) // CHECK-GFX1100-NEXT: store <4 x float> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 16, !tbaa [[TBAA4]] // CHECK-GFX1100-NEXT: ret void // -void test_amdgcn_wmma_f32_16x16x16_bf16_w64(global v4f* out, v16s a, v16s b, v4f c) +void test_amdgcn_wmma_f32_16x16x16_bf16_w64(global v4f* out, v16bf a, v16bf b, v4f c) { *out = __builtin_amdgcn_wmma_f32_16x16x16_bf16_w64(a, b, c); } @@ -65,11 +65,11 @@ void test_amdgcn_wmma_f16_16x16x16_f16_w64(global v8h* out, v16h a, v16h b, v8h // CHECK-GFX1100-LABEL: @test_amdgcn_wmma_bf16_16x16x16_bf16_w64( // CHECK-GFX1100-NEXT: entry: -// CHECK-GFX1100-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v16i16.v8i16(<16 x i16> [[A:%.*]], <16 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], i1 true) -// CHECK-GFX1100-NEXT: store <8 x i16> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 16, !tbaa [[TBAA4]] +// CHECK-GFX1100-NEXT: [[TMP0:%.*]] = tail call <8 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v16bf16.v8bf16(<16 x bfloat> [[A:%.*]], <16 x bfloat> [[B:%.*]], <8 x bfloat> [[C:%.*]], i1 true) +// CHECK-GFX1100-NEXT: store <8 x bfloat> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 16, !tbaa [[TBAA4]] // CHECK-GFX1100-NEXT: ret void // -void test_amdgcn_wmma_bf16_16x16x16_bf16_w64(global v8s* out, v16s a, v16s b, v8s c) +void test_amdgcn_wmma_bf16_16x16x16_bf16_w64(global v8bf* out, v16bf a, v16bf b, v8bf c) { *out = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64(a, b, c, true); } @@ -95,11 +95,11 @@ void test_amdgcn_wmma_f16_16x16x16_f16_tied_w64(global v8h* out, v16h a, v16h b, // CHECK-GFX1100-LABEL: @test_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64( // CHECK-GFX1100-NEXT: entry: -// CHECK-GFX1100-NEXT: [[TMP0:%.*]] = tail call <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.tied.v16i16.v8i16(<16 x i16> [[A:%.*]], <16 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], i1 true) -// CHECK-GFX1100-NEXT: store <8 x i16> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 16, !tbaa [[TBAA4]] +// CHECK-GFX1100-NEXT: [[TMP0:%.*]] = tail call <8 x bfloat> 
@llvm.amdgcn.wmma.bf16.16x16x16.bf16.tied.v16bf16.v8bf16(<16 x bfloat> [[A:%.*]], <16 x bfloat> [[B:%.*]], <8 x bfloat> [[C:%.*]], i1 true)
+// CHECK-GFX1100-NEXT: store <8 x bfloat> [[TMP0]], ptr addrspace(1) [[OUT:%.*]], align 16, !tbaa [[TBAA4]]
 // CHECK-GFX1100-NEXT: ret void
 //
-void test_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64(global v8s* out, v16s a, v16s b, v8s c)
+void test_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64(global v8bf* out, v16bf a, v16bf b, v8bf c)
 {
   *out = __builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64(a, b, c, true);
 }
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 5055ceff875788..3700a8b3411a52 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -2654,12 +2654,12 @@ class AMDGPUWmmaIntrinsicIU :
 // The tied versions of the f16/bf16 wmma intrinsics tie the destination matrix registers to the input accumulator registers.
 // The content of the other 16-bit half is preserved from the input.
 def int_amdgcn_wmma_f16_16x16x16_f16_tied : AMDGPUWmmaIntrinsicOPSEL;
-def int_amdgcn_wmma_bf16_16x16x16_bf16_tied : AMDGPUWmmaIntrinsicOPSEL;
+def int_amdgcn_wmma_bf16_16x16x16_bf16_tied : AMDGPUWmmaIntrinsicOPSEL;
 // WMMA GFX11Plus
 def int_amdgcn_wmma_f32_16x16x16_f16 : AMDGPUWmmaIntrinsic;
-def int_amdgcn_wmma_f32_16x16x16_bf16 : AMDGPUWmmaIntrinsic;
+def int_amdgcn_wmma_f32_16x16x16_bf16 : AMDGPUWmmaIntrinsic;
 def int_amdgcn_wmma_i32_16x16x16_iu8 : AMDGPUWmmaIntrinsicIU;
 def int_amdgcn_wmma_i32_16x16x16_iu4 : AMDGPUWmmaIntrinsicIU;
@@ -2667,7 +2667,7 @@ def int_amdgcn_wmma_i32_16x16x16_iu4 : AMDGPUWmmaIntrinsicIU;
-def int_amdgcn_wmma_bf16_16x16x16_bf16 : AMDGPUWmmaIntrinsicOPSEL;
+def int_amdgcn_wmma_bf16_16x16x16_bf16 : AMDGPUWmmaIntrinsicOPSEL;
 //===----------------------------------------------------------------------===//
 // GFX12 Intrinsics
@@ -2735,9 +2735,9 @@ class AMDGPUSWmmacIntrinsicIUIdx;
 def int_amdgcn_swmmac_f32_16x16x32_f16 : AMDGPUSWmmacIntrinsicIdx;
-def int_amdgcn_swmmac_f32_16x16x32_bf16 : AMDGPUSWmmacIntrinsicIdx;
+def int_amdgcn_swmmac_f32_16x16x32_bf16 : AMDGPUSWmmacIntrinsicIdx;
 def int_amdgcn_swmmac_f16_16x16x32_f16 : AMDGPUSWmmacIntrinsicIdx;
-def int_amdgcn_swmmac_bf16_16x16x32_bf16 : AMDGPUSWmmacIntrinsicIdx;
+def int_amdgcn_swmmac_bf16_16x16x32_bf16 : AMDGPUSWmmacIntrinsicIdx;
 def int_amdgcn_swmmac_i32_16x16x32_iu8 : AMDGPUSWmmacIntrinsicIUIdx;
 def int_amdgcn_swmmac_i32_16x16x32_iu4 : AMDGPUSWmmacIntrinsicIUIdx;
 def int_amdgcn_swmmac_i32_16x16x64_iu4 : AMDGPUSWmmacIntrinsicIUIdx;
diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
index 107b95a9ca8eb0..a458f4cef12d08 100644
--- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -972,6 +972,10 @@ let WaveSizePredicate = isWave64 in {
 }
+class isBF16<ValueType SrcVT> {
+  bit ret = !or(!eq(SrcVT, v4bf16), !eq(SrcVT, v8bf16), !eq(SrcVT, v16bf16));
+}
+
 class VOP3PWMMA_Profile<list<ValueType> ArgTy, bit _IsSWMMAC, int _IndexType,
                         bit _IsIU, bit _IsFP8BF8> : VOP3P_Profile<VOPProfile<ArgTy>> {
@@ -986,9 +990,9 @@ class VOP3PWMMA_Profile<list<ValueType> ArgTy, bit _IsSWMMAC, int _IndexType,
   let IsSWMMAC = _IsSWMMAC;
   bit IsAB_F16 = !and(IsF16BF16, ArgTy[1].isFP);
-  bit IsAB_BF16 = !and(IsF16BF16, isIntType<ArgTy[1]>.ret);
+  bit IsAB_BF16 = !and(IsF16BF16, isBF16<ArgTy[1]>.ret);
   bit IsC_F32 = !or(!eq(ArgTy[3], v8f32), !eq(ArgTy[3], v4f32));
-  bit IsC_BF16 = !or(!eq(ArgTy[3], v8i16), !eq(ArgTy[3], v4i16));
+  bit IsC_BF16 = !or(!eq(ArgTy[3], v8bf16), !eq(ArgTy[3], v4bf16));
   bit IsC_F16 = 
!or(!eq(ArgTy[3], v8f16), !eq(ArgTy[3], v4f16)); bit NegLo01 = !or(IsF16BF16, IsIU); @@ -1001,24 +1005,24 @@ class VOP3PWMMA_Profile ArgTy, bit _IsSWMMAC, int _IndexType, let DstRC = !cond(!eq(ArgTy[0], v8f32): VDst_256, !eq(ArgTy[0], v8i32): VDst_256, !eq(ArgTy[0], v8f16): VDst_128, - !eq(ArgTy[0], v8i16): VDst_128, + !eq(ArgTy[0], v8bf16): VDst_128, !eq(ArgTy[0], v4f32): VDst_128, !eq(ArgTy[0], v4i32): VDst_128, !eq(ArgTy[0], v4f16): VDst_64, - !eq(ArgTy[0], v4i16): VDst_64); + !eq(ArgTy[0], v4bf16): VDst_64); let Src0RC64 = !cond(!eq(ArgTy[1], v8f16): VRegSrc_128, !eq(ArgTy[1], v4f16): VRegSrc_64, - !eq(ArgTy[1], v4i16): VRegSrc_64, - !eq(ArgTy[1], v8i16): VRegSrc_128, + !eq(ArgTy[1], v4bf16): VRegSrc_64, + !eq(ArgTy[1], v8bf16): VRegSrc_128, !eq(ArgTy[1], v4i32): VRegSrc_128, !eq(ArgTy[1], v2i32): VRegSrc_64, !eq(ArgTy[1], i32) : VRegSrc_32); let Src1RC64 = !cond(!eq(ArgTy[2], v16f16): VRegSrc_256, - !eq(ArgTy[2], v16i16): VRegSrc_256, + !eq(ArgTy[2], v16bf16): VRegSrc_256, !eq(ArgTy[2], v8f16): VRegSrc_128, - !eq(ArgTy[2], v8i16): VRegSrc_128, + !eq(ArgTy[2], v8bf16): VRegSrc_128, !eq(ArgTy[2], v4i32): VRegSrc_128, - !eq(ArgTy[1], v4i16): VRegSrc_64, + !eq(ArgTy[1], v4bf16): VRegSrc_64, !eq(ArgTy[1], v4f16): VRegSrc_64, !eq(ArgTy[2], v2i32): VRegSrc_64, !eq(ArgTy[2], i32) : VRegSrc_32); @@ -1026,9 +1030,9 @@ class VOP3PWMMA_Profile ArgTy, bit _IsSWMMAC, int _IndexType, !cond(!eq(ArgTy[3], v8f32): VISrc_256_f32, !eq(ArgTy[3], v8i32): VISrc_256_b32, !eq(ArgTy[3], v8f16): VISrc_128_f16, - !eq(ArgTy[3], v8i16): VISrc_128_f32, // bf16 + !eq(ArgTy[3], v8bf16): VISrc_128_f32, !eq(ArgTy[3], v4f16): VISrc_64_f16, - !eq(ArgTy[3], v4i16): VISrc_64_b32, + !eq(ArgTy[3], v4bf16): VISrc_64_b32, !eq(ArgTy[3], v4i32): VISrc_128_b32, !eq(ArgTy[3], v4f32): VISrc_128_f32)); @@ -1197,41 +1201,41 @@ multiclass SWMMACInstGFX12; -def F32_BF16_WMMA_w32 : VOP3PWMMA_Profile<[v8f32, v8i16, v8i16, v8f32], 0, 0, 0, 0>; -def F16_F16_WMMA_w32 : VOP3PWMMA_Profile<[v8f16, v8f16, v8f16, v8f16], 0, 0, 0, 0>; -def BF16_BF16_WMMA_w32 : VOP3PWMMA_Profile<[v8i16, v8i16, v8i16, v8i16], 0, 0, 0, 0>; -def I32_IU8_WMMA_w32 : VOP3PWMMA_Profile<[v8i32, v2i32, v2i32, v8i32], 0, 0, 1, 0>; // 8xi8 -def I32_IU4X16_WMMA_w32 : VOP3PWMMA_Profile<[v8i32, i32, i32, v8i32], 0, 0, 1, 0>; // 8xi4 -def F32_FP8BF8_WMMA_w32 : VOP3PWMMA_Profile<[v8f32, v2i32, v2i32, v8f32], 0, 0, 0, 1>; // 8xf8 -def I32_IU4X32_WMMA_w32 : VOP3PWMMA_Profile<[v8i32, v2i32, v2i32, v8i32], 0, 0, 1, 0>; // 16xi4 - -def F32_F16_WMMA_w64 : VOP3PWMMA_Profile<[v4f32, v4f16, v4f16, v4f32], 0, 0, 0, 0>; -def F32_BF16_WMMA_w64 : VOP3PWMMA_Profile<[v4f32, v4i16, v4i16, v4f32], 0, 0, 0, 0>; -def F16_F16_WMMA_w64 : VOP3PWMMA_Profile<[v4f16, v4f16, v4f16, v4f16], 0, 0, 0, 0>; -def BF16_BF16_WMMA_w64 : VOP3PWMMA_Profile<[v4i16, v4i16, v4i16, v4i16], 0, 0, 0, 0>; -def I32_IU8_WMMA_w64 : VOP3PWMMA_Profile<[v4i32, i32, i32, v4i32], 0, 0, 1, 0>; // 4xi8 -def I32_IU4X16_WMMA_w64 : VOP3PWMMA_Profile<[v4i32, i32, i32, v4i32], 0, 0, 1, 0>; // 8xi4 * -def F32_FP8BF8_WMMA_w64 : VOP3PWMMA_Profile<[v4f32, i32, i32, v4f32], 0, 0, 0, 1>; // 4xf8 -def I32_IU4X32_WMMA_w64 : VOP3PWMMA_Profile<[v4i32, i32, i32, v4i32], 0, 0, 1, 0>; // 8xi4 - -def F32_F16_SWMMAC_w32 : VOP3PWMMA_Profile<[v8f32, v8f16, v16f16, v8f32], 1, 16, 0, 0>; -def F32_BF16_SWMMAC_w32 : VOP3PWMMA_Profile<[v8f32, v8i16, v16i16, v8f32], 1, 16, 0, 0>; -def F16_F16_SWMMAC_w32 : VOP3PWMMA_Profile<[v8f16, v8f16, v16f16, v8f16], 1, 16, 0, 0>; -def BF16_BF16_SWMMAC_w32 : VOP3PWMMA_Profile<[v8i16, v8i16, v16i16, v8i16], 1, 16, 0, 0>; 
-def I32_IU8_SWMMAC_w32 : VOP3PWMMA_Profile<[v8i32, v2i32, v4i32, v8i32], 1, 16, 1, 0>; // 8xi8, 16xi8 -def I32_IU4X32_SWMMAC_w32 : VOP3PWMMA_Profile<[v8i32, i32, v2i32, v8i32], 1, 16, 1, 0>; // 8xi4, 16xi4 -def I32_IU4X64_SWMMAC_w32 : VOP3PWMMA_Profile<[v8i32, v2i32, v4i32, v8i32], 1, 0, 1, 0>; // 16xi4, 32xi4 ** -def F32_FP8BF8_SWMMAC_w32 : VOP3PWMMA_Profile<[v8f32, v2i32, v4i32, v8f32], 1, 16, 0, 1>; // 8xf8, 16xf8 - -def F32_F16_SWMMAC_w64 : VOP3PWMMA_Profile<[v4f32, v4f16, v8f16, v4f32], 1, 8, 0, 0>; -def F32_BF16_SWMMAC_w64 : VOP3PWMMA_Profile<[v4f32, v4i16, v8i16, v4f32], 1, 8, 0, 0>; -def F16_F16_SWMMAC_w64 : VOP3PWMMA_Profile<[v4f16, v4f16, v8f16, v4f16], 1, 8, 0, 0>; -def BF16_BF16_SWMMAC_w64 : VOP3PWMMA_Profile<[v4i16, v4i16, v8i16, v4i16], 1, 8, 0, 0>; -def I32_IU8_SWMMAC_w64 : VOP3PWMMA_Profile<[v4i32, i32, v2i32, v4i32], 1, 8, 1, 0>; // 4xi8, 8xi8 -def I32_IU4X32_SWMMAC_w64 : VOP3PWMMA_Profile<[v4i32, i32, i32, v4i32], 1, 16, 1, 0>; // 8xi4, 8xi4 *** -def I32_IU4X64_SWMMAC_w64 : VOP3PWMMA_Profile<[v4i32, i32, v2i32, v4i32], 1, 16, 1, 0>; // 8xi4, 16xi4 -def F32_FP8BF8_SWMMAC_w64 : VOP3PWMMA_Profile<[v4f32, i32, v2i32, v4f32], 1, 8, 0, 1>; // 4xf8, 8xf8 +def F32_F16_WMMA_w32 : VOP3PWMMA_Profile<[ v8f32, v8f16, v8f16, v8f32], 0, 0, 0, 0>; +def F32_BF16_WMMA_w32 : VOP3PWMMA_Profile<[ v8f32, v8bf16, v8bf16, v8f32], 0, 0, 0, 0>; +def F16_F16_WMMA_w32 : VOP3PWMMA_Profile<[ v8f16, v8f16, v8f16, v8f16], 0, 0, 0, 0>; +def BF16_BF16_WMMA_w32 : VOP3PWMMA_Profile<[v8bf16, v8bf16, v8bf16, v8bf16], 0, 0, 0, 0>; +def I32_IU8_WMMA_w32 : VOP3PWMMA_Profile<[ v8i32, v2i32, v2i32, v8i32], 0, 0, 1, 0>; // 8xi8 +def I32_IU4X16_WMMA_w32 : VOP3PWMMA_Profile<[ v8i32, i32, i32, v8i32], 0, 0, 1, 0>; // 8xi4 +def F32_FP8BF8_WMMA_w32 : VOP3PWMMA_Profile<[ v8f32, v2i32, v2i32, v8f32], 0, 0, 0, 1>; // 8xf8 +def I32_IU4X32_WMMA_w32 : VOP3PWMMA_Profile<[ v8i32, v2i32, v2i32, v8i32], 0, 0, 1, 0>; // 16xi4 + +def F32_F16_WMMA_w64 : VOP3PWMMA_Profile<[ v4f32, v4f16, v4f16, v4f32], 0, 0, 0, 0>; +def F32_BF16_WMMA_w64 : VOP3PWMMA_Profile<[ v4f32, v4bf16, v4bf16, v4f32], 0, 0, 0, 0>; +def F16_F16_WMMA_w64 : VOP3PWMMA_Profile<[ v4f16, v4f16, v4f16, v4f16], 0, 0, 0, 0>; +def BF16_BF16_WMMA_w64 : VOP3PWMMA_Profile<[v4bf16, v4bf16, v4bf16, v4bf16], 0, 0, 0, 0>; +def I32_IU8_WMMA_w64 : VOP3PWMMA_Profile<[ v4i32, i32, i32, v4i32], 0, 0, 1, 0>; // 4xi8 +def I32_IU4X16_WMMA_w64 : VOP3PWMMA_Profile<[ v4i32, i32, i32, v4i32], 0, 0, 1, 0>; // 8xi4 * +def F32_FP8BF8_WMMA_w64 : VOP3PWMMA_Profile<[ v4f32, i32, i32, v4f32], 0, 0, 0, 1>; // 4xf8 +def I32_IU4X32_WMMA_w64 : VOP3PWMMA_Profile<[ v4i32, i32, i32, v4i32], 0, 0, 1, 0>; // 8xi4 + +def F32_F16_SWMMAC_w32 : VOP3PWMMA_Profile<[ v8f32, v8f16, v16f16, v8f32], 1, 16, 0, 0>; +def F32_BF16_SWMMAC_w32 : VOP3PWMMA_Profile<[ v8f32, v8bf16, v16bf16, v8f32], 1, 16, 0, 0>; +def F16_F16_SWMMAC_w32 : VOP3PWMMA_Profile<[ v8f16, v8f16, v16f16, v8f16], 1, 16, 0, 0>; +def BF16_BF16_SWMMAC_w32 : VOP3PWMMA_Profile<[v8bf16, v8bf16, v16bf16, v8bf16], 1, 16, 0, 0>; +def I32_IU8_SWMMAC_w32 : VOP3PWMMA_Profile<[ v8i32, v2i32, v4i32, v8i32], 1, 16, 1, 0>; // 8xi8, 16xi8 +def I32_IU4X32_SWMMAC_w32 : VOP3PWMMA_Profile<[ v8i32, i32, v2i32, v8i32], 1, 16, 1, 0>; // 8xi4, 16xi4 +def I32_IU4X64_SWMMAC_w32 : VOP3PWMMA_Profile<[ v8i32, v2i32, v4i32, v8i32], 1, 0, 1, 0>; // 16xi4, 32xi4 ** +def F32_FP8BF8_SWMMAC_w32 : VOP3PWMMA_Profile<[ v8f32, v2i32, v4i32, v8f32], 1, 16, 0, 1>; // 8xf8, 16xf8 + +def F32_F16_SWMMAC_w64 : VOP3PWMMA_Profile<[ v4f32, v4f16, v8f16, v4f32], 1, 8, 0, 0>; +def F32_BF16_SWMMAC_w64 
: VOP3PWMMA_Profile<[ v4f32, v4bf16, v8bf16, v4f32], 1, 8, 0, 0>; +def F16_F16_SWMMAC_w64 : VOP3PWMMA_Profile<[ v4f16, v4f16, v8f16, v4f16], 1, 8, 0, 0>; +def BF16_BF16_SWMMAC_w64 : VOP3PWMMA_Profile<[v4bf16, v4bf16, v8bf16, v4bf16], 1, 8, 0, 0>; +def I32_IU8_SWMMAC_w64 : VOP3PWMMA_Profile<[ v4i32, i32, v2i32, v4i32], 1, 8, 1, 0>; // 4xi8, 8xi8 +def I32_IU4X32_SWMMAC_w64 : VOP3PWMMA_Profile<[ v4i32, i32, i32, v4i32], 1, 16, 1, 0>; // 8xi4, 8xi4 *** +def I32_IU4X64_SWMMAC_w64 : VOP3PWMMA_Profile<[ v4i32, i32, v2i32, v4i32], 1, 16, 1, 0>; // 8xi4, 16xi4 +def F32_FP8BF8_SWMMAC_w64 : VOP3PWMMA_Profile<[ v4f32, i32, v2i32, v4f32], 1, 8, 0, 1>; // 4xf8, 8xf8 // * IU4X16_WMMA_w64 lanes 0-31 will have 8xi4, remaining lanes are ignored // ** IU4X64_SWMMAC_w32 index is i32, index_key is not used diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w32-f16-f32-matrix-modifiers.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w32-f16-f32-matrix-modifiers.ll index ae5868edcc49bf..c91e2f099805e5 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w32-f16-f32-matrix-modifiers.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w32-f16-f32-matrix-modifiers.ll @@ -69,9 +69,42 @@ bb: ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_negC(<8 x i16> %A, <8 x i16> %B, <8 x float> %C, <8 x float> addrspace(1)* %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_negC(<8 x bfloat> %A, <8 x bfloat> %B, <8 x float> %C, <8 x float> addrspace(1)* %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_negC: ; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_lshrrev_b32_e32 v18, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v19, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v20, 16, v2 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; GFX12-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX12-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v21, 16, v3 +; GFX12-NEXT: v_or_b32_e32 v0, v18, v0 +; GFX12-NEXT: v_or_b32_e32 v1, v19, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v19, 16, v4 +; GFX12-NEXT: v_or_b32_e32 v2, v20, v2 +; GFX12-NEXT: v_lshlrev_b32_e32 v18, 16, v21 +; GFX12-NEXT: v_lshrrev_b32_e32 v20, 16, v5 +; GFX12-NEXT: v_lshrrev_b32_e32 v21, 16, v6 +; GFX12-NEXT: v_lshrrev_b32_e32 v22, 16, v7 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX12-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX12-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX12-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; GFX12-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX12-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; GFX12-NEXT: v_or_b32_e32 v3, v18, v3 +; GFX12-NEXT: v_or_b32_e32 v4, v19, v4 +; GFX12-NEXT: v_or_b32_e32 v5, v20, v5 +; GFX12-NEXT: v_or_b32_e32 v6, v21, v6 +; GFX12-NEXT: v_or_b32_e32 v7, v22, v7 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[8:15], v[0:3], v[4:7], v[8:15] neg_lo:[0,0,1] ; GFX12-NEXT: s_clause 0x1 ; GFX12-NEXT: global_store_b128 v[16:17], v[8:11], off @@ -81,14 +114,47 @@ define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_negC(<8 x i16> %A, <8 x i16> ; GFX12-NEXT: s_endpgm bb: %fneg.C = fneg <8 x float> %C - %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8i16.v8f32(<8 x i16> %A, <8 x i16> %B, <8 x float> %fneg.C) + %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8bf16.v8f32(<8 x bfloat> 
%A, <8 x bfloat> %B, <8 x float> %fneg.C) store <8 x float> %res, <8 x float> addrspace(1)* %out ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_absC(<8 x i16> %A, <8 x i16> %B, <8 x float> %C, <8 x float> addrspace(1)* %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_absC(<8 x bfloat> %A, <8 x bfloat> %B, <8 x float> %C, <8 x float> addrspace(1)* %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_absC: ; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_lshrrev_b32_e32 v18, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v19, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v20, 16, v2 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; GFX12-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX12-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v21, 16, v3 +; GFX12-NEXT: v_or_b32_e32 v0, v18, v0 +; GFX12-NEXT: v_or_b32_e32 v1, v19, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v19, 16, v4 +; GFX12-NEXT: v_or_b32_e32 v2, v20, v2 +; GFX12-NEXT: v_lshlrev_b32_e32 v18, 16, v21 +; GFX12-NEXT: v_lshrrev_b32_e32 v20, 16, v5 +; GFX12-NEXT: v_lshrrev_b32_e32 v21, 16, v6 +; GFX12-NEXT: v_lshrrev_b32_e32 v22, 16, v7 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX12-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX12-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX12-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; GFX12-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX12-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; GFX12-NEXT: v_or_b32_e32 v3, v18, v3 +; GFX12-NEXT: v_or_b32_e32 v4, v19, v4 +; GFX12-NEXT: v_or_b32_e32 v5, v20, v5 +; GFX12-NEXT: v_or_b32_e32 v6, v21, v6 +; GFX12-NEXT: v_or_b32_e32 v7, v22, v7 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[8:15], v[0:3], v[4:7], v[8:15] neg_hi:[0,0,1] ; GFX12-NEXT: s_clause 0x1 ; GFX12-NEXT: global_store_b128 v[16:17], v[8:11], off @@ -98,7 +164,7 @@ define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_absC(<8 x i16> %A, <8 x i16> ; GFX12-NEXT: s_endpgm bb: %fabs.C = call <8 x float> @llvm.fabs.v8f32(<8 x float> %C) - %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8i16.v8f32(<8 x i16> %A, <8 x i16> %B, <8 x float> %fabs.C) + %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8bf16.v8f32(<8 x bfloat> %A, <8 x bfloat> %B, <8 x float> %fabs.C) store <8 x float> %res, <8 x float> addrspace(1)* %out ret void } @@ -494,7 +560,7 @@ declare <8 x float> @llvm.fabs.v8f32(<8 x float>) declare float @llvm.fabs.f32(float) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v8f16.v8f32(<8 x half>, <8 x half>, <8 x float>) -declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8i16.v8f32(<8 x i16>, <8 x i16>, <8 x float>) +declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8bf16.v8f32(<8 x bfloat>, <8 x bfloat>, <8 x float>) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<8 x half>, <8 x half>, <8 x half>, i1 immarg) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.v2i32.v8f32(<2 x i32>, <2 x i32>, <8 x float>) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.bf8.v2i32.v8f32(<2 x i32>, <2 x i32>, <8 x float>) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w32-imm.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w32-imm.ll index 6251dfdc392ebc..34fbf82ed562cd 100644 --- 
a/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w32-imm.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w32-imm.ll @@ -47,48 +47,114 @@ bb: ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm(<8 x i16> %A, <8 x i16> %B, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm(<8 x bfloat> %A, <8 x bfloat> %B, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_imm: ; GFX12: ; %bb.0: ; %bb -; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[10:17], v[0:3], v[4:7], 1.0 +; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v2 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX12-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX12-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v3 +; GFX12-NEXT: v_or_b32_e32 v10, v10, v0 +; GFX12-NEXT: v_or_b32_e32 v11, v11, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v4 +; GFX12-NEXT: v_or_b32_e32 v12, v12, v2 +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 16, v13 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v3 +; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v5 +; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v6 +; GFX12-NEXT: v_lshrrev_b32_e32 v14, 16, v7 +; GFX12-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX12-NEXT: v_lshlrev_b32_e32 v16, 16, v13 +; GFX12-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; GFX12-NEXT: v_lshlrev_b32_e32 v17, 16, v14 +; GFX12-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; GFX12-NEXT: v_or_b32_e32 v13, v0, v2 +; GFX12-NEXT: v_or_b32_e32 v14, v1, v4 +; GFX12-NEXT: v_or_b32_e32 v15, v3, v5 +; GFX12-NEXT: v_or_b32_e32 v16, v16, v6 +; GFX12-NEXT: v_or_b32_e32 v17, v17, v7 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[0:7], v[10:13], v[14:17], 1.0 ; GFX12-NEXT: s_clause 0x1 -; GFX12-NEXT: global_store_b128 v[8:9], v[10:13], off -; GFX12-NEXT: global_store_b128 v[8:9], v[14:17], off offset:16 +; GFX12-NEXT: global_store_b128 v[8:9], v[0:3], off +; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off offset:16 ; GFX12-NEXT: s_nop 0 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8i16.v8i16.v8f32(<8 x i16> %A, <8 x i16> %B, <8 x float> ) + %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8bf16.v8bf16.v8f32(<8 x bfloat> %A, <8 x bfloat> %B, <8 x float> ) store <8 x float> %res, ptr addrspace(1) %out ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm_non_inlineable(<8 x i16> %A, <8 x i16> %B, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm_non_inlineable(<8 x bfloat> %A, <8 x bfloat> %B, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_imm_non_inlineable: ; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v0 ; GFX12-NEXT: s_mov_b32 s0, 0x40400000 -; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX12-NEXT: s_mov_b32 s7, s0 +; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v2 ; GFX12-NEXT: s_mov_b32 s1, s0 ; GFX12-NEXT: s_mov_b32 s2, s0 ; GFX12-NEXT: s_mov_b32 s3, s0 ; GFX12-NEXT: s_mov_b32 s4, s0 ; GFX12-NEXT: s_mov_b32 s5, s0 ; GFX12-NEXT: s_mov_b32 s6, s0 -; GFX12-NEXT: s_delay_alu 
instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX12-NEXT: v_dual_mov_b32 v17, s7 :: v_dual_mov_b32 v16, s6 -; GFX12-NEXT: v_dual_mov_b32 v15, s5 :: v_dual_mov_b32 v14, s4 -; GFX12-NEXT: v_dual_mov_b32 v13, s3 :: v_dual_mov_b32 v12, s2 -; GFX12-NEXT: v_dual_mov_b32 v11, s1 :: v_dual_mov_b32 v10, s0 -; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[10:17], v[0:3], v[4:7], v[10:17] +; GFX12-NEXT: s_mov_b32 s7, s0 +; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v3 +; GFX12-NEXT: v_or_b32_e32 v10, v10, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v0, 16, v4 +; GFX12-NEXT: v_or_b32_e32 v11, v11, v1 +; GFX12-NEXT: v_or_b32_e32 v12, v12, v2 +; GFX12-NEXT: v_lshlrev_b32_e32 v1, 16, v13 +; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v5 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v6 +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX12-NEXT: v_lshrrev_b32_e32 v17, 16, v7 +; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX12-NEXT: v_lshlrev_b32_e32 v16, 16, v13 +; GFX12-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; GFX12-NEXT: v_or_b32_e32 v13, v1, v3 +; GFX12-NEXT: v_or_b32_e32 v14, v0, v4 +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 16, v17 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v7 +; GFX12-NEXT: v_or_b32_e32 v15, v2, v5 +; GFX12-NEXT: v_or_b32_e32 v16, v16, v6 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) +; GFX12-NEXT: v_or_b32_e32 v17, v0, v1 +; GFX12-NEXT: v_mov_b32_e32 v0, s0 +; GFX12-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v1, s1 +; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3 +; GFX12-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5 +; GFX12-NEXT: v_mov_b32_e32 v7, s7 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[0:7], v[10:13], v[14:17], v[0:7] ; GFX12-NEXT: s_clause 0x1 -; GFX12-NEXT: global_store_b128 v[8:9], v[10:13], off -; GFX12-NEXT: global_store_b128 v[8:9], v[14:17], off offset:16 +; GFX12-NEXT: global_store_b128 v[8:9], v[0:3], off +; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off offset:16 ; GFX12-NEXT: s_nop 0 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8i16.v8i16.v8f32(<8 x i16> %A, <8 x i16> %B, <8 x float> ) + %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8bf16.v8bf16.v8f32(<8 x bfloat> %A, <8 x bfloat> %B, <8 x float> ) store <8 x float> %res, ptr addrspace(1) %out ret void } @@ -129,16 +195,48 @@ bb: ret void } -define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm(<8 x i16> %A, <8 x i16> %B, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm(<8 x bfloat> %A, <8 x bfloat> %B, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_bf16_16x16x16_bf16_imm: ; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v0 ; GFX12-NEXT: s_mov_b32 s0, 0x3f803f80 -; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) -; GFX12-NEXT: s_mov_b32 s3, s0 +; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v1 ; GFX12-NEXT: s_mov_b32 s1, s0 ; GFX12-NEXT: s_mov_b32 s2, s0 -; GFX12-NEXT: v_dual_mov_b32 v13, s3 :: v_dual_mov_b32 v12, s2 -; GFX12-NEXT: v_dual_mov_b32 v11, s1 :: 
v_dual_mov_b32 v10, s0 +; GFX12-NEXT: s_mov_b32 s3, s0 +; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v3 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_or_b32_e32 v0, v10, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v11 +; GFX12-NEXT: v_lshlrev_b32_e32 v11, 16, v12 +; GFX12-NEXT: v_lshlrev_b32_e32 v12, 16, v13 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v4 +; GFX12-NEXT: v_or_b32_e32 v1, v10, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v5 +; GFX12-NEXT: v_or_b32_e32 v2, v11, v2 +; GFX12-NEXT: v_or_b32_e32 v3, v12, v3 +; GFX12-NEXT: v_lshlrev_b32_e32 v11, 16, v13 +; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v7 +; GFX12-NEXT: v_or_b32_e32 v4, v11, v4 +; GFX12-NEXT: v_lshlrev_b32_e32 v11, 16, v12 +; GFX12-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; GFX12-NEXT: v_or_b32_e32 v5, v10, v5 +; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v13 +; GFX12-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX12-NEXT: v_or_b32_e32 v6, v11, v6 +; GFX12-NEXT: v_or_b32_e32 v7, v10, v7 +; GFX12-NEXT: v_dual_mov_b32 v13, s3 :: v_dual_mov_b32 v10, s0 +; GFX12-NEXT: v_dual_mov_b32 v12, s2 :: v_dual_mov_b32 v11, s1 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-NEXT: v_wmma_bf16_16x16x16_bf16 v[10:13], v[0:3], v[4:7], v[10:13] ; GFX12-NEXT: global_store_b128 v[8:9], v[10:13], off @@ -146,21 +244,53 @@ define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm(<8 x i16> %A, <8 x i16> ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8i16.v8i16.v8i16.v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> , i1 0) - store <8 x i16> %res, ptr addrspace(1) %out + %res = call <8 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8bf16.v8bf16.v8bf16.v8bf16(<8 x bfloat> %A, <8 x bfloat> %B, <8 x bfloat> , i1 0) + store <8 x bfloat> %res, ptr addrspace(1) %out ret void } -define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm_non_inlineable(<8 x i16> %A, <8 x i16> %B, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm_non_inlineable(<8 x bfloat> %A, <8 x bfloat> %B, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_bf16_16x16x16_bf16_imm_non_inlineable: ; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v0 ; GFX12-NEXT: s_mov_b32 s0, 0x3fc03fc0 -; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1) -; GFX12-NEXT: s_mov_b32 s3, s0 +; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v1 ; GFX12-NEXT: s_mov_b32 s1, s0 ; GFX12-NEXT: s_mov_b32 s2, s0 -; GFX12-NEXT: v_dual_mov_b32 v13, s3 :: v_dual_mov_b32 v12, s2 -; GFX12-NEXT: v_dual_mov_b32 v11, s1 :: v_dual_mov_b32 v10, s0 +; GFX12-NEXT: s_mov_b32 s3, s0 +; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v3 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_or_b32_e32 v0, v10, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v11 +; GFX12-NEXT: v_lshlrev_b32_e32 v11, 16, v12 +; GFX12-NEXT: 
v_lshlrev_b32_e32 v12, 16, v13 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v4 +; GFX12-NEXT: v_or_b32_e32 v1, v10, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v5 +; GFX12-NEXT: v_or_b32_e32 v2, v11, v2 +; GFX12-NEXT: v_or_b32_e32 v3, v12, v3 +; GFX12-NEXT: v_lshlrev_b32_e32 v11, 16, v13 +; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v7 +; GFX12-NEXT: v_or_b32_e32 v4, v11, v4 +; GFX12-NEXT: v_lshlrev_b32_e32 v11, 16, v12 +; GFX12-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; GFX12-NEXT: v_or_b32_e32 v5, v10, v5 +; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v13 +; GFX12-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX12-NEXT: v_or_b32_e32 v6, v11, v6 +; GFX12-NEXT: v_or_b32_e32 v7, v10, v7 +; GFX12-NEXT: v_dual_mov_b32 v13, s3 :: v_dual_mov_b32 v10, s0 +; GFX12-NEXT: v_dual_mov_b32 v12, s2 :: v_dual_mov_b32 v11, s1 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-NEXT: v_wmma_bf16_16x16x16_bf16 v[10:13], v[0:3], v[4:7], v[10:13] ; GFX12-NEXT: global_store_b128 v[8:9], v[10:13], off @@ -168,8 +298,8 @@ define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm_non_inlineable(<8 x i16> ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8i16.v8i16.v8i16.v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> , i1 0) - store <8 x i16> %res, ptr addrspace(1) %out + %res = call <8 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8bf16.v8bf16.v8bf16.v8bf16(<8 x bfloat> %A, <8 x bfloat> %B, <8 x bfloat> , i1 0) + store <8 x bfloat> %res, ptr addrspace(1) %out ret void } @@ -496,9 +626,9 @@ bb: } declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v8f32.v8f16.v8f16.v8f32(<8 x half>, <8 x half>, <8 x float>) -declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8i16.v8i16.v8f32(<8 x i16>, <8 x i16>, <8 x float>) +declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8bf16.v8bf16.v8f32(<8 x bfloat>, <8 x bfloat>, <8 x float>) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16.v8f16.v8f16(<8 x half>, <8 x half>, <8 x half>, i1 immarg) -declare <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8i16.v8i16.v8i16.v8i16(<8 x i16>, <8 x i16>, <8 x i16>, i1 immarg) +declare <8 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8bf16.v8bf16.v8bf16.v8bf16(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i1 immarg) declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8.v8i32.v2i32.v2i32.v8i32(i1 immarg, <2 x i32>, i1 immarg, <2 x i32>, <8 x i32>, i1 immarg) declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4.v8i32.i32.i32.v8i32(i1 immarg, i32, i1 immarg, i32, <8 x i32>, i1 immarg) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.v8f32.v2i32.v2i32.v8f32(<2 x i32>, <2 x i32>, <8 x float>) @@ -507,9 +637,9 @@ declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.fp8.v8f32.v2i32.v2i32.v8f declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.bf8.v8f32.v2i32.v2i32.v8f32(<2 x i32>, <2 x i32>, <8 x float>) declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x32.iu4.v8i32.v2i32.v2i32.v8i32(i1 immarg, <2 x i32>, i1 immarg, <2 x i32>, <8 x i32>, i1 immarg) declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.f16.v8f32.v8f16.v16f16.v8f32.i16(<8 x half>, <16 x half>, <8 x float>, i16) -declare <8 x float> 
@llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8i16.v16i16.v8f32.i16(<8 x i16>, <16 x i16>, <8 x float>, i16) +declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8bf16.v16bf16.v8f32.i16(<8 x bfloat>, <16 x bfloat>, <8 x float>, i16) declare <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.v8f16.i16(<8 x half>, <16 x half>, <8 x half>, i16) -declare <8 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8i16.v8i16.v16i16.v8i16.i16(<8 x i16>, <16 x i16>, <8 x i16>, i16) +declare <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8bf16.v8bf16.v16bf16.v8bf16.i16(<8 x bfloat>, <16 x bfloat>, <8 x bfloat>, i16) declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu8.v8i32.v2i32.v4i32.v8i32.i16(i1 immarg, <2 x i32>, i1 immarg, <4 x i32>, <8 x i32>, i16 %Index, i1 immarg) declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu4.v8i32.i32.v2i32.v8i32.i16(i1 immarg, i32, i1 immarg, <2 x i32>, <8 x i32>, i16 %Index, i1 immarg) declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x64.iu4.v8i32.v2i32.v4i32.v8i32.i32(i1 immarg, <2 x i32>, i1 immarg, <4 x i32>, <8 x i32>, i32 %Index, i1 immarg) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w32-swmmac-index_key.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w32-swmmac-index_key.ll index e0e021123ddae5..2dd094f20ac107 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w32-swmmac-index_key.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w32-swmmac-index_key.ll @@ -33,21 +33,69 @@ bb: ret void } -define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16_index_key(<8 x i16> %A, <16 x i16> %B, <8 x float> %C, ptr addrspace(1) %IndexVecPtr, ptr addrspace(1) %out0, ptr addrspace(1) %out1) { +define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16_index_key(<8 x bfloat> %A, <16 x bfloat> %B, <8 x float> %C, ptr addrspace(1) %IndexVecPtr, ptr addrspace(1) %out0, ptr addrspace(1) %out1) { ; GFX12-LABEL: test_swmmac_f32_16x16x32_bf16_index_key: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: global_load_b32 v20, v[20:21], off -; GFX12-NEXT: v_dual_mov_b32 v33, v19 :: v_dual_mov_b32 v32, v18 -; GFX12-NEXT: v_dual_mov_b32 v31, v17 :: v_dual_mov_b32 v30, v16 -; GFX12-NEXT: v_dual_mov_b32 v29, v15 :: v_dual_mov_b32 v28, v14 -; GFX12-NEXT: v_dual_mov_b32 v27, v13 :: v_dual_mov_b32 v26, v12 +; GFX12-NEXT: v_lshrrev_b32_e32 v21, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v26, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v27, 16, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v28, 16, v3 +; GFX12-NEXT: v_lshrrev_b32_e32 v33, 16, v4 +; GFX12-NEXT: v_lshrrev_b32_e32 v34, 16, v5 +; GFX12-NEXT: v_lshrrev_b32_e32 v35, 16, v6 +; GFX12-NEXT: v_lshrrev_b32_e32 v36, 16, v7 +; GFX12-NEXT: v_lshrrev_b32_e32 v37, 16, v8 +; GFX12-NEXT: v_lshrrev_b32_e32 v38, 16, v9 +; GFX12-NEXT: v_lshrrev_b32_e32 v39, 16, v10 +; GFX12-NEXT: v_lshrrev_b32_e32 v40, 16, v11 +; GFX12-NEXT: v_and_b32_e32 v29, 0xffff, v0 +; GFX12-NEXT: v_and_b32_e32 v30, 0xffff, v1 +; GFX12-NEXT: v_and_b32_e32 v31, 0xffff, v2 +; GFX12-NEXT: v_and_b32_e32 v32, 0xffff, v3 +; GFX12-NEXT: v_and_b32_e32 v41, 0xffff, v4 +; GFX12-NEXT: v_and_b32_e32 v42, 0xffff, v5 +; GFX12-NEXT: v_and_b32_e32 v43, 0xffff, v6 +; GFX12-NEXT: v_and_b32_e32 v44, 0xffff, v7 +; GFX12-NEXT: v_and_b32_e32 v45, 0xffff, v8 +; GFX12-NEXT: v_and_b32_e32 v46, 0xffff, v9 +; GFX12-NEXT: v_and_b32_e32 v47, 0xffff, v10 +; GFX12-NEXT: v_and_b32_e32 v48, 0xffff, v11 +; GFX12-NEXT: v_lshlrev_b32_e32 v8, 16, v21 +; GFX12-NEXT: v_lshlrev_b32_e32 v9, 16, v26 +; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v27 +; GFX12-NEXT: v_lshlrev_b32_e32 v11, 16, v28 +; 
GFX12-NEXT: v_lshlrev_b32_e32 v21, 16, v33 +; GFX12-NEXT: v_lshlrev_b32_e32 v27, 16, v34 +; GFX12-NEXT: v_lshlrev_b32_e32 v28, 16, v35 +; GFX12-NEXT: v_lshlrev_b32_e32 v33, 16, v36 +; GFX12-NEXT: v_lshlrev_b32_e32 v34, 16, v37 +; GFX12-NEXT: v_lshlrev_b32_e32 v35, 16, v38 +; GFX12-NEXT: v_lshlrev_b32_e32 v36, 16, v39 +; GFX12-NEXT: v_dual_mov_b32 v0, v12 :: v_dual_lshlrev_b32 v37, 16, v40 +; GFX12-NEXT: v_mov_b32_e32 v7, v19 +; GFX12-NEXT: v_dual_mov_b32 v1, v13 :: v_dual_mov_b32 v2, v14 +; GFX12-NEXT: v_dual_mov_b32 v3, v15 :: v_dual_mov_b32 v4, v16 +; GFX12-NEXT: v_dual_mov_b32 v5, v17 :: v_dual_mov_b32 v6, v18 +; GFX12-NEXT: v_or_b32_e32 v8, v8, v29 +; GFX12-NEXT: v_or_b32_e32 v9, v9, v30 +; GFX12-NEXT: v_or_b32_e32 v10, v10, v31 +; GFX12-NEXT: v_or_b32_e32 v11, v11, v32 +; GFX12-NEXT: v_or_b32_e32 v26, v21, v41 +; GFX12-NEXT: v_or_b32_e32 v27, v27, v42 +; GFX12-NEXT: v_or_b32_e32 v28, v28, v43 +; GFX12-NEXT: v_or_b32_e32 v29, v33, v44 +; GFX12-NEXT: v_or_b32_e32 v30, v34, v45 +; GFX12-NEXT: v_or_b32_e32 v31, v35, v46 +; GFX12-NEXT: v_or_b32_e32 v32, v36, v47 +; GFX12-NEXT: v_or_b32_e32 v33, v37, v48 ; GFX12-NEXT: s_waitcnt vmcnt(0) ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[26:33], v[0:3], v[4:11], v20 -; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[12:19], v[0:3], v[4:11], v20 index_key:1 +; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[0:7], v[8:11], v[26:33], v20 +; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[12:19], v[8:11], v[26:33], v20 index_key:1 ; GFX12-NEXT: s_clause 0x1 -; GFX12-NEXT: global_store_b128 v[22:23], v[26:29], off -; GFX12-NEXT: global_store_b128 v[22:23], v[30:33], off offset:16 +; GFX12-NEXT: global_store_b128 v[22:23], v[0:3], off +; GFX12-NEXT: global_store_b128 v[22:23], v[4:7], off offset:16 ; GFX12-NEXT: s_clause 0x1 ; GFX12-NEXT: global_store_b128 v[24:25], v[12:15], off ; GFX12-NEXT: global_store_b128 v[24:25], v[16:19], off offset:16 @@ -57,10 +105,10 @@ define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16_index_key(<8 x i16> %A, <16 bb: %IndexVec = load <2 x i16>, ptr addrspace(1) %IndexVecPtr, align 4 %Index0 = extractelement <2 x i16> %IndexVec, i32 0 - %res0 = call <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8i16.v16i16.v8f32.i16(<8 x i16> %A, <16 x i16> %B, <8 x float> %C, i16 %Index0) + %res0 = call <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8bf16.v16bf16.v8f32.i16(<8 x bfloat> %A, <16 x bfloat> %B, <8 x float> %C, i16 %Index0) store <8 x float> %res0, ptr addrspace(1) %out0 %Index1 = extractelement <2 x i16> %IndexVec, i32 1 - %res1 = call <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8i16.v16i16.v8f32.i16(<8 x i16> %A, <16 x i16> %B, <8 x float> %C, i16 %Index1) + %res1 = call <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8bf16.v16bf16.v8f32.i16(<8 x bfloat> %A, <16 x bfloat> %B, <8 x float> %C, i16 %Index1) store <8 x float> %res1, ptr addrspace(1) %out1 ret void } @@ -91,16 +139,80 @@ bb: ret void } -define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16_index_key(<8 x i16> %A, <16 x i16> %B, <8 x i16> %C, ptr addrspace(1) %IndexVecPtr, ptr addrspace(1) %out0, ptr addrspace(1) %out1) { +define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16_index_key(<8 x bfloat> %A, <16 x bfloat> %B, <8 x bfloat> %C, ptr addrspace(1) %IndexVecPtr, ptr addrspace(1) %out0, ptr addrspace(1) %out1) { ; GFX12-LABEL: test_swmmac_bf16_16x16x32_bf16_index_key: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: global_load_b32 v16, v[16:17], off -; GFX12-NEXT: v_dual_mov_b32 v25, v15 :: 
v_dual_mov_b32 v24, v14 -; GFX12-NEXT: v_dual_mov_b32 v23, v13 :: v_dual_mov_b32 v22, v12 +; GFX12-NEXT: v_lshrrev_b32_e32 v29, 16, v8 +; GFX12-NEXT: v_lshrrev_b32_e32 v30, 16, v9 +; GFX12-NEXT: v_lshrrev_b32_e32 v31, 16, v10 +; GFX12-NEXT: v_lshrrev_b32_e32 v32, 16, v11 +; GFX12-NEXT: v_and_b32_e32 v33, 0xffff, v8 +; GFX12-NEXT: v_and_b32_e32 v34, 0xffff, v9 +; GFX12-NEXT: v_and_b32_e32 v35, 0xffff, v10 +; GFX12-NEXT: v_and_b32_e32 v36, 0xffff, v11 +; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v12 +; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v13 +; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v14 +; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v15 +; GFX12-NEXT: v_lshrrev_b32_e32 v17, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v22, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v23, 16, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v24, 16, v3 +; GFX12-NEXT: v_lshrrev_b32_e32 v25, 16, v4 +; GFX12-NEXT: v_and_b32_e32 v12, 0xffff, v12 +; GFX12-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; GFX12-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; GFX12-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; GFX12-NEXT: v_lshlrev_b32_e32 v37, 16, v8 +; GFX12-NEXT: v_lshlrev_b32_e32 v38, 16, v9 +; GFX12-NEXT: v_lshlrev_b32_e32 v39, 16, v10 +; GFX12-NEXT: v_lshlrev_b32_e32 v40, 16, v11 +; GFX12-NEXT: v_lshrrev_b32_e32 v26, 16, v5 +; GFX12-NEXT: v_lshrrev_b32_e32 v27, 16, v6 +; GFX12-NEXT: v_lshrrev_b32_e32 v28, 16, v7 +; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX12-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX12-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX12-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX12-NEXT: v_lshlrev_b32_e32 v24, 16, v24 +; GFX12-NEXT: v_lshlrev_b32_e32 v25, 16, v25 +; GFX12-NEXT: v_or_b32_e32 v12, v37, v12 +; GFX12-NEXT: v_or_b32_e32 v13, v38, v13 +; GFX12-NEXT: v_or_b32_e32 v14, v39, v14 +; GFX12-NEXT: v_or_b32_e32 v15, v40, v15 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX12-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; GFX12-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; GFX12-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; GFX12-NEXT: v_lshlrev_b32_e32 v27, 16, v27 +; GFX12-NEXT: v_lshlrev_b32_e32 v28, 16, v28 +; GFX12-NEXT: v_lshlrev_b32_e32 v29, 16, v29 +; GFX12-NEXT: v_lshlrev_b32_e32 v30, 16, v30 +; GFX12-NEXT: v_lshlrev_b32_e32 v31, 16, v31 +; GFX12-NEXT: v_lshlrev_b32_e32 v32, 16, v32 +; GFX12-NEXT: v_or_b32_e32 v8, v17, v0 +; GFX12-NEXT: v_or_b32_e32 v9, v22, v1 +; GFX12-NEXT: v_or_b32_e32 v10, v23, v2 +; GFX12-NEXT: v_or_b32_e32 v11, v24, v3 +; GFX12-NEXT: v_or_b32_e32 v0, v25, v4 +; GFX12-NEXT: v_dual_mov_b32 v25, v15 :: v_dual_mov_b32 v22, v12 +; GFX12-NEXT: v_or_b32_e32 v1, v26, v5 +; GFX12-NEXT: v_or_b32_e32 v2, v27, v6 +; GFX12-NEXT: v_or_b32_e32 v3, v28, v7 +; GFX12-NEXT: v_or_b32_e32 v4, v29, v33 +; GFX12-NEXT: v_or_b32_e32 v5, v30, v34 +; GFX12-NEXT: v_or_b32_e32 v6, v31, v35 +; GFX12-NEXT: v_or_b32_e32 v7, v32, v36 +; GFX12-NEXT: v_dual_mov_b32 v24, v14 :: v_dual_mov_b32 v23, v13 ; GFX12-NEXT: s_waitcnt vmcnt(0) ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[22:25], v[0:3], v[4:11], v16 -; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[12:15], v[0:3], v[4:11], v16 index_key:1 +; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[22:25], v[8:11], v[0:7], v16 +; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[12:15], v[8:11], v[0:7], v16 index_key:1 ; GFX12-NEXT: global_store_b128 v[18:19], v[22:25], off ; GFX12-NEXT: global_store_b128 v[20:21], 
v[12:15], off ; GFX12-NEXT: s_nop 0 @@ -109,11 +221,11 @@ define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16_index_key(<8 x i16> %A, <1 bb: %IndexVec = load <2 x i16>, ptr addrspace(1) %IndexVecPtr, align 4 %Index0 = extractelement <2 x i16> %IndexVec, i32 0 - %res0 = call <8 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8i16.v8i16.v16i16.v8i16.i16(<8 x i16> %A, <16 x i16> %B, <8 x i16> %C, i16 %Index0) - store <8 x i16> %res0, ptr addrspace(1) %out0 + %res0 = call <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8bf16.v8bf16.v16bf16.v8bf16.i16(<8 x bfloat> %A, <16 x bfloat> %B, <8 x bfloat> %C, i16 %Index0) + store <8 x bfloat> %res0, ptr addrspace(1) %out0 %Index1 = extractelement <2 x i16> %IndexVec, i32 1 - %res1 = call <8 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8i16.v8i16.v16i16.v8i16.i16(<8 x i16> %A, <16 x i16> %B, <8 x i16> %C, i16 %Index1) - store <8 x i16> %res1, ptr addrspace(1) %out1 + %res1 = call <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8bf16.v8bf16.v16bf16.v8bf16.i16(<8 x bfloat> %A, <16 x bfloat> %B, <8 x bfloat> %C, i16 %Index1) + store <8 x bfloat> %res1, ptr addrspace(1) %out1 ret void } @@ -310,9 +422,9 @@ bb: } declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.f16.v8f32.v8f16.v16f16.v8f32.i16(<8 x half>, <16 x half>, <8 x float>, i16) -declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8i16.v16i16.v8f32.i16(<8 x i16>, <16 x i16>, <8 x float>, i16) +declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8bf16.v16bf16.v8f32.i16(<8 x bfloat>, <16 x bfloat>, <8 x float>, i16) declare <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.v8f16.i16(<8 x half>, <16 x half>, <8 x half>, i16) -declare <8 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8i16.v8i16.v16i16.v8i16.i16(<8 x i16>, <16 x i16>, <8 x i16>, i16) +declare <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8bf16.v8bf16.v16bf16.v8bf16.i16(<8 x bfloat>, <16 x bfloat>, <8 x bfloat>, i16) declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu8.v8i32.v2i32.v4i32.v8i32.i16(i1 immarg, <2 x i32>, i1 immarg, <4 x i32>, <8 x i32>, i16 %Index, i1 immarg) declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu4.v8i32.i32.v2i32.v8i32.i16(i1 immarg, i32, i1 immarg, <2 x i32>, <8 x i32>, i16 %Index, i1 immarg) declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.fp8.fp8.v8f32.v2i32.v4i32.v8f32.i16(<2 x i32>, <4 x i32>, <8 x float>, i16) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w32.ll index c4edc5b72b2fbb..7b2c65a7bdcb48 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w32.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w32.ll @@ -17,9 +17,42 @@ bb: ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16(<8 x i16> %A, <8 x i16> %B, <8 x float> %C, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16(<8 x bfloat> %A, <8 x bfloat> %B, <8 x float> %C, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16: ; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_lshrrev_b32_e32 v18, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v19, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v20, 16, v2 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; GFX12-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX12-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v21, 16, v3 +; GFX12-NEXT: v_or_b32_e32 v0, v18, v0 +; GFX12-NEXT: 
v_or_b32_e32 v1, v19, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v19, 16, v4 +; GFX12-NEXT: v_or_b32_e32 v2, v20, v2 +; GFX12-NEXT: v_lshlrev_b32_e32 v18, 16, v21 +; GFX12-NEXT: v_lshrrev_b32_e32 v20, 16, v5 +; GFX12-NEXT: v_lshrrev_b32_e32 v21, 16, v6 +; GFX12-NEXT: v_lshrrev_b32_e32 v22, 16, v7 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX12-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX12-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX12-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; GFX12-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX12-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; GFX12-NEXT: v_or_b32_e32 v3, v18, v3 +; GFX12-NEXT: v_or_b32_e32 v4, v19, v4 +; GFX12-NEXT: v_or_b32_e32 v5, v20, v5 +; GFX12-NEXT: v_or_b32_e32 v6, v21, v6 +; GFX12-NEXT: v_or_b32_e32 v7, v22, v7 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[8:15], v[0:3], v[4:7], v[8:15] ; GFX12-NEXT: s_clause 0x1 ; GFX12-NEXT: global_store_b128 v[16:17], v[8:11], off @@ -28,7 +61,7 @@ define amdgpu_ps void @test_wmma_f32_16x16x16_bf16(<8 x i16> %A, <8 x i16> %B, < ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8i16.v8i16.v8f32(<8 x i16> %A, <8 x i16> %B, <8 x float> %C) + %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8bf16.v8bf16.v8f32(<8 x bfloat> %A, <8 x bfloat> %B, <8 x float> %C) store <8 x float> %res, ptr addrspace(1) %out ret void } @@ -47,17 +80,66 @@ bb: ret void } -define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16(<8 x bfloat> %A, <8 x bfloat> %B, <8 x bfloat> %C, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_bf16_16x16x16_bf16: ; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_lshrrev_b32_e32 v14, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v15, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v16, 16, v2 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; GFX12-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; GFX12-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v17, 16, v3 +; GFX12-NEXT: v_or_b32_e32 v0, v14, v0 +; GFX12-NEXT: v_or_b32_e32 v1, v15, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v14, 16, v4 +; GFX12-NEXT: v_or_b32_e32 v2, v16, v2 +; GFX12-NEXT: v_lshlrev_b32_e32 v15, 16, v17 +; GFX12-NEXT: v_lshrrev_b32_e32 v16, 16, v5 +; GFX12-NEXT: v_lshrrev_b32_e32 v17, 16, v6 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX12-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX12-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX12-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; GFX12-NEXT: v_lshrrev_b32_e32 v18, 16, v7 +; GFX12-NEXT: v_or_b32_e32 v3, v15, v3 +; GFX12-NEXT: v_or_b32_e32 v4, v14, v4 +; GFX12-NEXT: v_or_b32_e32 v5, v16, v5 +; GFX12-NEXT: v_or_b32_e32 v6, v17, v6 +; GFX12-NEXT: v_lshlrev_b32_e32 v14, 16, v18 +; GFX12-NEXT: v_lshrrev_b32_e32 v15, 16, v8 +; GFX12-NEXT: v_lshrrev_b32_e32 v16, 16, v9 +; GFX12-NEXT: v_lshrrev_b32_e32 v17, 16, v10 +; GFX12-NEXT: v_lshrrev_b32_e32 v18, 16, v11 +; GFX12-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; GFX12-NEXT: v_lshlrev_b32_e32 v15, 16, v15 +; GFX12-NEXT: 
v_and_b32_e32 v8, 0xffff, v8 +; GFX12-NEXT: v_lshlrev_b32_e32 v16, 16, v16 +; GFX12-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; GFX12-NEXT: v_lshlrev_b32_e32 v17, 16, v17 +; GFX12-NEXT: v_and_b32_e32 v10, 0xffff, v10 +; GFX12-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; GFX12-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; GFX12-NEXT: v_or_b32_e32 v7, v14, v7 +; GFX12-NEXT: v_or_b32_e32 v8, v15, v8 +; GFX12-NEXT: v_or_b32_e32 v9, v16, v9 +; GFX12-NEXT: v_or_b32_e32 v10, v17, v10 +; GFX12-NEXT: v_or_b32_e32 v11, v18, v11 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-NEXT: v_wmma_bf16_16x16x16_bf16 v[8:11], v[0:3], v[4:7], v[8:11] ; GFX12-NEXT: global_store_b128 v[12:13], v[8:11], off ; GFX12-NEXT: s_nop 0 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8i16.v8i16.v8i16.v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i1 0) - store <8 x i16> %res, ptr addrspace(1) %out + %res = call <8 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8bf16.v8bf16.v8bf16.v8bf16(<8 x bfloat> %A, <8 x bfloat> %B, <8 x bfloat> %C, i1 0) + store <8 x bfloat> %res, ptr addrspace(1) %out ret void } @@ -190,10 +272,59 @@ bb: ret void } -define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16(<8 x i16> %A, <16 x i16> %B, <8 x float> %C, i16 %Index, ptr addrspace(1) %out) { +define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16(<8 x bfloat> %A, <16 x bfloat> %B, <8 x float> %C, i16 %Index, ptr addrspace(1) %out) { ; GFX12-LABEL: test_swmmac_f32_16x16x32_bf16: ; GFX12: ; %bb.0: ; %bb -; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[12:19], v[0:3], v[4:11], v20 +; GFX12-NEXT: v_lshrrev_b32_e32 v23, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v24, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v25, 16, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v26, 16, v3 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX12-NEXT: v_lshlrev_b32_e32 v24, 16, v24 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v25, 16, v25 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_lshlrev_b32_e32 v26, 16, v26 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_or_b32_e32 v23, v23, v0 +; GFX12-NEXT: v_or_b32_e32 v24, v24, v1 +; GFX12-NEXT: v_or_b32_e32 v25, v25, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v0, 16, v4 +; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v5 +; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v6 +; GFX12-NEXT: v_or_b32_e32 v26, v26, v3 +; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v7 +; GFX12-NEXT: v_lshrrev_b32_e32 v27, 16, v8 +; GFX12-NEXT: v_lshrrev_b32_e32 v28, 16, v9 +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX12-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX12-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; GFX12-NEXT: v_lshrrev_b32_e32 v29, 16, v10 +; GFX12-NEXT: v_lshrrev_b32_e32 v30, 16, v11 +; GFX12-NEXT: v_or_b32_e32 v0, v0, v4 +; GFX12-NEXT: v_or_b32_e32 v1, v1, v5 +; GFX12-NEXT: v_or_b32_e32 v2, v2, v6 +; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v7 +; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v27 +; GFX12-NEXT: v_and_b32_e32 v6, 0xffff, v8 +; GFX12-NEXT: v_lshlrev_b32_e32 v7, 16, v28 +; GFX12-NEXT: v_and_b32_e32 v8, 0xffff, v9 +; GFX12-NEXT: v_lshlrev_b32_e32 v9, 16, v29 +; GFX12-NEXT: v_and_b32_e32 v10, 0xffff, v10 +; GFX12-NEXT: v_lshlrev_b32_e32 v27, 16, v30 +; GFX12-NEXT: v_and_b32_e32 v11, 0xffff, v11 +; GFX12-NEXT: v_or_b32_e32 v3, 
v3, v4 +; GFX12-NEXT: v_or_b32_e32 v4, v5, v6 +; GFX12-NEXT: v_or_b32_e32 v5, v7, v8 +; GFX12-NEXT: v_or_b32_e32 v6, v9, v10 +; GFX12-NEXT: v_or_b32_e32 v7, v27, v11 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[12:19], v[23:26], v[0:7], v20 ; GFX12-NEXT: s_clause 0x1 ; GFX12-NEXT: global_store_b128 v[21:22], v[12:15], off ; GFX12-NEXT: global_store_b128 v[21:22], v[16:19], off offset:16 @@ -201,7 +332,7 @@ define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16(<8 x i16> %A, <16 x i16> %B ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8i16.v16i16.v8f32.i16(<8 x i16> %A, <16 x i16> %B, <8 x float> %C, i16 %Index) + %res = call <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8bf16.v16bf16.v8f32.i16(<8 x bfloat> %A, <16 x bfloat> %B, <8 x float> %C, i16 %Index) store <8 x float> %res, ptr addrspace(1) %out ret void } @@ -220,17 +351,82 @@ bb: ret void } -define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16(<8 x i16> %A, <16 x i16> %B, <8 x i16> %C, i16 %Index, ptr addrspace(1) %out) { +define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16(<8 x bfloat> %A, <16 x bfloat> %B, <8 x bfloat> %C, i16 %Index, ptr addrspace(1) %out) { ; GFX12-LABEL: test_swmmac_bf16_16x16x32_bf16: ; GFX12: ; %bb.0: ; %bb -; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[12:15], v[0:3], v[4:11], v16 -; GFX12-NEXT: global_store_b128 v[17:18], v[12:15], off +; GFX12-NEXT: v_lshrrev_b32_e32 v19, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v20, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v21, 16, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v22, 16, v3 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX12-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_lshlrev_b32_e32 v22, 16, v22 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_or_b32_e32 v19, v19, v0 +; GFX12-NEXT: v_or_b32_e32 v20, v20, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v0, 16, v4 +; GFX12-NEXT: v_lshrrev_b32_e32 v1, 16, v5 +; GFX12-NEXT: v_or_b32_e32 v21, v21, v2 +; GFX12-NEXT: v_or_b32_e32 v22, v22, v3 +; GFX12-NEXT: v_lshrrev_b32_e32 v2, 16, v6 +; GFX12-NEXT: v_lshrrev_b32_e32 v3, 16, v7 +; GFX12-NEXT: v_lshrrev_b32_e32 v23, 16, v8 +; GFX12-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX12-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX12-NEXT: v_lshrrev_b32_e32 v24, 16, v9 +; GFX12-NEXT: v_lshrrev_b32_e32 v25, 16, v10 +; GFX12-NEXT: v_or_b32_e32 v0, v0, v4 +; GFX12-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX12-NEXT: v_or_b32_e32 v1, v1, v5 +; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v6 +; GFX12-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v7 +; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v23 +; GFX12-NEXT: v_and_b32_e32 v7, 0xffff, v8 +; GFX12-NEXT: v_lshlrev_b32_e32 v8, 16, v24 +; GFX12-NEXT: v_and_b32_e32 v9, 0xffff, v9 +; GFX12-NEXT: v_lshlrev_b32_e32 v23, 16, v25 +; GFX12-NEXT: v_and_b32_e32 v10, 0xffff, v10 +; GFX12-NEXT: v_lshrrev_b32_e32 v26, 16, v11 +; GFX12-NEXT: v_or_b32_e32 v2, v2, v4 +; GFX12-NEXT: v_or_b32_e32 v3, v3, v5 +; GFX12-NEXT: v_or_b32_e32 v4, v6, v7 +; GFX12-NEXT: v_or_b32_e32 v5, v8, v9 +; GFX12-NEXT: v_or_b32_e32 v6, v23, v10 +; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v12 +; GFX12-NEXT: v_and_b32_e32 v9, 0xffff, v11 +; 
GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v13 +; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v14 +; GFX12-NEXT: v_lshrrev_b32_e32 v23, 16, v15 +; GFX12-NEXT: v_lshlrev_b32_e32 v7, 16, v26 +; GFX12-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX12-NEXT: v_and_b32_e32 v12, 0xffff, v12 +; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX12-NEXT: v_and_b32_e32 v13, 0xffff, v13 +; GFX12-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX12-NEXT: v_and_b32_e32 v14, 0xffff, v14 +; GFX12-NEXT: v_lshlrev_b32_e32 v23, 16, v23 +; GFX12-NEXT: v_and_b32_e32 v15, 0xffff, v15 +; GFX12-NEXT: v_or_b32_e32 v7, v7, v9 +; GFX12-NEXT: v_or_b32_e32 v8, v8, v12 +; GFX12-NEXT: v_or_b32_e32 v9, v10, v13 +; GFX12-NEXT: v_or_b32_e32 v10, v11, v14 +; GFX12-NEXT: v_or_b32_e32 v11, v23, v15 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[8:11], v[19:22], v[0:7], v16 +; GFX12-NEXT: global_store_b128 v[17:18], v[8:11], off ; GFX12-NEXT: s_nop 0 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <8 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8i16.v8i16.v16i16.v8i16.i16(<8 x i16> %A, <16 x i16> %B, <8 x i16> %C, i16 %Index) - store <8 x i16> %res, ptr addrspace(1) %out + %res = call <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8bf16.v8bf16.v16bf16.v8bf16.i16(<8 x bfloat> %A, <16 x bfloat> %B, <8 x bfloat> %C, i16 %Index) + store <8 x bfloat> %res, ptr addrspace(1) %out ret void } @@ -347,9 +543,9 @@ bb: } declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v8f32.v8f16.v8f16.v8f32(<8 x half>, <8 x half>, <8 x float>) -declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8i16.v8i16.v8f32(<8 x i16>, <8 x i16>, <8 x float>) +declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8bf16.v8bf16.v8f32(<8 x bfloat>, <8 x bfloat>, <8 x float>) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16.v8f16.v8f16(<8 x half>, <8 x half>, <8 x half>, i1 immarg) -declare <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8i16.v8i16.v8i16.v8i16(<8 x i16>, <8 x i16>, <8 x i16>, i1 immarg) +declare <8 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8bf16.v8bf16.v8bf16.v8bf16(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i1 immarg) declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8.v8i32.v2i32.v2i32.v8i32(i1 immarg, <2 x i32>, i1 immarg, <2 x i32>, <8 x i32>, i1 immarg) declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4.v8i32.i32.i32.v8i32(i1 immarg, i32, i1 immarg, i32, <8 x i32>, i1 immarg) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.v8f32.v2i32.v2i32.v8f32(<2 x i32>, <2 x i32>, <8 x float>) @@ -358,9 +554,9 @@ declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.fp8.v8f32.v2i32.v2i32.v8f declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.bf8.v8f32.v2i32.v2i32.v8f32(<2 x i32>, <2 x i32>, <8 x float>) declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x32.iu4.v8i32.v2i32.v2i32.v8i32(i1 immarg, <2 x i32>, i1 immarg, <2 x i32>, <8 x i32>, i1 immarg) declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.f16.v8f32.v8f16.v16f16.v8f32.i16(<8 x half>, <16 x half>, <8 x float>, i16) -declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8i16.v16i16.v8f32.i16(<8 x i16>, <16 x i16>, <8 x float>, i16) +declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8bf16.v16bf16.v8f32.i16(<8 x bfloat>, <16 x bfloat>, <8 x float>, i16) declare <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.v8f16.i16(<8 x half>, <16 x half>, <8 x half>, i16) -declare <8 x i16> 
@llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8i16.v8i16.v16i16.v8i16.i16(<8 x i16>, <16 x i16>, <8 x i16>, i16) +declare <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8bf16.v8bf16.v16bf16.v8bf16.i16(<8 x bfloat>, <16 x bfloat>, <8 x bfloat>, i16) declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu8.v8i32.v2i32.v4i32.v8i32.i16(i1 immarg, <2 x i32>, i1 immarg, <4 x i32>, <8 x i32>, i16 %Index, i1 immarg) declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu4.v8i32.i32.v2i32.v8i32.i16(i1 immarg, i32, i1 immarg, <2 x i32>, <8 x i32>, i16 %Index, i1 immarg) declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x64.iu4.v8i32.v2i32.v4i32.v8i32.i32(i1 immarg, <2 x i32>, i1 immarg, <4 x i32>, <8 x i32>, i32 %Index, i1 immarg) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w64-f16-f32-matrix-modifiers.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w64-f16-f32-matrix-modifiers.ll index 04dd6de78ea165..886cd560070630 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w64-f16-f32-matrix-modifiers.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w64-f16-f32-matrix-modifiers.ll @@ -61,9 +61,26 @@ bb: ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_negC(<4 x i16> %A, <4 x i16> %B, <4 x float> %C, <4 x float> addrspace(1)* %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_negC(<4 x bfloat> %A, <4 x bfloat> %B, <4 x float> %C, <4 x float> addrspace(1)* %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_negC: ; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v3 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX12-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_or_b32_e32 v0, v10, v0 +; GFX12-NEXT: v_or_b32_e32 v1, v11, v1 +; GFX12-NEXT: v_or_b32_e32 v2, v12, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-NEXT: v_or_b32_e32 v3, v13, v3 ; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[0,0,1] ; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off ; GFX12-NEXT: s_nop 0 @@ -71,14 +88,31 @@ define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_negC(<4 x i16> %A, <4 x i16> ; GFX12-NEXT: s_endpgm bb: %fneg.C = fneg <4 x float> %C - %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4i16.v4f32(<4 x i16> %A, <4 x i16> %B, <4 x float> %fneg.C) + %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4bf16.v4f32(<4 x bfloat> %A, <4 x bfloat> %B, <4 x float> %fneg.C) store <4 x float> %res, <4 x float> addrspace(1)* %out ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_absC(<4 x i16> %A, <4 x i16> %B, <4 x float> %C, <4 x float> addrspace(1)* %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_absC(<4 x bfloat> %A, <4 x bfloat> %B, <4 x float> %C, <4 x float> addrspace(1)* %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_absC: ; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v3 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX12-NEXT: 
v_lshlrev_b32_e32 v11, 16, v11 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_or_b32_e32 v0, v10, v0 +; GFX12-NEXT: v_or_b32_e32 v1, v11, v1 +; GFX12-NEXT: v_or_b32_e32 v2, v12, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-NEXT: v_or_b32_e32 v3, v13, v3 ; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[4:7], v[0:1], v[2:3], v[4:7] neg_hi:[0,0,1] ; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off ; GFX12-NEXT: s_nop 0 @@ -86,7 +120,7 @@ define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_absC(<4 x i16> %A, <4 x i16> ; GFX12-NEXT: s_endpgm bb: %fabs.C = call <4 x float> @llvm.fabs.v4f32(<4 x float> %C) - %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4i16.v4f32(<4 x i16> %A, <4 x i16> %B, <4 x float> %fabs.C) + %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4bf16.v4f32(<4 x bfloat> %A, <4 x bfloat> %B, <4 x float> %fabs.C) store <4 x float> %res, <4 x float> addrspace(1)* %out ret void } @@ -449,7 +483,7 @@ declare <4 x float> @llvm.fabs.v4f32(<4 x float>) declare float @llvm.fabs.f32(float) declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half>, <4 x half>, <4 x float>) -declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4i16.v4f32(<4 x i16>, <4 x i16>, <4 x float>) +declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4bf16.v4f32(<4 x bfloat>, <4 x bfloat>, <4 x float>) declare <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half>, <4 x half>, <4 x half>, i1 immarg) declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.i32.v4f32(i32, i32, <4 x float>) declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.bf8.i32.v4f32(i32, i32, <4 x float>) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w64-imm.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w64-imm.ll index c4d70fd5f0637f..b4bc7f0cd8c429 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w64-imm.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w64-imm.ll @@ -39,40 +39,72 @@ bb: ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm(<4 x i16> %A, <4 x i16> %B, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm(<4 x bfloat> %A, <4 x bfloat> %B, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_imm: ; GFX12: ; %bb.0: ; %bb -; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[6:9], v[0:1], v[2:3], 1.0 -; GFX12-NEXT: global_store_b128 v[4:5], v[6:9], off +; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v3 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX12-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_or_b32_e32 v6, v6, v0 +; GFX12-NEXT: v_or_b32_e32 v7, v7, v1 +; GFX12-NEXT: v_or_b32_e32 v8, v8, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-NEXT: v_or_b32_e32 v9, v9, v3 +; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[0:3], v[6:7], v[8:9], 1.0 +; GFX12-NEXT: global_store_b128 v[4:5], v[0:3], off ; GFX12-NEXT: s_nop 0 
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4i16.v4i16.v4f32(<4 x i16> %A, <4 x i16> %B, <4 x float> ) + %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4bf16.v4bf16.v4f32(<4 x bfloat> %A, <4 x bfloat> %B, <4 x float> ) store <4 x float> %res, ptr addrspace(1) %out ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm_non_inlineable(<4 x i16> %A, <4 x i16> %B, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm_non_inlineable(<4 x bfloat> %A, <4 x bfloat> %B, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_imm_non_inlineable: ; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v2 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX12-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v3 +; GFX12-NEXT: v_lshlrev_b32_e32 v8, 16, v8 ; GFX12-NEXT: s_mov_b32 s0, 0x40400000 -; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; GFX12-NEXT: s_mov_b32 s3, s0 +; GFX12-NEXT: v_or_b32_e32 v6, v6, v0 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v2 +; GFX12-NEXT: v_or_b32_e32 v7, v7, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v1, 16, v9 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v3 ; GFX12-NEXT: s_mov_b32 s1, s0 ; GFX12-NEXT: s_mov_b32 s2, s0 -; GFX12-NEXT: v_mov_b32_e32 v9, s3 -; GFX12-NEXT: v_mov_b32_e32 v8, s2 -; GFX12-NEXT: v_mov_b32_e32 v7, s1 -; GFX12-NEXT: v_mov_b32_e32 v6, s0 +; GFX12-NEXT: s_mov_b32 s3, s0 +; GFX12-NEXT: v_or_b32_e32 v8, v8, v0 +; GFX12-NEXT: v_or_b32_e32 v9, v1, v2 +; GFX12-NEXT: v_mov_b32_e32 v0, s0 +; GFX12-NEXT: v_mov_b32_e32 v1, s1 +; GFX12-NEXT: v_mov_b32_e32 v2, s2 +; GFX12-NEXT: v_mov_b32_e32 v3, s3 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[6:9], v[0:1], v[2:3], v[6:9] -; GFX12-NEXT: global_store_b128 v[4:5], v[6:9], off +; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[0:3], v[6:7], v[8:9], v[0:3] +; GFX12-NEXT: global_store_b128 v[4:5], v[0:3], off ; GFX12-NEXT: s_nop 0 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4i16.v4i16.v4f32(<4 x i16> %A, <4 x i16> %B, <4 x float> ) + %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4bf16.v4bf16.v4f32(<4 x bfloat> %A, <4 x bfloat> %B, <4 x float> ) store <4 x float> %res, ptr addrspace(1) %out ret void } @@ -111,13 +143,28 @@ bb: ret void } -define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm(<4 x i16> %A, <4 x i16> %B, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm(<4 x bfloat> %A, <4 x bfloat> %B, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_bf16_16x16x16_bf16_imm: ; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v3 +; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v2 +; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX12-NEXT: v_or_b32_e32 v0, v6, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; 
GFX12-NEXT: s_mov_b32 s0, 0x3f803f80 -; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: v_or_b32_e32 v1, v7, v1 ; GFX12-NEXT: s_mov_b32 s1, s0 +; GFX12-NEXT: v_or_b32_e32 v3, v6, v3 ; GFX12-NEXT: v_mov_b32_e32 v7, s1 +; GFX12-NEXT: v_or_b32_e32 v2, v8, v2 ; GFX12-NEXT: v_mov_b32_e32 v6, s0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-NEXT: v_wmma_bf16_16x16x16_bf16 v[6:7], v[0:1], v[2:3], v[6:7] @@ -126,18 +173,33 @@ define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm(<4 x i16> %A, <4 x i16> ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <4 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4i16.v4i16.v4i16.v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> , i1 0) - store <4 x i16> %res, ptr addrspace(1) %out + %res = call <4 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4bf16.v4bf16.v4bf16.v4bf16(<4 x bfloat> %A, <4 x bfloat> %B, <4 x bfloat> , i1 0) + store <4 x bfloat> %res, ptr addrspace(1) %out ret void } -define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm_non_inlineable(<4 x i16> %A, <4 x i16> %B, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm_non_inlineable(<4 x bfloat> %A, <4 x bfloat> %B, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_bf16_16x16x16_bf16_imm_non_inlineable: ; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_lshrrev_b32_e32 v6, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v3 +; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v2 +; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX12-NEXT: v_or_b32_e32 v0, v6, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v6, 16, v9 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 ; GFX12-NEXT: s_mov_b32 s0, 0x3fc03fc0 -; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: v_or_b32_e32 v1, v7, v1 ; GFX12-NEXT: s_mov_b32 s1, s0 +; GFX12-NEXT: v_or_b32_e32 v3, v6, v3 ; GFX12-NEXT: v_mov_b32_e32 v7, s1 +; GFX12-NEXT: v_or_b32_e32 v2, v8, v2 ; GFX12-NEXT: v_mov_b32_e32 v6, s0 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-NEXT: v_wmma_bf16_16x16x16_bf16 v[6:7], v[0:1], v[2:3], v[6:7] @@ -146,8 +208,8 @@ define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm_non_inlineable(<4 x i16> ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <4 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4i16.v4i16.v4i16.v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> , i1 0) - store <4 x i16> %res, ptr addrspace(1) %out + %res = call <4 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4bf16.v4bf16.v4bf16.v4bf16(<4 x bfloat> %A, <4 x bfloat> %B, <4 x bfloat> , i1 0) + store <4 x bfloat> %res, ptr addrspace(1) %out ret void } @@ -418,9 +480,9 @@ bb: } declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f32.v4f16.v4f16.v4f32(<4 x half>, <4 x half>, <4 x float>) -declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4i16.v4i16.v4f32(<4 x i16>, <4 x i16>, <4 x float>) +declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4bf16.v4bf16.v4f32(<4 x bfloat>, <4 x bfloat>, <4 x float>) declare <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v4f16.v4f16.v4f16.v4f16(<4 x half>, <4 x half>, <4 x half>, i1 immarg) -declare <4 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4i16.v4i16.v4i16.v4i16(<4 
x i16>, <4 x i16>, <4 x i16>, i1 immarg) +declare <4 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4bf16.v4bf16.v4bf16.v4bf16(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i1 immarg) declare <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8.v4i32.i32.i32.v4i32(i1 immarg, i32, i1 immarg, i32, <4 x i32>, i1 immarg) declare <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4.v4i32.i32.i32.v4i32(i1 immarg, i32, i1 immarg, i32, <4 x i32>, i1 immarg) declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.v4f32.i32.i32.v4f32(i32, i32, <4 x float>) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w64-swmmac-index_key.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w64-swmmac-index_key.ll index ec93d3b5bd246f..cf05be9082b395 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w64-swmmac-index_key.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w64-swmmac-index_key.ll @@ -47,14 +47,38 @@ bb: ret void } -define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16_index_key(<4 x i16> %A, <8 x i16> %B, <4 x float> %C, ptr addrspace(1) %IndexVecPtr, ptr addrspace(1) %out0, ptr addrspace(1) %out1, ptr addrspace(1) %out2, ptr addrspace(1) %out3) { +define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16_index_key(<4 x bfloat> %A, <8 x bfloat> %B, <4 x float> %C, ptr addrspace(1) %IndexVecPtr, ptr addrspace(1) %out0, ptr addrspace(1) %out1, ptr addrspace(1) %out2, ptr addrspace(1) %out3) { ; GFX12-LABEL: test_swmmac_f32_16x16x32_bf16_index_key: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: global_load_b32 v10, v[10:11], off +; GFX12-NEXT: v_lshrrev_b32_e32 v20, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v22, 16, v3 +; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v21, 16, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v23, 16, v4 +; GFX12-NEXT: v_lshrrev_b32_e32 v24, 16, v5 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_and_b32_e32 v26, 0xffff, v5 +; GFX12-NEXT: v_lshlrev_b32_e32 v5, 16, v20 +; GFX12-NEXT: v_lshlrev_b32_e32 v20, 16, v22 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_and_b32_e32 v25, 0xffff, v4 +; GFX12-NEXT: v_lshlrev_b32_e32 v4, 16, v11 +; GFX12-NEXT: v_lshlrev_b32_e32 v11, 16, v21 +; GFX12-NEXT: v_lshlrev_b32_e32 v27, 16, v23 +; GFX12-NEXT: v_lshlrev_b32_e32 v24, 16, v24 +; GFX12-NEXT: v_or_b32_e32 v5, v5, v1 +; GFX12-NEXT: v_or_b32_e32 v1, v20, v3 ; GFX12-NEXT: v_mov_b32_e32 v23, v9 +; GFX12-NEXT: v_or_b32_e32 v4, v4, v0 +; GFX12-NEXT: v_or_b32_e32 v0, v11, v2 ; GFX12-NEXT: v_mov_b32_e32 v22, v8 ; GFX12-NEXT: v_mov_b32_e32 v21, v7 ; GFX12-NEXT: v_mov_b32_e32 v20, v6 +; GFX12-NEXT: v_or_b32_e32 v2, v27, v25 +; GFX12-NEXT: v_or_b32_e32 v3, v24, v26 ; GFX12-NEXT: v_mov_b32_e32 v27, v9 ; GFX12-NEXT: v_mov_b32_e32 v26, v8 ; GFX12-NEXT: v_mov_b32_e32 v25, v7 @@ -64,11 +88,11 @@ define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16_index_key(<4 x i16> %A, <8 ; GFX12-NEXT: v_mov_b32_e32 v29, v7 ; GFX12-NEXT: v_mov_b32_e32 v28, v6 ; GFX12-NEXT: s_waitcnt vmcnt(0) -; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[20:23], v[0:1], v[2:5], v10 -; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[24:27], v[0:1], v[2:5], v10 index_key:1 +; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[20:23], v[4:5], v[0:3], v10 +; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[24:27], v[4:5], v[0:3], v10 index_key:1 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[28:31], v[0:1], v[2:5], v10 index_key:2 -; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[6:9], v[0:1], v[2:5], v10 
index_key:3 +; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[28:31], v[4:5], v[0:3], v10 index_key:2 +; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[6:9], v[4:5], v[0:3], v10 index_key:3 ; GFX12-NEXT: global_store_b128 v[12:13], v[20:23], off ; GFX12-NEXT: global_store_b128 v[14:15], v[24:27], off ; GFX12-NEXT: global_store_b128 v[16:17], v[28:31], off @@ -79,16 +103,16 @@ define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16_index_key(<4 x i16> %A, <8 bb: %IndexVec = load <4 x i8>, ptr addrspace(1) %IndexVecPtr, align 4 %Index0 = extractelement <4 x i8> %IndexVec, i32 0 - %res0 = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4i16.v8i16.v4f32.i8(<4 x i16> %A, <8 x i16> %B, <4 x float> %C, i8 %Index0) + %res0 = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4bf16.v8bf16.v4f32.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x float> %C, i8 %Index0) store <4 x float> %res0, ptr addrspace(1) %out0 %Index1 = extractelement <4 x i8> %IndexVec, i32 1 - %res1 = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4i16.v8i16.v4f32.i8(<4 x i16> %A, <8 x i16> %B, <4 x float> %C, i8 %Index1) + %res1 = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4bf16.v8bf16.v4f32.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x float> %C, i8 %Index1) store <4 x float> %res1, ptr addrspace(1) %out1 %Index2 = extractelement <4 x i8> %IndexVec, i32 2 - %res2 = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4i16.v8i16.v4f32.i8(<4 x i16> %A, <8 x i16> %B, <4 x float> %C, i8 %Index2) + %res2 = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4bf16.v8bf16.v4f32.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x float> %C, i8 %Index2) store <4 x float> %res2, ptr addrspace(1) %out2 %Index3 = extractelement <4 x i8> %IndexVec, i32 3 - %res3 = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4i16.v8i16.v4f32.i8(<4 x i16> %A, <8 x i16> %B, <4 x float> %C, i8 %Index3) + %res3 = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4bf16.v8bf16.v4f32.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x float> %C, i8 %Index3) store <4 x float> %res3, ptr addrspace(1) %out3 ret void } @@ -133,22 +157,54 @@ bb: ret void } -define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16_index_key(<4 x i16> %A, <8 x i16> %B, <4 x i16> %C, ptr addrspace(1) %IndexVecPtr, ptr addrspace(1) %out0, ptr addrspace(1) %out1, ptr addrspace(1) %out2, ptr addrspace(1) %out3) { +define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16_index_key(<4 x bfloat> %A, <8 x bfloat> %B, <4 x bfloat> %C, ptr addrspace(1) %IndexVecPtr, ptr addrspace(1) %out0, ptr addrspace(1) %out1, ptr addrspace(1) %out2, ptr addrspace(1) %out3) { ; GFX12-LABEL: test_swmmac_bf16_16x16x32_bf16_index_key: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: global_load_b32 v22, v[8:9], off +; GFX12-NEXT: v_lshrrev_b32_e32 v21, 16, v5 +; GFX12-NEXT: v_and_b32_e32 v24, 0xffff, v5 +; GFX12-NEXT: v_lshrrev_b32_e32 v5, 16, v7 +; GFX12-NEXT: v_lshrrev_b32_e32 v20, 16, v4 +; GFX12-NEXT: v_and_b32_e32 v23, 0xffff, v4 +; GFX12-NEXT: v_lshrrev_b32_e32 v4, 16, v6 +; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v1 +; GFX12-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; GFX12-NEXT: v_lshlrev_b32_e32 v26, 16, v5 +; GFX12-NEXT: v_lshrrev_b32_e32 v18, 16, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v19, 16, v3 +; GFX12-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; GFX12-NEXT: v_lshlrev_b32_e32 v25, 16, v4 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; 
GFX12-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX12-NEXT: v_or_b32_e32 v7, v26, v7 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_lshlrev_b32_e32 v18, 16, v18 +; GFX12-NEXT: v_lshlrev_b32_e32 v19, 16, v19 +; GFX12-NEXT: v_lshlrev_b32_e32 v20, 16, v20 +; GFX12-NEXT: v_lshlrev_b32_e32 v21, 16, v21 +; GFX12-NEXT: v_or_b32_e32 v6, v25, v6 +; GFX12-NEXT: v_or_b32_e32 v4, v8, v0 +; GFX12-NEXT: v_or_b32_e32 v5, v9, v1 ; GFX12-NEXT: v_mov_b32_e32 v9, v7 +; GFX12-NEXT: v_or_b32_e32 v0, v18, v2 +; GFX12-NEXT: v_or_b32_e32 v1, v19, v3 +; GFX12-NEXT: v_or_b32_e32 v2, v20, v23 +; GFX12-NEXT: v_or_b32_e32 v3, v21, v24 ; GFX12-NEXT: v_mov_b32_e32 v8, v6 ; GFX12-NEXT: v_mov_b32_e32 v19, v7 ; GFX12-NEXT: v_mov_b32_e32 v18, v6 ; GFX12-NEXT: v_mov_b32_e32 v21, v7 ; GFX12-NEXT: v_mov_b32_e32 v20, v6 ; GFX12-NEXT: s_waitcnt vmcnt(0) -; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[8:9], v[0:1], v[2:5], v22 -; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[18:19], v[0:1], v[2:5], v22 index_key:1 +; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[8:9], v[4:5], v[0:3], v22 +; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[18:19], v[4:5], v[0:3], v22 index_key:1 ; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[20:21], v[0:1], v[2:5], v22 index_key:2 -; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[6:7], v[0:1], v[2:5], v22 index_key:3 +; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[20:21], v[4:5], v[0:3], v22 index_key:2 +; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[6:7], v[4:5], v[0:3], v22 index_key:3 ; GFX12-NEXT: global_store_b64 v[10:11], v[8:9], off ; GFX12-NEXT: global_store_b64 v[12:13], v[18:19], off ; GFX12-NEXT: global_store_b64 v[14:15], v[20:21], off @@ -159,17 +215,17 @@ define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16_index_key(<4 x i16> %A, <8 bb: %IndexVec = load <4 x i8>, ptr addrspace(1) %IndexVecPtr, align 4 %Index0 = extractelement <4 x i8> %IndexVec, i32 0 - %res0 = call <4 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4i16.v4i16.v8i16.v4i16.i8(<4 x i16> %A, <8 x i16> %B, <4 x i16> %C, i8 %Index0) - store <4 x i16> %res0, ptr addrspace(1) %out0 + %res0 = call <4 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4bf16.v4bf16.v8bf16.v4bf16.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x bfloat> %C, i8 %Index0) + store <4 x bfloat> %res0, ptr addrspace(1) %out0 %Index1 = extractelement <4 x i8> %IndexVec, i32 1 - %res1 = call <4 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4i16.v4i16.v8i16.v4i16.i8(<4 x i16> %A, <8 x i16> %B, <4 x i16> %C, i8 %Index1) - store <4 x i16> %res1, ptr addrspace(1) %out1 + %res1 = call <4 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4bf16.v4bf16.v8bf16.v4bf16.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x bfloat> %C, i8 %Index1) + store <4 x bfloat> %res1, ptr addrspace(1) %out1 %Index2 = extractelement <4 x i8> %IndexVec, i32 2 - %res2 = call <4 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4i16.v4i16.v8i16.v4i16.i8(<4 x i16> %A, <8 x i16> %B, <4 x i16> %C, i8 %Index2) - store <4 x i16> %res2, ptr addrspace(1) %out2 + %res2 = call <4 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4bf16.v4bf16.v8bf16.v4bf16.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x bfloat> %C, i8 %Index2) + store <4 x bfloat> %res2, ptr addrspace(1) %out2 %Index3 = extractelement <4 x i8> %IndexVec, i32 3 - %res3 = call <4 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4i16.v4i16.v8i16.v4i16.i8(<4 x i16> %A, <8 x i16> %B, <4 x i16> %C, i8 %Index3) - store <4 x i16> %res3, ptr addrspace(1) %out3 + %res3 = call <4 x bfloat> 
@llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4bf16.v4bf16.v8bf16.v4bf16.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x bfloat> %C, i8 %Index3) + store <4 x bfloat> %res3, ptr addrspace(1) %out3 ret void } @@ -460,9 +516,9 @@ bb: } declare <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.f16.v4f32.v4f16.v8f16.v4f32.i8(<4 x half>, <8 x half>, <4 x float>, i8) -declare <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4i16.v8i16.v4f32.i8(<4 x i16>, <8 x i16>, <4 x float>, i8) +declare <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4bf16.v8bf16.v4f32.i8(<4 x bfloat>, <8 x bfloat>, <4 x float>, i8) declare <4 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v4f16.v4f16.v8f16.v4f16.i8(<4 x half>, <8 x half>, <4 x half>, i8) -declare <4 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4i16.v4i16.v8i16.v4i16.i8(<4 x i16>, <8 x i16>, <4 x i16>, i8) +declare <4 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4bf16.v4bf16.v8bf16.v4bf16.i8(<4 x bfloat>, <8 x bfloat>, <4 x bfloat>, i8) declare <4 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu8.v4i32.i32.v2i32.v4i32.i8(i1 immarg, i32, i1 immarg, <2 x i32>, <4 x i32>, i8 %Index, i1 immarg) declare <4 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu4.v4i32.i32.i32.v4i32.i16(i1 immarg, i32, i1 immarg, i32, <4 x i32>, i16 %Index, i1 immarg) declare <4 x i32> @llvm.amdgcn.swmmac.i32.16x16x64.iu4.v4i32.i32.v2i32.v4i32.i16(i1 immarg, i32, i1 immarg, <2 x i32>, <4 x i32>, i16 %Index, i1 immarg) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w64.ll index 0d1871a18d4055..841021e15b41f0 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w64.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/wmma-gfx12-w64.ll @@ -15,16 +15,33 @@ bb: ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16(<4 x i16> %A, <4 x i16> %B, <4 x float> %C, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16(<4 x bfloat> %A, <4 x bfloat> %B, <4 x float> %C, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16: ; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v3 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX12-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_or_b32_e32 v0, v10, v0 +; GFX12-NEXT: v_or_b32_e32 v1, v11, v1 +; GFX12-NEXT: v_or_b32_e32 v2, v12, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-NEXT: v_or_b32_e32 v3, v13, v3 ; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[4:7], v[0:1], v[2:3], v[4:7] ; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off ; GFX12-NEXT: s_nop 0 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4i16.v4i16.v4f32(<4 x i16> %A, <4 x i16> %B, <4 x float> %C) + %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4bf16.v4bf16.v4f32(<4 x bfloat> %A, <4 x bfloat> %B, <4 x float> %C) store <4 x float> %res, ptr addrspace(1) %out ret void } @@ -43,17 +60,42 @@ bb: ret void } -define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr addrspace(1) 
%out) { +define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16(<4 x bfloat> %A, <4 x bfloat> %B, <4 x bfloat> %C, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_bf16_16x16x16_bf16: ; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v0 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v9, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v10, 16, v2 +; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v4 +; GFX12-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v5 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v9, 16, v9 +; GFX12-NEXT: v_lshlrev_b32_e32 v10, 16, v10 +; GFX12-NEXT: v_or_b32_e32 v0, v8, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v8, 16, v3 +; GFX12-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-NEXT: v_and_b32_e32 v3, 0xffff, v3 +; GFX12-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX12-NEXT: v_and_b32_e32 v4, 0xffff, v4 +; GFX12-NEXT: v_lshlrev_b32_e32 v8, 16, v8 +; GFX12-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX12-NEXT: v_and_b32_e32 v5, 0xffff, v5 +; GFX12-NEXT: v_or_b32_e32 v1, v9, v1 +; GFX12-NEXT: v_or_b32_e32 v2, v10, v2 +; GFX12-NEXT: v_or_b32_e32 v3, v8, v3 +; GFX12-NEXT: v_or_b32_e32 v4, v11, v4 +; GFX12-NEXT: v_or_b32_e32 v5, v12, v5 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-NEXT: v_wmma_bf16_16x16x16_bf16 v[4:5], v[0:1], v[2:3], v[4:5] ; GFX12-NEXT: global_store_b64 v[6:7], v[4:5], off ; GFX12-NEXT: s_nop 0 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <4 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4i16.v4i16.v4i16.v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, i1 0) - store <4 x i16> %res, ptr addrspace(1) %out + %res = call <4 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4bf16.v4bf16.v4bf16.v4bf16(<4 x bfloat> %A, <4 x bfloat> %B, <4 x bfloat> %C, i1 0) + store <4 x bfloat> %res, ptr addrspace(1) %out ret void } @@ -169,16 +211,27 @@ bb: ret void } -define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16(<4 x i16> %A, <8 x i16> %B, <4 x float> %C, i8 %Index, ptr addrspace(1) %out) { +define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16(<4 x bfloat> %A, <8 x i16> %B, <4 x float> %C, i8 %Index, ptr addrspace(1) %out) { ; GFX12-LABEL: test_swmmac_f32_16x16x32_bf16: ; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v14, 16, v1 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX12-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; GFX12-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX12-NEXT: v_or_b32_e32 v0, v13, v0 +; GFX12-NEXT: v_or_b32_e32 v1, v14, v1 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[6:9], v[0:1], v[2:5], v10 ; GFX12-NEXT: global_store_b128 v[11:12], v[6:9], off ; GFX12-NEXT: s_nop 0 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4i16.v8i16.v4f32.i8(<4 x i16> %A, <8 x i16> %B, <4 x float> %C, i8 %Index) + %res = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4bf16.v8i16.v4f32.i8(<4 x bfloat> %A, <8 x i16> %B, <4 x float> %C, i8 %Index) store <4 x float> %res, ptr addrspace(1) %out ret void } @@ -197,17 +250,34 @@ bb: ret void } -define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16(<4 x i16> %A, <8 x i16> %B, <4 x i16> 
%C, i8 %Index, ptr addrspace(1) %out) { +define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16(<4 x bfloat> %A, <8 x i16> %B, <4 x bfloat> %C, i8 %Index, ptr addrspace(1) %out) { ; GFX12-LABEL: test_swmmac_bf16_16x16x32_bf16: ; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_lshrrev_b32_e32 v11, 16, v0 +; GFX12-NEXT: v_lshrrev_b32_e32 v12, 16, v1 +; GFX12-NEXT: v_lshrrev_b32_e32 v13, 16, v6 +; GFX12-NEXT: v_lshrrev_b32_e32 v14, 16, v7 +; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0 +; GFX12-NEXT: v_lshlrev_b32_e32 v11, 16, v11 +; GFX12-NEXT: v_lshlrev_b32_e32 v12, 16, v12 +; GFX12-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX12-NEXT: v_lshlrev_b32_e32 v13, 16, v13 +; GFX12-NEXT: v_and_b32_e32 v6, 0xffff, v6 +; GFX12-NEXT: v_lshlrev_b32_e32 v14, 16, v14 +; GFX12-NEXT: v_and_b32_e32 v7, 0xffff, v7 +; GFX12-NEXT: v_or_b32_e32 v0, v11, v0 +; GFX12-NEXT: v_or_b32_e32 v1, v12, v1 +; GFX12-NEXT: v_or_b32_e32 v6, v13, v6 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-NEXT: v_or_b32_e32 v7, v14, v7 ; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[6:7], v[0:1], v[2:5], v8 ; GFX12-NEXT: global_store_b64 v[9:10], v[6:7], off ; GFX12-NEXT: s_nop 0 ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <4 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4i16.v4i16.v8i16.v4i16.i8(<4 x i16> %A, <8 x i16> %B, <4 x i16> %C, i8 %Index) - store <4 x i16> %res, ptr addrspace(1) %out + %res = call <4 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4bf16.v4bf16.v8i16.v4bf16.i8(<4 x bfloat> %A, <8 x i16> %B, <4 x bfloat> %C, i8 %Index) + store <4 x bfloat> %res, ptr addrspace(1) %out ret void } @@ -310,9 +380,9 @@ bb: } declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f32.v4f16.v4f16.v4f32(<4 x half>, <4 x half>, <4 x float>) -declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4i16.v4i16.v4f32(<4 x i16>, <4 x i16>, <4 x float>) +declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4bf16.v4bf16.v4f32(<4 x bfloat>, <4 x bfloat>, <4 x float>) declare <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v4f16.v4f16.v4f16.v4f16(<4 x half>, <4 x half>, <4 x half>, i1 immarg) -declare <4 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4i16.v4i16.v4i16.v4i16(<4 x i16>, <4 x i16>, <4 x i16>, i1 immarg) +declare <4 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4bf16.v4bf16.v4bf16.v4bf16(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i1 immarg) declare <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8.v4i32.i32.i32.v4i32(i1 immarg, i32, i1 immarg, i32, <4 x i32>, i1 immarg) declare <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4.v4i32.i32.i32.v4i32(i1 immarg, i32, i1 immarg, i32, <4 x i32>, i1 immarg) declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.v4f32.i32.i32.v4f32(i32, i32, <4 x float>) @@ -321,9 +391,9 @@ declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.fp8.v4f32.i32.i32.v4f32(i declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.bf8.v4f32.i32.i32.v4f32(i32, i32, <4 x float>) declare <4 x i32> @llvm.amdgcn.wmma.i32.16x16x32.iu4.v4i32.i32.i32.v4i32(i1 immarg, i32, i1 immarg, i32, <4 x i32>, i1 immarg) declare <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.f16.v4f32.v4f16.v8f16.v4f32.i8(<4 x half>, <8 x half>, <4 x float>, i8) -declare <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4i16.v8i16.v4f32.i8(<4 x i16>, <8 x i16>, <4 x float>, i8) +declare <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4bf16.v8i16.v4f32.i8(<4 x bfloat>, <8 x i16>, <4 x float>, i8) declare <4 x half> 
@llvm.amdgcn.swmmac.f16.16x16x32.f16.v4f16.v4f16.v8f16.v4f16.i8(<4 x half>, <8 x half>, <4 x half>, i8) -declare <4 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4i16.v4i16.v8i16.v4i16.i8(<4 x i16>, <8 x i16>, <4 x i16>, i8) +declare <4 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4bf16.v4bf16.v8i16.v4bf16.i8(<4 x bfloat>, <8 x i16>, <4 x bfloat>, i8) declare <4 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu8.v4i32.i32.v2i32.v4i32.i8(i1 immarg, i32, i1 immarg, <2 x i32>, <4 x i32>, i8 %Index, i1 immarg) declare <4 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu4.v4i32.i32.i32.v4i32.i16(i1 immarg, i32, i1 immarg, i32, <4 x i32>, i16 %Index, i1 immarg) declare <4 x i32> @llvm.amdgcn.swmmac.i32.16x16x64.iu4.v4i32.i32.v2i32.v4i32.i16(i1 immarg, i32, i1 immarg, <2 x i32>, <4 x i32>, i16 %Index, i1 immarg) diff --git a/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w32-f16-f32-matrix-modifiers.ll b/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w32-f16-f32-matrix-modifiers.ll index d4868df06d7943..0855b34506b42c 100644 --- a/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w32-f16-f32-matrix-modifiers.ll +++ b/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w32-f16-f32-matrix-modifiers.ll @@ -69,7 +69,7 @@ bb: ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_negC(<8 x i16> %A, <8 x i16> %B, <8 x float> %C, <8 x float> addrspace(1)* %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_negC(<8 x bfloat> %A, <8 x bfloat> %B, <8 x float> %C, <8 x float> addrspace(1)* %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_negC: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[8:15], v[0:3], v[4:7], v[8:15] neg_lo:[0,0,1] @@ -81,12 +81,12 @@ define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_negC(<8 x i16> %A, <8 x i16> ; GFX12-NEXT: s_endpgm bb: %fneg.C = fneg <8 x float> %C - %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8i16.v8f32(<8 x i16> %A, <8 x i16> %B, <8 x float> %fneg.C) + %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8bf16.v8f32(<8 x bfloat> %A, <8 x bfloat> %B, <8 x float> %fneg.C) store <8 x float> %res, <8 x float> addrspace(1)* %out ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_absC(<8 x i16> %A, <8 x i16> %B, <8 x float> %C, <8 x float> addrspace(1)* %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_absC(<8 x bfloat> %A, <8 x bfloat> %B, <8 x float> %C, <8 x float> addrspace(1)* %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_absC: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[8:15], v[0:3], v[4:7], v[8:15] neg_hi:[0,0,1] @@ -98,7 +98,7 @@ define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_absC(<8 x i16> %A, <8 x i16> ; GFX12-NEXT: s_endpgm bb: %fabs.C = call <8 x float> @llvm.fabs.v8f32(<8 x float> %C) - %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8i16.v8f32(<8 x i16> %A, <8 x i16> %B, <8 x float> %fabs.C) + %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8bf16.v8f32(<8 x bfloat> %A, <8 x bfloat> %B, <8 x float> %fabs.C) store <8 x float> %res, <8 x float> addrspace(1)* %out ret void } @@ -489,7 +489,7 @@ declare <8 x float> @llvm.fabs.v8f32(<8 x float>) declare float @llvm.fabs.f32(float) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v8f16.v8f32(<8 x half>, <8 x half>, <8 x float>) -declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8i16.v8f32(<8 x i16>, <8 x i16>, <8 x float>) +declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8bf16.v8f32(<8 x bfloat>, <8 x bfloat>, <8 x float>) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<8 x half>, <8 x half>, <8 
x half>, i1 immarg) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.v2i32.v8f32(<2 x i32>, <2 x i32>, <8 x float>) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.bf8.v2i32.v8f32(<2 x i32>, <2 x i32>, <8 x float>) diff --git a/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w32-imm.ll b/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w32-imm.ll index 51f93b57f38e90..634653161ce6d6 100644 --- a/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w32-imm.ll +++ b/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w32-imm.ll @@ -39,7 +39,7 @@ bb: ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm(<8 x i16> %A, <8 x i16> %B, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm(<8 x bfloat> %A, <8 x bfloat> %B, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_imm: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[10:17], v[0:3], v[4:7], 1.0 @@ -50,12 +50,12 @@ define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm(<8 x i16> %A, <8 x i16> % ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8i16.v8i16.v8f32(<8 x i16> %A, <8 x i16> %B, <8 x float> ) + %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8bf16.v8bf16.v8f32(<8 x bfloat> %A, <8 x bfloat> %B, <8 x float> ) store <8 x float> %res, ptr addrspace(1) %out ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm_non_inlineable(<8 x i16> %A, <8 x i16> %B, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm_non_inlineable(<8 x bfloat> %A, <8 x bfloat> %B, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_imm_non_inlineable: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_mov_b32_e32 v10, 0x40400000 @@ -72,7 +72,7 @@ define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm_non_inlineable(<8 x i16> ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8i16.v8i16.v8f32(<8 x i16> %A, <8 x i16> %B, <8 x float> ) + %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8bf16.v8bf16.v8f32(<8 x bfloat> %A, <8 x bfloat> %B, <8 x float> ) store <8 x float> %res, ptr addrspace(1) %out ret void } @@ -109,7 +109,7 @@ bb: ret void } -define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm(<8 x i16> %A, <8 x i16> %B, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm(<8 x bfloat> %A, <8 x bfloat> %B, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_bf16_16x16x16_bf16_imm: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_wmma_bf16_16x16x16_bf16 v[10:13], v[0:3], v[4:7], 1.0 @@ -118,12 +118,12 @@ define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm(<8 x i16> %A, <8 x i16> ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8i16.v8i16.v8i16.v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> , i1 0) - store <8 x i16> %res, ptr addrspace(1) %out + %res = call <8 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8bf16.v8bf16.v8bf16.v8bf16(<8 x bfloat> %A, <8 x bfloat> %B, <8 x bfloat> , i1 0) + store <8 x bfloat> %res, ptr addrspace(1) %out ret void } -define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm_non_inlineable(<8 x i16> %A, <8 x i16> %B, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm_non_inlineable(<8 x bfloat> %A, <8 x bfloat> %B, ptr addrspace(1) %out) { ; GFX12-LABEL: 
test_wmma_bf16_16x16x16_bf16_imm_non_inlineable: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_mov_b32_e32 v10, 0x3fc03fc0 @@ -136,8 +136,8 @@ define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm_non_inlineable(<8 x i16> ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8i16.v8i16.v8i16.v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> , i1 0) - store <8 x i16> %res, ptr addrspace(1) %out + %res = call <8 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8bf16.v8bf16.v8bf16.v8bf16(<8 x bfloat> %A, <8 x bfloat> %B, <8 x bfloat> , i1 0) + store <8 x bfloat> %res, ptr addrspace(1) %out ret void } @@ -408,9 +408,9 @@ bb: } declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v8f32.v8f16.v8f16.v8f32(<8 x half>, <8 x half>, <8 x float>) -declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8i16.v8i16.v8f32(<8 x i16>, <8 x i16>, <8 x float>) +declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8bf16.v8bf16.v8f32(<8 x bfloat>, <8 x bfloat>, <8 x float>) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16.v8f16.v8f16(<8 x half>, <8 x half>, <8 x half>, i1 immarg) -declare <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8i16.v8i16.v8i16.v8i16(<8 x i16>, <8 x i16>, <8 x i16>, i1 immarg) +declare <8 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8bf16.v8bf16.v8bf16.v8bf16(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i1 immarg) declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8.v8i32.v2i32.v2i32.v8i32(i1 immarg, <2 x i32>, i1 immarg, <2 x i32>, <8 x i32>, i1 immarg) declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4.v8i32.i32.i32.v8i32(i1 immarg, i32, i1 immarg, i32, <8 x i32>, i1 immarg) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.v8f32.v2i32.v2i32.v8f32(<2 x i32>, <2 x i32>, <8 x float>) @@ -419,9 +419,9 @@ declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.fp8.v8f32.v2i32.v2i32.v8f declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.bf8.v8f32.v2i32.v2i32.v8f32(<2 x i32>, <2 x i32>, <8 x float>) declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x32.iu4.v8i32.v2i32.v2i32.v8i32(i1 immarg, <2 x i32>, i1 immarg, <2 x i32>, <8 x i32>, i1 immarg) declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.f16.v8f32.v8f16.v16f16.v8f32.i16(<8 x half>, <16 x half>, <8 x float>, i16) -declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8i16.v16i16.v8f32.i16(<8 x i16>, <16 x i16>, <8 x float>, i16) +declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8bf16.v16i16.v8f32.i16(<8 x bfloat>, <16 x i16>, <8 x float>, i16) declare <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.v8f16.i16(<8 x half>, <16 x half>, <8 x half>, i16) -declare <8 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8i16.v8i16.v16i16.v8i16.i16(<8 x i16>, <16 x i16>, <8 x i16>, i16) +declare <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8bf16.v8bf16.v16i16.v8bf16.i16(<8 x bfloat>, <16 x i16>, <8 x bfloat>, i16) declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu8.v8i32.v2i32.v4i32.v8i32.i16(i1 immarg, <2 x i32>, i1 immarg, <4 x i32>, <8 x i32>, i16 %Index, i1 immarg) declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu4.v8i32.i32.v2i32.v8i32.i16(i1 immarg, i32, i1 immarg, <2 x i32>, <8 x i32>, i16 %Index, i1 immarg) declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x64.iu4.v8i32.v2i32.v4i32.v8i32.i32(i1 immarg, <2 x i32>, i1 immarg, <4 x i32>, <8 x i32>, i32 %Index, i1 immarg) diff --git a/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w32-swmmac-index_key.ll 
b/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w32-swmmac-index_key.ll index 97170c3f368c89..bbaf4afdef25e2 100644 --- a/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w32-swmmac-index_key.ll +++ b/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w32-swmmac-index_key.ll @@ -33,7 +33,7 @@ bb: ret void } -define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16_index_key(<8 x i16> %A, <16 x i16> %B, <8 x float> %C, ptr addrspace(1) %IndexVecPtr, ptr addrspace(1) %out0, ptr addrspace(1) %out1) { +define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16_index_key(<8 x bfloat> %A, <16 x bfloat> %B, <8 x float> %C, ptr addrspace(1) %IndexVecPtr, ptr addrspace(1) %out0, ptr addrspace(1) %out1) { ; GFX12-LABEL: test_swmmac_f32_16x16x32_bf16_index_key: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: global_load_b32 v20, v[20:21], off @@ -57,10 +57,10 @@ define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16_index_key(<8 x i16> %A, <16 bb: %IndexVec = load <2 x i16>, ptr addrspace(1) %IndexVecPtr, align 4 %Index0 = extractelement <2 x i16> %IndexVec, i32 0 - %res0 = call <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8i16.v16i16.v8f32.i16(<8 x i16> %A, <16 x i16> %B, <8 x float> %C, i16 %Index0) + %res0 = call <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8bf16.v16bf16.v8f32.i16(<8 x bfloat> %A, <16 x bfloat> %B, <8 x float> %C, i16 %Index0) store <8 x float> %res0, ptr addrspace(1) %out0 %Index1 = extractelement <2 x i16> %IndexVec, i32 1 - %res1 = call <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8i16.v16i16.v8f32.i16(<8 x i16> %A, <16 x i16> %B, <8 x float> %C, i16 %Index1) + %res1 = call <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8bf16.v16bf16.v8f32.i16(<8 x bfloat> %A, <16 x bfloat> %B, <8 x float> %C, i16 %Index1) store <8 x float> %res1, ptr addrspace(1) %out1 ret void } @@ -91,7 +91,7 @@ bb: ret void } -define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16_index_key(<8 x i16> %A, <16 x i16> %B, <8 x i16> %C, ptr addrspace(1) %IndexVecPtr, ptr addrspace(1) %out0, ptr addrspace(1) %out1) { +define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16_index_key(<8 x bfloat> %A, <16 x bfloat> %B, <8 x bfloat> %C, ptr addrspace(1) %IndexVecPtr, ptr addrspace(1) %out0, ptr addrspace(1) %out1) { ; GFX12-LABEL: test_swmmac_bf16_16x16x32_bf16_index_key: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: global_load_b32 v16, v[16:17], off @@ -109,11 +109,11 @@ define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16_index_key(<8 x i16> %A, <1 bb: %IndexVec = load <2 x i16>, ptr addrspace(1) %IndexVecPtr, align 4 %Index0 = extractelement <2 x i16> %IndexVec, i32 0 - %res0 = call <8 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8i16.v8i16.v16i16.v8i16.i16(<8 x i16> %A, <16 x i16> %B, <8 x i16> %C, i16 %Index0) - store <8 x i16> %res0, ptr addrspace(1) %out0 + %res0 = call <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8bf16.v8bf16.v16bf16.v8bf16.i16(<8 x bfloat> %A, <16 x bfloat> %B, <8 x bfloat> %C, i16 %Index0) + store <8 x bfloat> %res0, ptr addrspace(1) %out0 %Index1 = extractelement <2 x i16> %IndexVec, i32 1 - %res1 = call <8 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8i16.v8i16.v16i16.v8i16.i16(<8 x i16> %A, <16 x i16> %B, <8 x i16> %C, i16 %Index1) - store <8 x i16> %res1, ptr addrspace(1) %out1 + %res1 = call <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8bf16.v8bf16.v16bf16.v8bf16.i16(<8 x bfloat> %A, <16 x bfloat> %B, <8 x bfloat> %C, i16 %Index1) + store <8 x bfloat> %res1, ptr addrspace(1) %out1 ret void } @@ -310,9 +310,9 @@ bb: } declare <8 x float> 
@llvm.amdgcn.swmmac.f32.16x16x32.f16.v8f32.v8f16.v16f16.v8f32.i16(<8 x half>, <16 x half>, <8 x float>, i16) -declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8i16.v16i16.v8f32.i16(<8 x i16>, <16 x i16>, <8 x float>, i16) +declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8bf16.v16bf16.v8f32.i16(<8 x bfloat>, <16 x bfloat>, <8 x float>, i16) declare <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.v8f16.i16(<8 x half>, <16 x half>, <8 x half>, i16) -declare <8 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8i16.v8i16.v16i16.v8i16.i16(<8 x i16>, <16 x i16>, <8 x i16>, i16) +declare <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8bf16.v8bf16.v16bf16.v8bf16.i16(<8 x bfloat>, <16 x bfloat>, <8 x bfloat>, i16) declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu8.v8i32.v2i32.v4i32.v8i32.i16(i1 immarg, <2 x i32>, i1 immarg, <4 x i32>, <8 x i32>, i16 %Index, i1 immarg) declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu4.v8i32.i32.v2i32.v8i32.i16(i1 immarg, i32, i1 immarg, <2 x i32>, <8 x i32>, i16 %Index, i1 immarg) declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.fp8.fp8.v8f32.v2i32.v4i32.v8f32.i16(<2 x i32>, <4 x i32>, <8 x float>, i16) diff --git a/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w32.ll b/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w32.ll index 2db3b07de54d0a..022dc2ec3290e7 100644 --- a/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w32.ll +++ b/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w32.ll @@ -17,7 +17,7 @@ bb: ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16(<8 x i16> %A, <8 x i16> %B, <8 x float> %C, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16(<8 x bfloat> %A, <8 x bfloat> %B, <8 x float> %C, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[8:15], v[0:3], v[4:7], v[8:15] @@ -28,7 +28,7 @@ define amdgpu_ps void @test_wmma_f32_16x16x16_bf16(<8 x i16> %A, <8 x i16> %B, < ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8i16.v8i16.v8f32(<8 x i16> %A, <8 x i16> %B, <8 x float> %C) + %res = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8bf16.v8bf16.v8f32(<8 x bfloat> %A, <8 x bfloat> %B, <8 x float> %C) store <8 x float> %res, ptr addrspace(1) %out ret void } @@ -47,7 +47,7 @@ bb: ret void } -define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16(<8 x bfloat> %A, <8 x bfloat> %B, <8 x bfloat> %C, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_bf16_16x16x16_bf16: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_wmma_bf16_16x16x16_bf16 v[8:11], v[0:3], v[4:7], v[8:11] @@ -56,8 +56,8 @@ define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16(<8 x i16> %A, <8 x i16> %B, ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8i16.v8i16.v8i16.v8i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i1 0) - store <8 x i16> %res, ptr addrspace(1) %out + %res = call <8 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8bf16.v8bf16.v8bf16.v8bf16(<8 x bfloat> %A, <8 x bfloat> %B, <8 x bfloat> %C, i1 0) + store <8 x bfloat> %res, ptr addrspace(1) %out ret void } @@ -190,7 +190,7 @@ bb: ret void } -define amdgpu_ps
void @test_swmmac_f32_16x16x32_bf16(<8 x bfloat> %A, <16 x bfloat> %B, <8 x float> %C, i16 %Index, ptr addrspace(1) %out) { ; GFX12-LABEL: test_swmmac_f32_16x16x32_bf16: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[12:19], v[0:3], v[4:11], v20 @@ -201,7 +201,7 @@ define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16(<8 x i16> %A, <16 x i16> %B ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8i16.v16i16.v8f32.i16(<8 x i16> %A, <16 x i16> %B, <8 x float> %C, i16 %Index) + %res = call <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8bf16.v16bf16.v8f32.i16(<8 x bfloat> %A, <16 x bfloat> %B, <8 x float> %C, i16 %Index) store <8 x float> %res, ptr addrspace(1) %out ret void } @@ -220,7 +220,7 @@ bb: ret void } -define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16(<8 x i16> %A, <16 x i16> %B, <8 x i16> %C, i16 %Index, ptr addrspace(1) %out) { +define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16(<8 x bfloat> %A, <16 x bfloat> %B, <8 x bfloat> %C, i16 %Index, ptr addrspace(1) %out) { ; GFX12-LABEL: test_swmmac_bf16_16x16x32_bf16: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[12:15], v[0:3], v[4:11], v16 @@ -229,8 +229,8 @@ define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16(<8 x i16> %A, <16 x i16> % ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <8 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8i16.v8i16.v16i16.v8i16.i16(<8 x i16> %A, <16 x i16> %B, <8 x i16> %C, i16 %Index) - store <8 x i16> %res, ptr addrspace(1) %out + %res = call <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8bf16.v8bf16.v16bf16.v8bf16.i16(<8 x bfloat> %A, <16 x bfloat> %B, <8 x bfloat> %C, i16 %Index) + store <8 x bfloat> %res, ptr addrspace(1) %out ret void } @@ -347,9 +347,9 @@ bb: } declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v8f32.v8f16.v8f16.v8f32(<8 x half>, <8 x half>, <8 x float>) -declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8i16.v8i16.v8f32(<8 x i16>, <8 x i16>, <8 x float>) +declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v8f32.v8bf16.v8bf16.v8f32(<8 x bfloat>, <8 x bfloat>, <8 x float>) declare <8 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16.v8f16.v8f16(<8 x half>, <8 x half>, <8 x half>, i1 immarg) -declare <8 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8i16.v8i16.v8i16.v8i16(<8 x i16>, <8 x i16>, <8 x i16>, i1 immarg) +declare <8 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v8bf16.v8bf16.v8bf16.v8bf16(<8 x bfloat>, <8 x bfloat>, <8 x bfloat>, i1 immarg) declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8.v8i32.v2i32.v2i32.v8i32(i1 immarg, <2 x i32>, i1 immarg, <2 x i32>, <8 x i32>, i1 immarg) declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4.v8i32.i32.i32.v8i32(i1 immarg, i32, i1 immarg, i32, <8 x i32>, i1 immarg) declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.v8f32.v2i32.v2i32.v8f32(<2 x i32>, <2 x i32>, <8 x float>) @@ -358,9 +358,9 @@ declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.fp8.v8f32.v2i32.v2i32.v8f declare <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.bf8.v8f32.v2i32.v2i32.v8f32(<2 x i32>, <2 x i32>, <8 x float>) declare <8 x i32> @llvm.amdgcn.wmma.i32.16x16x32.iu4.v8i32.v2i32.v2i32.v8i32(i1 immarg, <2 x i32>, i1 immarg, <2 x i32>, <8 x i32>, i1 immarg) declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.f16.v8f32.v8f16.v16f16.v8f32.i16(<8 x half>, <16 x half>, <8 x float>, i16) -declare <8 x float> 
@llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8i16.v16i16.v8f32.i16(<8 x i16>, <16 x i16>, <8 x float>, i16) +declare <8 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v8f32.v8bf16.v16bf16.v8f32.i16(<8 x bfloat>, <16 x bfloat>, <8 x float>, i16) declare <8 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v8f16.v8f16.v16f16.v8f16.i16(<8 x half>, <16 x half>, <8 x half>, i16) -declare <8 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8i16.v8i16.v16i16.v8i16.i16(<8 x i16>, <16 x i16>, <8 x i16>, i16) +declare <8 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v8bf16.v8bf16.v16bf16.v8bf16.i16(<8 x bfloat>, <16 x bfloat>, <8 x bfloat>, i16) declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu8.v8i32.v2i32.v4i32.v8i32.i16(i1 immarg, <2 x i32>, i1 immarg, <4 x i32>, <8 x i32>, i16 %Index, i1 immarg) declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu4.v8i32.i32.v2i32.v8i32.i16(i1 immarg, i32, i1 immarg, <2 x i32>, <8 x i32>, i16 %Index, i1 immarg) declare <8 x i32> @llvm.amdgcn.swmmac.i32.16x16x64.iu4.v8i32.v2i32.v4i32.v8i32.i32(i1 immarg, <2 x i32>, i1 immarg, <4 x i32>, <8 x i32>, i32 %Index, i1 immarg) diff --git a/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w64-f16-f32-matrix-modifiers.ll b/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w64-f16-f32-matrix-modifiers.ll index 51d8c4a02d7548..491848a912b553 100644 --- a/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w64-f16-f32-matrix-modifiers.ll +++ b/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w64-f16-f32-matrix-modifiers.ll @@ -61,7 +61,7 @@ bb: ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_negC(<4 x i16> %A, <4 x i16> %B, <4 x float> %C, <4 x float> addrspace(1)* %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_negC(<4 x bfloat> %A, <4 x bfloat> %B, <4 x float> %C, <4 x float> addrspace(1)* %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_negC: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[0,0,1] @@ -71,12 +71,12 @@ define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_negC(<4 x i16> %A, <4 x i16> ; GFX12-NEXT: s_endpgm bb: %fneg.C = fneg <4 x float> %C - %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4i16.v4f32(<4 x i16> %A, <4 x i16> %B, <4 x float> %fneg.C) + %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4bf16.v4f32(<4 x bfloat> %A, <4 x bfloat> %B, <4 x float> %fneg.C) store <4 x float> %res, <4 x float> addrspace(1)* %out ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_absC(<4 x i16> %A, <4 x i16> %B, <4 x float> %C, <4 x float> addrspace(1)* %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_absC(<4 x bfloat> %A, <4 x bfloat> %B, <4 x float> %C, <4 x float> addrspace(1)* %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_absC: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[4:7], v[0:1], v[2:3], v[4:7] neg_hi:[0,0,1] @@ -86,7 +86,7 @@ define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_absC(<4 x i16> %A, <4 x i16> ; GFX12-NEXT: s_endpgm bb: %fabs.C = call <4 x float> @llvm.fabs.v4f32(<4 x float> %C) - %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4i16.v4f32(<4 x i16> %A, <4 x i16> %B, <4 x float> %fabs.C) + %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4bf16.v4f32(<4 x bfloat> %A, <4 x bfloat> %B, <4 x float> %fabs.C) store <4 x float> %res, <4 x float> addrspace(1)* %out ret void } @@ -446,7 +446,7 @@ declare <4 x float> @llvm.fabs.v4f32(<4 x float>) declare float @llvm.fabs.f32(float) declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half>, <4 x half>, <4 x float>) 
-declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4i16.v4f32(<4 x i16>, <4 x i16>, <4 x float>) +declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4bf16.v4f32(<4 x bfloat>, <4 x bfloat>, <4 x float>) declare <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half>, <4 x half>, <4 x half>, i1 immarg) declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.i32.v4f32(i32, i32, <4 x float>) declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.bf8.i32.v4f32(i32, i32, <4 x float>) diff --git a/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w64-imm.ll b/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w64-imm.ll index fa0a7c98cea323..6d43460f752805 100644 --- a/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w64-imm.ll +++ b/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w64-imm.ll @@ -34,7 +34,7 @@ bb: ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm(<4 x i16> %A, <4 x i16> %B, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm(<4 x bfloat> %A, <4 x bfloat> %B, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_imm: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[6:9], v[0:1], v[2:3], 1.0 @@ -43,12 +43,12 @@ define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm(<4 x i16> %A, <4 x i16> % ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4i16.v4i16.v4f32(<4 x i16> %A, <4 x i16> %B, <4 x float> ) + %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4bf16.v4bf16.v4f32(<4 x bfloat> %A, <4 x bfloat> %B, <4 x float> ) store <4 x float> %res, ptr addrspace(1) %out ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm_non_inlineable(<4 x i16> %A, <4 x i16> %B, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm_non_inlineable(<4 x bfloat> %A, <4 x bfloat> %B, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_imm_non_inlineable: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_mov_b32_e32 v6, 0x40400000 @@ -62,7 +62,7 @@ define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_imm_non_inlineable(<4 x i16> ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4i16.v4i16.v4f32(<4 x i16> %A, <4 x i16> %B, <4 x float> ) + %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4bf16.v4bf16.v4f32(<4 x bfloat> %A, <4 x bfloat> %B, <4 x float> ) store <4 x float> %res, ptr addrspace(1) %out ret void } @@ -98,7 +98,7 @@ bb: ret void } -define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm(<4 x i16> %A, <4 x i16> %B, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm(<4 x bfloat> %A, <4 x bfloat> %B, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_bf16_16x16x16_bf16_imm: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_wmma_bf16_16x16x16_bf16 v[6:7], v[0:1], v[2:3], 1.0 @@ -107,12 +107,12 @@ define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm(<4 x i16> %A, <4 x i16> ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <4 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4i16.v4i16.v4i16.v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> , i1 0) - store <4 x i16> %res, ptr addrspace(1) %out + %res = call <4 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4bf16.v4bf16.v4bf16.v4bf16(<4 x bfloat> %A, <4 x bfloat> %B, <4 x bfloat> , i1 0) + store <4 x bfloat> %res, ptr addrspace(1) %out ret void } -define amdgpu_ps void 
@test_wmma_bf16_16x16x16_bf16_imm_non_inlineable(<4 x i16> %A, <4 x i16> %B, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm_non_inlineable(<4 x bfloat> %A, <4 x bfloat> %B, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_bf16_16x16x16_bf16_imm_non_inlineable: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_mov_b32_e32 v6, 0x3fc03fc0 @@ -124,8 +124,8 @@ define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16_imm_non_inlineable(<4 x i16> ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <4 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4i16.v4i16.v4i16.v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> , i1 0) - store <4 x i16> %res, ptr addrspace(1) %out + %res = call <4 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4bf16.v4bf16.v4bf16.v4bf16(<4 x bfloat> %A, <4 x bfloat> %B, <4 x bfloat> , i1 0) + store <4 x bfloat> %res, ptr addrspace(1) %out ret void } @@ -361,9 +361,9 @@ bb: } declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f32.v4f16.v4f16.v4f32(<4 x half>, <4 x half>, <4 x float>) -declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4i16.v4i16.v4f32(<4 x i16>, <4 x i16>, <4 x float>) +declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4bf16.v4bf16.v4f32(<4 x bfloat>, <4 x bfloat>, <4 x float>) declare <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v4f16.v4f16.v4f16.v4f16(<4 x half>, <4 x half>, <4 x half>, i1 immarg) -declare <4 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4i16.v4i16.v4i16.v4i16(<4 x i16>, <4 x i16>, <4 x i16>, i1 immarg) +declare <4 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4bf16.v4bf16.v4bf16.v4bf16(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i1 immarg) declare <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8.v4i32.i32.i32.v4i32(i1 immarg, i32, i1 immarg, i32, <4 x i32>, i1 immarg) declare <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4.v4i32.i32.i32.v4i32(i1 immarg, i32, i1 immarg, i32, <4 x i32>, i1 immarg) declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.v4f32.i32.i32.v4f32(i32, i32, <4 x float>) diff --git a/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w64-swmmac-index_key.ll b/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w64-swmmac-index_key.ll index f1bbfebdf5cafa..c45fbb53224bc0 100644 --- a/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w64-swmmac-index_key.ll +++ b/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w64-swmmac-index_key.ll @@ -47,7 +47,7 @@ bb: ret void } -define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16_index_key(<4 x i16> %A, <8 x i16> %B, <4 x float> %C, ptr addrspace(1) %IndexVecPtr, ptr addrspace(1) %out0, ptr addrspace(1) %out1, ptr addrspace(1) %out2, ptr addrspace(1) %out3) { +define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16_index_key(<4 x bfloat> %A, <8 x bfloat> %B, <4 x float> %C, ptr addrspace(1) %IndexVecPtr, ptr addrspace(1) %out0, ptr addrspace(1) %out1, ptr addrspace(1) %out2, ptr addrspace(1) %out3) { ; GFX12-LABEL: test_swmmac_f32_16x16x32_bf16_index_key: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: global_load_b32 v10, v[10:11], off @@ -79,16 +79,16 @@ define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16_index_key(<4 x i16> %A, <8 bb: %IndexVec = load <4 x i8>, ptr addrspace(1) %IndexVecPtr, align 4 %Index0 = extractelement <4 x i8> %IndexVec, i32 0 - %res0 = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4i16.v8i16.v4f32.i8(<4 x i16> %A, <8 x i16> %B, <4 x float> %C, i8 %Index0) + %res0 = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4bf16.v8bf16.v4f32.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x float> %C, i8 %Index0) store <4 x float> 
%res0, ptr addrspace(1) %out0 %Index1 = extractelement <4 x i8> %IndexVec, i32 1 - %res1 = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4i16.v8i16.v4f32.i8(<4 x i16> %A, <8 x i16> %B, <4 x float> %C, i8 %Index1) + %res1 = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4bf16.v8bf16.v4f32.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x float> %C, i8 %Index1) store <4 x float> %res1, ptr addrspace(1) %out1 %Index2 = extractelement <4 x i8> %IndexVec, i32 2 - %res2 = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4i16.v8i16.v4f32.i8(<4 x i16> %A, <8 x i16> %B, <4 x float> %C, i8 %Index2) + %res2 = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4bf16.v8bf16.v4f32.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x float> %C, i8 %Index2) store <4 x float> %res2, ptr addrspace(1) %out2 %Index3 = extractelement <4 x i8> %IndexVec, i32 3 - %res3 = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4i16.v8i16.v4f32.i8(<4 x i16> %A, <8 x i16> %B, <4 x float> %C, i8 %Index3) + %res3 = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4bf16.v8bf16.v4f32.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x float> %C, i8 %Index3) store <4 x float> %res3, ptr addrspace(1) %out3 ret void } @@ -133,7 +133,7 @@ bb: ret void } -define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16_index_key(<4 x i16> %A, <8 x i16> %B, <4 x i16> %C, ptr addrspace(1) %IndexVecPtr, ptr addrspace(1) %out0, ptr addrspace(1) %out1, ptr addrspace(1) %out2, ptr addrspace(1) %out3) { +define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16_index_key(<4 x bfloat> %A, <8 x bfloat> %B, <4 x bfloat> %C, ptr addrspace(1) %IndexVecPtr, ptr addrspace(1) %out0, ptr addrspace(1) %out1, ptr addrspace(1) %out2, ptr addrspace(1) %out3) { ; GFX12-LABEL: test_swmmac_bf16_16x16x32_bf16_index_key: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: global_load_b32 v22, v[8:9], off @@ -159,17 +159,17 @@ define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16_index_key(<4 x i16> %A, <8 bb: %IndexVec = load <4 x i8>, ptr addrspace(1) %IndexVecPtr, align 4 %Index0 = extractelement <4 x i8> %IndexVec, i32 0 - %res0 = call <4 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4i16.v4i16.v8i16.v4i16.i8(<4 x i16> %A, <8 x i16> %B, <4 x i16> %C, i8 %Index0) - store <4 x i16> %res0, ptr addrspace(1) %out0 + %res0 = call <4 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4bf16.v4bf16.v8bf16.v4bf16.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x bfloat> %C, i8 %Index0) + store <4 x bfloat> %res0, ptr addrspace(1) %out0 %Index1 = extractelement <4 x i8> %IndexVec, i32 1 - %res1 = call <4 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4i16.v4i16.v8i16.v4i16.i8(<4 x i16> %A, <8 x i16> %B, <4 x i16> %C, i8 %Index1) - store <4 x i16> %res1, ptr addrspace(1) %out1 + %res1 = call <4 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4bf16.v4bf16.v8bf16.v4bf16.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x bfloat> %C, i8 %Index1) + store <4 x bfloat> %res1, ptr addrspace(1) %out1 %Index2 = extractelement <4 x i8> %IndexVec, i32 2 - %res2 = call <4 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4i16.v4i16.v8i16.v4i16.i8(<4 x i16> %A, <8 x i16> %B, <4 x i16> %C, i8 %Index2) - store <4 x i16> %res2, ptr addrspace(1) %out2 + %res2 = call <4 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4bf16.v4bf16.v8bf16.v4bf16.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x bfloat> %C, i8 %Index2) + store <4 x bfloat> %res2, ptr addrspace(1) %out2 %Index3 = extractelement <4 x i8> %IndexVec, i32 3 - %res3 = call <4 x i16> 
@llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4i16.v4i16.v8i16.v4i16.i8(<4 x i16> %A, <8 x i16> %B, <4 x i16> %C, i8 %Index3) - store <4 x i16> %res3, ptr addrspace(1) %out3 + %res3 = call <4 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4bf16.v4bf16.v8bf16.v4bf16.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x bfloat> %C, i8 %Index3) + store <4 x bfloat> %res3, ptr addrspace(1) %out3 ret void } @@ -460,9 +460,9 @@ bb: } declare <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.f16.v4f32.v4f16.v8f16.v4f32.i8(<4 x half>, <8 x half>, <4 x float>, i8) -declare <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4i16.v8i16.v4f32.i8(<4 x i16>, <8 x i16>, <4 x float>, i8) +declare <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4bf16.v8bf16.v4f32.i8(<4 x bfloat>, <8 x bfloat>, <4 x float>, i8) declare <4 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v4f16.v4f16.v8f16.v4f16.i8(<4 x half>, <8 x half>, <4 x half>, i8) -declare <4 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4i16.v4i16.v8i16.v4i16.i8(<4 x i16>, <8 x i16>, <4 x i16>, i8) +declare <4 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4bf16.v4bf16.v8bf16.v4bf16.i8(<4 x bfloat>, <8 x bfloat>, <4 x bfloat>, i8) declare <4 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu8.v4i32.i32.v2i32.v4i32.i8(i1 immarg, i32, i1 immarg, <2 x i32>, <4 x i32>, i8 %Index, i1 immarg) declare <4 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu4.v4i32.i32.i32.v4i32.i16(i1 immarg, i32, i1 immarg, i32, <4 x i32>, i16 %Index, i1 immarg) declare <4 x i32> @llvm.amdgcn.swmmac.i32.16x16x64.iu4.v4i32.i32.v2i32.v4i32.i16(i1 immarg, i32, i1 immarg, <2 x i32>, <4 x i32>, i16 %Index, i1 immarg) diff --git a/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w64.ll b/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w64.ll index b25f2785133310..b4685798d5a0f5 100644 --- a/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w64.ll +++ b/llvm/test/CodeGen/AMDGPU/wmma-gfx12-w64.ll @@ -15,7 +15,7 @@ bb: ret void } -define amdgpu_ps void @test_wmma_f32_16x16x16_bf16(<4 x i16> %A, <4 x i16> %B, <4 x float> %C, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_f32_16x16x16_bf16(<4 x bfloat> %A, <4 x bfloat> %B, <4 x float> %C, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_f32_16x16x16_bf16: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[4:7], v[0:1], v[2:3], v[4:7] @@ -24,7 +24,7 @@ define amdgpu_ps void @test_wmma_f32_16x16x16_bf16(<4 x i16> %A, <4 x i16> %B, < ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4i16.v4i16.v4f32(<4 x i16> %A, <4 x i16> %B, <4 x float> %C) + %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4bf16.v4bf16.v4f32(<4 x bfloat> %A, <4 x bfloat> %B, <4 x float> %C) store <4 x float> %res, ptr addrspace(1) %out ret void } @@ -43,7 +43,7 @@ bb: ret void } -define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, ptr addrspace(1) %out) { +define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16(<4 x bfloat> %A, <4 x bfloat> %B, <4 x bfloat> %C, ptr addrspace(1) %out) { ; GFX12-LABEL: test_wmma_bf16_16x16x16_bf16: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_wmma_bf16_16x16x16_bf16 v[4:5], v[0:1], v[2:3], v[4:5] @@ -52,8 +52,8 @@ define amdgpu_ps void @test_wmma_bf16_16x16x16_bf16(<4 x i16> %A, <4 x i16> %B, ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <4 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4i16.v4i16.v4i16.v4i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, i1 0) - store <4 x i16> %res, ptr 
addrspace(1) %out + %res = call <4 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4bf16.v4bf16.v4bf16.v4bf16(<4 x bfloat> %A, <4 x bfloat> %B, <4 x bfloat> %C, i1 0) + store <4 x bfloat> %res, ptr addrspace(1) %out ret void } @@ -169,7 +169,7 @@ bb: ret void } -define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16(<4 x i16> %A, <8 x i16> %B, <4 x float> %C, i8 %Index, ptr addrspace(1) %out) { +define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16(<4 x bfloat> %A, <8 x bfloat> %B, <4 x float> %C, i8 %Index, ptr addrspace(1) %out) { ; GFX12-LABEL: test_swmmac_f32_16x16x32_bf16: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_swmmac_f32_16x16x32_bf16 v[6:9], v[0:1], v[2:5], v10 @@ -178,7 +178,7 @@ define amdgpu_ps void @test_swmmac_f32_16x16x32_bf16(<4 x i16> %A, <8 x i16> %B, ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4i16.v8i16.v4f32.i8(<4 x i16> %A, <8 x i16> %B, <4 x float> %C, i8 %Index) + %res = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4bf16.v8bf16.v4f32.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x float> %C, i8 %Index) store <4 x float> %res, ptr addrspace(1) %out ret void } @@ -197,7 +197,7 @@ bb: ret void } -define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16(<4 x i16> %A, <8 x i16> %B, <4 x i16> %C, i8 %Index, ptr addrspace(1) %out) { +define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16(<4 x bfloat> %A, <8 x bfloat> %B, <4 x bfloat> %C, i8 %Index, ptr addrspace(1) %out) { ; GFX12-LABEL: test_swmmac_bf16_16x16x32_bf16: ; GFX12: ; %bb.0: ; %bb ; GFX12-NEXT: v_swmmac_bf16_16x16x32_bf16 v[6:7], v[0:1], v[2:5], v8 @@ -206,8 +206,8 @@ define amdgpu_ps void @test_swmmac_bf16_16x16x32_bf16(<4 x i16> %A, <8 x i16> %B ; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) ; GFX12-NEXT: s_endpgm bb: - %res = call <4 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4i16.v4i16.v8i16.v4i16.i8(<4 x i16> %A, <8 x i16> %B, <4 x i16> %C, i8 %Index) - store <4 x i16> %res, ptr addrspace(1) %out + %res = call <4 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4bf16.v4bf16.v8bf16.v4bf16.i8(<4 x bfloat> %A, <8 x bfloat> %B, <4 x bfloat> %C, i8 %Index) + store <4 x bfloat> %res, ptr addrspace(1) %out ret void } @@ -310,9 +310,9 @@ bb: } declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f32.v4f16.v4f16.v4f32(<4 x half>, <4 x half>, <4 x float>) -declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4i16.v4i16.v4f32(<4 x i16>, <4 x i16>, <4 x float>) +declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4f32.v4bf16.v4bf16.v4f32(<4 x bfloat>, <4 x bfloat>, <4 x float>) declare <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v4f16.v4f16.v4f16.v4f16(<4 x half>, <4 x half>, <4 x half>, i1 immarg) -declare <4 x i16> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4i16.v4i16.v4i16.v4i16(<4 x i16>, <4 x i16>, <4 x i16>, i1 immarg) +declare <4 x bfloat> @llvm.amdgcn.wmma.bf16.16x16x16.bf16.v4bf16.v4bf16.v4bf16.v4bf16(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>, i1 immarg) declare <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu8.v4i32.i32.i32.v4i32(i1 immarg, i32, i1 immarg, i32, <4 x i32>, i1 immarg) declare <4 x i32> @llvm.amdgcn.wmma.i32.16x16x16.iu4.v4i32.i32.i32.v4i32(i1 immarg, i32, i1 immarg, i32, <4 x i32>, i1 immarg) declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.v4f32.i32.i32.v4f32(i32, i32, <4 x float>) @@ -321,9 +321,9 @@ declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.fp8.v4f32.i32.i32.v4f32(i declare <4 x float> 
@llvm.amdgcn.wmma.f32.16x16x16.bf8.bf8.v4f32.i32.i32.v4f32(i32, i32, <4 x float>) declare <4 x i32> @llvm.amdgcn.wmma.i32.16x16x32.iu4.v4i32.i32.i32.v4i32(i1 immarg, i32, i1 immarg, i32, <4 x i32>, i1 immarg) declare <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.f16.v4f32.v4f16.v8f16.v4f32.i8(<4 x half>, <8 x half>, <4 x float>, i8) -declare <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4i16.v8i16.v4f32.i8(<4 x i16>, <8 x i16>, <4 x float>, i8) +declare <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.bf16.v4f32.v4bf16.v8bf16.v4f32.i8(<4 x bfloat>, <8 x bfloat>, <4 x float>, i8) declare <4 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v4f16.v4f16.v8f16.v4f16.i8(<4 x half>, <8 x half>, <4 x half>, i8) -declare <4 x i16> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4i16.v4i16.v8i16.v4i16.i8(<4 x i16>, <8 x i16>, <4 x i16>, i8) +declare <4 x bfloat> @llvm.amdgcn.swmmac.bf16.16x16x32.bf16.v4bf16.v4bf16.v8bf16.v4bf16.i8(<4 x bfloat>, <8 x bfloat>, <4 x bfloat>, i8) declare <4 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu8.v4i32.i32.v2i32.v4i32.i8(i1 immarg, i32, i1 immarg, <2 x i32>, <4 x i32>, i8 %Index, i1 immarg) declare <4 x i32> @llvm.amdgcn.swmmac.i32.16x16x32.iu4.v4i32.i32.i32.v4i32.i16(i1 immarg, i32, i1 immarg, i32, <4 x i32>, i16 %Index, i1 immarg) declare <4 x i32> @llvm.amdgcn.swmmac.i32.16x16x64.iu4.v4i32.i32.v2i32.v4i32.i16(i1 immarg, i32, i1 immarg, <2 x i32>, <4 x i32>, i16 %Index, i1 immarg)