xref: /llvm-project/llvm/test/CodeGen/X86/AMX/amx-sched.ll (revision 2f448bf509432c1a19ec46ab8cbc7353c03c6280)
1; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+amx-int8 -mattr=+avx512f -mcpu=skx -verify-machineinstrs | FileCheck %s
2
define <256 x i32> @test_shape_sched(i16 %m, i16 %n, i16 %k, <256 x i32> %c, <256 x i32> %a, <256 x i32> %b) nounwind {
; Just to make sure shape def is not scheduled across ldtilecfg.
; The shape operands (%m, %n, %k) arrive in registers; if a shape-defining
; instruction (e.g. a movw into a tile-config field) were scheduled after
; ldtilecfg, the tile configuration would be stale. Hence CHECK-NOT: movw
; after the ldtilecfg match point.
; CHECK-LABEL: test_shape_sched:
; CHECK:    ldtilecfg
; CHECK-NOT: movw
  ; Reinterpret the flat <256 x i32> vectors as AMX tile values; bitcast
  ; performs no data movement at the IR level.
  %c1 = bitcast <256 x i32> %c to x86_amx
  %a1 = bitcast <256 x i32> %a to x86_amx
  %b1 = bitcast <256 x i32> %b to x86_amx
  ; tdpbssd: signed-byte x signed-byte dot-product accumulate,
  ; t = c1 + a1 * b1 with tile shapes (m x n), (m x k), (k x n).
  %t = call x86_amx @llvm.x86.tdpbssd.internal(i16 %m, i16 %n, i16 %k, x86_amx %c1, x86_amx %a1, x86_amx %b1)
  ; Convert the result tile back to a plain vector for the return value.
  %res = bitcast x86_amx %t to <256 x i32>
  ret <256 x i32> %res
}
15
define <256 x i32> @test_shape_sched2(i16 %m, i16 %n, i16 %k, ptr %c, ptr %a, ptr %b) nounwind {
; Just to make sure shape def is not scheduled across ldtilecfg.
; CHECK-LABEL: test_shape_sched2:
; CHECK:    ldtilecfg
; CHECK-NOT: movw
  ; %aa is a shape value computed from %k. It is deliberately defined here,
  ; before the tile loads, so the test can verify the scheduler does not sink
  ; this shape definition past the ldtilecfg that consumes it.
  %aa = lshr i16 %k, 2
  ; Load the three operand tiles from memory with a 64-byte stride.
  %c1 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %m, i16 %n, ptr %c, i64 64)
  %a1 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %m, i16 %k, ptr %a, i64 64)
  %b1 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %aa, i16 %n, ptr %b, i64 64)
  ; Accumulating tile dot-product: t = c1 + a1 * b1.
  %t = call x86_amx @llvm.x86.tdpbssd.internal(i16 %m, i16 %n, i16 %k, x86_amx %c1, x86_amx %a1, x86_amx %b1)
  ; Return the result tile as a plain vector.
  %res = bitcast x86_amx %t to <256 x i32>
  ret <256 x i32> %res
}
29
; AMX intrinsic declarations used above.
; tileloadd64: load a tile of shape (rows, cols) from (base ptr, stride).
declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, ptr, i64)
; tdpbssd: signed-byte dot-product accumulate; operands are
; (m, n, k, acc tile, lhs tile, rhs tile).
declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
32