; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+amx-tile,+avx512f, \
; RUN: -mattr=+amx-tf32,+amx-transpose -verify-machineinstrs | FileCheck %s

; Lowering test for the AMX-TF32 matrix-multiply intrinsics: verifies that
; llvm.x86.tmmultf32ps.internal and llvm.x86.ttmmultf32ps.internal are
; selected to the tmmultf32ps / ttmmultf32ps tile instructions, with the
; tile configuration (ldtilecfg), load/zero/store, and tilerelease around
; them.

define void @test_amx(i8* %pointer, i8* %base, i64 %stride) {
; CHECK-LABEL: test_amx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovups %zmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    ldtilecfg -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, %ax
; CHECK-NEXT:    tileloadd (%rsi,%rdx), %tmm0
; CHECK-NEXT:    tilezero %tmm1
; CHECK-NEXT:    tilezero %tmm2
; CHECK-NEXT:    tmmultf32ps %tmm1, %tmm0, %tmm2
; CHECK-NEXT:    ttmmultf32ps %tmm1, %tmm0, %tmm2
; CHECK-NEXT:    tilestored %tmm2, (%rdi,%rdx)
; CHECK-NEXT:    tilerelease
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
  ; All tiles are 8x8 (rows = 8, colsb = 8); %a is loaded from memory,
  ; %b and %c start zeroed.
  %a = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 8, i8* %base, i64 %stride)
  %b = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 8)
  %c = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 8)

  ; Chain the plain and transposed TF32 multiplies so both survive into
  ; the stored result and must be emitted.
  %c1 = call x86_amx @llvm.x86.tmmultf32ps.internal(i16 8, i16 8, i16 8, x86_amx %c, x86_amx %a, x86_amx %b)
  %c2 = call x86_amx @llvm.x86.ttmmultf32ps.internal(i16 8, i16 8, i16 8, x86_amx %c1, x86_amx %a, x86_amx %b)

  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %pointer, i64 %stride, x86_amx %c2)
  ret void
}

declare x86_amx @llvm.x86.tilezero.internal(i16, i16)
declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64)
declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)


declare x86_amx @llvm.x86.tmmultf32ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
declare x86_amx @llvm.x86.ttmmultf32ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)