; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+amx-tile,+amx-bf16,+avx512f, \
; RUN: -mattr=+amx-complex \
; RUN: -verify-machineinstrs | FileCheck %s

; Codegen test for the AMX-COMPLEX internal intrinsics: loads one 8x8 tile,
; zeroes two more, runs tcmmimfp16ps / tcmmrlfp16ps (complex FP16 matrix
; multiply, imaginary / real parts), and stores the result. The CHECK lines
; verify tile configuration (ldtilecfg), the tile instructions themselves,
; and proper teardown (tilerelease + vzeroupper).
define void @test_amx(ptr %pointer, ptr %base, i64 %stride) {
; CHECK-LABEL: test_amx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovups %zmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movb $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    ldtilecfg -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movw $8, %ax
; CHECK-NEXT:    tileloadd (%rsi,%rdx), %tmm0
; CHECK-NEXT:    tilezero %tmm1
; CHECK-NEXT:    tilezero %tmm2
; CHECK-NEXT:    tcmmimfp16ps %tmm1, %tmm0, %tmm2
; CHECK-NEXT:    tcmmrlfp16ps %tmm1, %tmm0, %tmm2
; CHECK-NEXT:    tilestored %tmm2, (%rdi,%rdx)
; CHECK-NEXT:    tilerelease
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq

  ; %a: 8x8 tile loaded from %base with row stride %stride.
  %a = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 8, ptr %base, i64 %stride)
  ; %b, %c: zero-initialized 8x8 tiles (accumulator inputs).
  %b = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 8)
  %c = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 8)

  ; Complex FP16 matrix multiply: imaginary part into %c1, then real part
  ; chained on %c1 into %c2 (both accumulate dst += f(a, b)).
  %c1 = call x86_amx @llvm.x86.tcmmimfp16ps.internal(i16 8, i16 8, i16 8, x86_amx %c, x86_amx %a, x86_amx %b)
  %c2 = call x86_amx @llvm.x86.tcmmrlfp16ps.internal(i16 8, i16 8, i16 8, x86_amx %c1, x86_amx %a, x86_amx %b)

  ; Store the final accumulator tile to %pointer.
  call void @llvm.x86.tilestored64.internal(i16 8, i16 8, ptr %pointer, i64 %stride, x86_amx %c2)
  ret void
}

declare x86_amx @llvm.x86.tilezero.internal(i16, i16)
declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, ptr, i64)
declare x86_amx @llvm.x86.tileloaddt164.internal(i16, i16, ptr, i64)
declare void @llvm.x86.tilestored64.internal(i16, i16, ptr, i64, x86_amx)

declare x86_amx @llvm.x86.tcmmimfp16ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
declare x86_amx @llvm.x86.tcmmrlfp16ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)