; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s

; Prohibit pointer cast for amx.
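; An x86_amx value lives in an AMX tile register and cannot be the operand of
; a regular load or store instruction; it only reaches memory through the
; tileload/tilestore intrinsics. So InstCombine must keep the vector load and
; the bitcast here instead of folding them into a direct load of x86_amx
; through a pointer cast.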
define dso_local void @test_amx_load_store(ptr %src, ptr %dst) {
; CHECK-LABEL: @test_amx_load_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VEC:%.*]] = load <256 x i32>, ptr [[SRC:%.*]], align 64
; CHECK-NEXT:    [[BC:%.*]] = bitcast <256 x i32> [[VEC]] to x86_amx
; CHECK-NEXT:    tail call void @llvm.x86.tilestored64.internal(i16 16, i16 16, ptr [[DST:%.*]], i64 64, x86_amx [[BC]])
; CHECK-NEXT:    ret void
;
entry:
  %vec = load <256 x i32>, ptr %src, align 64
  %bc = bitcast <256 x i32> %vec to x86_amx
  tail call void @llvm.x86.tilestored64.internal(i16 16, i16 16, ptr %dst, i64 64, x86_amx %bc)
  ret void
}

; Prohibit pointer cast for amx.
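; Same check in the store direction: the x86_amx-to-vector bitcast must stay,
; rather than being folded with the store into a store of x86_amx via a
; pointer cast.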
define dso_local void @test_amx_load_store2(ptr %dst, ptr %src) {
; CHECK-LABEL: @test_amx_load_store2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[AMX:%.*]] = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 16, ptr [[SRC:%.*]], i64 64)
; CHECK-NEXT:    [[BC:%.*]] = bitcast x86_amx [[AMX]] to <256 x i32>
; CHECK-NEXT:    store <256 x i32> [[BC]], ptr [[DST:%.*]], align 1024
; CHECK-NEXT:    ret void
;
entry:
  %amx = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 16, ptr %src, i64 64)
  %bc = bitcast x86_amx %amx to <256 x i32>
  store <256 x i32> %bc, ptr %dst
  ret void
}

declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, ptr, i64)
declare void @llvm.x86.tilestored64.internal(i16, i16, ptr, i64, x86_amx)