; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve -asm-verbose=0 < %s | FileCheck %s

;
; Masked Stores
;

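; Each test truncates the source vector and masked-stores the narrow result;
; the CHECK lines expect this to lower to a single SVE truncating store
; (st1b/st1h/st1w) of the wider input register.
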
define void @masked_trunc_store_nxv2i8(ptr %a, <vscale x 2 x i64> %val, ptr %b, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_trunc_store_nxv2i8:
; CHECK-NEXT: st1b { z0.d }, p0, [x1]
; CHECK-NEXT: ret
  %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i8>
  call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> %trunc, ptr %b, i32 8, <vscale x 2 x i1> %mask)
  ret void
}

define void @masked_trunc_store_nxv2i16(ptr %a, <vscale x 2 x i64> %val, ptr %b, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_trunc_store_nxv2i16:
; CHECK-NEXT: st1h { z0.d }, p0, [x1]
; CHECK-NEXT: ret
  %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i16>
  call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> %trunc, ptr %b, i32 8, <vscale x 2 x i1> %mask)
  ret void
}

define void @masked_trunc_store_nxv2i32(ptr %a, <vscale x 2 x i64> %val, ptr %b, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_trunc_store_nxv2i32:
; CHECK-NEXT: st1w { z0.d }, p0, [x1]
; CHECK-NEXT: ret
  %trunc = trunc <vscale x 2 x i64> %val to <vscale x 2 x i32>
  call void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32> %trunc, ptr %b, i32 8, <vscale x 2 x i1> %mask)
  ret void
}

define void @masked_trunc_store_nxv4i8(ptr %a, <vscale x 4 x i32> %val, ptr %b, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_trunc_store_nxv4i8:
; CHECK-NEXT: st1b { z0.s }, p0, [x1]
; CHECK-NEXT: ret
  %trunc = trunc <vscale x 4 x i32> %val to <vscale x 4 x i8>
  call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> %trunc, ptr %b, i32 4, <vscale x 4 x i1> %mask)
  ret void
}

define void @masked_trunc_store_nxv4i16(ptr %a, <vscale x 4 x i32> %val, ptr %b, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_trunc_store_nxv4i16:
; CHECK-NEXT: st1h { z0.s }, p0, [x1]
; CHECK-NEXT: ret
  %trunc = trunc <vscale x 4 x i32> %val to <vscale x 4 x i16>
  call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> %trunc, ptr %b, i32 4, <vscale x 4 x i1> %mask)
  ret void
}

define void @masked_trunc_store_nxv8i8(ptr %a, <vscale x 8 x i16> %val, ptr %b, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_trunc_store_nxv8i8:
; CHECK-NEXT: st1b { z0.h }, p0, [x1]
; CHECK-NEXT: ret
  %trunc = trunc <vscale x 8 x i16> %val to <vscale x 8 x i8>
  call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> %trunc, ptr %b, i32 2, <vscale x 8 x i1> %mask)
  ret void
}

declare void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8>, ptr, i32, <vscale x 2 x i1>)
declare void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16>, ptr, i32, <vscale x 2 x i1>)
declare void @llvm.masked.store.nxv2i32.p0(<vscale x 2 x i32>, ptr, i32, <vscale x 2 x i1>)
declare void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8>, ptr, i32, <vscale x 4 x i1>)
declare void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16>, ptr, i32, <vscale x 4 x i1>)
declare void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8>, ptr, i32, <vscale x 8 x i1>)