; xref: /llvm-project/llvm/test/CodeGen/AArch64/sve-nontemporal-masked-ldst.ll (revision 9fd2e2c2fd0dbd5d11a5899bd6bb4db0fd3f2c35)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

; Non-temporal masked load of a fixed 4 x i32 vector: the <4 x i1> mask is
; widened (ushll/shl/cmlt) and converted to an SVE predicate (ptrue + cmpne)
; so the load can be emitted as the SVE non-temporal ldnt1w.
define <4 x i32> @masked_load_v4i32(ptr %a, <4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
; CHECK-NEXT:    ptrue p0.s, vl4
; CHECK-NEXT:    shl v0.4s, v0.4s, #31
; CHECK-NEXT:    cmlt v0.4s, v0.4s, #0
; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #0
; CHECK-NEXT:    ldnt1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %load = call <4 x i32> @llvm.masked.load.v4i32(ptr %a, i32 1, <4 x i1> %mask, <4 x i32> undef), !nontemporal !0
  ret <4 x i32> %load
}

; Non-temporal masked store of a fixed 4 x i32 vector: as in the load test,
; the <4 x i1> mask is converted to an SVE predicate so the store can use the
; SVE non-temporal stnt1w.
define void @masked_store_v4i32(<4 x i32> %x, ptr %a, <4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushll v1.4s, v1.4h, #0
; CHECK-NEXT:    ptrue p0.s, vl4
; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT:    shl v1.4s, v1.4s, #31
; CHECK-NEXT:    cmlt v1.4s, v1.4s, #0
; CHECK-NEXT:    cmpne p0.s, p0/z, z1.s, #0
; CHECK-NEXT:    stnt1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  call void @llvm.masked.store.v4i32.p0(<4 x i32> %x, ptr %a, i32 1, <4 x i1> %mask), !nontemporal !0
  ret void
}

; Masked load with an all-active constant mask: the masked load is folded to a
; plain NEON ldr (note the !nontemporal hint is not honored in this lowering).
define <4 x i32> @load_v4i32(ptr %a) nounwind {
; CHECK-LABEL: load_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr q0, [x0]
; CHECK-NEXT:    ret
  %load = call <4 x i32> @llvm.masked.load.v4i32(ptr %a, i32 1, <4 x i1> <i1 1, i1 1, i1 1, i1 1>, <4 x i32> undef), !nontemporal !0
  ret <4 x i32> %load
}

; Masked store with an all-active constant mask: folded to an unmasked
; non-temporal pair store (stnp) of the two 64-bit halves.
define void @store_v4i32(<4 x i32> %x, ptr %a) nounwind {
; CHECK-LABEL: store_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov d1, v0.d[1]
; CHECK-NEXT:    stnp d0, d1, [x0]
; CHECK-NEXT:    ret
  call void @llvm.masked.store.v4i32.p0(<4 x i32> %x, ptr %a, i32 1, <4 x i1> <i1 1, i1 1, i1 1, i1 1>), !nontemporal !0
  ret void
}

; Scalable-vector non-temporal masked load: the incoming predicate (p0) is used
; directly, so the lowering is a single ldnt1w.
define <vscale x 4 x i32> @masked_load_nxv4i32(ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldnt1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef), !nontemporal !0
  ret <vscale x 4 x i32> %load
}

; Scalable-vector non-temporal masked store: the incoming predicate (p0) is
; used directly, so the lowering is a single stnt1w.
define void @masked_store_nxv4i32(<vscale x 4 x i32> %x, ptr %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_store_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    stnt1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> %x, ptr %a, i32 1, <vscale x 4 x i1> %mask), !nontemporal !0
  ret void
}

; Declarations of the masked load/store intrinsics exercised by the tests above.
declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32>, ptr, i32, <vscale x 4 x i1>)
declare <4 x i32> @llvm.masked.load.v4i32(ptr, i32, <4 x i1>, <4 x i32>)
declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>)

; Metadata node attached as !nontemporal to the loads/stores above.
!0 = !{i32 1}