; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s

;
; STNT1B, STNT1W, STNT1H, STNT1D: vector base + scalar offset
;   e.g. stnt1b { z0.s }, p0, [z1.s, x0]
;

; STNT1B
; 32-bit elements truncated to i8, stored via STNT1B (.s form).
define void @stnt1b_s(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: stnt1b_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    stnt1b { z0.s }, p0, [z1.s, x0]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  call void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8> %data_trunc,
                                                                         <vscale x 4 x i1> %pg,
                                                                         <vscale x 4 x i32> %base,
                                                                         i64 %offset)
  ret void
}

; 64-bit elements truncated to i8, stored via STNT1B (.d form).
define void @stnt1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: stnt1b_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    stnt1b { z0.d }, p0, [z1.d, x0]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
  call void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8> %data_trunc,
                                                                         <vscale x 2 x i1> %pg,
                                                                         <vscale x 2 x i64> %base,
                                                                         i64 %offset)
  ret void
}

; STNT1H
; 32-bit elements truncated to i16, stored via STNT1H (.s form).
define void @stnt1h_s(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: stnt1h_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    stnt1h { z0.s }, p0, [z1.s, x0]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
  call void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16> %data_trunc,
                                                                          <vscale x 4 x i1> %pg,
                                                                          <vscale x 4 x i32> %base,
                                                                          i64 %offset)
  ret void
}

; 64-bit elements truncated to i16, stored via STNT1H (.d form).
define void @stnt1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: stnt1h_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    stnt1h { z0.d }, p0, [z1.d, x0]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
  call void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16> %data_trunc,
                                                                          <vscale x 2 x i1> %pg,
                                                                          <vscale x 2 x i64> %base,
                                                                          i64 %offset)
  ret void
}

; STNT1W
; Full-width 32-bit elements stored via STNT1W (.s form, no truncation).
define void @stnt1w_s(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: stnt1w_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    stnt1w { z0.s }, p0, [z1.s, x0]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i32> %data,
                                                                          <vscale x 4 x i1> %pg,
                                                                          <vscale x 4 x i32> %base,
                                                                          i64 %offset)
  ret void
}

; Floating-point (f32) data also selects STNT1W (.s form).
define void @stnt1w_f32_s(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 %offset) {
; CHECK-LABEL: stnt1w_f32_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    stnt1w { z0.s }, p0, [z1.s, x0]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x float> %data,
                                                                          <vscale x 4 x i1> %pg,
                                                                          <vscale x 4 x i32> %base,
                                                                          i64 %offset)
  ret void
}

; 64-bit elements truncated to i32, stored via STNT1W (.d form).
define void @stnt1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: stnt1w_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    stnt1w { z0.d }, p0, [z1.d, x0]
; CHECK-NEXT:    ret
  %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
  call void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32> %data_trunc,
                                                                          <vscale x 2 x i1> %pg,
                                                                          <vscale x 2 x i64> %base,
                                                                          i64 %offset)
  ret void
}

; STNT1D
; Full-width 64-bit elements stored via STNT1D (.d form, no truncation).
define void @stnt1d_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: stnt1d_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    stnt1d { z0.d }, p0, [z1.d, x0]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> %data,
                                                                          <vscale x 2 x i1> %pg,
                                                                          <vscale x 2 x i64> %base,
                                                                          i64 %offset)
  ret void
}

; Floating-point (f64) data also selects STNT1D (.d form).
define void @stnt1d_f64_d(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 %offset) {
; CHECK-LABEL: stnt1d_f64_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    stnt1d { z0.d }, p0, [z1.d, x0]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double> %data,
                                                                          <vscale x 2 x i1> %pg,
                                                                          <vscale x 2 x i64> %base,
                                                                          i64 %offset)
  ret void
}

; Intrinsic declarations for the non-temporal scatter stores exercised above.
; (The nxv2f32 variant is declared but currently unexercised.)

; STNT1B
declare void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)
declare void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)

; STNT1H
declare void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)
declare void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)

; STNT1W
declare void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)
declare void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)

declare void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x i32>, i64)

; STNT1D
declare void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2f32.nxv2i64(<vscale x 2 x float>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)
declare void @llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)