; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s

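; These tests check that the IRTranslator lowers a store of a scalable vector
; to a single G_STORE whose memory operand keeps the scalable type, on both
; RV32 and RV64, and that the stored value arrives in the expected register
; group ($v8 up to LMUL 1, $v8m2/$v8m4/$v8m8 for larger groups). The CHECK
; lines can be regenerated with llvm/utils/update_mir_test_checks.py.
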
define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx1i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx1i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 1 x i8> %b, ptr %pa
  ret void
}

define void @vstore_nx2i8(ptr %pa, <vscale x 2 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx2i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 2 x i8> %b, ptr %pa
  ret void
}

define void @vstore_nx4i8(ptr %pa, <vscale x 4 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx4i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 4 x i8> %b, ptr %pa
  ret void
}

define void @vstore_nx8i8(ptr %pa, <vscale x 8 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx8i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx8i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 8 x i8> %b, ptr %pa
  ret void
}

define void @vstore_nx16i8(ptr %pa, <vscale x 16 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx16i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx16i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 16 x i8> %b, ptr %pa
  ret void
}

define void @vstore_nx32i8(ptr %pa, <vscale x 32 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx32i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m4
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx32i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m4
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 32 x i8> %b, ptr %pa
  ret void
}

define void @vstore_nx64i8(ptr %pa, <vscale x 64 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx64i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m8
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx64i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m8
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 64 x i8> %b, ptr %pa
  ret void
}

define void @vstore_nx1i16(ptr %pa, <vscale x 1 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx1i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx1i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 1 x i16> %b, ptr %pa
  ret void
}

define void @vstore_nx2i16(ptr %pa, <vscale x 2 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx2i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 2 x i16> %b, ptr %pa
  ret void
}

define void @vstore_nx4i16(ptr %pa, <vscale x 4 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx4i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 4 x i16> %b, ptr %pa
  ret void
}

define void @vstore_nx8i16(ptr %pa, <vscale x 8 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx8i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx8i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 8 x i16> %b, ptr %pa
  ret void
}

define void @vstore_nx16i16(ptr %pa, <vscale x 16 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx16i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m4
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx16i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m4
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 16 x i16> %b, ptr %pa
  ret void
}

define void @vstore_nx32i16(ptr %pa, <vscale x 32 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx32i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m8
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx32i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m8
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 32 x i16> %b, ptr %pa
  ret void
}

define void @vstore_nx1i32(ptr %pa, <vscale x 1 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx1i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx1i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 1 x i32> %b, ptr %pa
  ret void
}

define void @vstore_nx2i32(ptr %pa, <vscale x 2 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx2i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 2 x i32> %b, ptr %pa
  ret void
}

define void @vstore_nx4i32(ptr %pa, <vscale x 4 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx4i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 4 x i32> %b, ptr %pa
  ret void
}

define void @vstore_nx8i32(ptr %pa, <vscale x 8 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx8i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m4
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx8i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m4
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 8 x i32> %b, ptr %pa
  ret void
}

define void @vstore_nx16i32(ptr %pa, <vscale x 16 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx16i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m8
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx16i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m8
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 16 x i32> %b, ptr %pa
  ret void
}

define void @vstore_nx1i64(ptr %pa, <vscale x 1 x i64> %b) {
  ; RV32-LABEL: name: vstore_nx1i64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx1i64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 1 x i64> %b, ptr %pa
  ret void
}

define void @vstore_nx2i64(ptr %pa, <vscale x 2 x i64> %b) {
  ; RV32-LABEL: name: vstore_nx2i64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 2 x i64> %b, ptr %pa
  ret void
}

define void @vstore_nx4i64(ptr %pa, <vscale x 4 x i64> %b) {
  ; RV32-LABEL: name: vstore_nx4i64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m4
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m4
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 4 x i64> %b, ptr %pa
  ret void
}

define void @vstore_nx8i64(ptr %pa, <vscale x 8 x i64> %b) {
  ; RV32-LABEL: name: vstore_nx8i64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m8
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx8i64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m8
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 8 x i64> %b, ptr %pa
  ret void
}

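; The following tests add explicit `align` attributes to the store. The
; alignment is carried through to the G_STORE's memory operand; when it
; happens to match the memory type's natural alignment (e.g. `align 16` on a
; <vscale x 16 x i8> store), the MIR printer omits the `align` annotation.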
define void @vstore_nx16i8_align1(ptr %pa, <vscale x 16 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx16i8_align1
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx16i8_align1
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 16 x i8> %b, ptr %pa, align 1
  ret void
}

define void @vstore_nx16i8_align2(ptr %pa, <vscale x 16 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx16i8_align2
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx16i8_align2
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 16 x i8> %b, ptr %pa, align 2
  ret void
}

define void @vstore_nx16i8_align16(ptr %pa, <vscale x 16 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx16i8_align16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx16i8_align16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 16 x i8> %b, ptr %pa, align 16
  ret void
}

define void @vstore_nx16i8_align64(ptr %pa, <vscale x 16 x i8> %b) {
  ; RV32-LABEL: name: vstore_nx16i8_align64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx16i8_align64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 16 x i8> %b, ptr %pa, align 64
  ret void
}

define void @vstore_nx4i16_align1(ptr %pa, <vscale x 4 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx4i16_align1
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 1)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i16_align1
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 1)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 4 x i16> %b, ptr %pa, align 1
  ret void
}

define void @vstore_nx4i16_align2(ptr %pa, <vscale x 4 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx4i16_align2
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i16_align2
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 4 x i16> %b, ptr %pa, align 2
  ret void
}

define void @vstore_nx4i16_align4(ptr %pa, <vscale x 4 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx4i16_align4
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i16_align4
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 4 x i16> %b, ptr %pa, align 4
  ret void
}

define void @vstore_nx4i16_align8(ptr %pa, <vscale x 4 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx4i16_align8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i16_align8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 4 x i16> %b, ptr %pa, align 8
  ret void
}

define void @vstore_nx4i16_align16(ptr %pa, <vscale x 4 x i16> %b) {
  ; RV32-LABEL: name: vstore_nx4i16_align16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx4i16_align16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 4 x i16> %b, ptr %pa, align 16
  ret void
}

define void @vstore_nx2i32_align2(ptr %pa, <vscale x 2 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx2i32_align2
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 2)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i32_align2
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 2)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 2 x i32> %b, ptr %pa, align 2
  ret void
}

define void @vstore_nx2i32_align4(ptr %pa, <vscale x 2 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx2i32_align4
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i32_align4
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 2 x i32> %b, ptr %pa, align 4
  ret void
}

define void @vstore_nx2i32_align8(ptr %pa, <vscale x 2 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx2i32_align8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i32_align8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 2 x i32> %b, ptr %pa, align 8
  ret void
}

define void @vstore_nx2i32_align16(ptr %pa, <vscale x 2 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx2i32_align16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i32_align16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 2 x i32> %b, ptr %pa, align 16
  ret void
}

define void @vstore_nx2i32_align256(ptr %pa, <vscale x 2 x i32> %b) {
  ; RV32-LABEL: name: vstore_nx2i32_align256
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i32_align256
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 2 x i32> %b, ptr %pa, align 256
  ret void
}

define void @vstore_nx2i64_align4(ptr %pa, <vscale x 2 x i64> %b) {
  ; RV32-LABEL: name: vstore_nx2i64_align4
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 4)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i64_align4
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 4)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 2 x i64> %b, ptr %pa, align 4
  ret void
}

define void @vstore_nx2i64_align8(ptr %pa, <vscale x 2 x i64> %b) {
  ; RV32-LABEL: name: vstore_nx2i64_align8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i64_align8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 2 x i64> %b, ptr %pa, align 8
  ret void
}

define void @vstore_nx2i64_align16(ptr %pa, <vscale x 2 x i64> %b) {
  ; RV32-LABEL: name: vstore_nx2i64_align16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i64_align16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 2 x i64> %b, ptr %pa, align 16
  ret void
}

define void @vstore_nx2i64_align32(ptr %pa, <vscale x 2 x i64> %b) {
  ; RV32-LABEL: name: vstore_nx2i64_align32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m2
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2i64_align32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 2 x i64> %b, ptr %pa, align 32
  ret void
}

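; Stores of pointer vectors translate the same way, with p0 as the element
; type; since p0 is XLEN-sized, the same IR can need different register
; groups on RV32 and RV64.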
define void @vstore_nx1ptr(ptr %pa, <vscale x 1 x ptr> %b) {
  ; RV32-LABEL: name: vstore_nx1ptr
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 1 x p0>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx1ptr
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $v8, $x10
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 1 x p0>) = COPY $v8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 1 x ptr> %b, ptr %pa
  ret void
}

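; On RV32 a <vscale x 2 x p0> value (2 x 32 bits per vscale) still fits in a
; single vector register, while on RV64 (2 x 64 bits per vscale) it takes the
; v8m2 group, so the RV32 and RV64 checks below diverge in their liveins and
; COPY source.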
define void @vstore_nx2ptr(ptr %pa, <vscale x 2 x ptr> %b) {
  ; RV32-LABEL: name: vstore_nx2ptr
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $v8, $x10
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $v8
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx2ptr
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m2
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $v8m2
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 2 x ptr> %b, ptr %pa
  ret void
}

define void @vstore_nx8ptr(ptr %pa, <vscale x 8 x ptr> %b) {
  ; RV32-LABEL: name: vstore_nx8ptr
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   liveins: $x10, $v8m4
  ; RV32-NEXT: {{  $}}
  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 8 x p0>) = COPY $v8m4
  ; RV32-NEXT:   G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
  ; RV32-NEXT:   PseudoRET
  ;
  ; RV64-LABEL: name: vstore_nx8ptr
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   liveins: $x10, $v8m8
  ; RV64-NEXT: {{  $}}
  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 8 x p0>) = COPY $v8m8
  ; RV64-NEXT:   G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
  ; RV64-NEXT:   PseudoRET
  store <vscale x 8 x ptr> %b, ptr %pa
  ret void
}