; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvbb \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK

declare <vscale x 1 x i8> @llvm.riscv.vbrev.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vbrev_vs_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vbrev.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    iXLen %1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vbrev.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vbrev_mask_vs_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vbrev.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vbrev.mask.nxv1i8(
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vbrev.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vbrev_vs_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vbrev.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    iXLen %1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vbrev.mask.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vbrev_mask_vs_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vbrev.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vbrev.mask.nxv2i8(
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vbrev.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vbrev_vs_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vbrev.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    iXLen %1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vbrev.mask.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vbrev_mask_vs_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vbrev.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vbrev.mask.nxv4i8(
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vbrev.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vbrev_vs_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vbrev.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    iXLen %1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vbrev.mask.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vbrev_mask_vs_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vbrev.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vbrev.mask.nxv8i8(
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vbrev.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vbrev_vs_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vbrev.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    iXLen %1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vbrev.mask.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vbrev_mask_vs_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vbrev.v v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vbrev.mask.nxv16i8(
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vbrev.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vbrev_vs_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vbrev.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    iXLen %1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vbrev.mask.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vbrev_mask_vs_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vbrev.v v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vbrev.mask.nxv32i8(
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vbrev.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vbrev_vs_nxv64i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vbrev.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    iXLen %1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vbrev.mask.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vbrev_mask_vs_nxv64i8(<vscale x 64 x i1> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vbrev.v v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vbrev.mask.nxv64i8(
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vbrev.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vbrev_vs_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vbrev.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vbrev.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vbrev_mask_vs_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vbrev.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vbrev.mask.nxv1i16(
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vbrev.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vbrev_vs_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vbrev.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vbrev.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vbrev_mask_vs_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vbrev.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vbrev.mask.nxv2i16(
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vbrev.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vbrev_vs_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vbrev.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vbrev.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vbrev_mask_vs_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vbrev.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vbrev.mask.nxv4i16(
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vbrev.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vbrev_vs_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vbrev.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vbrev.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vbrev_mask_vs_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vbrev.v v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vbrev.mask.nxv8i16(
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vbrev.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vbrev_vs_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vbrev.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vbrev.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vbrev_mask_vs_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vbrev.v v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vbrev.mask.nxv16i16(
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vbrev.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vbrev_vs_nxv32i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vbrev.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vbrev.mask.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vbrev_mask_vs_nxv32i16(<vscale x 32 x i1> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vbrev.v v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vbrev.mask.nxv32i16(
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vbrev.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vbrev_vs_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vbrev.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vbrev.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vbrev_mask_vs_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vbrev.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vbrev.mask.nxv1i32(
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vbrev.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vbrev_vs_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vbrev.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vbrev.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vbrev_mask_vs_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vbrev.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vbrev.mask.nxv2i32(
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vbrev.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vbrev_vs_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vbrev.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vbrev.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vbrev_mask_vs_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vbrev.v v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vbrev.mask.nxv4i32(
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vbrev.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vbrev_vs_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vbrev.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vbrev.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vbrev_mask_vs_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vbrev.v v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vbrev.mask.nxv8i32(
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vbrev.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vbrev_vs_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vbrev.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vbrev.mask.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vbrev_mask_vs_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vbrev.v v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vbrev.mask.nxv16i32(
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vbrev.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vbrev_vs_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vbrev.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vbrev.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vbrev_mask_vs_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vbrev.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vbrev.mask.nxv1i64(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vbrev.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vbrev_vs_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vbrev.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vbrev.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vbrev_mask_vs_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vbrev.v v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vbrev.mask.nxv2i64(
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vbrev.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vbrev_vs_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vbrev.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vbrev.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vbrev_mask_vs_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vbrev.v v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vbrev.mask.nxv4i64(
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vbrev.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vbrev_vs_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vbrev.v v8, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vbrev.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vbrev.mask.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vbrev_mask_vs_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vbrev.v v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vbrev.mask.nxv8i64(
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %0,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}