; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkb \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK

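; This file exercises the llvm.riscv.vror intrinsics (Zvkb vector rotate
; right). Each element type/LMUL combination is covered by an unmasked and a
; masked variant; the iXLen placeholder is rewritten to i32 or i64 by the sed
; pipelines in the RUN lines above, so one source serves both RV32 and RV64.
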
declare <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vror_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vror_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vror_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vror_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vror_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vror_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vror_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vror_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vror_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vror_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vror.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vror_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vror_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vror.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vror_vv_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vror_mask_vv_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vror.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vror_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vror_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vror_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vror_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vror_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vror_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vror_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vror_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vror.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vror_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vror_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vror.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vror_vv_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vror_mask_vv_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vror.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vror_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vror_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vror_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vror_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vror_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vror_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vror.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vror_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vror_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vror.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vror_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vror_mask_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vror.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vror_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vror_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vror_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vror_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vror.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vror_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vror_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vror.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vror_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vv_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vror.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vror_mask_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vror.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

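; Below: vror.vx variants. The rotate amount is a single scalar (iXLen)
; operand broadcast to all elements, so it lands in a GPR (a0) rather than a
; second vector register.
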
1045declare <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8(
1046  <vscale x 1 x i8>,
1047  <vscale x 1 x i8>,
1048  iXLen,
1049  iXLen)
1050
1051define <vscale x 1 x i8> @intrinsic_vror_vx_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
1052; CHECK-LABEL: intrinsic_vror_vx_nxv1i8:
1053; CHECK:       # %bb.0: # %entry
1054; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
1055; CHECK-NEXT:    vror.vx v8, v8, a0
1056; CHECK-NEXT:    ret
1057entry:
1058  %a = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8(
1059    <vscale x 1 x i8> undef,
1060    <vscale x 1 x i8> %0,
1061    iXLen %1,
1062    iXLen %2)
1063
1064  ret <vscale x 1 x i8> %a
1065}
1066
1067declare <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8(
1068  <vscale x 1 x i8>,
1069  <vscale x 1 x i8>,
1070  iXLen,
1071  <vscale x 1 x i1>,
1072  iXLen,
1073  iXLen)
1074
1075define <vscale x 1 x i8> @intrinsic_vror_mask_vx_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1076; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i8:
1077; CHECK:       # %bb.0: # %entry
1078; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
1079; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
1080; CHECK-NEXT:    ret
1081entry:
1082  %a = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8(
1083    <vscale x 1 x i8> %0,
1084    <vscale x 1 x i8> %1,
1085    iXLen %2,
1086    <vscale x 1 x i1> %3,
1087    iXLen %4, iXLen 1)
1088
1089  ret <vscale x 1 x i8> %a
1090}
1091
1092declare <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8(
1093  <vscale x 2 x i8>,
1094  <vscale x 2 x i8>,
1095  iXLen,
1096  iXLen)
1097
1098define <vscale x 2 x i8> @intrinsic_vror_vx_nxv2i8(<vscale x 2 x i8> %0, iXLen %1, iXLen %2) nounwind {
1099; CHECK-LABEL: intrinsic_vror_vx_nxv2i8:
1100; CHECK:       # %bb.0: # %entry
1101; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
1102; CHECK-NEXT:    vror.vx v8, v8, a0
1103; CHECK-NEXT:    ret
1104entry:
1105  %a = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8(
1106    <vscale x 2 x i8> undef,
1107    <vscale x 2 x i8> %0,
1108    iXLen %1,
1109    iXLen %2)
1110
1111  ret <vscale x 2 x i8> %a
1112}
1113
1114declare <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8(
1115  <vscale x 2 x i8>,
1116  <vscale x 2 x i8>,
1117  iXLen,
1118  <vscale x 2 x i1>,
1119  iXLen,
1120  iXLen)
1121
1122define <vscale x 2 x i8> @intrinsic_vror_mask_vx_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1123; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i8:
1124; CHECK:       # %bb.0: # %entry
1125; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
1126; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
1127; CHECK-NEXT:    ret
1128entry:
1129  %a = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8(
1130    <vscale x 2 x i8> %0,
1131    <vscale x 2 x i8> %1,
1132    iXLen %2,
1133    <vscale x 2 x i1> %3,
1134    iXLen %4, iXLen 1)
1135
1136  ret <vscale x 2 x i8> %a
1137}
1138
1139declare <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8(
1140  <vscale x 4 x i8>,
1141  <vscale x 4 x i8>,
1142  iXLen,
1143  iXLen)
1144
1145define <vscale x 4 x i8> @intrinsic_vror_vx_nxv4i8(<vscale x 4 x i8> %0, iXLen %1, iXLen %2) nounwind {
1146; CHECK-LABEL: intrinsic_vror_vx_nxv4i8:
1147; CHECK:       # %bb.0: # %entry
1148; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
1149; CHECK-NEXT:    vror.vx v8, v8, a0
1150; CHECK-NEXT:    ret
1151entry:
1152  %a = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8(
1153    <vscale x 4 x i8> undef,
1154    <vscale x 4 x i8> %0,
1155    iXLen %1,
1156    iXLen %2)
1157
1158  ret <vscale x 4 x i8> %a
1159}
1160
1161declare <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8(
1162  <vscale x 4 x i8>,
1163  <vscale x 4 x i8>,
1164  iXLen,
1165  <vscale x 4 x i1>,
1166  iXLen,
1167  iXLen)
1168
1169define <vscale x 4 x i8> @intrinsic_vror_mask_vx_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1170; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i8:
1171; CHECK:       # %bb.0: # %entry
1172; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
1173; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
1174; CHECK-NEXT:    ret
1175entry:
1176  %a = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8(
1177    <vscale x 4 x i8> %0,
1178    <vscale x 4 x i8> %1,
1179    iXLen %2,
1180    <vscale x 4 x i1> %3,
1181    iXLen %4, iXLen 1)
1182
1183  ret <vscale x 4 x i8> %a
1184}
1185
1186declare <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8(
1187  <vscale x 8 x i8>,
1188  <vscale x 8 x i8>,
1189  iXLen,
1190  iXLen)
1191
1192define <vscale x 8 x i8> @intrinsic_vror_vx_nxv8i8(<vscale x 8 x i8> %0, iXLen %1, iXLen %2) nounwind {
1193; CHECK-LABEL: intrinsic_vror_vx_nxv8i8:
1194; CHECK:       # %bb.0: # %entry
1195; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
1196; CHECK-NEXT:    vror.vx v8, v8, a0
1197; CHECK-NEXT:    ret
1198entry:
1199  %a = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8(
1200    <vscale x 8 x i8> undef,
1201    <vscale x 8 x i8> %0,
1202    iXLen %1,
1203    iXLen %2)
1204
1205  ret <vscale x 8 x i8> %a
1206}
1207
1208declare <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8(
1209  <vscale x 8 x i8>,
1210  <vscale x 8 x i8>,
1211  iXLen,
1212  <vscale x 8 x i1>,
1213  iXLen,
1214  iXLen)
1215
1216define <vscale x 8 x i8> @intrinsic_vror_mask_vx_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1217; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i8:
1218; CHECK:       # %bb.0: # %entry
1219; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
1220; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
1221; CHECK-NEXT:    ret
1222entry:
1223  %a = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8(
1224    <vscale x 8 x i8> %0,
1225    <vscale x 8 x i8> %1,
1226    iXLen %2,
1227    <vscale x 8 x i1> %3,
1228    iXLen %4, iXLen 1)
1229
1230  ret <vscale x 8 x i8> %a
1231}
1232
1233declare <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8(
1234  <vscale x 16 x i8>,
1235  <vscale x 16 x i8>,
1236  iXLen,
1237  iXLen)
1238
1239define <vscale x 16 x i8> @intrinsic_vror_vx_nxv16i8(<vscale x 16 x i8> %0, iXLen %1, iXLen %2) nounwind {
1240; CHECK-LABEL: intrinsic_vror_vx_nxv16i8:
1241; CHECK:       # %bb.0: # %entry
1242; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
1243; CHECK-NEXT:    vror.vx v8, v8, a0
1244; CHECK-NEXT:    ret
1245entry:
1246  %a = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8(
1247    <vscale x 16 x i8> undef,
1248    <vscale x 16 x i8> %0,
1249    iXLen %1,
1250    iXLen %2)
1251
1252  ret <vscale x 16 x i8> %a
1253}
1254
1255declare <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8(
1256  <vscale x 16 x i8>,
1257  <vscale x 16 x i8>,
1258  iXLen,
1259  <vscale x 16 x i1>,
1260  iXLen,
1261  iXLen)
1262
1263define <vscale x 16 x i8> @intrinsic_vror_mask_vx_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1264; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i8:
1265; CHECK:       # %bb.0: # %entry
1266; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
1267; CHECK-NEXT:    vror.vx v8, v10, a0, v0.t
1268; CHECK-NEXT:    ret
1269entry:
1270  %a = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8(
1271    <vscale x 16 x i8> %0,
1272    <vscale x 16 x i8> %1,
1273    iXLen %2,
1274    <vscale x 16 x i1> %3,
1275    iXLen %4, iXLen 1)
1276
1277  ret <vscale x 16 x i8> %a
1278}
1279
1280declare <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8(
1281  <vscale x 32 x i8>,
1282  <vscale x 32 x i8>,
1283  iXLen,
1284  iXLen)
1285
1286define <vscale x 32 x i8> @intrinsic_vror_vx_nxv32i8(<vscale x 32 x i8> %0, iXLen %1, iXLen %2) nounwind {
1287; CHECK-LABEL: intrinsic_vror_vx_nxv32i8:
1288; CHECK:       # %bb.0: # %entry
1289; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
1290; CHECK-NEXT:    vror.vx v8, v8, a0
1291; CHECK-NEXT:    ret
1292entry:
1293  %a = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8(
1294    <vscale x 32 x i8> undef,
1295    <vscale x 32 x i8> %0,
1296    iXLen %1,
1297    iXLen %2)
1298
1299  ret <vscale x 32 x i8> %a
1300}
1301
1302declare <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8(
1303  <vscale x 32 x i8>,
1304  <vscale x 32 x i8>,
1305  iXLen,
1306  <vscale x 32 x i1>,
1307  iXLen,
1308  iXLen)
1309
1310define <vscale x 32 x i8> @intrinsic_vror_mask_vx_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1311; CHECK-LABEL: intrinsic_vror_mask_vx_nxv32i8:
1312; CHECK:       # %bb.0: # %entry
1313; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
1314; CHECK-NEXT:    vror.vx v8, v12, a0, v0.t
1315; CHECK-NEXT:    ret
1316entry:
1317  %a = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8(
1318    <vscale x 32 x i8> %0,
1319    <vscale x 32 x i8> %1,
1320    iXLen %2,
1321    <vscale x 32 x i1> %3,
1322    iXLen %4, iXLen 1)
1323
1324  ret <vscale x 32 x i8> %a
1325}
1326
1327declare <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8(
1328  <vscale x 64 x i8>,
1329  <vscale x 64 x i8>,
1330  iXLen,
1331  iXLen)
1332
1333define <vscale x 64 x i8> @intrinsic_vror_vx_nxv64i8(<vscale x 64 x i8> %0, iXLen %1, iXLen %2) nounwind {
1334; CHECK-LABEL: intrinsic_vror_vx_nxv64i8:
1335; CHECK:       # %bb.0: # %entry
1336; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
1337; CHECK-NEXT:    vror.vx v8, v8, a0
1338; CHECK-NEXT:    ret
1339entry:
1340  %a = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8(
1341    <vscale x 64 x i8> undef,
1342    <vscale x 64 x i8> %0,
1343    iXLen %1,
1344    iXLen %2)
1345
1346  ret <vscale x 64 x i8> %a
1347}
1348
1349declare <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8(
1350  <vscale x 64 x i8>,
1351  <vscale x 64 x i8>,
1352  iXLen,
1353  <vscale x 64 x i1>,
1354  iXLen,
1355  iXLen)
1356
1357define <vscale x 64 x i8> @intrinsic_vror_mask_vx_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
1358; CHECK-LABEL: intrinsic_vror_mask_vx_nxv64i8:
1359; CHECK:       # %bb.0: # %entry
1360; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
1361; CHECK-NEXT:    vror.vx v8, v16, a0, v0.t
1362; CHECK-NEXT:    ret
1363entry:
1364  %a = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8(
1365    <vscale x 64 x i8> %0,
1366    <vscale x 64 x i8> %1,
1367    iXLen %2,
1368    <vscale x 64 x i1> %3,
1369    iXLen %4, iXLen 1)
1370
1371  ret <vscale x 64 x i8> %a
1372}
1373
1374declare <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16(
1375  <vscale x 1 x i16>,
1376  <vscale x 1 x i16>,
1377  iXLen,
1378  iXLen)
1379
1380define <vscale x 1 x i16> @intrinsic_vror_vx_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
1381; CHECK-LABEL: intrinsic_vror_vx_nxv1i16:
1382; CHECK:       # %bb.0: # %entry
1383; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1384; CHECK-NEXT:    vror.vx v8, v8, a0
1385; CHECK-NEXT:    ret
1386entry:
1387  %a = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16(
1388    <vscale x 1 x i16> undef,
1389    <vscale x 1 x i16> %0,
1390    iXLen %1,
1391    iXLen %2)
1392
1393  ret <vscale x 1 x i16> %a
1394}
1395
1396declare <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16(
1397  <vscale x 1 x i16>,
1398  <vscale x 1 x i16>,
1399  iXLen,
1400  <vscale x 1 x i1>,
1401  iXLen,
1402  iXLen)
1403
1404define <vscale x 1 x i16> @intrinsic_vror_mask_vx_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1405; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i16:
1406; CHECK:       # %bb.0: # %entry
1407; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
1408; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
1409; CHECK-NEXT:    ret
1410entry:
1411  %a = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16(
1412    <vscale x 1 x i16> %0,
1413    <vscale x 1 x i16> %1,
1414    iXLen %2,
1415    <vscale x 1 x i1> %3,
1416    iXLen %4, iXLen 1)
1417
1418  ret <vscale x 1 x i16> %a
1419}
1420
1421declare <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16(
1422  <vscale x 2 x i16>,
1423  <vscale x 2 x i16>,
1424  iXLen,
1425  iXLen)
1426
1427define <vscale x 2 x i16> @intrinsic_vror_vx_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
1428; CHECK-LABEL: intrinsic_vror_vx_nxv2i16:
1429; CHECK:       # %bb.0: # %entry
1430; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1431; CHECK-NEXT:    vror.vx v8, v8, a0
1432; CHECK-NEXT:    ret
1433entry:
1434  %a = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16(
1435    <vscale x 2 x i16> undef,
1436    <vscale x 2 x i16> %0,
1437    iXLen %1,
1438    iXLen %2)
1439
1440  ret <vscale x 2 x i16> %a
1441}
1442
1443declare <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16(
1444  <vscale x 2 x i16>,
1445  <vscale x 2 x i16>,
1446  iXLen,
1447  <vscale x 2 x i1>,
1448  iXLen,
1449  iXLen)
1450
1451define <vscale x 2 x i16> @intrinsic_vror_mask_vx_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1452; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i16:
1453; CHECK:       # %bb.0: # %entry
1454; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
1455; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
1456; CHECK-NEXT:    ret
1457entry:
1458  %a = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16(
1459    <vscale x 2 x i16> %0,
1460    <vscale x 2 x i16> %1,
1461    iXLen %2,
1462    <vscale x 2 x i1> %3,
1463    iXLen %4, iXLen 1)
1464
1465  ret <vscale x 2 x i16> %a
1466}
1467
1468declare <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16(
1469  <vscale x 4 x i16>,
1470  <vscale x 4 x i16>,
1471  iXLen,
1472  iXLen)
1473
1474define <vscale x 4 x i16> @intrinsic_vror_vx_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
1475; CHECK-LABEL: intrinsic_vror_vx_nxv4i16:
1476; CHECK:       # %bb.0: # %entry
1477; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1478; CHECK-NEXT:    vror.vx v8, v8, a0
1479; CHECK-NEXT:    ret
1480entry:
1481  %a = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16(
1482    <vscale x 4 x i16> undef,
1483    <vscale x 4 x i16> %0,
1484    iXLen %1,
1485    iXLen %2)
1486
1487  ret <vscale x 4 x i16> %a
1488}
1489
1490declare <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16(
1491  <vscale x 4 x i16>,
1492  <vscale x 4 x i16>,
1493  iXLen,
1494  <vscale x 4 x i1>,
1495  iXLen,
1496  iXLen)
1497
1498define <vscale x 4 x i16> @intrinsic_vror_mask_vx_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1499; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i16:
1500; CHECK:       # %bb.0: # %entry
1501; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
1502; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
1503; CHECK-NEXT:    ret
1504entry:
1505  %a = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16(
1506    <vscale x 4 x i16> %0,
1507    <vscale x 4 x i16> %1,
1508    iXLen %2,
1509    <vscale x 4 x i1> %3,
1510    iXLen %4, iXLen 1)
1511
1512  ret <vscale x 4 x i16> %a
1513}
1514
1515declare <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16(
1516  <vscale x 8 x i16>,
1517  <vscale x 8 x i16>,
1518  iXLen,
1519  iXLen)
1520
1521define <vscale x 8 x i16> @intrinsic_vror_vx_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
1522; CHECK-LABEL: intrinsic_vror_vx_nxv8i16:
1523; CHECK:       # %bb.0: # %entry
1524; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
1525; CHECK-NEXT:    vror.vx v8, v8, a0
1526; CHECK-NEXT:    ret
1527entry:
1528  %a = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16(
1529    <vscale x 8 x i16> undef,
1530    <vscale x 8 x i16> %0,
1531    iXLen %1,
1532    iXLen %2)
1533
1534  ret <vscale x 8 x i16> %a
1535}
1536
1537declare <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16(
1538  <vscale x 8 x i16>,
1539  <vscale x 8 x i16>,
1540  iXLen,
1541  <vscale x 8 x i1>,
1542  iXLen,
1543  iXLen)
1544
1545define <vscale x 8 x i16> @intrinsic_vror_mask_vx_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1546; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i16:
1547; CHECK:       # %bb.0: # %entry
1548; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
1549; CHECK-NEXT:    vror.vx v8, v10, a0, v0.t
1550; CHECK-NEXT:    ret
1551entry:
1552  %a = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16(
1553    <vscale x 8 x i16> %0,
1554    <vscale x 8 x i16> %1,
1555    iXLen %2,
1556    <vscale x 8 x i1> %3,
1557    iXLen %4, iXLen 1)
1558
1559  ret <vscale x 8 x i16> %a
1560}
1561
1562declare <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16(
1563  <vscale x 16 x i16>,
1564  <vscale x 16 x i16>,
1565  iXLen,
1566  iXLen)
1567
1568define <vscale x 16 x i16> @intrinsic_vror_vx_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
1569; CHECK-LABEL: intrinsic_vror_vx_nxv16i16:
1570; CHECK:       # %bb.0: # %entry
1571; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
1572; CHECK-NEXT:    vror.vx v8, v8, a0
1573; CHECK-NEXT:    ret
1574entry:
1575  %a = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16(
1576    <vscale x 16 x i16> undef,
1577    <vscale x 16 x i16> %0,
1578    iXLen %1,
1579    iXLen %2)
1580
1581  ret <vscale x 16 x i16> %a
1582}
1583
1584declare <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16(
1585  <vscale x 16 x i16>,
1586  <vscale x 16 x i16>,
1587  iXLen,
1588  <vscale x 16 x i1>,
1589  iXLen,
1590  iXLen)
1591
1592define <vscale x 16 x i16> @intrinsic_vror_mask_vx_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1593; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i16:
1594; CHECK:       # %bb.0: # %entry
1595; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
1596; CHECK-NEXT:    vror.vx v8, v12, a0, v0.t
1597; CHECK-NEXT:    ret
1598entry:
1599  %a = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16(
1600    <vscale x 16 x i16> %0,
1601    <vscale x 16 x i16> %1,
1602    iXLen %2,
1603    <vscale x 16 x i1> %3,
1604    iXLen %4, iXLen 1)
1605
1606  ret <vscale x 16 x i16> %a
1607}
1608
1609declare <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16(
1610  <vscale x 32 x i16>,
1611  <vscale x 32 x i16>,
1612  iXLen,
1613  iXLen)
1614
1615define <vscale x 32 x i16> @intrinsic_vror_vx_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
1616; CHECK-LABEL: intrinsic_vror_vx_nxv32i16:
1617; CHECK:       # %bb.0: # %entry
1618; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
1619; CHECK-NEXT:    vror.vx v8, v8, a0
1620; CHECK-NEXT:    ret
1621entry:
1622  %a = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16(
1623    <vscale x 32 x i16> undef,
1624    <vscale x 32 x i16> %0,
1625    iXLen %1,
1626    iXLen %2)
1627
1628  ret <vscale x 32 x i16> %a
1629}
1630
declare <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vror_mask_vx_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vx_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vror.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vror_vx_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vx_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vror.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vror_mask_vx_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vror_vx_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vx_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vror.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vror_mask_vx_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

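; Note for the masked tests at LMUL > 1: %1 arrives in the next properly
; aligned register group after the v8 passthru, so the expected second source
; operand steps through v10 (m2), v12 (m4), and v16 (m8).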
declare <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vror_vx_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vx_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vror.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vror_mask_vx_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vror.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vror_vx_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vx_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vror.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vror_mask_vx_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vror.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vror_vx_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vx_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vror.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vror_mask_vx_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vror.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

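; The e64 .vx cases are shared by the riscv32 and riscv64 runs: vror only uses
; the low log2(SEW) bits of the rotate amount, so a single XLEN-sized scalar
; in a0 suffices even when XLEN < SEW, and no splat sequence is expected.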
declare <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vror_vx_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vx_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vror.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vror_mask_vx_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vror.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vror_vx_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vx_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vror.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vror_mask_vx_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vror.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vror_vx_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vx_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vror.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vror_mask_vx_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vror.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vror_vx_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vror_vx_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vror.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vror_mask_vx_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vror.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

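; The .vi tests below reuse the .vx intrinsics with a constant rotate amount;
; instruction selection is expected to fold the constant into vror.vi, whose
; encoding provides a 6-bit unsigned immediate.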
define <vscale x 1 x i8> @intrinsic_vror_vi_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vror_mask_vi_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen 2,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vror_vi_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vror_mask_vi_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen 2,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vror_vi_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vror_mask_vi_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen 2,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vror_vi_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vror_mask_vi_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen 2,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vror_vi_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vror_mask_vi_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vror.vi v8, v10, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen 2,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vror_vi_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vror_mask_vi_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vror.vi v8, v12, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen 2,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vror_vi_nxv64i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 64 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vror_mask_vi_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vror.vi v8, v16, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen 2,
    <vscale x 64 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 64 x i8> %a
}

define <vscale x 1 x i16> @intrinsic_vror_vi_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vror_mask_vi_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen 2,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vror_vi_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vror_mask_vi_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen 2,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vror_vi_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vror_mask_vi_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen 2,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vror_vi_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vror_mask_vi_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vror.vi v8, v10, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen 2,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vror_vi_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vror_mask_vi_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vror.vi v8, v12, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen 2,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vror_vi_nxv32i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vror_mask_vi_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vror.vi v8, v16, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen 2,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vror_vi_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vror_mask_vi_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen 2,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vror_vi_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vror_mask_vi_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen 2,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vror_vi_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vror_mask_vi_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vror.vi v8, v10, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen 2,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vror_vi_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vror_mask_vi_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vror.vi v8, v12, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen 2,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vror_vi_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vror_mask_vi_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vror.vi v8, v16, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen 2,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 1 x i64> @intrinsic_vror_vi_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vror_mask_vi_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vror.vi v8, v9, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen 2,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vror_vi_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vror_mask_vi_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vror.vi v8, v10, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen 2,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vror_vi_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vror_mask_vi_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vror.vi v8, v12, 2, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen 2,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vror_vi_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vror_vi_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vror.vi v8, v8, 2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    iXLen 2,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}

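; This final case rotates by 62 rather than 2, presumably to cover the upper
; half of the 6-bit immediate range (amounts >= 32 need the extra I5 encoding
; bit and are only meaningful as distinct rotates at SEW=64).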
define <vscale x 8 x i64> @intrinsic_vror_mask_vi_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vror_mask_vi_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vror.vi v8, v16, 62, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen 62,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}
