; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
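
; This file exercises the llvm.riscv.vid intrinsics, which lower to the RVV
; vid.v instruction (write each element's index, 0..vl-1, to the destination).
; Both the unmasked form and the masked form (policy operand 0, i.e. tail
; undisturbed / mask undisturbed, hence the "tu, mu" vsetvli) are covered for
; every supported SEW/LMUL combination from e8 through e64.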

declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
    <vscale x 1 x i8> undef,
    iXLen %0)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vid_v_nxv2i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
    <vscale x 2 x i8> undef,
    iXLen %0)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vid_mask_v_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vid_v_nxv4i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
    <vscale x 4 x i8> undef,
    iXLen %0)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vid_mask_v_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vid_v_nxv8i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
    <vscale x 8 x i8> undef,
    iXLen %0)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vid_mask_v_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vid_v_nxv16i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
    <vscale x 16 x i8> undef,
    iXLen %0)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vid_mask_v_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vid_v_nxv32i8(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
    <vscale x 32 x i8> undef,
    iXLen %0)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vid_mask_v_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vid_v_nxv1i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
    <vscale x 1 x i16> undef,
    iXLen %0)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vid_mask_v_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vid_v_nxv2i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
    <vscale x 2 x i16> undef,
    iXLen %0)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vid_mask_v_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vid_v_nxv4i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
    <vscale x 4 x i16> undef,
    iXLen %0)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vid_mask_v_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vid_v_nxv8i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
    <vscale x 8 x i16> undef,
    iXLen %0)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vid_mask_v_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vid_v_nxv16i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
    <vscale x 16 x i16> undef,
    iXLen %0)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vid_mask_v_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vid_v_nxv32i16(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
    <vscale x 32 x i16> undef,
    iXLen %0)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i16> @intrinsic_vid_mask_v_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vid_v_nxv1i32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
    <vscale x 1 x i32> undef,
    iXLen %0)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vid_mask_v_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vid_v_nxv2i32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
    <vscale x 2 x i32> undef,
    iXLen %0)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vid_mask_v_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vid_v_nxv4i32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
    <vscale x 4 x i32> undef,
    iXLen %0)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vid_mask_v_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vid_v_nxv8i32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
    <vscale x 8 x i32> undef,
    iXLen %0)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vid_mask_v_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vid_v_nxv16i32(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
    <vscale x 16 x i32> undef,
    iXLen %0)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_vid_mask_v_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vid_v_nxv1i64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
    <vscale x 1 x i64> undef,
    iXLen %0)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i64> @intrinsic_vid_mask_v_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
  <vscale x 2 x i64>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vid_v_nxv2i64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
    <vscale x 2 x i64> undef,
    iXLen %0)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i64> @intrinsic_vid_mask_v_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
  <vscale x 4 x i64>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vid_v_nxv4i64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
    <vscale x 4 x i64> undef,
    iXLen %0)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i64> @intrinsic_vid_mask_v_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
  <vscale x 8 x i64>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vid_v_nxv8i64(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
    <vscale x 8 x i64> undef,
    iXLen %0)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i64> @intrinsic_vid_mask_v_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 8 x i64> %a
}