; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

; viota.m with e8 result types: unmasked (tail/mask agnostic, passthru undef)
; and masked (tail-undisturbed, mask-undisturbed, policy 0) forms for
; LMUL mf8 through m8.
declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i1> %0,
    iXLen %1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i1> %1,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_viota_m_nxv2i8_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv2i8_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i1> %0,
    iXLen %1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_viota_mask_m_nxv2i8_nxv2i1(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i1> %1,
    <vscale x 2 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_viota_m_nxv4i8_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv4i8_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i1> %0,
    iXLen %1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_viota_mask_m_nxv4i8_nxv4i1(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i1> %1,
    <vscale x 4 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_viota_m_nxv8i8_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv8i8_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i1> %0,
    iXLen %1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_viota_mask_m_nxv8i8_nxv8i1(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i1> %1,
    <vscale x 8 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_viota_m_nxv16i8_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv16i8_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i1> %0,
    iXLen %1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_viota_mask_m_nxv16i8_nxv16i1(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i1> %1,
    <vscale x 16 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_viota_m_nxv32i8_nxv32i1(<vscale x 32 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv32i8_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i1> %0,
    iXLen %1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_viota_mask_m_nxv32i8_nxv32i1(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i1> %1,
    <vscale x 32 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_viota_m_nxv64i8_nxv64i1(<vscale x 64 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv64i8_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i1> %0,
    iXLen %1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  iXLen, iXLen);

define <vscale x 64 x i8> @intrinsic_viota_mask_m_nxv64i8_nxv64i1(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i1> %1,
    <vscale x 64 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 64 x i8> %a
}

; viota.m with e16 result types: unmasked and masked forms for
; LMUL mf4 through m8.
declare <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_viota_m_nxv1i16_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv1i16_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i1> %0,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_viota_mask_m_nxv1i16_nxv1i1(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i1> %1,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_viota_m_nxv2i16_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv2i16_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i1> %0,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_viota_mask_m_nxv2i16_nxv2i1(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i1> %1,
    <vscale x 2 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_viota_m_nxv4i16_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv4i16_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i1> %0,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_viota_mask_m_nxv4i16_nxv4i1(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i1> %1,
    <vscale x 4 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_viota_m_nxv8i16_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv8i16_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i1> %0,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_viota_mask_m_nxv8i16_nxv8i1(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i1> %1,
    <vscale x 8 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_viota_m_nxv16i16_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv16i16_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i1> %0,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_viota_mask_m_nxv16i16_nxv16i1(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i1> %1,
    <vscale x 16 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_viota_m_nxv32i16_nxv32i1(<vscale x 32 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv32i16_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i1> %0,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i16> @intrinsic_viota_mask_m_nxv32i16_nxv32i1(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i1> %1,
    <vscale x 32 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 32 x i16> %a
}

; viota.m with e32 result types: unmasked and masked forms for
; LMUL mf2 through m8.
declare <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_viota_m_nxv1i32_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv1i32_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i1> %0,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_viota_mask_m_nxv1i32_nxv1i1(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i1> %1,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_viota_m_nxv2i32_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv2i32_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i1> %0,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_viota_mask_m_nxv2i32_nxv2i1(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i1> %1,
    <vscale x 2 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_viota_m_nxv4i32_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv4i32_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i1> %0,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_viota_mask_m_nxv4i32_nxv4i1(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i1> %1,
    <vscale x 4 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_viota_m_nxv8i32_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv8i32_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i1> %0,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_viota_mask_m_nxv8i32_nxv8i1(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i1> %1,
    <vscale x 8 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_viota_m_nxv16i32_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv16i32_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i1> %0,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_viota_mask_m_nxv16i32_nxv16i1(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i1> %1,
    <vscale x 16 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 16 x i32> %a
}

; viota.m with e64 result types: unmasked and masked forms for
; LMUL m1 through m8.
declare <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_viota_m_nxv1i64_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv1i64_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i1> %0,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i64> @intrinsic_viota_mask_m_nxv1i64_nxv1i1(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i64_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i1> %1,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_viota_m_nxv2i64_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv2i64_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i1> %0,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i64> @intrinsic_viota_mask_m_nxv2i64_nxv2i1(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i64_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i1> %1,
    <vscale x 2 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_viota_m_nxv4i64_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv4i64_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i1> %0,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i64> @intrinsic_viota_mask_m_nxv4i64_nxv4i1(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i64_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i1> %1,
    <vscale x 4 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_viota_m_nxv8i64_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv8i64_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    viota.m v8, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i1> %0,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i64> @intrinsic_viota_mask_m_nxv8i64_nxv8i1(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i64_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i1> %1,
    <vscale x 8 x i1> %1,
    iXLen %2, iXLen 0)

  ret <vscale x 8 x i64> %a
}