; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
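; Every test in this file passes the trailing policy operand iXLen 2, i.e.
; tail undisturbed, mask agnostic ("tuma"), which is why each vsetvli in the
; checks selects "tu, ma". The sed in the RUN lines substitutes iXLen with
; the XLEN-sized integer type of each target.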

declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x i64> %a
}

declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
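; vleff also returns the post-fault vl, which the test stores through the
; extra pointer argument; the vl store is sw on RV32 and sd on RV64, hence
; the per-target check prefixes.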
define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, ptr %4) nounwind {
; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 2)
  %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
  %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
  store iXLen %c, ptr %4

  ret <vscale x 1 x i64> %b
}

declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x iXLen>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
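; No autogenerated CHECK lines here, presumably because the sed rewrites the
; function name itself (nxv1iXLen becomes nxv1i32/nxv1i64), so neither run's
; output matches a name in the unsubstituted source that
; update_llc_test_checks.py could attach common checks to.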
define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x iXLen> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x iXLen> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)
define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
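; Widening ops are configured at the source SEW (e8, mf8); the result is
; written at twice that width (e16 here).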
define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
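; Subtracting the immediate -9 is folded to an add of +9, so the expected
; instruction is vadd.vi rather than a vsub form.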
define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 -9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
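; The vzext tests (and later vfrsqrt7/vfrec7) take the mask as the first IR
; argument, but it is still passed in the intrinsic's usual mask position.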
define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
  <vscale x 1 x i64>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vzext.vf8 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x i8> @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)
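; The multiply-accumulate intrinsics are ternary: %0 is both the accumulator
; input and the destination (v8 in the checks).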
define <vscale x 1 x i8> @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)
define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen,
  iXLen)
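; The fixed-point intrinsics below (vaadd, vsmul, vssrl, vnclip) take an
; extra leading iXLen operand that selects the vxrm rounding mode; it is
; materialized as the csrwi vxrm write in the checks.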
define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen 1, iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen,
  iXLen)
define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen,
  iXLen)
define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen,
  iXLen)
define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)
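; Most floating-point intrinsics carry a leading iXLen rounding-mode operand;
; the value 7 (DYN) means "use the current frm", so no fcsr write appears in
; the checks.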
define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)
define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)
define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)
define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)
define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)
define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)
define <vscale x 1 x half> @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)
define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)
define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x half> @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %0,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)
define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %0,
    iXLen 7, iXLen %3, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen)
define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)
define <vscale x 1 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
  <vscale x 1 x half>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)
define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
    <vscale x 1 x half> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)
define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)
define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
    <vscale x 1 x half> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen)
define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
  <vscale x 1 x i8>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)
define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)
define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
    <vscale x 1 x half> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
  <vscale x 1 x half>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen)
define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
    <vscale x 1 x half> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen, iXLen)
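; The scalar slide amount takes a0, so the AVL moves to a1 in the vsetvli.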
define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)
define <vscale x 1 x i8> @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

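; vid.v has no vector source operand; only the passthru, mask, vl and policy
; are passed.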
define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen, iXLen)

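; %1 is used both as the viota.m source mask and as the v0 mask operand,
; hence "viota.m v8, v0, v0.t".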
define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i1> %1,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 2)

  ret <vscale x 1 x i8> %a
}