; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64

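; Nearly every intrinsic call in this file passes an undef passthru operand
; and, where the intrinsic takes one, a policy operand of 3 (tail agnostic,
; mask agnostic - "tama"), so the generated vsetvli instructions are expected
; to carry the "ta, ma" policy suffix.
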
declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(ptr %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
    <vscale x 1 x i64> undef,
    ptr %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)

  ret <vscale x 1 x i64> %a
}

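; vleff additionally returns the updated vl, which is read back with
; "csrr vl" and stored through the extra pointer argument.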
declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(ptr %0, <vscale x 1 x i1> %1, iXLen %2, iXLen* %3) nounwind {
; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
    <vscale x 1 x i64> undef,
    ptr %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)
  %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
  %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
  store iXLen %c, iXLen* %3

  ret <vscale x 1 x i64> %b
}

declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
    <vscale x 1 x i64> undef,
    ptr %0,
    iXLen %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i64> %a
}

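; The nxv1iXLen index type used below becomes nxv1i32 on RV32 and nxv1i64 on
; RV64 via the sed commands in the RUN lines.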
declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x iXLen>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(ptr %0, <vscale x 1 x iXLen> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(
    <vscale x 1 x i8> undef,
    ptr %0,
    <vscale x 1 x iXLen> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

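; vsub with a negative immediate splat is expected to be selected as vadd.vi
; with the negated value, as the CHECK lines below show.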
declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 -9,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vzext.vf2 v9, v8, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
  <vscale x 1 x i64>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vzext.vf4 v9, v8, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)

  ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vzext.vf8 v9, v8, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnsra.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwmul.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8>  @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmacc.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16>  @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwmacc.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

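; The fixed-point intrinsics below (vaadd, vsmul, vssrl, vnclip) take an extra
; iXLen rounding-mode operand; the constant passed in the IR is materialized
; with "csrwi vxrm" ahead of the vector instruction.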
declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vaadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen 1, iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnclip.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

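; Many of the floating-point intrinsics below carry an extra iXLen
; rounding-mode operand; the value 7 used here selects the dynamic rounding
; mode already held in frm, so no rounding-mode write appears in the checks.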
declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwmul.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half>  @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfmacc.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float>  @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwmacc.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
    <vscale x 1 x float> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 7, iXLen %3, iXLen 3);

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfsqrt.v v8, v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i1> %1,
    iXLen 7, iXLen %2, iXLen 3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfrsqrt7.v v8, v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfrec7.v v8, v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i1> %1,
    iXLen 7, iXLen %2, iXLen 3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8, v0.t
; CHECK-NEXT:    ret
  <vscale x 1 x half> %0,
  <vscale x 1 x i1> %1,
  iXLen %2) nounwind {
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfcvt.xu.f.v v8, v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i1> %1,
    iXLen 7, iXLen %2, iXLen 3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
  <vscale x 1 x half>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(
    <vscale x 1 x half> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i1> %1,
    iXLen 7, iXLen %2, iXLen 3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.xu.f.v v9, v8, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i1> %1,
    iXLen 7, iXLen %2, iXLen 3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vfwcvt.f.x.v v9, v8, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(
    <vscale x 1 x half> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(
    <vscale x 1 x float> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
  <vscale x 1 x i8>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vfncvt.xu.f.w v9, v8, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
    <vscale x 1 x i8> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i1> %1,
    iXLen 7, iXLen %2, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.x.w v9, v8, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(
    <vscale x 1 x half> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i1> %1,
    iXLen 7, iXLen %2, iXLen 3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
  <vscale x 1 x half>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v9, v8, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
    <vscale x 1 x half> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x i1> %1,
    iXLen 7, iXLen %2, iXLen 3)

  ret <vscale x 1 x half> %a
}

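; Unlike the other tests in this file, vslideup keeps its tied destination
; operand (%0) rather than passing undef, since elements below the slide
; offset are taken from the destination register.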
declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vslide1up.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i1> %0,
    iXLen %1, iXLen 3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    viota.m v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i1> %0,
    <vscale x 1 x i1> %1,
    iXLen %2, iXLen 3)

  ret <vscale x 1 x i8> %a
}

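; The mask-producing intrinsics below (vmsbf, vmfeq, vmseq, vmsge) take no
; policy operand; mask-register results are always treated as tail agnostic.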
declare <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmsbf.m v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
    <vscale x 1 x i1> undef,
    <vscale x 1 x i1> %0,
    <vscale x 1 x i1> %1,
    iXLen %2)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16(
  <vscale x 1 x i1>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen);

declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmfeq.vv v0, v9, v10
; CHECK-NEXT:    vmfeq.vv v0, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16(
    <vscale x 1 x i1> undef,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %mask,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
  <vscale x 1 x i1>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vmseq.vv v0, v8, v9, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmseq.vx v0, v8, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
    <vscale x 1 x i1> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
  <vscale x 1 x i1>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vmsle.vv v0, v9, v8, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmslt.vx v8, v8, a0, v0.t
; RV64-NEXT:    vmxor.mm v0, v8, v0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
    <vscale x 1 x i1> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmsbf.m v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
    <vscale x 64 x i1> undef,
    <vscale x 64 x i1> %0,
    <vscale x 64 x i1> %1,
    iXLen %2)

  ret <vscale x 64 x i1> %a
}