; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64

; Unmasked unit-stride load with a passthru operand: the passthru must force
; a tail-undisturbed (tu) vsetvli.
declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vle_v_tu_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vle_v_tu_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
    <vscale x 1 x i8> %0,
    ptr %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

; Unmasked strided load with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x i8> @llvm.riscv.vlse(
  <vscale x 1 x i8>,
  ptr,
  iXLen,
  iXLen);


define <vscale x 1 x i8> @intrinsic_vlse_v_tu(<vscale x 1 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_tu:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, tu, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vlse(
    <vscale x 1 x i8> %0,
    ptr %1,
    iXLen %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

; Unmasked fault-only-first load with a passthru operand: expect a tu vsetvli.
; The updated vl is read back from the vl CSR and stored through %3
; (sw on RV32, sd on RV64).
; Note: %3 now uses the opaque 'ptr' type, consistent with the rest of this
; file; the old 'iXLen*' typed-pointer spelling is rejected after the
; iXLen sed under opaque pointers.
declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff(
  <vscale x 1 x i8>,
  ptr,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vleff_v_tu(<vscale x 1 x i8> %0, ptr %1, iXLen %2, ptr %3) nounwind {
; RV32-LABEL: intrinsic_vleff_v_tu:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
; RV32-NEXT:    vle8ff.v v8, (a0)
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_v_tu:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
; RV64-NEXT:    vle8ff.v v8, (a0)
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff(
    <vscale x 1 x i8> %0,
    ptr %1,
    iXLen %2)
  %b = extractvalue { <vscale x 1 x i8>, iXLen } %a, 0
  %c = extractvalue { <vscale x 1 x i8>, iXLen } %a, 1
  store iXLen %c, ptr %3
  ret <vscale x 1 x i8> %b
}

; Unmasked indexed-ordered load with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vloxei_v_tu_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_tu_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT:    vloxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

; Unmasked averaging add with a passthru operand: expect a tu vsetvli.
; The constant rounding-mode operand 0 is materialized as 'csrwi vxrm, 0'.
declare <vscale x 1 x i8> @llvm.riscv.vaadd.rm.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vaadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.rm.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x i8> %a
}

; Unmasked unsigned averaging add with a passthru operand: expect a tu
; vsetvli and 'csrwi vxrm, 0' for the constant rounding mode.
declare <vscale x 1 x i8> @llvm.riscv.vaaddu.rm.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vaaddu.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vaaddu.rm.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x i8> %a
}

; Unmasked vadd with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}
; Unmasked vand with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vand.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

; Unmasked averaging subtract with a passthru operand: expect a tu vsetvli
; and 'csrwi vxrm, 0' for the constant rounding mode.
declare <vscale x 1 x i8> @llvm.riscv.vasub.rm.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vasub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vasub.rm.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x i8> %a
}

; Unmasked unsigned averaging subtract with a passthru operand: expect a tu
; vsetvli and 'csrwi vxrm, 0' for the constant rounding mode.
declare <vscale x 1 x i8> @llvm.riscv.vasubu.rm.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vasubu.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vasubu.rm.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x i8> %a
}

; Unmasked vdiv with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vdiv.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

; Unmasked vdivu with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vdivu.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

; Unmasked vfadd with a passthru operand: expect a tu vsetvli.
; FP rounding-mode operand 7 selects the dynamic frm, so no frm write is
; emitted.
declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3)

  ret <vscale x 1 x half> %a
}

; Unmasked vfdiv with a passthru operand: expect a tu vsetvli (frm operand 7
; = dynamic).
declare <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfdiv.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3)

  ret <vscale x 1 x half> %a
}

; Unmasked vfmax with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfmax.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

; Unmasked vfmin with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfmin.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

; Unmasked vfmul with a passthru operand: expect a tu vsetvli (frm operand 7
; = dynamic).
declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfmul.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3)

  ret <vscale x 1 x half> %a
}

; Unmasked vfrdiv.vf (scalar reversed divide) with a passthru operand:
; expect a tu vsetvli (frm operand 7 = dynamic).
declare <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    iXLen 7, iXLen %3)

  ret <vscale x 1 x half> %a
}

; Unmasked vfsgnj with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfsgnj.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

; Unmasked vfsgnjn with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfsgnjn.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

; Unmasked vfsgnjx with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfsgnjx.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

; Unmasked vfrsub.vf (scalar reversed subtract) with a passthru operand:
; expect a tu vsetvli (frm operand 7 = dynamic).
declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfrsub.vf v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    iXLen 7, iXLen %3)

  ret <vscale x 1 x half> %a
}

; Unmasked vfslide1down.vf with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

; Unmasked vfslide1up.vf with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

; Unmasked widening vfwsub.vv (f16 -> f32) with a passthru operand: expect a
; tu vsetvli at the narrow (e16) element width.
declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwsub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3)

  ret <vscale x 1 x float> %a
}

; Unmasked vfwsub.wv (wide first source) with a passthru operand: expect a
; tu vsetvli at the narrow (e16) element width.
declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwsub.wv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3)

  ret <vscale x 1 x float> %a
}

; Large-LMUL variant of vfwsub.wv with a passthru operand.
; NOTE(review): the m4 half operand does not fit in the vector argument
; registers, so it is passed indirectly and reloaded with vl4re16.v —
; confirm against the RISC-V vector calling convention.
declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x half>,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
; CHECK-NEXT:    vfwsub.wv v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x half> %2,
    iXLen 7, iXLen %3)

  ret <vscale x 16 x float> %a
}

; Unmasked widening vfwmul.vv with a passthru operand: expect a tu vsetvli
; at the narrow (e16) element width.
declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwmul.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3)

  ret <vscale x 1 x float> %a
}

; Unmasked vfwadd.wv (wide first source) with a passthru operand: expect a
; tu vsetvli at the narrow (e16) element width.
declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwadd.wv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3)

  ret <vscale x 1 x float> %a
}

; Unmasked widening vfwadd.vv with a passthru operand: expect a tu vsetvli
; at the narrow (e16) element width.
declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3)

  ret <vscale x 1 x float> %a
}

; Unmasked vfsub with a passthru operand: expect a tu vsetvli (frm operand 7
; = dynamic).
declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfsub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3)

  ret <vscale x 1 x half> %a
}


; Unmasked vslide1down.vx with an i64 scalar and a passthru operand.
; On RV32 the i64 scalar does not fit in one GPR, so the slide is lowered as
; two e32 slides with doubled VL; both slides keep the tu policy.
declare <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vmv1r.v v10, v8
; RV32-NEXT:    vsetvli zero, a2, e32, m1, tu, ma
; RV32-NEXT:    vslide1down.vx v10, v9, a0
; RV32-NEXT:    vslide1down.vx v8, v10, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT:    vslide1down.vx v8, v9, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

; Unmasked vslide1up.vx with an i64 scalar and a passthru operand.
; On RV32 the i64 scalar is split into two e32 slides (high half first) with
; doubled VL; both slides keep the tu policy.
declare <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vmv1r.v v10, v8
; RV32-NEXT:    vsetvli zero, a2, e32, m1, tu, ma
; RV32-NEXT:    vslide1up.vx v10, v9, a1
; RV32-NEXT:    vslide1up.vx v8, v10, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT:    vslide1up.vx v8, v9, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

; Unmasked vmax with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmax.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

; Unmasked vmaxu with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmaxu.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

; Unmasked vmin with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmin.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

; Unmasked vminu with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vminu.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

; Unmasked vmul with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmul.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

; Unmasked vmulh with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmulh.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

; Unmasked vmulhsu with a passthru operand: expect a tu vsetvli.
declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmulhsu.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

921declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
922  <vscale x 1 x i8>,
923  <vscale x 1 x i8>,
924  <vscale x 1 x i8>,
925  iXLen);
926
927define <vscale x 1 x i8> @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
928; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8:
929; CHECK:       # %bb.0: # %entry
930; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
931; CHECK-NEXT:    vmulhu.vv v8, v9, v10
932; CHECK-NEXT:    ret
933entry:
934  %a = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8(
935    <vscale x 1 x i8> %0,
936    <vscale x 1 x i8> %1,
937    <vscale x 1 x i8> %2,
938    iXLen %3)
939
940  ret <vscale x 1 x i8> %a
941}
942
; Narrowing family (vnclip/vnclipu/vnsra/vnsrl): i16 source narrowed to i8
; with a passthru, so "tu" policy is required. The clip variants take an
; extra rounding-mode operand (0 here), which codegen materializes as a
; "csrwi vxrm, 0" before the vector op.
943declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
944  <vscale x 1 x i8>,
945  <vscale x 1 x i16>,
946  <vscale x 1 x i8>,
947  iXLen,
948  iXLen);
949
950define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
951; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8:
952; CHECK:       # %bb.0: # %entry
953; CHECK-NEXT:    csrwi vxrm, 0
954; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
955; CHECK-NEXT:    vnclip.wv v8, v9, v10
956; CHECK-NEXT:    ret
957entry:
958  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
959    <vscale x 1 x i8> %0,
960    <vscale x 1 x i16> %1,
961    <vscale x 1 x i8> %2,
962    iXLen 0, iXLen %3)
963
964  ret <vscale x 1 x i8> %a
965}
966
967declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
968  <vscale x 1 x i8>,
969  <vscale x 1 x i16>,
970  <vscale x 1 x i8>,
971  iXLen,
972  iXLen);
973
974define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
975; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8:
976; CHECK:       # %bb.0: # %entry
977; CHECK-NEXT:    csrwi vxrm, 0
978; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
979; CHECK-NEXT:    vnclipu.wv v8, v9, v10
980; CHECK-NEXT:    ret
981entry:
982  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
983    <vscale x 1 x i8> %0,
984    <vscale x 1 x i16> %1,
985    <vscale x 1 x i8> %2,
986    iXLen 0, iXLen %3)
987
988  ret <vscale x 1 x i8> %a
989}
990
991declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
992  <vscale x 1 x i8>,
993  <vscale x 1 x i16>,
994  <vscale x 1 x i8>,
995  iXLen);
996
997define <vscale x 1 x i8> @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
998; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8:
999; CHECK:       # %bb.0: # %entry
1000; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1001; CHECK-NEXT:    vnsra.wv v8, v9, v10
1002; CHECK-NEXT:    ret
1003entry:
1004  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
1005    <vscale x 1 x i8> %0,
1006    <vscale x 1 x i16> %1,
1007    <vscale x 1 x i8> %2,
1008    iXLen %3)
1009
1010  ret <vscale x 1 x i8> %a
1011}
1012
1013declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
1014  <vscale x 1 x i8>,
1015  <vscale x 1 x i16>,
1016  <vscale x 1 x i8>,
1017  iXLen);
1018
1019define <vscale x 1 x i8> @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1020; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8:
1021; CHECK:       # %bb.0: # %entry
1022; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1023; CHECK-NEXT:    vnsrl.wv v8, v9, v10
1024; CHECK-NEXT:    ret
1025entry:
1026  %a = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8(
1027    <vscale x 1 x i8> %0,
1028    <vscale x 1 x i16> %1,
1029    <vscale x 1 x i8> %2,
1030    iXLen %3)
1031
1032  ret <vscale x 1 x i8> %a
1033}
1034
; Bitwise/remainder/gather family: vor, vrem, vrgather.vv/.vx, vrgatherei16.
; All unmasked with passthru, so "tu" policy is checked.
1035declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
1036  <vscale x 1 x i8>,
1037  <vscale x 1 x i8>,
1038  <vscale x 1 x i8>,
1039  iXLen);
1040
1041define <vscale x 1 x i8> @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1042; CHECK-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8:
1043; CHECK:       # %bb.0: # %entry
1044; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1045; CHECK-NEXT:    vor.vv v8, v9, v10
1046; CHECK-NEXT:    ret
1047entry:
1048  %a = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8(
1049    <vscale x 1 x i8> %0,
1050    <vscale x 1 x i8> %1,
1051    <vscale x 1 x i8> %2,
1052    iXLen %3)
1053
1054  ret <vscale x 1 x i8> %a
1055}
1056
1057declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
1058  <vscale x 1 x i8>,
1059  <vscale x 1 x i8>,
1060  <vscale x 1 x i8>,
1061  iXLen);
1062
1063define <vscale x 1 x i8> @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1064; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8:
1065; CHECK:       # %bb.0: # %entry
1066; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1067; CHECK-NEXT:    vrem.vv v8, v9, v10
1068; CHECK-NEXT:    ret
1069entry:
1070  %a = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8(
1071    <vscale x 1 x i8> %0,
1072    <vscale x 1 x i8> %1,
1073    <vscale x 1 x i8> %2,
1074    iXLen %3)
1075
1076  ret <vscale x 1 x i8> %a
1077}
1078
; NOTE(review): vremu is declared but has no corresponding define below —
; presumably a leftover from test generation; harmless, left in place.
1079declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8(
1080  <vscale x 1 x i8>,
1081  <vscale x 1 x i8>,
1082  <vscale x 1 x i8>,
1083  iXLen);
1084declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
1085  <vscale x 1 x i8>,
1086  <vscale x 1 x i8>,
1087  <vscale x 1 x i8>,
1088  iXLen);
1089
1090define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1091; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8:
1092; CHECK:       # %bb.0: # %entry
1093; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1094; CHECK-NEXT:    vrgather.vv v8, v9, v10
1095; CHECK-NEXT:    ret
1096entry:
1097  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(
1098    <vscale x 1 x i8> %0,
1099    <vscale x 1 x i8> %1,
1100    <vscale x 1 x i8> %2,
1101    iXLen %3)
1102
1103  ret <vscale x 1 x i8> %a
1104}
1105
1106declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8(
1107  <vscale x 1 x i8>,
1108  <vscale x 1 x i8>,
1109  iXLen,
1110  iXLen);
1111
1112define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, iXLen %3) nounwind {
1113; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8:
1114; CHECK:       # %bb.0: # %entry
1115; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
1116; CHECK-NEXT:    vrgather.vx v8, v9, a0
1117; CHECK-NEXT:    ret
1118entry:
1119  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8(
1120    <vscale x 1 x i8> %0,
1121    <vscale x 1 x i8> %1,
1122    iXLen %2,
1123    iXLen %3)
1124
1125  ret <vscale x 1 x i8> %a
1126}
1127
1128declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
1129  <vscale x 1 x i8>,
1130  <vscale x 1 x i8>,
1131  <vscale x 1 x i16>,
1132  iXLen);
1133
1134define <vscale x 1 x i8> @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
1135; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8:
1136; CHECK:       # %bb.0: # %entry
1137; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1138; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10
1139; CHECK-NEXT:    ret
1140entry:
1141  %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
1142    <vscale x 1 x i8> %0,
1143    <vscale x 1 x i8> %1,
1144    <vscale x 1 x i16> %2,
1145    iXLen %3)
1146
1147  ret <vscale x 1 x i8> %a
1148}
1149
; i64-scalar and saturating ops (vrsub/vsadd/vsaddu/vsll/vsmul). For the
; i64-scalar forms, RV32 and RV64 diverge: RV32 has no 64-bit GPR, so the
; scalar is spilled to the stack and splatted with vlse64 (zero stride),
; then the .vv form is used; RV64 uses the .vx form directly. The passthru
; keeps the "tu" policy in both cases. vsmul additionally sets vxrm.
1150declare <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
1151  <vscale x 1 x i64>,
1152  <vscale x 1 x i64>,
1153  i64,
1154  iXLen);
1155
1156define <vscale x 1 x i64> @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
1157; RV32-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64:
1158; RV32:       # %bb.0: # %entry
1159; RV32-NEXT:    addi sp, sp, -16
1160; RV32-NEXT:    sw a0, 8(sp)
1161; RV32-NEXT:    sw a1, 12(sp)
1162; RV32-NEXT:    addi a0, sp, 8
1163; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
1164; RV32-NEXT:    vlse64.v v10, (a0), zero
1165; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
1166; RV32-NEXT:    vsub.vv v8, v10, v9
1167; RV32-NEXT:    addi sp, sp, 16
1168; RV32-NEXT:    ret
1169;
1170; RV64-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64:
1171; RV64:       # %bb.0: # %entry
1172; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
1173; RV64-NEXT:    vrsub.vx v8, v9, a0
1174; RV64-NEXT:    ret
1175entry:
1176  %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
1177    <vscale x 1 x i64> %0,
1178    <vscale x 1 x i64> %1,
1179    i64 %2,
1180    iXLen %3)
1181
1182  ret <vscale x 1 x i64> %a
1183}
1184
1185declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
1186  <vscale x 1 x i64>,
1187  <vscale x 1 x i64>,
1188  i64,
1189  iXLen);
1190
1191define <vscale x 1 x i64> @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
1192; RV32-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64:
1193; RV32:       # %bb.0: # %entry
1194; RV32-NEXT:    addi sp, sp, -16
1195; RV32-NEXT:    sw a0, 8(sp)
1196; RV32-NEXT:    sw a1, 12(sp)
1197; RV32-NEXT:    addi a0, sp, 8
1198; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
1199; RV32-NEXT:    vlse64.v v10, (a0), zero
1200; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
1201; RV32-NEXT:    vsadd.vv v8, v9, v10
1202; RV32-NEXT:    addi sp, sp, 16
1203; RV32-NEXT:    ret
1204;
1205; RV64-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64:
1206; RV64:       # %bb.0: # %entry
1207; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
1208; RV64-NEXT:    vsadd.vx v8, v9, a0
1209; RV64-NEXT:    ret
1210entry:
1211  %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
1212    <vscale x 1 x i64> %0,
1213    <vscale x 1 x i64> %1,
1214    i64 %2,
1215    iXLen %3)
1216
1217  ret <vscale x 1 x i64> %a
1218}
1219
1220declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
1221  <vscale x 1 x i8>,
1222  <vscale x 1 x i8>,
1223  <vscale x 1 x i8>,
1224  iXLen);
1225
1226define <vscale x 1 x i8> @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1227; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8:
1228; CHECK:       # %bb.0: # %entry
1229; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1230; CHECK-NEXT:    vsaddu.vv v8, v9, v10
1231; CHECK-NEXT:    ret
1232entry:
1233  %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
1234    <vscale x 1 x i8> %0,
1235    <vscale x 1 x i8> %1,
1236    <vscale x 1 x i8> %2,
1237    iXLen %3)
1238
1239  ret <vscale x 1 x i8> %a
1240}
1241
1242declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
1243  <vscale x 1 x i8>,
1244  <vscale x 1 x i8>,
1245  <vscale x 1 x i8>,
1246  iXLen);
1247
1248define <vscale x 1 x i8> @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1249; CHECK-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8:
1250; CHECK:       # %bb.0: # %entry
1251; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1252; CHECK-NEXT:    vsll.vv v8, v9, v10
1253; CHECK-NEXT:    ret
1254entry:
1255  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
1256    <vscale x 1 x i8> %0,
1257    <vscale x 1 x i8> %1,
1258    <vscale x 1 x i8> %2,
1259    iXLen %3)
1260
1261  ret <vscale x 1 x i8> %a
1262}
1263
1264declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
1265  <vscale x 1 x i8>,
1266  <vscale x 1 x i8>,
1267  <vscale x 1 x i8>,
1268  iXLen,
1269  iXLen);
1270
1271define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1272; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
1273; CHECK:       # %bb.0: # %entry
1274; CHECK-NEXT:    csrwi vxrm, 0
1275; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1276; CHECK-NEXT:    vsmul.vv v8, v9, v10
1277; CHECK-NEXT:    ret
1278entry:
1279  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
1280    <vscale x 1 x i8> %0,
1281    <vscale x 1 x i8> %1,
1282    <vscale x 1 x i8> %2,
1283    iXLen 0, iXLen %3)
1284
1285  ret <vscale x 1 x i8> %a
1286}
1287
1288declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
1289  <vscale x 1 x i64>,
1290  <vscale x 1 x i64>,
1291  i64,
1292  iXLen,
1293  iXLen);
1294
1295define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
1296; RV32-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
1297; RV32:       # %bb.0: # %entry
1298; RV32-NEXT:    addi sp, sp, -16
1299; RV32-NEXT:    sw a0, 8(sp)
1300; RV32-NEXT:    sw a1, 12(sp)
1301; RV32-NEXT:    addi a0, sp, 8
1302; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
1303; RV32-NEXT:    vlse64.v v10, (a0), zero
1304; RV32-NEXT:    csrwi vxrm, 0
1305; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
1306; RV32-NEXT:    vsmul.vv v8, v9, v10
1307; RV32-NEXT:    addi sp, sp, 16
1308; RV32-NEXT:    ret
1309;
1310; RV64-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
1311; RV64:       # %bb.0: # %entry
1312; RV64-NEXT:    csrwi vxrm, 0
1313; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
1314; RV64-NEXT:    vsmul.vx v8, v9, a0
1315; RV64-NEXT:    ret
1316entry:
1317  %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
1318    <vscale x 1 x i64> %0,
1319    <vscale x 1 x i64> %1,
1320    i64 %2,
1321    iXLen 0, iXLen %3)
1322
1323  ret <vscale x 1 x i64> %a
1324}
1325
; Shift and saturating-subtract family (vsra/vsrl/vssra/vssrl/vssub/vssubu).
; The scaling-shift variants (vssra/vssrl) carry a rounding-mode operand and
; emit "csrwi vxrm, 0"; all use the "tu" policy due to the passthru.
1326declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
1327  <vscale x 1 x i8>,
1328  <vscale x 1 x i8>,
1329  <vscale x 1 x i8>,
1330  iXLen);
1331
1332define <vscale x 1 x i8> @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1333; CHECK-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8:
1334; CHECK:       # %bb.0: # %entry
1335; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1336; CHECK-NEXT:    vsra.vv v8, v9, v10
1337; CHECK-NEXT:    ret
1338entry:
1339  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
1340    <vscale x 1 x i8> %0,
1341    <vscale x 1 x i8> %1,
1342    <vscale x 1 x i8> %2,
1343    iXLen %3)
1344
1345  ret <vscale x 1 x i8> %a
1346}
1347declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
1348  <vscale x 1 x i8>,
1349  <vscale x 1 x i8>,
1350  <vscale x 1 x i8>,
1351  iXLen);
1352
1353define <vscale x 1 x i8> @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1354; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8:
1355; CHECK:       # %bb.0: # %entry
1356; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1357; CHECK-NEXT:    vsrl.vv v8, v9, v10
1358; CHECK-NEXT:    ret
1359entry:
1360  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
1361    <vscale x 1 x i8> %0,
1362    <vscale x 1 x i8> %1,
1363    <vscale x 1 x i8> %2,
1364    iXLen %3)
1365
1366  ret <vscale x 1 x i8> %a
1367}
1368
1369declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
1370  <vscale x 1 x i8>,
1371  <vscale x 1 x i8>,
1372  <vscale x 1 x i8>,
1373  iXLen,
1374  iXLen);
1375
1376define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1377; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8:
1378; CHECK:       # %bb.0: # %entry
1379; CHECK-NEXT:    csrwi vxrm, 0
1380; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1381; CHECK-NEXT:    vssra.vv v8, v9, v10
1382; CHECK-NEXT:    ret
1383entry:
1384  %a = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
1385    <vscale x 1 x i8> %0,
1386    <vscale x 1 x i8> %1,
1387    <vscale x 1 x i8> %2,
1388    iXLen 0, iXLen %3)
1389
1390  ret <vscale x 1 x i8> %a
1391}
1392
1393declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
1394  <vscale x 1 x i8>,
1395  <vscale x 1 x i8>,
1396  <vscale x 1 x i8>,
1397  iXLen,
1398  iXLen);
1399
1400define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1401; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8:
1402; CHECK:       # %bb.0: # %entry
1403; CHECK-NEXT:    csrwi vxrm, 0
1404; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1405; CHECK-NEXT:    vssrl.vv v8, v9, v10
1406; CHECK-NEXT:    ret
1407entry:
1408  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
1409    <vscale x 1 x i8> %0,
1410    <vscale x 1 x i8> %1,
1411    <vscale x 1 x i8> %2,
1412    iXLen 0, iXLen %3)
1413
1414  ret <vscale x 1 x i8> %a
1415}
1416
1417declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
1418  <vscale x 1 x i8>,
1419  <vscale x 1 x i8>,
1420  <vscale x 1 x i8>,
1421  iXLen);
1422
1423define <vscale x 1 x i8> @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1424; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8:
1425; CHECK:       # %bb.0: # %entry
1426; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1427; CHECK-NEXT:    vssub.vv v8, v9, v10
1428; CHECK-NEXT:    ret
1429entry:
1430  %a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
1431    <vscale x 1 x i8> %0,
1432    <vscale x 1 x i8> %1,
1433    <vscale x 1 x i8> %2,
1434    iXLen %3)
1435
1436  ret <vscale x 1 x i8> %a
1437}
1438
1439declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
1440  <vscale x 1 x i8>,
1441  <vscale x 1 x i8>,
1442  <vscale x 1 x i8>,
1443  iXLen);
1444
1445define <vscale x 1 x i8> @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1446; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8:
1447; CHECK:       # %bb.0: # %entry
1448; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1449; CHECK-NEXT:    vssubu.vv v8, v9, v10
1450; CHECK-NEXT:    ret
1451entry:
1452  %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
1453    <vscale x 1 x i8> %0,
1454    <vscale x 1 x i8> %1,
1455    <vscale x 1 x i8> %2,
1456    iXLen %3)
1457
1458  ret <vscale x 1 x i8> %a
1459}
1460
; i64-scalar saturating subtract plus vsub/vwadd. As above, RV32 splats the
; 64-bit scalar from the stack with a zero-stride vlse64 and falls back to
; the .vv form, while RV64 uses .vx; "tu" is preserved for the passthru.
1461declare <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
1462  <vscale x 1 x i64>,
1463  <vscale x 1 x i64>,
1464  i64,
1465  iXLen);
1466
1467define <vscale x 1 x i64> @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
1468; RV32-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64:
1469; RV32:       # %bb.0: # %entry
1470; RV32-NEXT:    addi sp, sp, -16
1471; RV32-NEXT:    sw a0, 8(sp)
1472; RV32-NEXT:    sw a1, 12(sp)
1473; RV32-NEXT:    addi a0, sp, 8
1474; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
1475; RV32-NEXT:    vlse64.v v10, (a0), zero
1476; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
1477; RV32-NEXT:    vssub.vv v8, v9, v10
1478; RV32-NEXT:    addi sp, sp, 16
1479; RV32-NEXT:    ret
1480;
1481; RV64-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64:
1482; RV64:       # %bb.0: # %entry
1483; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
1484; RV64-NEXT:    vssub.vx v8, v9, a0
1485; RV64-NEXT:    ret
1486entry:
1487  %a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
1488    <vscale x 1 x i64> %0,
1489    <vscale x 1 x i64> %1,
1490    i64 %2,
1491    iXLen %3)
1492
1493  ret <vscale x 1 x i64> %a
1494}
1495
1496declare <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
1497  <vscale x 1 x i64>,
1498  <vscale x 1 x i64>,
1499  i64,
1500  iXLen);
1501
1502define <vscale x 1 x i64> @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
1503; RV32-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64:
1504; RV32:       # %bb.0: # %entry
1505; RV32-NEXT:    addi sp, sp, -16
1506; RV32-NEXT:    sw a0, 8(sp)
1507; RV32-NEXT:    sw a1, 12(sp)
1508; RV32-NEXT:    addi a0, sp, 8
1509; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
1510; RV32-NEXT:    vlse64.v v10, (a0), zero
1511; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
1512; RV32-NEXT:    vssubu.vv v8, v9, v10
1513; RV32-NEXT:    addi sp, sp, 16
1514; RV32-NEXT:    ret
1515;
1516; RV64-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64:
1517; RV64:       # %bb.0: # %entry
1518; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
1519; RV64-NEXT:    vssubu.vx v8, v9, a0
1520; RV64-NEXT:    ret
1521entry:
1522  %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
1523    <vscale x 1 x i64> %0,
1524    <vscale x 1 x i64> %1,
1525    i64 %2,
1526    iXLen %3)
1527
1528  ret <vscale x 1 x i64> %a
1529}
1530
1531declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
1532  <vscale x 1 x i8>,
1533  <vscale x 1 x i8>,
1534  <vscale x 1 x i8>,
1535  iXLen);
1536
1537define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1538; CHECK-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8:
1539; CHECK:       # %bb.0: # %entry
1540; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1541; CHECK-NEXT:    vsub.vv v8, v9, v10
1542; CHECK-NEXT:    ret
1543entry:
1544  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
1545    <vscale x 1 x i8> %0,
1546    <vscale x 1 x i8> %1,
1547    <vscale x 1 x i8> %2,
1548    iXLen %3)
1549
1550  ret <vscale x 1 x i8> %a
1551}
1552
1553declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
1554  <vscale x 1 x i16>,
1555  <vscale x 1 x i8>,
1556  <vscale x 1 x i8>,
1557  iXLen);
1558
1559define <vscale x 1 x i16> @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1560; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8:
1561; CHECK:       # %bb.0: # %entry
1562; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1563; CHECK-NEXT:    vwadd.vv v8, v9, v10
1564; CHECK-NEXT:    ret
1565entry:
1566  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
1567    <vscale x 1 x i16> %0,
1568    <vscale x 1 x i8> %1,
1569    <vscale x 1 x i8> %2,
1570    iXLen %3)
1571
1572  ret <vscale x 1 x i16> %a
1573}
1574
; Widening family (vwadd.w/vwaddu/vwmul/vwmulu/vwmulsu/vwsub): i8 sources
; widened to an i16 result; passthru again forces the "tu" policy.
1575declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
1576  <vscale x 1 x i16>,
1577  <vscale x 1 x i16>,
1578  <vscale x 1 x i8>,
1579  iXLen);
1580
1581define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1582; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8:
1583; CHECK:       # %bb.0: # %entry
1584; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1585; CHECK-NEXT:    vwadd.wv v8, v9, v10
1586; CHECK-NEXT:    ret
1587entry:
1588  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
1589    <vscale x 1 x i16> %0,
1590    <vscale x 1 x i16> %1,
1591    <vscale x 1 x i8> %2,
1592    iXLen %3)
1593
1594  ret <vscale x 1 x i16> %a
1595}
1596
1597declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
1598  <vscale x 1 x i16>,
1599  <vscale x 1 x i8>,
1600  <vscale x 1 x i8>,
1601  iXLen);
1602
1603define <vscale x 1 x i16> @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1604; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8:
1605; CHECK:       # %bb.0: # %entry
1606; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1607; CHECK-NEXT:    vwaddu.vv v8, v9, v10
1608; CHECK-NEXT:    ret
1609entry:
1610  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
1611    <vscale x 1 x i16> %0,
1612    <vscale x 1 x i8> %1,
1613    <vscale x 1 x i8> %2,
1614    iXLen %3)
1615
1616  ret <vscale x 1 x i16> %a
1617}
1618
1619declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
1620  <vscale x 1 x i16>,
1621  <vscale x 1 x i8>,
1622  <vscale x 1 x i8>,
1623  iXLen);
1624
1625define <vscale x 1 x i16> @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1626; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8:
1627; CHECK:       # %bb.0: # %entry
1628; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1629; CHECK-NEXT:    vwmul.vv v8, v9, v10
1630; CHECK-NEXT:    ret
1631entry:
1632  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8(
1633    <vscale x 1 x i16> %0,
1634    <vscale x 1 x i8> %1,
1635    <vscale x 1 x i8> %2,
1636    iXLen %3)
1637
1638  ret <vscale x 1 x i16> %a
1639}
1640
1641declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
1642  <vscale x 1 x i16>,
1643  <vscale x 1 x i8>,
1644  <vscale x 1 x i8>,
1645  iXLen);
1646
1647define <vscale x 1 x i16> @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1648; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8:
1649; CHECK:       # %bb.0: # %entry
1650; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1651; CHECK-NEXT:    vwmulu.vv v8, v9, v10
1652; CHECK-NEXT:    ret
1653entry:
1654  %a = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8(
1655    <vscale x 1 x i16> %0,
1656    <vscale x 1 x i8> %1,
1657    <vscale x 1 x i8> %2,
1658    iXLen %3)
1659
1660  ret <vscale x 1 x i16> %a
1661}
1662
1663declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
1664  <vscale x 1 x i16>,
1665  <vscale x 1 x i8>,
1666  <vscale x 1 x i8>,
1667  iXLen);
1668
1669define <vscale x 1 x i16> @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1670; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8:
1671; CHECK:       # %bb.0: # %entry
1672; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1673; CHECK-NEXT:    vwmulsu.vv v8, v9, v10
1674; CHECK-NEXT:    ret
1675entry:
1676  %a = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8(
1677    <vscale x 1 x i16> %0,
1678    <vscale x 1 x i8> %1,
1679    <vscale x 1 x i8> %2,
1680    iXLen %3)
1681
1682  ret <vscale x 1 x i16> %a
1683}
1684
1685declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
1686  <vscale x 1 x i16>,
1687  <vscale x 1 x i8>,
1688  <vscale x 1 x i8>,
1689  iXLen);
1690
1691define <vscale x 1 x i16> @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1692; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8:
1693; CHECK:       # %bb.0: # %entry
1694; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1695; CHECK-NEXT:    vwsub.vv v8, v9, v10
1696; CHECK-NEXT:    ret
1697entry:
1698  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
1699    <vscale x 1 x i16> %0,
1700    <vscale x 1 x i8> %1,
1701    <vscale x 1 x i8> %2,
1702    iXLen %3)
1703
1704  ret <vscale x 1 x i16> %a
1705}
1706
; Widening subtract (.w forms) and vxor. The "_tied" variant passes the same
; value as both passthru and first source, checking the destination register
; may be tied to the wide operand (v8 used as both input and output).
1707declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
1708  <vscale x 1 x i16>,
1709  <vscale x 1 x i16>,
1710  <vscale x 1 x i8>,
1711  iXLen);
1712
1713define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1714; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8:
1715; CHECK:       # %bb.0: # %entry
1716; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1717; CHECK-NEXT:    vwsub.wv v8, v9, v10
1718; CHECK-NEXT:    ret
1719entry:
1720  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
1721    <vscale x 1 x i16> %0,
1722    <vscale x 1 x i16> %1,
1723    <vscale x 1 x i8> %2,
1724    iXLen %3)
1725
1726  ret <vscale x 1 x i16> %a
1727}
1728
1729define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
1730; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied:
1731; CHECK:       # %bb.0: # %entry
1732; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1733; CHECK-NEXT:    vwsub.wv v8, v8, v9
1734; CHECK-NEXT:    ret
1735entry:
1736  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
1737    <vscale x 1 x i16> %0,
1738    <vscale x 1 x i16> %0,
1739    <vscale x 1 x i8> %1,
1740    iXLen %2)
1741
1742  ret <vscale x 1 x i16> %a
1743}
1744
1745declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
1746  <vscale x 1 x i16>,
1747  <vscale x 1 x i8>,
1748  <vscale x 1 x i8>,
1749  iXLen);
1750
1751define <vscale x 1 x i16> @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1752; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8:
1753; CHECK:       # %bb.0: # %entry
1754; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1755; CHECK-NEXT:    vwsubu.vv v8, v9, v10
1756; CHECK-NEXT:    ret
1757entry:
1758  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
1759    <vscale x 1 x i16> %0,
1760    <vscale x 1 x i8> %1,
1761    <vscale x 1 x i8> %2,
1762    iXLen %3)
1763
1764  ret <vscale x 1 x i16> %a
1765}
1766
1767declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
1768  <vscale x 1 x i16>,
1769  <vscale x 1 x i16>,
1770  <vscale x 1 x i8>,
1771  iXLen);
1772
1773define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1774; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8:
1775; CHECK:       # %bb.0: # %entry
1776; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1777; CHECK-NEXT:    vwsubu.wv v8, v9, v10
1778; CHECK-NEXT:    ret
1779entry:
1780  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
1781    <vscale x 1 x i16> %0,
1782    <vscale x 1 x i16> %1,
1783    <vscale x 1 x i8> %2,
1784    iXLen %3)
1785
1786  ret <vscale x 1 x i16> %a
1787}
1788
1789declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
1790  <vscale x 1 x i8>,
1791  <vscale x 1 x i8>,
1792  <vscale x 1 x i8>,
1793  iXLen);
1794
1795define <vscale x 1 x i8> @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
1796; CHECK-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8:
1797; CHECK:       # %bb.0: # %entry
1798; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1799; CHECK-NEXT:    vxor.vv v8, v9, v10
1800; CHECK-NEXT:    ret
1801entry:
1802  %a = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8(
1803    <vscale x 1 x i8> %0,
1804    <vscale x 1 x i8> %1,
1805    <vscale x 1 x i8> %2,
1806    iXLen %3)
1807
1808  ret <vscale x 1 x i8> %a
1809}
1810
1811declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
1812  <vscale x 1 x i64>,
1813  <vscale x 1 x i8>,
1814  iXLen);
1815
1816define <vscale x 1 x i64> @intrinsic_vsext_vf8_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
1817; CHECK-LABEL: intrinsic_vsext_vf8_nxv1i64:
1818; CHECK:       # %bb.0: # %entry
1819; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
1820; CHECK-NEXT:    vsext.vf8 v8, v9
1821; CHECK-NEXT:    ret
1822entry:
1823  %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
1824    <vscale x 1 x i64> %0,
1825    <vscale x 1 x i8> %1,
1826    iXLen %2)
1827
1828  ret <vscale x 1 x i64> %a
1829}
1830
1831declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
1832  <vscale x 1 x i64>,
1833  <vscale x 1 x i8>,
1834  iXLen);
1835
; Zero-extend i8 -> i64 (vzext.vf8) with passthru: "tu" policy, e64/m1 vtype.
1836define <vscale x 1 x i64> @intrinsic_vzext_vf8_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
1837; CHECK-LABEL: intrinsic_vzext_vf8_nxv1i64:
1838; CHECK:       # %bb.0: # %entry
1839; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
1840; CHECK-NEXT:    vzext.vf8 v8, v9
1841; CHECK-NEXT:    ret
1842entry:
1843  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
1844    <vscale x 1 x i64> %0,
1845    <vscale x 1 x i8> %1,
1846    iXLen %2)
1847
1848  ret <vscale x 1 x i64> %a
1849}
1850
1851declare <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
1852  <vscale x 2 x i16>,
1853  <vscale x 2 x float>,
1854  iXLen, iXLen);
1855
; Narrowing f32 -> i16 convert with passthru; rounding-mode operand is 7
; (dynamic frm, so no explicit frm write is expected in the output).
1856define <vscale x 2 x i16> @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( <vscale x 2 x i16> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
1857; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32:
1858; CHECK:       # %bb.0: # %entry
1859; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
1860; CHECK-NEXT:    vfncvt.x.f.w v8, v9
1861; CHECK-NEXT:    ret
1862entry:
1863  %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
1864    <vscale x 2 x i16> %0,
1865    <vscale x 2 x float> %1,
1866    iXLen 7, iXLen %2)
1867
1868  ret <vscale x 2 x i16> %a
1869}
1870
1871declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
1872  <vscale x 1 x i8>,
1873  iXLen);
1874
; Nullary vid.v with a passthru operand still selects a "tu" vsetvli.
1875define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
1876; CHECK-LABEL: intrinsic_vid_v_nxv1i8:
1877; CHECK:       # %bb.0: # %entry
1878; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
1879; CHECK-NEXT:    vid.v v8
1880; CHECK-NEXT:    ret
1881entry:
1882  %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
1883    <vscale x 1 x i8> %0,
1884    iXLen %1)
1885
1886  ret <vscale x 1 x i8> %a
1887}
1888
1889declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
1890  <vscale x 1 x i16>,
1891  <vscale x 1 x half>,
1892  iXLen);
1893
; vfclass.v with passthru: "tu" policy. NOTE: the define's parameter list is
; split across lines, and update_llc_test_checks.py inserted the CHECK lines
; between the opening paren and the parameters — valid IR, leave as generated.
1894define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
1895; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
1896; CHECK:       # %bb.0: # %entry
1897; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
1898; CHECK-NEXT:    vfclass.v v8, v9
1899; CHECK-NEXT:    ret
1900  <vscale x 1 x i16> %0,
1901  <vscale x 1 x half> %1,
1902  iXLen %2) nounwind {
1903entry:
1904  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
1905    <vscale x 1 x i16> %0,
1906    <vscale x 1 x half> %1,
1907    iXLen %2)
1908
1909  ret <vscale x 1 x i16> %a
1910}
1911
1912declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
1913  <vscale x 1 x half>,
1914  <vscale x 1 x i16>,
1915  iXLen, iXLen);
1916
; i16 -> f16 convert with passthru; rounding-mode operand 7 = dynamic frm.
1917define <vscale x 1 x half> @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
1918; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16:
1919; CHECK:       # %bb.0: # %entry
1920; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
1921; CHECK-NEXT:    vfcvt.f.x.v v8, v9
1922; CHECK-NEXT:    ret
1923entry:
1924  %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
1925    <vscale x 1 x half> %0,
1926    <vscale x 1 x i16> %1,
1927    iXLen 7, iXLen %2)
1928
1929  ret <vscale x 1 x half> %a
1930}
1931
1932declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
1933  <vscale x 1 x half>,
1934  <vscale x 1 x i16>,
1935  iXLen, iXLen);
1936
; u16 -> f16 convert with passthru; rounding-mode operand 7 = dynamic frm.
1937define <vscale x 1 x half> @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
1938; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16:
1939; CHECK:       # %bb.0: # %entry
1940; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
1941; CHECK-NEXT:    vfcvt.f.xu.v v8, v9
1942; CHECK-NEXT:    ret
1943entry:
1944  %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
1945    <vscale x 1 x half> %0,
1946    <vscale x 1 x i16> %1,
1947    iXLen 7, iXLen %2)
1948
1949  ret <vscale x 1 x half> %a
1950}
1951
1952declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
1953  <vscale x 1 x i16>,
1954  <vscale x 1 x half>,
1955  iXLen);
1956
; f16 -> i16 truncating (rtz) convert with passthru; no frm operand needed.
1957define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
1958; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16:
1959; CHECK:       # %bb.0: # %entry
1960; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
1961; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9
1962; CHECK-NEXT:    ret
1963entry:
1964  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
1965    <vscale x 1 x i16> %0,
1966    <vscale x 1 x half> %1,
1967    iXLen %2)
1968
1969  ret <vscale x 1 x i16> %a
1970}
1971
1972declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
1973  <vscale x 1 x i16>,
1974  <vscale x 1 x half>,
1975  iXLen);
1976
; f16 -> u16 truncating (rtz) convert with passthru; no frm operand needed.
1977define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
1978; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16:
1979; CHECK:       # %bb.0: # %entry
1980; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
1981; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9
1982; CHECK-NEXT:    ret
1983entry:
1984  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
1985    <vscale x 1 x i16> %0,
1986    <vscale x 1 x half> %1,
1987    iXLen %2)
1988
1989  ret <vscale x 1 x i16> %a
1990}
1991
1992declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
1993  <vscale x 1 x i16>,
1994  <vscale x 1 x half>,
1995  iXLen, iXLen);
1996
; f16 -> i16 convert with passthru; rounding-mode operand 7 = dynamic frm.
1997define <vscale x 1 x i16> @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
1998; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16:
1999; CHECK:       # %bb.0: # %entry
2000; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2001; CHECK-NEXT:    vfcvt.x.f.v v8, v9
2002; CHECK-NEXT:    ret
2003entry:
2004  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
2005    <vscale x 1 x i16> %0,
2006    <vscale x 1 x half> %1,
2007    iXLen 7, iXLen %2)
2008
2009  ret <vscale x 1 x i16> %a
2010}
2011
2012declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
2013  <vscale x 1 x half>,
2014  <vscale x 1 x float>,
2015  iXLen, iXLen);
2016
; Narrowing f32 -> f16 convert with passthru; frm operand 7 = dynamic.
2017define <vscale x 1 x half> @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
2018; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32:
2019; CHECK:       # %bb.0: # %entry
2020; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2021; CHECK-NEXT:    vfncvt.f.f.w v8, v9
2022; CHECK-NEXT:    ret
2023entry:
2024  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
2025    <vscale x 1 x half> %0,
2026    <vscale x 1 x float> %1,
2027    iXLen 7, iXLen %2)
2028
2029  ret <vscale x 1 x half> %a
2030}
2031
2032declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
2033  <vscale x 1 x i16>,
2034  <vscale x 1 x half>,
2035  iXLen, iXLen);
2036
; f16 -> u16 convert with passthru; rounding-mode operand 7 = dynamic frm.
2037define <vscale x 1 x i16> @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
2038; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16:
2039; CHECK:       # %bb.0: # %entry
2040; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2041; CHECK-NEXT:    vfcvt.xu.f.v v8, v9
2042; CHECK-NEXT:    ret
2043entry:
2044  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
2045    <vscale x 1 x i16> %0,
2046    <vscale x 1 x half> %1,
2047    iXLen 7, iXLen %2)
2048
2049  ret <vscale x 1 x i16> %a
2050}
2051
2052declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
2053  <vscale x 1 x half>,
2054  <vscale x 1 x i32>,
2055  iXLen, iXLen);
2056
; Narrowing i32 -> f16 convert with passthru; frm operand 7 = dynamic.
2057define <vscale x 1 x half> @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
2058; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32:
2059; CHECK:       # %bb.0: # %entry
2060; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2061; CHECK-NEXT:    vfncvt.f.x.w v8, v9
2062; CHECK-NEXT:    ret
2063entry:
2064  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
2065    <vscale x 1 x half> %0,
2066    <vscale x 1 x i32> %1,
2067    iXLen 7, iXLen %2)
2068
2069  ret <vscale x 1 x half> %a
2070}
2071
2072declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
2073  <vscale x 1 x half>,
2074  <vscale x 1 x i32>,
2075  iXLen, iXLen);
2076
; Narrowing u32 -> f16 convert with passthru; frm operand 7 = dynamic.
2077define <vscale x 1 x half> @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
2078; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32:
2079; CHECK:       # %bb.0: # %entry
2080; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2081; CHECK-NEXT:    vfncvt.f.xu.w v8, v9
2082; CHECK-NEXT:    ret
2083entry:
2084  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
2085    <vscale x 1 x half> %0,
2086    <vscale x 1 x i32> %1,
2087    iXLen 7, iXLen %2)
2088
2089  ret <vscale x 1 x half> %a
2090}
2091
2092declare <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
2093  <vscale x 1 x half>,
2094  <vscale x 1 x float>,
2095  iXLen);
2096
; Narrowing f32 -> f16 round-toward-odd convert with passthru; fixed rounding,
; so the intrinsic carries no frm operand.
2097define <vscale x 1 x half> @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
2098; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32:
2099; CHECK:       # %bb.0: # %entry
2100; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2101; CHECK-NEXT:    vfncvt.rod.f.f.w v8, v9
2102; CHECK-NEXT:    ret
2103entry:
2104  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
2105    <vscale x 1 x half> %0,
2106    <vscale x 1 x float> %1,
2107    iXLen %2)
2108
2109  ret <vscale x 1 x half> %a
2110}
2111
2112declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
2113  <vscale x 1 x i8>,
2114  <vscale x 1 x half>,
2115  iXLen);
2116
; Narrowing f16 -> i8 truncating (rtz) convert with passthru; no frm operand.
2117define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
2118; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16:
2119; CHECK:       # %bb.0: # %entry
2120; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
2121; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
2122; CHECK-NEXT:    ret
2123entry:
2124  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
2125    <vscale x 1 x i8> %0,
2126    <vscale x 1 x half> %1,
2127    iXLen %2)
2128
2129  ret <vscale x 1 x i8> %a
2130}
2131
2132declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
2133  <vscale x 1 x i8>,
2134  <vscale x 1 x half>,
2135  iXLen);
2136
; Narrowing f16 -> u8 truncating (rtz) convert with passthru; no frm operand.
2137define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
2138; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16:
2139; CHECK:       # %bb.0: # %entry
2140; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
2141; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9
2142; CHECK-NEXT:    ret
2143entry:
2144  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
2145    <vscale x 1 x i8> %0,
2146    <vscale x 1 x half> %1,
2147    iXLen %2)
2148
2149  ret <vscale x 1 x i8> %a
2150}
2151
2152declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
2153  <vscale x 1 x i8>,
2154  <vscale x 1 x half>,
2155  iXLen, iXLen);
2156
; Narrowing f16 -> i8 convert with passthru; frm operand 7 = dynamic.
2157define <vscale x 1 x i8> @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
2158; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16:
2159; CHECK:       # %bb.0: # %entry
2160; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
2161; CHECK-NEXT:    vfncvt.x.f.w v8, v9
2162; CHECK-NEXT:    ret
2163entry:
2164  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
2165    <vscale x 1 x i8> %0,
2166    <vscale x 1 x half> %1,
2167    iXLen 7, iXLen %2)
2168
2169  ret <vscale x 1 x i8> %a
2170}
2171
2172declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
2173  <vscale x 1 x i8>,
2174  <vscale x 1 x half>,
2175  iXLen, iXLen);
2176
; Narrowing f16 -> u8 convert with passthru; frm operand 7 = dynamic.
2177define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
2178; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16:
2179; CHECK:       # %bb.0: # %entry
2180; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
2181; CHECK-NEXT:    vfncvt.xu.f.w v8, v9
2182; CHECK-NEXT:    ret
2183entry:
2184  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
2185    <vscale x 1 x i8> %0,
2186    <vscale x 1 x half> %1,
2187    iXLen 7, iXLen %2)
2188
2189  ret <vscale x 1 x i8> %a
2190}
2191
2192declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
2193  <vscale x 1 x half>,
2194  <vscale x 1 x half>,
2195  iXLen, iXLen);
2196
; Reciprocal estimate (vfrec7.v) with passthru; frm operand 7 = dynamic.
2197define <vscale x 1 x half> @intrinsic_vfrec7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
2198; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16:
2199; CHECK:       # %bb.0: # %entry
2200; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2201; CHECK-NEXT:    vfrec7.v v8, v9
2202; CHECK-NEXT:    ret
2203entry:
2204  %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
2205    <vscale x 1 x half> %0,
2206    <vscale x 1 x half> %1,
2207    iXLen 7, iXLen %2)
2208
2209  ret <vscale x 1 x half> %a
2210}
2211
2212declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
2213  <vscale x 1 x half>,
2214  <vscale x 1 x half>,
2215  iXLen);
2216
; Reciprocal sqrt estimate (vfrsqrt7.v) with passthru; no frm operand.
2217define <vscale x 1 x half> @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
2218; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16:
2219; CHECK:       # %bb.0: # %entry
2220; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2221; CHECK-NEXT:    vfrsqrt7.v v8, v9
2222; CHECK-NEXT:    ret
2223entry:
2224  %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
2225    <vscale x 1 x half> %0,
2226    <vscale x 1 x half> %1,
2227    iXLen %2)
2228
2229  ret <vscale x 1 x half> %a
2230}
2231
2232declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
2233  <vscale x 1 x half>,
2234  <vscale x 1 x half>,
2235  iXLen, iXLen);
2236
; Square root (vfsqrt.v) with passthru; frm operand 7 = dynamic.
2237define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
2238; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
2239; CHECK:       # %bb.0: # %entry
2240; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2241; CHECK-NEXT:    vfsqrt.v v8, v9
2242; CHECK-NEXT:    ret
2243entry:
2244  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
2245    <vscale x 1 x half> %0,
2246    <vscale x 1 x half> %1,
2247    iXLen 7, iXLen %2)
2248
2249  ret <vscale x 1 x half> %a
2250}
2251
2252declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
2253  <vscale x 1 x float>,
2254  <vscale x 1 x half>,
2255  iXLen);
2256
; Widening f16 -> f32 convert with passthru; vtype uses the narrow (e16) SEW.
2257define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
2258; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
2259; CHECK:       # %bb.0: # %entry
2260; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2261; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
2262; CHECK-NEXT:    ret
2263entry:
2264  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
2265    <vscale x 1 x float> %0,
2266    <vscale x 1 x half> %1,
2267    iXLen %2)
2268
2269  ret <vscale x 1 x float> %a
2270}
2271
2272declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
2273  <vscale x 1 x half>,
2274  <vscale x 1 x i8>,
2275  iXLen);
2276
; Widening i8 -> f16 convert with passthru; vtype uses the narrow (e8) SEW.
2277define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
2278; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8:
2279; CHECK:       # %bb.0: # %entry
2280; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
2281; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
2282; CHECK-NEXT:    ret
2283entry:
2284  %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
2285    <vscale x 1 x half> %0,
2286    <vscale x 1 x i8> %1,
2287    iXLen %2)
2288
2289  ret <vscale x 1 x half> %a
2290}
2291
2292declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
2293  <vscale x 1 x half>,
2294  <vscale x 1 x i8>,
2295  iXLen);
2296
; Widening u8 -> f16 convert with passthru; vtype uses the narrow (e8) SEW.
2297define <vscale x 1 x half> @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
2298; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8:
2299; CHECK:       # %bb.0: # %entry
2300; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
2301; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
2302; CHECK-NEXT:    ret
2303entry:
2304  %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
2305    <vscale x 1 x half> %0,
2306    <vscale x 1 x i8> %1,
2307    iXLen %2)
2308
2309  ret <vscale x 1 x half> %a
2310}
2311
2312declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
2313  <vscale x 1 x i32>,
2314  <vscale x 1 x half>,
2315  iXLen);
2316
; Widening f16 -> i32 truncating (rtz) convert with passthru; no frm operand.
2317define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
2318; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16:
2319; CHECK:       # %bb.0: # %entry
2320; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2321; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9
2322; CHECK-NEXT:    ret
2323entry:
2324  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
2325    <vscale x 1 x i32> %0,
2326    <vscale x 1 x half> %1,
2327    iXLen %2)
2328
2329  ret <vscale x 1 x i32> %a
2330}
2331
2332declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
2333  <vscale x 1 x i32>,
2334  <vscale x 1 x half>,
2335  iXLen);
2336
; Widening f16 -> u32 truncating (rtz) convert with passthru; no frm operand.
2337define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
2338; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16:
2339; CHECK:       # %bb.0: # %entry
2340; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2341; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9
2342; CHECK-NEXT:    ret
2343entry:
2344  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
2345    <vscale x 1 x i32> %0,
2346    <vscale x 1 x half> %1,
2347    iXLen %2)
2348
2349  ret <vscale x 1 x i32> %a
2350}
2351
2352declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
2353  <vscale x 1 x i32>,
2354  <vscale x 1 x half>,
2355  iXLen, iXLen);
2356
; Widening f16 -> i32 convert with passthru; frm operand 7 = dynamic.
2357define <vscale x 1 x i32> @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
2358; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16:
2359; CHECK:       # %bb.0: # %entry
2360; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2361; CHECK-NEXT:    vfwcvt.x.f.v v8, v9
2362; CHECK-NEXT:    ret
2363entry:
2364  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
2365    <vscale x 1 x i32> %0,
2366    <vscale x 1 x half> %1,
2367    iXLen 7, iXLen %2)
2368
2369  ret <vscale x 1 x i32> %a
2370}
2371
2372declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
2373  <vscale x 1 x i32>,
2374  <vscale x 1 x half>,
2375  iXLen, iXLen);
2376
; Widening f16 -> u32 convert with passthru; frm operand 7 = dynamic.
2377define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
2378; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16:
2379; CHECK:       # %bb.0: # %entry
2380; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2381; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9
2382; CHECK-NEXT:    ret
2383entry:
2384  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
2385    <vscale x 1 x i32> %0,
2386    <vscale x 1 x half> %1,
2387    iXLen 7, iXLen %2)
2388
2389  ret <vscale x 1 x i32> %a
2390}
2391
2392declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
2393  <vscale x 1 x i8>,
2394  <vscale x 1 x i1>,
2395  iXLen);
2396
; viota.m over mask v0 with passthru: expect a "tu" vsetvli.
2397define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
2398; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1:
2399; CHECK:       # %bb.0: # %entry
2400; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
2401; CHECK-NEXT:    viota.m v8, v0
2402; CHECK-NEXT:    ret
2403entry:
2404  %a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
2405    <vscale x 1 x i8> %0,
2406    <vscale x 1 x i1> %1,
2407    iXLen %2)
2408
2409  ret <vscale x 1 x i8> %a
2410}
2411
2412declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
2413  <vscale x 1 x i8>,
2414  <vscale x 1 x i8>,
2415  <vscale x 1 x i8>,
2416  <vscale x 1 x i1>,
2417  iXLen);
2418
; Add-with-carry (vadc.vvm) with passthru: carry mask stays in v0, "tu" policy.
2419define <vscale x 1 x i8> @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
2420; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8:
2421; CHECK:       # %bb.0: # %entry
2422; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
2423; CHECK-NEXT:    vadc.vvm v8, v9, v10, v0
2424; CHECK-NEXT:    ret
2425entry:
2426  %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
2427    <vscale x 1 x i8> %0,
2428    <vscale x 1 x i8> %1,
2429    <vscale x 1 x i8> %2,
2430    <vscale x 1 x i1> %3,
2431    iXLen %4)
2432
2433  ret <vscale x 1 x i8> %a
2434}
2435
2436declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
2437  <vscale x 1 x i8>,
2438  <vscale x 1 x i8>,
2439  <vscale x 1 x i8>,
2440  <vscale x 1 x i1>,
2441  iXLen);
2442
; Subtract-with-borrow (vsbc.vvm) with passthru: borrow mask in v0, "tu" policy.
2443define <vscale x 1 x i8> @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
2444; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8:
2445; CHECK:       # %bb.0: # %entry
2446; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
2447; CHECK-NEXT:    vsbc.vvm v8, v9, v10, v0
2448; CHECK-NEXT:    ret
2449entry:
2450  %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(
2451    <vscale x 1 x i8> %0,
2452    <vscale x 1 x i8> %1,
2453    <vscale x 1 x i8> %2,
2454    <vscale x 1 x i1> %3,
2455    iXLen %4)
2456
2457  ret <vscale x 1 x i8> %a
2458}
2459
2460declare <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
2461  <vscale x 1 x i8>,
2462  <vscale x 1 x i8>,
2463  <vscale x 1 x i8>,
2464  <vscale x 1 x i1>,
2465  iXLen);
2466
; vmerge.vvm with passthru: select mask in v0, "tu" policy preserves tail of %0.
2467define <vscale x 1 x i8> @intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
2468; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8:
2469; CHECK:       # %bb.0: # %entry
2470; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
2471; CHECK-NEXT:    vmerge.vvm v8, v9, v10, v0
2472; CHECK-NEXT:    ret
2473entry:
2474  %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
2475    <vscale x 1 x i8> %0,
2476    <vscale x 1 x i8> %1,
2477    <vscale x 1 x i8> %2,
2478    <vscale x 1 x i1> %3,
2479    iXLen %4)
2480
2481  ret <vscale x 1 x i8> %a
2482}
2483
2484declare <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
2485  <vscale x 8 x i64>,
2486  <vscale x 8 x i64>,
2487  i64,
2488  <vscale x 8 x i1>,
2489  iXLen);
2490
; vmerge with an i64 scalar operand: on RV32 the 64-bit scalar is spilled to
; the stack and splatted via a zero-strided vlse64.v (then vmerge.vvm); on
; RV64 it stays in a GPR and lowers directly to vmerge.vxm. Both keep "tu".
2491define <vscale x 8 x i64> @intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
2492; RV32-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64:
2493; RV32:       # %bb.0: # %entry
2494; RV32-NEXT:    addi sp, sp, -16
2495; RV32-NEXT:    sw a0, 8(sp)
2496; RV32-NEXT:    sw a1, 12(sp)
2497; RV32-NEXT:    addi a0, sp, 8
2498; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
2499; RV32-NEXT:    vlse64.v v24, (a0), zero
2500; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
2501; RV32-NEXT:    vmerge.vvm v8, v16, v24, v0
2502; RV32-NEXT:    addi sp, sp, 16
2503; RV32-NEXT:    ret
2504;
2505; RV64-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64:
2506; RV64:       # %bb.0: # %entry
2507; RV64-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
2508; RV64-NEXT:    vmerge.vxm v8, v16, a0, v0
2509; RV64-NEXT:    ret
2510entry:
2511  %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
2512    <vscale x 8 x i64> %0,
2513    <vscale x 8 x i64> %1,
2514    i64 %2,
2515    <vscale x 8 x i1> %3,
2516    iXLen %4)
2517
2518  ret <vscale x 8 x i64> %a
2519}
2520
; vmerge with the i64 constant 68719476735 (0xF_FFFFFFFF), which does not fit
; a 5-bit vimm: RV32 materializes it on the stack and splats with a
; zero-strided load; RV64 builds it in a GPR (li -1; srli 28) and uses
; vmerge.vxm. Both paths keep the "tu" policy for the passthru.
2521define <vscale x 8 x i64> @intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
2522; RV32-LABEL: intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64:
2523; RV32:       # %bb.0: # %entry
2524; RV32-NEXT:    addi sp, sp, -16
2525; RV32-NEXT:    li a1, 15
2526; RV32-NEXT:    li a2, -1
2527; RV32-NEXT:    sw a2, 8(sp)
2528; RV32-NEXT:    sw a1, 12(sp)
2529; RV32-NEXT:    addi a1, sp, 8
2530; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
2531; RV32-NEXT:    vlse64.v v24, (a1), zero
2532; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, ma
2533; RV32-NEXT:    vmerge.vvm v8, v16, v24, v0
2534; RV32-NEXT:    addi sp, sp, 16
2535; RV32-NEXT:    ret
2536;
2537; RV64-LABEL: intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64:
2538; RV64:       # %bb.0: # %entry
2539; RV64-NEXT:    li a1, -1
2540; RV64-NEXT:    srli a1, a1, 28
2541; RV64-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
2542; RV64-NEXT:    vmerge.vxm v8, v16, a1, v0
2543; RV64-NEXT:    ret
2544entry:
2545  %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
2546    <vscale x 8 x i64> %0,
2547    <vscale x 8 x i64> %1,
2548    i64 68719476735,
2549    <vscale x 8 x i1> %2,
2550    iXLen %3)
2551
2552  ret <vscale x 8 x i64> %a
2553}
2554
2555declare <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
2556  <vscale x 8 x double>,
2557  <vscale x 8 x double>,
2558  double,
2559  <vscale x 8 x i1>,
2560  iXLen);
2561
; vfmerge.vfm with an FP scalar in fa0 and passthru: "tu" policy, mask in v0.
2562define <vscale x 8 x double> @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
2563; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64:
2564; CHECK:       # %bb.0: # %entry
2565; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
2566; CHECK-NEXT:    vfmerge.vfm v8, v16, fa0, v0
2567; CHECK-NEXT:    ret
2568entry:
2569  %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
2570    <vscale x 8 x double> %0,
2571    <vscale x 8 x double> %1,
2572    double %2,
2573    <vscale x 8 x i1> %3,
2574    iXLen %4)
2575
2576  ret <vscale x 8 x double> %a
2577}
2578
2579declare <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16(
2580  <vscale x 1 x half>,
2581  <vscale x 1 x half>,
2582  <vscale x 1 x half>,
2583  <vscale x 1 x i1>,
2584  iXLen);
2585
; FP vmerge.vvm with passthru: same lowering as the integer case, "tu" policy.
2586define <vscale x 1 x half> @intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
2587; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16:
2588; CHECK:       # %bb.0: # %entry
2589; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2590; CHECK-NEXT:    vmerge.vvm v8, v9, v10, v0
2591; CHECK-NEXT:    ret
2592entry:
2593  %a = call <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16(
2594    <vscale x 1 x half> %0,
2595    <vscale x 1 x half> %1,
2596    <vscale x 1 x half> %2,
2597    <vscale x 1 x i1> %3,
2598    iXLen %4)
2599
2600  ret <vscale x 1 x half> %a
2601}
2602
2603declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
2604  <vscale x 1 x half>,
2605  <vscale x 1 x half>,
2606  half,
2607  <vscale x 1 x i1>,
2608  iXLen);
2609
; vfmerge with a +0.0 scalar is folded to the integer vmerge.vim with
; immediate 0 (same bit pattern), avoiding an FP register; "tu" preserved.
2610define <vscale x 1 x half> @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
2611; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16:
2612; CHECK:       # %bb.0: # %entry
2613; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
2614; CHECK-NEXT:    vmerge.vim v8, v9, 0, v0
2615; CHECK-NEXT:    ret
2616entry:
2617  %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
2618    <vscale x 1 x half> %0,
2619    <vscale x 1 x half> %1,
2620    half zeroinitializer,
2621    <vscale x 1 x i1> %2,
2622    iXLen %3)
2623
2624  ret <vscale x 1 x half> %a
2625}
2626
2627declare <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
2628  <vscale x 1 x i8>,
2629  <vscale x 1 x i8>,
2630  iXLen);
2631
; vmv.v.v (integer) with passthru: must not fold away; "tu" keeps %0's tail.
2632define <vscale x 1 x i8> @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
2633; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8:
2634; CHECK:       # %bb.0: # %entry
2635; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
2636; CHECK-NEXT:    vmv.v.v v8, v9
2637; CHECK-NEXT:    ret
2638entry:
2639  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
2640    <vscale x 1 x i8> %0,
2641    <vscale x 1 x i8> %1,
2642    iXLen %2)
2643
2644  ret <vscale x 1 x i8> %a
2645}
2646
2647declare <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32(
2648  <vscale x 1 x float>,
2649  <vscale x 1 x float>,
2650  iXLen);
2651
; vmv.v.v (FP element type) with passthru: e32 vtype, "tu" policy.
2652define <vscale x 1 x float> @intrinsic_vmv.v.v_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
2653; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32:
2654; CHECK:       # %bb.0: # %entry
2655; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
2656; CHECK-NEXT:    vmv.v.v v8, v9
2657; CHECK-NEXT:    ret
2658entry:
2659  %a = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32(
2660    <vscale x 1 x float> %0,
2661    <vscale x 1 x float> %1,
2662    iXLen %2)
2663
2664  ret <vscale x 1 x float> %a
2665}
2666
2667declare <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
2668  <vscale x 1 x i64>,
2669  i64,
2670  iXLen);
2671
; vmv.v.x of an i64 scalar with passthru: RV32 has no 64-bit GPR, so the
; scalar is stored to the stack and splatted with a zero-strided vlse64.v
; (still "tu"); RV64 lowers directly to vmv.v.x.
2672define <vscale x 1 x i64> @intrinsic_vmv.v.x_x_nxv1i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
2673; RV32-LABEL: intrinsic_vmv.v.x_x_nxv1i64:
2674; RV32:       # %bb.0: # %entry
2675; RV32-NEXT:    addi sp, sp, -16
2676; RV32-NEXT:    sw a0, 8(sp)
2677; RV32-NEXT:    sw a1, 12(sp)
2678; RV32-NEXT:    addi a0, sp, 8
2679; RV32-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
2680; RV32-NEXT:    vlse64.v v8, (a0), zero
2681; RV32-NEXT:    addi sp, sp, 16
2682; RV32-NEXT:    ret
2683;
2684; RV64-LABEL: intrinsic_vmv.v.x_x_nxv1i64:
2685; RV64:       # %bb.0: # %entry
2686; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
2687; RV64-NEXT:    vmv.v.x v8, a0
2688; RV64-NEXT:    ret
2689entry:
2690  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
2691    <vscale x 1 x i64> %0,
2692    i64 %1,
2693    iXLen %2)
2694
2695  ret <vscale x 1 x i64> %a
2696}
2697
2698declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
2699  <vscale x 1 x float>,
2700  float,
2701  iXLen);
2702
; vfmv.v.f splat of an FP scalar (fa0) with passthru: "tu" vsetvli expected.
2703define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
2704; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32:
2705; CHECK:       # %bb.0: # %entry
2706; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
2707; CHECK-NEXT:    vfmv.v.f v8, fa0
2708; CHECK-NEXT:    ret
2709entry:
2710  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
2711    <vscale x 1 x float> %0,
2712    float %1,
2713    iXLen %2)
2714
2715  ret <vscale x 1 x float> %a
2716}
2717