; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64

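; These tests call the unmasked forms of the intrinsics with an explicit
; policy operand: the trailing iXLen 1 requests a tail-agnostic result, so
; every vsetvli checked below should use "ta" rather than "tu". On the FP
; multiply-add intrinsics the iXLen 7 operand is the rounding mode
; (7 = dynamic frm).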
declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmacc.vv v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 7, iXLen %3, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 7, iXLen %3, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmsac.vv v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 7, iXLen %3, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 7, iXLen %3, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfnmacc.vv v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 7, iXLen %3, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfnmadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 7, iXLen %3, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfnmsac.vv v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 7, iXLen %3, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 7, iXLen %3, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwmacc.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwmsac.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwnmacc.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwnmsac.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
    <vscale x 1 x float> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 7, iXLen %3, iXLen 1)

  ret <vscale x 1 x float> %a
}

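; Integer multiply-adds with a scalar i64 operand. On RV32 an i64 does not
; fit in one GPR, so the scalar is spilled to the stack, splatted back with
; vlse64.v, and the .vv form is used instead; RV64 can use .vx directly.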
declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i64>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmacc.vv v8, v9, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmacc.vx v8, a0, v9
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i64> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i64>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmadd.vv v8, v10, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmadd.vx v8, a0, v9
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i64> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i64>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vnmsac.vv v8, v9, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vnmsac.vx v8, a0, v9
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i64> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i64>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vnmsub.vv v8, v10, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vnmsub.vx v8, a0, v9
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i64> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwmacc.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
  <vscale x 1 x i16>,
  i8,
  <vscale x 1 x i8>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vwmaccus.vx v8, a0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
    <vscale x 1 x i16> %0,
    i8 %1,
    <vscale x 1 x i8> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

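; Reductions. Each call passes an undef destination operand, which is what
; allows the tail-agnostic ("ta") vsetvli in the expected output.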
declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredsum.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
    <vscale x 8 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredand.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
    <vscale x 8 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredor.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
    <vscale x 8 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredxor.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
    <vscale x 8 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredminu.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
    <vscale x 8 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredmin.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
    <vscale x 8 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
    <vscale x 8 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
  <vscale x 8 x i8>,
  <vscale x 1 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 1 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vredmax.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
    <vscale x 8 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
  <vscale x 4 x i16>,
  <vscale x 1 x i8>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 1 x i8> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8(
    <vscale x 4 x i16> undef,
    <vscale x 1 x i8> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
  <vscale x 4 x i16>,
  <vscale x 1 x i8>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 1 x i8> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwredsum.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8(
    <vscale x 4 x i16> undef,
    <vscale x 1 x i8> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

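; FP reductions. vfredosum/vfredusum and the widening forms below carry an
; extra rounding-mode operand (7 = dynamic frm); vfredmax/vfredmin do not,
; since min/max never round.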
declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 1 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfredosum.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
    <vscale x 4 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 4 x half> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 1 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfredusum.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
    <vscale x 4 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 4 x half> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 1 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfredmax.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
    <vscale x 4 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 4 x half> %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 1 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfredmin.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
    <vscale x 4 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 4 x half> %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
  <vscale x 2 x float>,
  <vscale x 1 x half>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 1 x half> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwredosum.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16(
    <vscale x 2 x float> undef,
    <vscale x 1 x half> %0,
    <vscale x 2 x float> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16(
  <vscale x 2 x float>,
  <vscale x 1 x half>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 1 x half> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfwredusum.vs v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16(
    <vscale x 2 x float> undef,
    <vscale x 1 x half> %0,
    <vscale x 2 x float> %1,
    iXLen 7, iXLen %2)

  ret <vscale x 2 x float> %a
}

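; Slides. The operands after the two vectors are the slide offset, the VL,
; and the tail policy. vslidedown can take an undef destination, while
; vslideup keeps %0 tied as the destination because it reads the elements
; below the slide offset.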
declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    iXLen %1,
    iXLen %2,
    iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2,
    iXLen %3,
    iXLen 1)

  ret <vscale x 1 x i8> %a
}

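; Scalar inserts with an undef passthru. As in the vmacc tests above, RV32
; has to materialize the i64 scalar through the stack with vlse64.v.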
declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64>, i64, iXLen);

define <vscale x 1 x i64> @intrinsic_vmv.s.x_x_nxv1i64(i64 %0, iXLen %1) nounwind {
; RV32-LABEL: intrinsic_vmv.s.x_x_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v8, (a0), zero
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmv.s.x_x_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmv.s.x v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64> undef, i64 %0, iXLen %1)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, half, iXLen)

define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(half %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> undef, half %0, iXLen %1)
  ret <vscale x 1 x half> %a
}

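; vcompress requires a destination register group that does not overlap its
; sources, hence the extra vmv1r.v copy even with an undef passthru.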
declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vcompress_um_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vcompress_um_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vcompress.vm v9, v8, v0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i1> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}