; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs | FileCheck %s

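; vmv.v.v performs a vector-vector move. The first (passthru) operand supplies
; the tail elements of the result, which is why every vsetvli checked below
; selects the tail-undisturbed ("tu") policy.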
declare <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vmv.v.v_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vmv.v.v_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vmv.v.v_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vmv.v.v_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vmv.v.v_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vmv.v.v_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vmv.v.v_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vmv.v.v_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vmv.v.v_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vmv.v.v_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vmv.v.v_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vmv.v.v_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vmv.v.v_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vmv.v.v_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vmv.v.v_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vmv.v.v_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vmv.v.v_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vmv.v.v_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vmv.v.v_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vmv.v.v_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vmv.v.v_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

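; The floating-point cases below rely on the +zvfhmin and +zvfbfmin attributes
; in the RUN lines to make half and bfloat legal vector element types; the
; lowering is otherwise identical to the integer cases above.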
declare <vscale x 1 x half> @llvm.riscv.vmv.v.v.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vmv.v.v_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vmv.v.v.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vmv.v.v.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen);

define <vscale x 2 x half> @intrinsic_vmv.v.v_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vmv.v.v.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vmv.v.v.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vmv.v.v_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vmv.v.v.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vmv.v.v.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen);

define <vscale x 8 x half> @intrinsic_vmv.v.v_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vmv.v.v.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vmv.v.v.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen);

define <vscale x 16 x half> @intrinsic_vmv.v.v_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vmv.v.v.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vmv.v.v.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  iXLen);

define <vscale x 32 x half> @intrinsic_vmv.v.v_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vmv.v.v.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x bfloat> @llvm.riscv.vmv.v.v.nxv1bf16(
  <vscale x 1 x bfloat>,
  <vscale x 1 x bfloat>,
  iXLen);

define <vscale x 1 x bfloat> @intrinsic_vmv.v.v_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vmv.v.v.nxv1bf16(
    <vscale x 1 x bfloat> %0,
    <vscale x 1 x bfloat> %1,
    iXLen %2)

  ret <vscale x 1 x bfloat> %a
}

declare <vscale x 2 x bfloat> @llvm.riscv.vmv.v.v.nxv2bf16(
  <vscale x 2 x bfloat>,
  <vscale x 2 x bfloat>,
  iXLen);

define <vscale x 2 x bfloat> @intrinsic_vmv.v.v_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vmv.v.v.nxv2bf16(
    <vscale x 2 x bfloat> %0,
    <vscale x 2 x bfloat> %1,
    iXLen %2)

  ret <vscale x 2 x bfloat> %a
}

declare <vscale x 4 x bfloat> @llvm.riscv.vmv.v.v.nxv4bf16(
  <vscale x 4 x bfloat>,
  <vscale x 4 x bfloat>,
  iXLen);

define <vscale x 4 x bfloat> @intrinsic_vmv.v.v_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vmv.v.v.nxv4bf16(
    <vscale x 4 x bfloat> %0,
    <vscale x 4 x bfloat> %1,
    iXLen %2)

  ret <vscale x 4 x bfloat> %a
}

declare <vscale x 8 x bfloat> @llvm.riscv.vmv.v.v.nxv8bf16(
  <vscale x 8 x bfloat>,
  <vscale x 8 x bfloat>,
  iXLen);

define <vscale x 8 x bfloat> @intrinsic_vmv.v.v_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vmv.v.v.nxv8bf16(
    <vscale x 8 x bfloat> %0,
    <vscale x 8 x bfloat> %1,
    iXLen %2)

  ret <vscale x 8 x bfloat> %a
}

declare <vscale x 16 x bfloat> @llvm.riscv.vmv.v.v.nxv16bf16(
  <vscale x 16 x bfloat>,
  <vscale x 16 x bfloat>,
  iXLen);

define <vscale x 16 x bfloat> @intrinsic_vmv.v.v_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vmv.v.v.nxv16bf16(
    <vscale x 16 x bfloat> %0,
    <vscale x 16 x bfloat> %1,
    iXLen %2)

  ret <vscale x 16 x bfloat> %a
}

declare <vscale x 32 x bfloat> @llvm.riscv.vmv.v.v.nxv32bf16(
  <vscale x 32 x bfloat>,
  <vscale x 32 x bfloat>,
  iXLen);

define <vscale x 32 x bfloat> @intrinsic_vmv.v.v_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32bf16_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x bfloat> @llvm.riscv.vmv.v.v.nxv32bf16(
    <vscale x 32 x bfloat> %0,
    <vscale x 32 x bfloat> %1,
    iXLen %2)

  ret <vscale x 32 x bfloat> %a
}

declare <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vmv.v.v_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vmv.v.v_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vmv.v.v_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vmv.v.v_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vmv.v.v_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vmv.v.v_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vmv.v.v_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vmv.v.v_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vmv.v.v_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}
