; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
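; This test is written once against the placeholder type `iXLen`; the RUN
; lines above pipe the file through sed to substitute `i32` (riscv32) or
; `i64` (riscv64) before invoking llc, so a single body of tests and a
; single shared set of CHECK lines cover both XLENs. For example, the
; operand `iXLen %3` in the first test below reaches llc as `i32 %3` under
; the first RUN line and as `i64 %3` under the second.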

declare <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vmerge_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vmerge_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vmerge_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vmerge_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vmerge_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vmerge_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i1> %2,
    iXLen %3)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vmerge_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vmerge_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vmerge_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vmerge_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vmerge_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vmerge_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vmerge_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vmerge_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vmerge_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vmerge_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vmerge_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vmerge_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vmerge_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vmerge_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vmerge_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i64> %a
}

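; vmerge.vxm variants: the merge value is an XLEN-wide scalar passed in a
; GPR (a0 here, with the vector length in a1). Per the RVV spec,
; vmerge.vxm vd, vs2, rs1, v0 writes x[rs1] to each element whose mask bit
; in v0 is set and the corresponding element of vs2 elsewhere.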
declare <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmerge_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vmerge_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vmerge_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vmerge_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 %1,
    <vscale x 64 x i1> %2,
    iXLen %3)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vmerge_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vmerge_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vmerge_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vmerge_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vmerge_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vmerge_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vmerge_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vmerge_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vmerge_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vmerge_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vmerge_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i32> %a
}

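; No CHECK lines are autogenerated for the i64 vxm tests below, presumably
; because the i64 scalar fits in one GPR on riscv64 but needs a register
; pair on riscv32, so the two RUN lines produce different code and
; update_llc_test_checks.py has no common output to emit under the shared
; CHECK prefix.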
declare <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i64> %a
}

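; vmerge.vim variants: the merge value is a 5-bit sign-extended immediate
; encoded in the instruction itself, so the constant 9 used throughout this
; section needs no scalar register.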
define <vscale x 1 x i8> @intrinsic_vmerge_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 9,
    <vscale x 1 x i1> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vmerge_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 9,
    <vscale x 2 x i1> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vmerge_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 9,
    <vscale x 4 x i1> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vmerge_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 9,
    <vscale x 8 x i1> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vmerge_vim_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 9,
    <vscale x 16 x i1> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vmerge_vim_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 9,
    <vscale x 32 x i1> %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vmerge_vim_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 9,
    <vscale x 64 x i1> %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

define <vscale x 1 x i16> @intrinsic_vmerge_vim_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 9,
    <vscale x 1 x i1> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vmerge_vim_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 9,
    <vscale x 2 x i1> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vmerge_vim_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 9,
    <vscale x 4 x i1> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vmerge_vim_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 9,
    <vscale x 8 x i1> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vmerge_vim_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 9,
    <vscale x 16 x i1> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vmerge_vim_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 9,
    <vscale x 32 x i1> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vmerge_vim_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 9,
    <vscale x 1 x i1> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vmerge_vim_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 9,
    <vscale x 2 x i1> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vmerge_vim_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 9,
    <vscale x 4 x i1> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vmerge_vim_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 9,
    <vscale x 8 x i1> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vmerge_vim_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 9,
    <vscale x 16 x i1> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

define <vscale x 1 x i64> @intrinsic_vmerge_vim_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 9,
    <vscale x 1 x i1> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vmerge_vim_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i64_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 9,
    <vscale x 2 x i1> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vmerge_vim_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 9,
    <vscale x 4 x i1> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 9,
    <vscale x 8 x i1> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

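; The floating-point element types below exercise the same vmerge.vvm
; encoding as the integer cases: a merge only moves element bits and
; performs no FP arithmetic, which is presumably why the RUN lines need
; only the +zvfhmin/+zvfbfmin subsets for the f16 and bf16 cases.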
1417declare <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16(
1418  <vscale x 1 x half>,
1419  <vscale x 1 x half>,
1420  <vscale x 1 x half>,
1421  <vscale x 1 x i1>,
1422  iXLen);
1423
1424define <vscale x 1 x half> @intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1425; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16:
1426; CHECK:       # %bb.0: # %entry
1427; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
1428; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
1429; CHECK-NEXT:    ret
1430entry:
1431  %a = call <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16(
1432    <vscale x 1 x half> undef,
1433    <vscale x 1 x half> %0,
1434    <vscale x 1 x half> %1,
1435    <vscale x 1 x i1> %2,
1436    iXLen %3)
1437
1438  ret <vscale x 1 x half> %a
1439}
1440
1441declare <vscale x 2 x half> @llvm.riscv.vmerge.nxv2f16.nxv2f16(
1442  <vscale x 2 x half>,
1443  <vscale x 2 x half>,
1444  <vscale x 2 x half>,
1445  <vscale x 2 x i1>,
1446  iXLen);
1447
1448define <vscale x 2 x half> @intrinsic_vmerge_vvm_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1449; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f16_nxv2f16_nxv2f16:
1450; CHECK:       # %bb.0: # %entry
1451; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
1452; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
1453; CHECK-NEXT:    ret
1454entry:
1455  %a = call <vscale x 2 x half> @llvm.riscv.vmerge.nxv2f16.nxv2f16(
1456    <vscale x 2 x half> undef,
1457    <vscale x 2 x half> %0,
1458    <vscale x 2 x half> %1,
1459    <vscale x 2 x i1> %2,
1460    iXLen %3)
1461
1462  ret <vscale x 2 x half> %a
1463}
1464
1465declare <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16(
1466  <vscale x 4 x half>,
1467  <vscale x 4 x half>,
1468  <vscale x 4 x half>,
1469  <vscale x 4 x i1>,
1470  iXLen);
1471
1472define <vscale x 4 x half> @intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1473; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16:
1474; CHECK:       # %bb.0: # %entry
1475; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
1476; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
1477; CHECK-NEXT:    ret
1478entry:
1479  %a = call <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16(
1480    <vscale x 4 x half> undef,
1481    <vscale x 4 x half> %0,
1482    <vscale x 4 x half> %1,
1483    <vscale x 4 x i1> %2,
1484    iXLen %3)
1485
1486  ret <vscale x 4 x half> %a
1487}
1488
1489declare <vscale x 8 x half> @llvm.riscv.vmerge.nxv8f16.nxv8f16(
1490  <vscale x 8 x half>,
1491  <vscale x 8 x half>,
1492  <vscale x 8 x half>,
1493  <vscale x 8 x i1>,
1494  iXLen);
1495
1496define <vscale x 8 x half> @intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
1497; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16:
1498; CHECK:       # %bb.0: # %entry
1499; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
1500; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
1501; CHECK-NEXT:    ret
1502entry:
1503  %a = call <vscale x 8 x half> @llvm.riscv.vmerge.nxv8f16.nxv8f16(
1504    <vscale x 8 x half> undef,
1505    <vscale x 8 x half> %0,
1506    <vscale x 8 x half> %1,
1507    <vscale x 8 x i1> %2,
1508    iXLen %3)
1509
1510  ret <vscale x 8 x half> %a
1511}
1512
1513declare <vscale x 16 x half> @llvm.riscv.vmerge.nxv16f16.nxv16f16(
1514  <vscale x 16 x half>,
1515  <vscale x 16 x half>,
1516  <vscale x 16 x half>,
1517  <vscale x 16 x i1>,
1518  iXLen);
1519
1520define <vscale x 16 x half> @intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
1521; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16:
1522; CHECK:       # %bb.0: # %entry
1523; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
1524; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
1525; CHECK-NEXT:    ret
1526entry:
1527  %a = call <vscale x 16 x half> @llvm.riscv.vmerge.nxv16f16.nxv16f16(
1528    <vscale x 16 x half> undef,
1529    <vscale x 16 x half> %0,
1530    <vscale x 16 x half> %1,
1531    <vscale x 16 x i1> %2,
1532    iXLen %3)
1533
1534  ret <vscale x 16 x half> %a
1535}
1536
1537declare <vscale x 32 x half> @llvm.riscv.vmerge.nxv32f16.nxv32f16(
1538  <vscale x 32 x half>,
1539  <vscale x 32 x half>,
1540  <vscale x 32 x half>,
1541  <vscale x 32 x i1>,
1542  iXLen);
1543
1544define <vscale x 32 x half> @intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
1545; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16:
1546; CHECK:       # %bb.0: # %entry
1547; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
1548; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
1549; CHECK-NEXT:    ret
1550entry:
1551  %a = call <vscale x 32 x half> @llvm.riscv.vmerge.nxv32f16.nxv32f16(
1552    <vscale x 32 x half> undef,
1553    <vscale x 32 x half> %0,
1554    <vscale x 32 x half> %1,
1555    <vscale x 32 x i1> %2,
1556    iXLen %3)
1557
1558  ret <vscale x 32 x half> %a
1559}
1560
1561declare <vscale x 1 x bfloat> @llvm.riscv.vmerge.nxv1bf16.nxv1bf16(
1562  <vscale x 1 x bfloat>,
1563  <vscale x 1 x bfloat>,
1564  <vscale x 1 x bfloat>,
1565  <vscale x 1 x i1>,
1566  iXLen);
1567
1568define <vscale x 1 x bfloat> @intrinsic_vmerge_vvm_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1569; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1bf16_nxv1bf16_nxv1bf16:
1570; CHECK:       # %bb.0: # %entry
1571; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
1572; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
1573; CHECK-NEXT:    ret
1574entry:
1575  %a = call <vscale x 1 x bfloat> @llvm.riscv.vmerge.nxv1bf16.nxv1bf16(
1576    <vscale x 1 x bfloat> undef,
1577    <vscale x 1 x bfloat> %0,
1578    <vscale x 1 x bfloat> %1,
1579    <vscale x 1 x i1> %2,
1580    iXLen %3)
1581
1582  ret <vscale x 1 x bfloat> %a
1583}
1584
1585declare <vscale x 2 x bfloat> @llvm.riscv.vmerge.nxv2bf16.nxv2bf16(
1586  <vscale x 2 x bfloat>,
1587  <vscale x 2 x bfloat>,
1588  <vscale x 2 x bfloat>,
1589  <vscale x 2 x i1>,
1590  iXLen);
1591
1592define <vscale x 2 x bfloat> @intrinsic_vmerge_vvm_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1593; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2bf16_nxv2bf16_nxv2bf16:
1594; CHECK:       # %bb.0: # %entry
1595; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
1596; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
1597; CHECK-NEXT:    ret
1598entry:
1599  %a = call <vscale x 2 x bfloat> @llvm.riscv.vmerge.nxv2bf16.nxv2bf16(
1600    <vscale x 2 x bfloat> undef,
1601    <vscale x 2 x bfloat> %0,
1602    <vscale x 2 x bfloat> %1,
1603    <vscale x 2 x i1> %2,
1604    iXLen %3)
1605
1606  ret <vscale x 2 x bfloat> %a
1607}
1608
1609declare <vscale x 4 x bfloat> @llvm.riscv.vmerge.nxv4bf16.nxv4bf16(
1610  <vscale x 4 x bfloat>,
1611  <vscale x 4 x bfloat>,
1612  <vscale x 4 x bfloat>,
1613  <vscale x 4 x i1>,
1614  iXLen);
1615
1616define <vscale x 4 x bfloat> @intrinsic_vmerge_vvm_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1617; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4bf16_nxv4bf16_nxv4bf16:
1618; CHECK:       # %bb.0: # %entry
1619; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
1620; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
1621; CHECK-NEXT:    ret
1622entry:
1623  %a = call <vscale x 4 x bfloat> @llvm.riscv.vmerge.nxv4bf16.nxv4bf16(
1624    <vscale x 4 x bfloat> undef,
1625    <vscale x 4 x bfloat> %0,
1626    <vscale x 4 x bfloat> %1,
1627    <vscale x 4 x i1> %2,
1628    iXLen %3)
1629
1630  ret <vscale x 4 x bfloat> %a
1631}
1632
declare <vscale x 8 x bfloat> @llvm.riscv.vmerge.nxv8bf16.nxv8bf16(
  <vscale x 8 x bfloat>,
  <vscale x 8 x bfloat>,
  <vscale x 8 x bfloat>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x bfloat> @intrinsic_vmerge_vvm_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8bf16_nxv8bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vmerge.nxv8bf16.nxv8bf16(
    <vscale x 8 x bfloat> undef,
    <vscale x 8 x bfloat> %0,
    <vscale x 8 x bfloat> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x bfloat> %a
}

declare <vscale x 16 x bfloat> @llvm.riscv.vmerge.nxv16bf16.nxv16bf16(
  <vscale x 16 x bfloat>,
  <vscale x 16 x bfloat>,
  <vscale x 16 x bfloat>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x bfloat> @intrinsic_vmerge_vvm_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16bf16_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vmerge.nxv16bf16.nxv16bf16(
    <vscale x 16 x bfloat> undef,
    <vscale x 16 x bfloat> %0,
    <vscale x 16 x bfloat> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x bfloat> %a
}

declare <vscale x 32 x bfloat> @llvm.riscv.vmerge.nxv32bf16.nxv32bf16(
  <vscale x 32 x bfloat>,
  <vscale x 32 x bfloat>,
  <vscale x 32 x bfloat>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x bfloat> @intrinsic_vmerge_vvm_nxv32bf16_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32bf16_nxv32bf16_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x bfloat> @llvm.riscv.vmerge.nxv32bf16.nxv32bf16(
    <vscale x 32 x bfloat> undef,
    <vscale x 32 x bfloat> %0,
    <vscale x 32 x bfloat> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x bfloat> %a
}

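; Single-precision cases: the same pattern at e32, so the LMUL ladder shifts
; by one step relative to e16 (nxv1f32 starts at mf2 and nxv16f32 tops out
; at m8).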
declare <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vmerge_vvm_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x float> %a
}

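; Double-precision cases: at e64 even nxv1f64 needs a whole register (m1),
; so nxv2/nxv4/nxv8 all use the grouped-register operand pattern
; (v10/v12/v16) seen in the m2/m4/m8 cases above.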
declare <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x double> %a
}
