; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

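; This file tests selection of the masked merge intrinsics on floating-point
; vector types: llvm.riscv.vmerge with a vector second operand lowers to
; vmerge.vvm, and llvm.riscv.vfmerge with an FP scalar operand lowers to
; vfmerge.vfm. Each test passes an undef passthru and an explicit vl, which
; becomes the AVL (a0) of the emitted vsetvli.
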
declare <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vmerge.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x half> @intrinsic_vmerge_vvm_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vmerge.nxv2f16.nxv2f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x half> @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    half %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    half %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vmerge.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x half> @intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vmerge.nxv8f16.nxv8f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x half> @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    half %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vmerge.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x half> @intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vmerge.nxv16f16.nxv16f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x half> @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    half %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vmerge.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x half> @intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vmerge.nxv32f16.nxv32f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x half> @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    half %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x half> %a
}

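; The same vmerge.vvm/vfmerge.vfm patterns, repeated for f32 element vectors
; from LMUL=1/2 (mf2, nxv1f32) up to LMUL=8 (m8, nxv16f32).
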
declare <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vmerge_vvm_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    float %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    float %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    float %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    float %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    float %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x float> %a
}

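; The same patterns for f64 element vectors, from LMUL=1 (m1, nxv1f64) up to
; LMUL=8 (m8, nxv8f64).
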
declare <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    double %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    double %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    double %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    double %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x double> %a
}

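; The vzm tests below pass +0.0 as the scalar operand. Because +0.0 has an
; all-zeros bit pattern in every FP format, the vfmerge.vfm can instead be
; selected as an integer vmerge.vim with immediate 0, which avoids needing
; the scalar in an FP register.
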
define <vscale x 1 x half> @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half zeroinitializer,
    <vscale x 1 x i1> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

define <vscale x 2 x half> @intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    half zeroinitializer,
    <vscale x 2 x i1> %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

define <vscale x 4 x half> @intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    half zeroinitializer,
    <vscale x 4 x i1> %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

define <vscale x 8 x half> @intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    half zeroinitializer,
    <vscale x 8 x i1> %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

define <vscale x 16 x half> @intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    half zeroinitializer,
    <vscale x 16 x i1> %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

define <vscale x 32 x half> @intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    half zeroinitializer,
    <vscale x 32 x i1> %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}

define <vscale x 1 x float> @intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    float zeroinitializer,
    <vscale x 1 x i1> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

define <vscale x 2 x float> @intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    float zeroinitializer,
    <vscale x 2 x i1> %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

define <vscale x 4 x float> @intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    float zeroinitializer,
    <vscale x 4 x i1> %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

define <vscale x 8 x float> @intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    float zeroinitializer,
    <vscale x 8 x i1> %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

define <vscale x 16 x float> @intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    float zeroinitializer,
    <vscale x 16 x i1> %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

define <vscale x 1 x double> @intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    double zeroinitializer,
    <vscale x 1 x i1> %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

define <vscale x 2 x double> @intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    double zeroinitializer,
    <vscale x 2 x i1> %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

define <vscale x 4 x double> @intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    double zeroinitializer,
    <vscale x 4 x i1> %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

define <vscale x 8 x double> @intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    double zeroinitializer,
    <vscale x 8 x i1> %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}