; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/select-fp.ll (revision a7a1195f01037e5019f671c96ef4bca9af9bb9a7)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
3; RUN:     -verify-machineinstrs < %s | FileCheck %s
4; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
5; RUN:     -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
10
; Select on a scalar i1: the condition is splatted with vmv.v.x, compared
; against zero to form the mask in v0, then vmerge picks %a or %b per lane.
define <vscale x 1 x half> @select_nxv1f16(i1 zeroext %c, <vscale x 1 x half> %a, <vscale x 1 x half> %b) {
; CHECK-LABEL: select_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 1 x half> %a, <vscale x 1 x half> %b
  ret <vscale x 1 x half> %v
}
23
; Scalar fcmp oeq feeding a vector select: feq.h computes the condition,
; which is then splatted into a mask (vmv.v.x + vmsne.vi) for vmerge.
define <vscale x 1 x half> @selectcc_nxv1f16(half %a, half %b, <vscale x 1 x half> %c, <vscale x 1 x half> %d) {
; CHECK-LABEL: selectcc_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    feq.h a0, fa0, fa1
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %cmp = fcmp oeq half %a, %b
  %v = select i1 %cmp, <vscale x 1 x half> %c, <vscale x 1 x half> %d
  ret <vscale x 1 x half> %v
}
38
; Same mask-splat + vmerge pattern at nxv2f16 (e8/mf4 mask, e16/mf2 data).
define <vscale x 2 x half> @select_nxv2f16(i1 zeroext %c, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: select_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 2 x half> %a, <vscale x 2 x half> %b
  ret <vscale x 2 x half> %v
}
51
; fcmp oeq (scalar feq.h) driving a nxv2f16 select via mask splat + vmerge.
define <vscale x 2 x half> @selectcc_nxv2f16(half %a, half %b, <vscale x 2 x half> %c, <vscale x 2 x half> %d) {
; CHECK-LABEL: selectcc_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    feq.h a0, fa0, fa1
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %cmp = fcmp oeq half %a, %b
  %v = select i1 %cmp, <vscale x 2 x half> %c, <vscale x 2 x half> %d
  ret <vscale x 2 x half> %v
}
66
; Mask-splat + vmerge at nxv4f16 (e8/mf2 mask, e16/m1 data).
define <vscale x 4 x half> @select_nxv4f16(i1 zeroext %c, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
; CHECK-LABEL: select_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 4 x half> %a, <vscale x 4 x half> %b
  ret <vscale x 4 x half> %v
}
79
; fcmp oeq (feq.h) driving a nxv4f16 select via mask splat + vmerge.
define <vscale x 4 x half> @selectcc_nxv4f16(half %a, half %b, <vscale x 4 x half> %c, <vscale x 4 x half> %d) {
; CHECK-LABEL: selectcc_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    feq.h a0, fa0, fa1
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %cmp = fcmp oeq half %a, %b
  %v = select i1 %cmp, <vscale x 4 x half> %c, <vscale x 4 x half> %d
  ret <vscale x 4 x half> %v
}
94
; Mask-splat + vmerge at nxv8f16; LMUL grows (e16/m2), so operands sit in
; register groups (v8/v10) and the splat scratch moves to v12.
define <vscale x 8 x half> @select_nxv8f16(i1 zeroext %c, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: select_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 8 x half> %a, <vscale x 8 x half> %b
  ret <vscale x 8 x half> %v
}
107
; fcmp oeq (feq.h) driving a nxv8f16 select via mask splat + vmerge (e16/m2).
define <vscale x 8 x half> @selectcc_nxv8f16(half %a, half %b, <vscale x 8 x half> %c, <vscale x 8 x half> %d) {
; CHECK-LABEL: selectcc_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    feq.h a0, fa0, fa1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %cmp = fcmp oeq half %a, %b
  %v = select i1 %cmp, <vscale x 8 x half> %c, <vscale x 8 x half> %d
  ret <vscale x 8 x half> %v
}
122
; Mask-splat + vmerge at nxv16f16 (e16/m4 data, operands v8/v12).
define <vscale x 16 x half> @select_nxv16f16(i1 zeroext %c, <vscale x 16 x half> %a, <vscale x 16 x half> %b) {
; CHECK-LABEL: select_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 16 x half> %a, <vscale x 16 x half> %b
  ret <vscale x 16 x half> %v
}
135
; fcmp oeq (feq.h) driving a nxv16f16 select via mask splat + vmerge (e16/m4).
define <vscale x 16 x half> @selectcc_nxv16f16(half %a, half %b, <vscale x 16 x half> %c, <vscale x 16 x half> %d) {
; CHECK-LABEL: selectcc_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    feq.h a0, fa0, fa1
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %cmp = fcmp oeq half %a, %b
  %v = select i1 %cmp, <vscale x 16 x half> %c, <vscale x 16 x half> %d
  ret <vscale x 16 x half> %v
}
150
; Largest f16 case: nxv32f16 occupies full m8 groups (v8/v16), splat in v24.
define <vscale x 32 x half> @select_nxv32f16(i1 zeroext %c, <vscale x 32 x half> %a, <vscale x 32 x half> %b) {
; CHECK-LABEL: select_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 32 x half> %a, <vscale x 32 x half> %b
  ret <vscale x 32 x half> %v
}
163
; fcmp oeq (feq.h) driving the full-LMUL nxv32f16 select (e16/m8).
define <vscale x 32 x half> @selectcc_nxv32f16(half %a, half %b, <vscale x 32 x half> %c, <vscale x 32 x half> %d) {
; CHECK-LABEL: selectcc_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    feq.h a0, fa0, fa1
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %cmp = fcmp oeq half %a, %b
  %v = select i1 %cmp, <vscale x 32 x half> %c, <vscale x 32 x half> %d
  ret <vscale x 32 x half> %v
}
178
; f32 variants begin: same i1 mask-splat + vmerge pattern at e32/mf2.
define <vscale x 1 x float> @select_nxv1f32(i1 zeroext %c, <vscale x 1 x float> %a, <vscale x 1 x float> %b) {
; CHECK-LABEL: select_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 1 x float> %a, <vscale x 1 x float> %b
  ret <vscale x 1 x float> %v
}
191
; fcmp oeq now lowers to scalar feq.s; rest is the mask splat + vmerge idiom.
define <vscale x 1 x float> @selectcc_nxv1f32(float %a, float %b, <vscale x 1 x float> %c, <vscale x 1 x float> %d) {
; CHECK-LABEL: selectcc_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    feq.s a0, fa0, fa1
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %cmp = fcmp oeq float %a, %b
  %v = select i1 %cmp, <vscale x 1 x float> %c, <vscale x 1 x float> %d
  ret <vscale x 1 x float> %v
}
206
; Mask-splat + vmerge at nxv2f32 (e8/mf4 mask, e32/m1 data).
define <vscale x 2 x float> @select_nxv2f32(i1 zeroext %c, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: select_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 2 x float> %a, <vscale x 2 x float> %b
  ret <vscale x 2 x float> %v
}
219
; fcmp oeq (feq.s) driving a nxv2f32 select via mask splat + vmerge.
define <vscale x 2 x float> @selectcc_nxv2f32(float %a, float %b, <vscale x 2 x float> %c, <vscale x 2 x float> %d) {
; CHECK-LABEL: selectcc_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    feq.s a0, fa0, fa1
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %cmp = fcmp oeq float %a, %b
  %v = select i1 %cmp, <vscale x 2 x float> %c, <vscale x 2 x float> %d
  ret <vscale x 2 x float> %v
}
234
; Mask-splat + vmerge at nxv4f32 (e32/m2, operand groups v8/v10).
define <vscale x 4 x float> @select_nxv4f32(i1 zeroext %c, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: select_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 4 x float> %a, <vscale x 4 x float> %b
  ret <vscale x 4 x float> %v
}
247
; fcmp oeq (feq.s) driving a nxv4f32 select via mask splat + vmerge (e32/m2).
define <vscale x 4 x float> @selectcc_nxv4f32(float %a, float %b, <vscale x 4 x float> %c, <vscale x 4 x float> %d) {
; CHECK-LABEL: selectcc_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    feq.s a0, fa0, fa1
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %cmp = fcmp oeq float %a, %b
  %v = select i1 %cmp, <vscale x 4 x float> %c, <vscale x 4 x float> %d
  ret <vscale x 4 x float> %v
}
262
; Mask-splat + vmerge at nxv8f32 (e32/m4, operand groups v8/v12).
define <vscale x 8 x float> @select_nxv8f32(i1 zeroext %c, <vscale x 8 x float> %a, <vscale x 8 x float> %b) {
; CHECK-LABEL: select_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 8 x float> %a, <vscale x 8 x float> %b
  ret <vscale x 8 x float> %v
}
275
; fcmp oeq (feq.s) driving a nxv8f32 select via mask splat + vmerge (e32/m4).
define <vscale x 8 x float> @selectcc_nxv8f32(float %a, float %b, <vscale x 8 x float> %c, <vscale x 8 x float> %d) {
; CHECK-LABEL: selectcc_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    feq.s a0, fa0, fa1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %cmp = fcmp oeq float %a, %b
  %v = select i1 %cmp, <vscale x 8 x float> %c, <vscale x 8 x float> %d
  ret <vscale x 8 x float> %v
}
290
; Largest f32 case: nxv16f32 fills m8 groups (v8/v16), splat scratch in v24.
define <vscale x 16 x float> @select_nxv16f32(i1 zeroext %c, <vscale x 16 x float> %a, <vscale x 16 x float> %b) {
; CHECK-LABEL: select_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 16 x float> %a, <vscale x 16 x float> %b
  ret <vscale x 16 x float> %v
}
303
; fcmp oeq (feq.s) driving the full-LMUL nxv16f32 select (e32/m8).
define <vscale x 16 x float> @selectcc_nxv16f32(float %a, float %b, <vscale x 16 x float> %c, <vscale x 16 x float> %d) {
; CHECK-LABEL: selectcc_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    feq.s a0, fa0, fa1
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %cmp = fcmp oeq float %a, %b
  %v = select i1 %cmp, <vscale x 16 x float> %c, <vscale x 16 x float> %d
  ret <vscale x 16 x float> %v
}
318
; f64 variants begin: i1 mask-splat + vmerge at e64/m1.
define <vscale x 1 x double> @select_nxv1f64(i1 zeroext %c, <vscale x 1 x double> %a, <vscale x 1 x double> %b) {
; CHECK-LABEL: select_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 1 x double> %a, <vscale x 1 x double> %b
  ret <vscale x 1 x double> %v
}
331
; fcmp oeq lowers to scalar feq.d; rest is the mask splat + vmerge idiom.
define <vscale x 1 x double> @selectcc_nxv1f64(double %a, double %b, <vscale x 1 x double> %c, <vscale x 1 x double> %d) {
; CHECK-LABEL: selectcc_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    feq.d a0, fa0, fa1
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %cmp = fcmp oeq double %a, %b
  %v = select i1 %cmp, <vscale x 1 x double> %c, <vscale x 1 x double> %d
  ret <vscale x 1 x double> %v
}
346
; Mask-splat + vmerge at nxv2f64 (e64/m2, operand groups v8/v10).
define <vscale x 2 x double> @select_nxv2f64(i1 zeroext %c, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: select_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 2 x double> %a, <vscale x 2 x double> %b
  ret <vscale x 2 x double> %v
}
359
; fcmp oeq (feq.d) driving a nxv2f64 select via mask splat + vmerge (e64/m2).
define <vscale x 2 x double> @selectcc_nxv2f64(double %a, double %b, <vscale x 2 x double> %c, <vscale x 2 x double> %d) {
; CHECK-LABEL: selectcc_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    feq.d a0, fa0, fa1
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %cmp = fcmp oeq double %a, %b
  %v = select i1 %cmp, <vscale x 2 x double> %c, <vscale x 2 x double> %d
  ret <vscale x 2 x double> %v
}
374
; Mask-splat + vmerge at nxv4f64 (e64/m4, operand groups v8/v12).
define <vscale x 4 x double> @select_nxv4f64(i1 zeroext %c, <vscale x 4 x double> %a, <vscale x 4 x double> %b) {
; CHECK-LABEL: select_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 4 x double> %a, <vscale x 4 x double> %b
  ret <vscale x 4 x double> %v
}
387
; fcmp oeq (feq.d) driving a nxv4f64 select via mask splat + vmerge (e64/m4).
define <vscale x 4 x double> @selectcc_nxv4f64(double %a, double %b, <vscale x 4 x double> %c, <vscale x 4 x double> %d) {
; CHECK-LABEL: selectcc_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    feq.d a0, fa0, fa1
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %cmp = fcmp oeq double %a, %b
  %v = select i1 %cmp, <vscale x 4 x double> %c, <vscale x 4 x double> %d
  ret <vscale x 4 x double> %v
}
402
; Largest f64 case: nxv8f64 fills m8 groups (v8/v16), splat scratch in v24.
define <vscale x 8 x double> @select_nxv8f64(i1 zeroext %c, <vscale x 8 x double> %a, <vscale x 8 x double> %b) {
; CHECK-LABEL: select_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 8 x double> %a, <vscale x 8 x double> %b
  ret <vscale x 8 x double> %v
}
415
; fcmp oeq (feq.d) driving the full-LMUL nxv8f64 select (e64/m8).
define <vscale x 8 x double> @selectcc_nxv8f64(double %a, double %b, <vscale x 8 x double> %c, <vscale x 8 x double> %d) {
; CHECK-LABEL: selectcc_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    feq.d a0, fa0, fa1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %cmp = fcmp oeq double %a, %b
  %v = select i1 %cmp, <vscale x 8 x double> %c, <vscale x 8 x double> %d
  ret <vscale x 8 x double> %v
}
430