; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/select-int.ll (revision d89d45ca9a6e51be388a6ff3893d59e54748b928)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

define <vscale x 1 x i1> @select_nxv1i1(i1 zeroext %c, <vscale x 1 x i1> %a, <vscale x 1 x i1> %b) {
; CHECK-LABEL: select_nxv1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  ; Mask-typed select: %c is splatted, compared != 0, then merged with vmandn/vmand/vmor.
  %v = select i1 %c, <vscale x 1 x i1> %a, <vscale x 1 x i1> %b
  ret <vscale x 1 x i1> %v
}

define <vscale x 1 x i1> @selectcc_nxv1i1(i1 signext %a, i1 signext %b, <vscale x 1 x i1> %c, <vscale x 1 x i1> %d) {
; CHECK-LABEL: selectcc_nxv1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  ; The i1 "ne" compare lowers to scalar xor+andi before the splat and mask merge.
  %cmp = icmp ne i1 %a, %b
  %v = select i1 %cmp, <vscale x 1 x i1> %c, <vscale x 1 x i1> %d
  ret <vscale x 1 x i1> %v
}

define <vscale x 2 x i1> @select_nxv2i1(i1 zeroext %c, <vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
; CHECK-LABEL: select_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  ; Mask-typed select: %c is splatted, compared != 0, then merged with vmandn/vmand/vmor.
  %v = select i1 %c, <vscale x 2 x i1> %a, <vscale x 2 x i1> %b
  ret <vscale x 2 x i1> %v
}

define <vscale x 2 x i1> @selectcc_nxv2i1(i1 signext %a, i1 signext %b, <vscale x 2 x i1> %c, <vscale x 2 x i1> %d) {
; CHECK-LABEL: selectcc_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  ; The i1 "ne" compare lowers to scalar xor+andi before the splat and mask merge.
  %cmp = icmp ne i1 %a, %b
  %v = select i1 %cmp, <vscale x 2 x i1> %c, <vscale x 2 x i1> %d
  ret <vscale x 2 x i1> %v
}

define <vscale x 4 x i1> @select_nxv4i1(i1 zeroext %c, <vscale x 4 x i1> %a, <vscale x 4 x i1> %b) {
; CHECK-LABEL: select_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  ; Mask-typed select: %c is splatted, compared != 0, then merged with vmandn/vmand/vmor.
  %v = select i1 %c, <vscale x 4 x i1> %a, <vscale x 4 x i1> %b
  ret <vscale x 4 x i1> %v
}

define <vscale x 4 x i1> @selectcc_nxv4i1(i1 signext %a, i1 signext %b, <vscale x 4 x i1> %c, <vscale x 4 x i1> %d) {
; CHECK-LABEL: selectcc_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  ; The i1 "ne" compare lowers to scalar xor+andi before the splat and mask merge.
  %cmp = icmp ne i1 %a, %b
  %v = select i1 %cmp, <vscale x 4 x i1> %c, <vscale x 4 x i1> %d
  ret <vscale x 4 x i1> %v
}

define <vscale x 8 x i1> @select_nxv8i1(i1 zeroext %c, <vscale x 8 x i1> %a, <vscale x 8 x i1> %b) {
; CHECK-LABEL: select_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  ; Mask-typed select: %c is splatted, compared != 0, then merged with vmandn/vmand/vmor.
  %v = select i1 %c, <vscale x 8 x i1> %a, <vscale x 8 x i1> %b
  ret <vscale x 8 x i1> %v
}

define <vscale x 8 x i1> @selectcc_nxv8i1(i1 signext %a, i1 signext %b, <vscale x 8 x i1> %c, <vscale x 8 x i1> %d) {
; CHECK-LABEL: selectcc_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  ; The i1 "ne" compare lowers to scalar xor+andi before the splat and mask merge.
  %cmp = icmp ne i1 %a, %b
  %v = select i1 %cmp, <vscale x 8 x i1> %c, <vscale x 8 x i1> %d
  ret <vscale x 8 x i1> %v
}

define <vscale x 16 x i1> @select_nxv16i1(i1 zeroext %c, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: select_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v9, v10, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  ; Mask-typed select at LMUL=2: the splat needs a register pair (v10), mask ops stay single-reg.
  %v = select i1 %c, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b
  ret <vscale x 16 x i1> %v
}

define <vscale x 16 x i1> @selectcc_nxv16i1(i1 signext %a, i1 signext %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d) {
; CHECK-LABEL: selectcc_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v9, v10, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  ; The i1 "ne" compare lowers to scalar xor+andi before the splat and mask merge.
  %cmp = icmp ne i1 %a, %b
  %v = select i1 %cmp, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d
  ret <vscale x 16 x i1> %v
}

define <vscale x 32 x i1> @select_nxv32i1(i1 zeroext %c, <vscale x 32 x i1> %a, <vscale x 32 x i1> %b) {
; CHECK-LABEL: select_nxv32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v9, v12, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  ; Mask-typed select at LMUL=4: the splat needs a register quad (v12), mask ops stay single-reg.
  %v = select i1 %c, <vscale x 32 x i1> %a, <vscale x 32 x i1> %b
  ret <vscale x 32 x i1> %v
}

define <vscale x 32 x i1> @selectcc_nxv32i1(i1 signext %a, i1 signext %b, <vscale x 32 x i1> %c, <vscale x 32 x i1> %d) {
; CHECK-LABEL: selectcc_nxv32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v9, v12, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  ; The i1 "ne" compare lowers to scalar xor+andi before the splat and mask merge.
  %cmp = icmp ne i1 %a, %b
  %v = select i1 %cmp, <vscale x 32 x i1> %c, <vscale x 32 x i1> %d
  ret <vscale x 32 x i1> %v
}

define <vscale x 64 x i1> @select_nxv64i1(i1 zeroext %c, <vscale x 64 x i1> %a, <vscale x 64 x i1> %b) {
; CHECK-LABEL: select_nxv64i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v9, v16, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  ; Mask-typed select at LMUL=8: the splat needs a full register group (v16), mask ops stay single-reg.
  %v = select i1 %c, <vscale x 64 x i1> %a, <vscale x 64 x i1> %b
  ret <vscale x 64 x i1> %v
}

define <vscale x 64 x i1> @selectcc_nxv64i1(i1 signext %a, i1 signext %b, <vscale x 64 x i1> %c, <vscale x 64 x i1> %d) {
; CHECK-LABEL: selectcc_nxv64i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v9, v16, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  ; The i1 "ne" compare lowers to scalar xor+andi before the splat and mask merge.
  %cmp = icmp ne i1 %a, %b
  %v = select i1 %cmp, <vscale x 64 x i1> %c, <vscale x 64 x i1> %d
  ret <vscale x 64 x i1> %v
}

define <vscale x 1 x i8> @select_nxv1i8(i1 zeroext %c, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
; CHECK-LABEL: select_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Integer select: %c is splatted, vmsne forms the v0 mask, vmerge picks %a or %b per lane.
  %v = select i1 %c, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @selectcc_nxv1i8(i8 signext %a, i8 signext %b, <vscale x 1 x i8> %c, <vscale x 1 x i8> %d) {
; CHECK-LABEL: selectcc_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition before the splat/vmerge.
  %cmp = icmp ne i8 %a, %b
  %v = select i1 %cmp, <vscale x 1 x i8> %c, <vscale x 1 x i8> %d
  ret <vscale x 1 x i8> %v
}

define <vscale x 2 x i8> @select_nxv2i8(i1 zeroext %c, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
; CHECK-LABEL: select_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Integer select: %c is splatted, vmsne forms the v0 mask, vmerge picks %a or %b per lane.
  %v = select i1 %c, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @selectcc_nxv2i8(i8 signext %a, i8 signext %b, <vscale x 2 x i8> %c, <vscale x 2 x i8> %d) {
; CHECK-LABEL: selectcc_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition before the splat/vmerge.
  %cmp = icmp ne i8 %a, %b
  %v = select i1 %cmp, <vscale x 2 x i8> %c, <vscale x 2 x i8> %d
  ret <vscale x 2 x i8> %v
}

define <vscale x 4 x i8> @select_nxv4i8(i1 zeroext %c, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
; CHECK-LABEL: select_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Integer select: %c is splatted, vmsne forms the v0 mask, vmerge picks %a or %b per lane.
  %v = select i1 %c, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @selectcc_nxv4i8(i8 signext %a, i8 signext %b, <vscale x 4 x i8> %c, <vscale x 4 x i8> %d) {
; CHECK-LABEL: selectcc_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition before the splat/vmerge.
  %cmp = icmp ne i8 %a, %b
  %v = select i1 %cmp, <vscale x 4 x i8> %c, <vscale x 4 x i8> %d
  ret <vscale x 4 x i8> %v
}

define <vscale x 8 x i8> @select_nxv8i8(i1 zeroext %c, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
; CHECK-LABEL: select_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Integer select: %c is splatted, vmsne forms the v0 mask, vmerge picks %a or %b per lane.
  %v = select i1 %c, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @selectcc_nxv8i8(i8 signext %a, i8 signext %b, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d) {
; CHECK-LABEL: selectcc_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition before the splat/vmerge.
  %cmp = icmp ne i8 %a, %b
  %v = select i1 %cmp, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d
  ret <vscale x 8 x i8> %v
}

define <vscale x 16 x i8> @select_nxv16i8(i1 zeroext %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: select_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  ; Integer select at LMUL=2: splat + vmsne into v0, then vmerge on register pairs.
  %v = select i1 %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @selectcc_nxv16i8(i8 signext %a, i8 signext %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d) {
; CHECK-LABEL: selectcc_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition before the splat/vmerge.
  %cmp = icmp ne i8 %a, %b
  %v = select i1 %cmp, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d
  ret <vscale x 16 x i8> %v
}

define <vscale x 32 x i8> @select_nxv32i8(i1 zeroext %c, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
; CHECK-LABEL: select_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  ; Integer select at LMUL=4: splat + vmsne into v0, then vmerge on register quads.
  %v = select i1 %c, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @selectcc_nxv32i8(i8 signext %a, i8 signext %b, <vscale x 32 x i8> %c, <vscale x 32 x i8> %d) {
; CHECK-LABEL: selectcc_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition before the splat/vmerge.
  %cmp = icmp ne i8 %a, %b
  %v = select i1 %cmp, <vscale x 32 x i8> %c, <vscale x 32 x i8> %d
  ret <vscale x 32 x i8> %v
}

define <vscale x 64 x i8> @select_nxv64i8(i1 zeroext %c, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
; CHECK-LABEL: select_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  ; Integer select at LMUL=8: splat + vmsne into v0, then vmerge on full register groups.
  %v = select i1 %c, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @selectcc_nxv64i8(i8 signext %a, i8 signext %b, <vscale x 64 x i8> %c, <vscale x 64 x i8> %d) {
; CHECK-LABEL: selectcc_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition before the splat/vmerge.
  %cmp = icmp ne i8 %a, %b
  %v = select i1 %cmp, <vscale x 64 x i8> %c, <vscale x 64 x i8> %d
  ret <vscale x 64 x i8> %v
}

define <vscale x 1 x i16> @select_nxv1i16(i1 zeroext %c, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
; CHECK-LABEL: select_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Mask is built at e8; a second vsetvli switches to e16 for the vmerge.
  %v = select i1 %c, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @selectcc_nxv1i16(i16 signext %a, i16 signext %b, <vscale x 1 x i16> %c, <vscale x 1 x i16> %d) {
; CHECK-LABEL: selectcc_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition; mask built at e8, vmerge at e16.
  %cmp = icmp ne i16 %a, %b
  %v = select i1 %cmp, <vscale x 1 x i16> %c, <vscale x 1 x i16> %d
  ret <vscale x 1 x i16> %v
}

define <vscale x 2 x i16> @select_nxv2i16(i1 zeroext %c, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
; CHECK-LABEL: select_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Mask is built at e8; a second vsetvli switches to e16 for the vmerge.
  %v = select i1 %c, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @selectcc_nxv2i16(i16 signext %a, i16 signext %b, <vscale x 2 x i16> %c, <vscale x 2 x i16> %d) {
; CHECK-LABEL: selectcc_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition; mask built at e8, vmerge at e16.
  %cmp = icmp ne i16 %a, %b
  %v = select i1 %cmp, <vscale x 2 x i16> %c, <vscale x 2 x i16> %d
  ret <vscale x 2 x i16> %v
}

define <vscale x 4 x i16> @select_nxv4i16(i1 zeroext %c, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
; CHECK-LABEL: select_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Mask is built at e8; a second vsetvli switches to e16 for the vmerge.
  %v = select i1 %c, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @selectcc_nxv4i16(i16 signext %a, i16 signext %b, <vscale x 4 x i16> %c, <vscale x 4 x i16> %d) {
; CHECK-LABEL: selectcc_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition; mask built at e8, vmerge at e16.
  %cmp = icmp ne i16 %a, %b
  %v = select i1 %cmp, <vscale x 4 x i16> %c, <vscale x 4 x i16> %d
  ret <vscale x 4 x i16> %v
}

define <vscale x 8 x i16> @select_nxv8i16(i1 zeroext %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: select_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  ; Mask is built at e8; a second vsetvli switches to e16 for the vmerge.
  %v = select i1 %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @selectcc_nxv8i16(i16 signext %a, i16 signext %b, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d) {
; CHECK-LABEL: selectcc_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition; mask built at e8, vmerge at e16.
  %cmp = icmp ne i16 %a, %b
  %v = select i1 %cmp, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d
  ret <vscale x 8 x i16> %v
}

define <vscale x 16 x i16> @select_nxv16i16(i1 zeroext %c, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
; CHECK-LABEL: select_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  ; Mask is built at e8; a second vsetvli switches to e16 for the vmerge.
  %v = select i1 %c, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @selectcc_nxv16i16(i16 signext %a, i16 signext %b, <vscale x 16 x i16> %c, <vscale x 16 x i16> %d) {
; CHECK-LABEL: selectcc_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition; mask built at e8, vmerge at e16.
  %cmp = icmp ne i16 %a, %b
  %v = select i1 %cmp, <vscale x 16 x i16> %c, <vscale x 16 x i16> %d
  ret <vscale x 16 x i16> %v
}

define <vscale x 32 x i16> @select_nxv32i16(i1 zeroext %c, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
; CHECK-LABEL: select_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  ; Mask is built at e8; a second vsetvli switches to e16 for the vmerge.
  %v = select i1 %c, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @selectcc_nxv32i16(i16 signext %a, i16 signext %b, <vscale x 32 x i16> %c, <vscale x 32 x i16> %d) {
; CHECK-LABEL: selectcc_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition; mask built at e8, vmerge at e16.
  %cmp = icmp ne i16 %a, %b
  %v = select i1 %cmp, <vscale x 32 x i16> %c, <vscale x 32 x i16> %d
  ret <vscale x 32 x i16> %v
}

define <vscale x 1 x i32> @select_nxv1i32(i1 zeroext %c, <vscale x 1 x i32> %a, <vscale x 1 x i32> %b) {
; CHECK-LABEL: select_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Mask is built at e8; a second vsetvli switches to e32 for the vmerge.
  %v = select i1 %c, <vscale x 1 x i32> %a, <vscale x 1 x i32> %b
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @selectcc_nxv1i32(i32 signext %a, i32 signext %b, <vscale x 1 x i32> %c, <vscale x 1 x i32> %d) {
; CHECK-LABEL: selectcc_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition; mask built at e8, vmerge at e32.
  %cmp = icmp ne i32 %a, %b
  %v = select i1 %cmp, <vscale x 1 x i32> %c, <vscale x 1 x i32> %d
  ret <vscale x 1 x i32> %v
}

define <vscale x 2 x i32> @select_nxv2i32(i1 zeroext %c, <vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
; CHECK-LABEL: select_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Mask is built at e8; a second vsetvli switches to e32 for the vmerge.
  %v = select i1 %c, <vscale x 2 x i32> %a, <vscale x 2 x i32> %b
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @selectcc_nxv2i32(i32 signext %a, i32 signext %b, <vscale x 2 x i32> %c, <vscale x 2 x i32> %d) {
; CHECK-LABEL: selectcc_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition; mask built at e8, vmerge at e32.
  %cmp = icmp ne i32 %a, %b
  %v = select i1 %cmp, <vscale x 2 x i32> %c, <vscale x 2 x i32> %d
  ret <vscale x 2 x i32> %v
}

define <vscale x 4 x i32> @select_nxv4i32(i1 zeroext %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: select_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  ; Mask is built at e8; a second vsetvli switches to e32 for the vmerge.
  %v = select i1 %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @selectcc_nxv4i32(i32 signext %a, i32 signext %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d) {
; CHECK-LABEL: selectcc_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition; mask built at e8, vmerge at e32.
  %cmp = icmp ne i32 %a, %b
  %v = select i1 %cmp, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d
  ret <vscale x 4 x i32> %v
}

define <vscale x 8 x i32> @select_nxv8i32(i1 zeroext %c, <vscale x 8 x i32> %a, <vscale x 8 x i32> %b) {
; CHECK-LABEL: select_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  ; Mask is built at e8; a second vsetvli switches to e32 for the vmerge.
  %v = select i1 %c, <vscale x 8 x i32> %a, <vscale x 8 x i32> %b
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @selectcc_nxv8i32(i32 signext %a, i32 signext %b, <vscale x 8 x i32> %c, <vscale x 8 x i32> %d) {
; CHECK-LABEL: selectcc_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition; mask built at e8, vmerge at e32.
  %cmp = icmp ne i32 %a, %b
  %v = select i1 %cmp, <vscale x 8 x i32> %c, <vscale x 8 x i32> %d
  ret <vscale x 8 x i32> %v
}

define <vscale x 16 x i32> @select_nxv16i32(i1 zeroext %c, <vscale x 16 x i32> %a, <vscale x 16 x i32> %b) {
; CHECK-LABEL: select_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  ; Mask is built at e8; a second vsetvli switches to e32 for the vmerge.
  %v = select i1 %c, <vscale x 16 x i32> %a, <vscale x 16 x i32> %b
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @selectcc_nxv16i32(i32 signext %a, i32 signext %b, <vscale x 16 x i32> %c, <vscale x 16 x i32> %d) {
; CHECK-LABEL: selectcc_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  ; Scalar xor+snez computes the "ne" condition; mask built at e8, vmerge at e32.
  %cmp = icmp ne i32 %a, %b
  %v = select i1 %cmp, <vscale x 16 x i32> %c, <vscale x 16 x i32> %d
  ret <vscale x 16 x i32> %v
}

define <vscale x 1 x i64> @select_nxv1i64(i1 zeroext %c, <vscale x 1 x i64> %a, <vscale x 1 x i64> %b) {
; CHECK-LABEL: select_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  ; Mask is built at e8; a second vsetvli switches to e64 for the vmerge.
  %v = select i1 %c, <vscale x 1 x i64> %a, <vscale x 1 x i64> %b
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @selectcc_nxv1i64(i64 signext %a, i64 signext %b, <vscale x 1 x i64> %c, <vscale x 1 x i64> %d) {
; RV32-LABEL: selectcc_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    xor a1, a1, a3
; RV32-NEXT:    xor a0, a0, a2
; RV32-NEXT:    or a0, a0, a1
; RV32-NEXT:    snez a0, a0
; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; RV32-NEXT:    vmv.v.x v10, a0
; RV32-NEXT:    vmsne.vi v0, v10, 0
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v9, v8, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: selectcc_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    xor a0, a0, a1
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; RV64-NEXT:    vmv.v.x v10, a0
; RV64-NEXT:    vmsne.vi v0, v10, 0
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; RV64-NEXT:    vmerge.vvm v8, v9, v8, v0
; RV64-NEXT:    ret
  ; rv32 compares the i64 in two GPR halves (xor/xor/or/snez); rv64 needs one xor+snez.
  %cmp = icmp ne i64 %a, %b
  %v = select i1 %cmp, <vscale x 1 x i64> %c, <vscale x 1 x i64> %d
  ret <vscale x 1 x i64> %v
}

define <vscale x 2 x i64> @select_nxv2i64(i1 zeroext %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: select_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  ; Mask is built at e8; a second vsetvli switches to e64 for the vmerge.
  %v = select i1 %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @selectcc_nxv2i64(i64 signext %a, i64 signext %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d) {
; RV32-LABEL: selectcc_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    xor a1, a1, a3
; RV32-NEXT:    xor a0, a0, a2
; RV32-NEXT:    or a0, a0, a1
; RV32-NEXT:    snez a0, a0
; RV32-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; RV32-NEXT:    vmv.v.x v12, a0
; RV32-NEXT:    vmsne.vi v0, v12, 0
; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v10, v8, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: selectcc_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    xor a0, a0, a1
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; RV64-NEXT:    vmv.v.x v12, a0
; RV64-NEXT:    vmsne.vi v0, v12, 0
; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT:    vmerge.vvm v8, v10, v8, v0
; RV64-NEXT:    ret
  ; rv32 compares the i64 in two GPR halves (xor/xor/or/snez); rv64 needs one xor+snez.
  %cmp = icmp ne i64 %a, %b
  %v = select i1 %cmp, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d
  ret <vscale x 2 x i64> %v
}

define <vscale x 4 x i64> @select_nxv4i64(i1 zeroext %c, <vscale x 4 x i64> %a, <vscale x 4 x i64> %b) {
; CHECK-LABEL: select_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  ; Mask is built at e8; a second vsetvli switches to e64 for the vmerge.
  %v = select i1 %c, <vscale x 4 x i64> %a, <vscale x 4 x i64> %b
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @selectcc_nxv4i64(i64 signext %a, i64 signext %b, <vscale x 4 x i64> %c, <vscale x 4 x i64> %d) {
; RV32-LABEL: selectcc_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    xor a1, a1, a3
; RV32-NEXT:    xor a0, a0, a2
; RV32-NEXT:    or a0, a0, a1
; RV32-NEXT:    snez a0, a0
; RV32-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; RV32-NEXT:    vmv.v.x v16, a0
; RV32-NEXT:    vmsne.vi v0, v16, 0
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v12, v8, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: selectcc_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    xor a0, a0, a1
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; RV64-NEXT:    vmv.v.x v16, a0
; RV64-NEXT:    vmsne.vi v0, v16, 0
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; RV64-NEXT:    vmerge.vvm v8, v12, v8, v0
; RV64-NEXT:    ret
  ; rv32 compares the i64 in two GPR halves (xor/xor/or/snez); rv64 needs one xor+snez.
  %cmp = icmp ne i64 %a, %b
  %v = select i1 %cmp, <vscale x 4 x i64> %c, <vscale x 4 x i64> %d
  ret <vscale x 4 x i64> %v
}

define <vscale x 8 x i64> @select_nxv8i64(i1 zeroext %c, <vscale x 8 x i64> %a, <vscale x 8 x i64> %b) {
; CHECK-LABEL: select_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  ; Mask is built at e8; a second vsetvli switches to e64 for the vmerge.
  %v = select i1 %c, <vscale x 8 x i64> %a, <vscale x 8 x i64> %b
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @selectcc_nxv8i64(i64 signext %a, i64 signext %b, <vscale x 8 x i64> %c, <vscale x 8 x i64> %d) {
; RV32-LABEL: selectcc_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    xor a1, a1, a3
; RV32-NEXT:    xor a0, a0, a2
; RV32-NEXT:    or a0, a0, a1
; RV32-NEXT:    snez a0, a0
; RV32-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; RV32-NEXT:    vmv.v.x v24, a0
; RV32-NEXT:    vmsne.vi v0, v24, 0
; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v16, v8, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: selectcc_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    xor a0, a0, a1
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; RV64-NEXT:    vmv.v.x v24, a0
; RV64-NEXT:    vmsne.vi v0, v24, 0
; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT:    vmerge.vvm v8, v16, v8, v0
; RV64-NEXT:    ret
  ; rv32 compares the i64 in two GPR halves (xor/xor/or/snez); rv64 needs one xor+snez.
  %cmp = icmp ne i64 %a, %b
  %v = select i1 %cmp, <vscale x 8 x i64> %c, <vscale x 8 x i64> %d
  ret <vscale x 8 x i64> %v
}
