; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v,+m -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v,+m -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
; RUN:   -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v,+m -target-abi=lp64d -riscv-v-vector-bits-min=128 \
; RUN:   -verify-machineinstrs < %s | FileCheck %s

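; vp.select on i1 vectors has no vmerge.vvm form (the operands live in mask
; registers), so per the CHECK lines below it lowers to mask logic instead:
; result = (b & a) | (c & ~a), i.e. vmand.mm + vmandn.mm + vmor.mm.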
declare <1 x i1> @llvm.vp.select.v1i1(<1 x i1>, <1 x i1>, <1 x i1>, i32)

define <1 x i1> @select_v1i1(<1 x i1> %a, <1 x i1> %b, <1 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmandn.mm v9, v9, v0
; CHECK-NEXT:    vmand.mm v8, v8, v0
; CHECK-NEXT:    vmor.mm v0, v8, v9
; CHECK-NEXT:    ret
  %v = call <1 x i1> @llvm.vp.select.v1i1(<1 x i1> %a, <1 x i1> %b, <1 x i1> %c, i32 %evl)
  ret <1 x i1> %v
}

declare <2 x i1> @llvm.vp.select.v2i1(<2 x i1>, <2 x i1>, <2 x i1>, i32)

define <2 x i1> @select_v2i1(<2 x i1> %a, <2 x i1> %b, <2 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmandn.mm v9, v9, v0
; CHECK-NEXT:    vmand.mm v8, v8, v0
; CHECK-NEXT:    vmor.mm v0, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i1> @llvm.vp.select.v2i1(<2 x i1> %a, <2 x i1> %b, <2 x i1> %c, i32 %evl)
  ret <2 x i1> %v
}

declare <4 x i1> @llvm.vp.select.v4i1(<4 x i1>, <4 x i1>, <4 x i1>, i32)

define <4 x i1> @select_v4i1(<4 x i1> %a, <4 x i1> %b, <4 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmandn.mm v9, v9, v0
; CHECK-NEXT:    vmand.mm v8, v8, v0
; CHECK-NEXT:    vmor.mm v0, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i1> @llvm.vp.select.v4i1(<4 x i1> %a, <4 x i1> %b, <4 x i1> %c, i32 %evl)
  ret <4 x i1> %v
}

declare <8 x i1> @llvm.vp.select.v8i1(<8 x i1>, <8 x i1>, <8 x i1>, i32)

define <8 x i1> @select_v8i1(<8 x i1> %a, <8 x i1> %b, <8 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmandn.mm v9, v9, v0
; CHECK-NEXT:    vmand.mm v8, v8, v0
; CHECK-NEXT:    vmor.mm v0, v8, v9
; CHECK-NEXT:    ret
  %v = call <8 x i1> @llvm.vp.select.v8i1(<8 x i1> %a, <8 x i1> %b, <8 x i1> %c, i32 %evl)
  ret <8 x i1> %v
}

declare <16 x i1> @llvm.vp.select.v16i1(<16 x i1>, <16 x i1>, <16 x i1>, i32)

define <16 x i1> @select_v16i1(<16 x i1> %a, <16 x i1> %b, <16 x i1> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmandn.mm v9, v9, v0
; CHECK-NEXT:    vmand.mm v8, v8, v0
; CHECK-NEXT:    vmor.mm v0, v8, v9
; CHECK-NEXT:    ret
  %v = call <16 x i1> @llvm.vp.select.v16i1(<16 x i1> %a, <16 x i1> %b, <16 x i1> %c, i32 %evl)
  ret <16 x i1> %v
}

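; The odd i7 element type gets promoted to i8. A select only moves values, so
; no masking of the extra bit is needed and the lowering matches select_v8i8
; below: a single vmerge.vvm at e8.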
declare <8 x i7> @llvm.vp.select.v8i7(<8 x i1>, <8 x i7>, <8 x i7>, i32)

define <8 x i7> @select_v8i7(<8 x i1> %a, <8 x i7> %b, <8 x i7> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <8 x i7> @llvm.vp.select.v8i7(<8 x i1> %a, <8 x i7> %b, <8 x i7> %c, i32 %evl)
  ret <8 x i7> %v
}

declare <2 x i8> @llvm.vp.select.v2i8(<2 x i1>, <2 x i8>, <2 x i8>, i32)

define <2 x i8> @select_v2i8(<2 x i1> %a, <2 x i8> %b, <2 x i8> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.select.v2i8(<2 x i1> %a, <2 x i8> %b, <2 x i8> %c, i32 %evl)
  ret <2 x i8> %v
}

declare <4 x i8> @llvm.vp.select.v4i8(<4 x i1>, <4 x i8>, <4 x i8>, i32)

define <4 x i8> @select_v4i8(<4 x i1> %a, <4 x i8> %b, <4 x i8> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.select.v4i8(<4 x i1> %a, <4 x i8> %b, <4 x i8> %c, i32 %evl)
  ret <4 x i8> %v
}

declare <5 x i8> @llvm.vp.select.v5i8(<5 x i1>, <5 x i8>, <5 x i8>, i32)

define <5 x i8> @select_v5i8(<5 x i1> %a, <5 x i8> %b, <5 x i8> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v5i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <5 x i8> @llvm.vp.select.v5i8(<5 x i1> %a, <5 x i8> %b, <5 x i8> %c, i32 %evl)
  ret <5 x i8> %v
}

declare <8 x i8> @llvm.vp.select.v8i8(<8 x i1>, <8 x i8>, <8 x i8>, i32)

define <8 x i8> @select_v8i8(<8 x i1> %a, <8 x i8> %b, <8 x i8> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.select.v8i8(<8 x i1> %a, <8 x i8> %b, <8 x i8> %c, i32 %evl)
  ret <8 x i8> %v
}

declare <16 x i8> @llvm.vp.select.v16i8(<16 x i1>, <16 x i8>, <16 x i8>, i32)

define <16 x i8> @select_v16i8(<16 x i1> %a, <16 x i8> %b, <16 x i8> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.select.v16i8(<16 x i1> %a, <16 x i8> %b, <16 x i8> %c, i32 %evl)
  ret <16 x i8> %v
}

declare <256 x i8> @llvm.vp.select.v256i8(<256 x i1>, <256 x i8>, <256 x i8>, i32)

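; v256i8 exceeds the largest register group (m8), so the select is split into
; two halves. The EVL is split branchlessly: the low half uses min(evl, 128)
; (the bltu/li sequence) and the high half a saturating evl - 128 (the
; sltu/addi/and sequence, which yields 0 whenever evl < 128).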
define <256 x i8> @select_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v256i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    addi a2, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v6, v8
; CHECK-NEXT:    vmv1r.v v7, v0
; CHECK-NEXT:    li a2, 128
; CHECK-NEXT:    addi a4, a1, 128
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v24, (a0)
; CHECK-NEXT:    addi a0, a3, -128
; CHECK-NEXT:    vle8.v v8, (a4)
; CHECK-NEXT:    sltu a4, a3, a0
; CHECK-NEXT:    vle8.v v16, (a1)
; CHECK-NEXT:    addi a4, a4, -1
; CHECK-NEXT:    and a0, a4, a0
; CHECK-NEXT:    vmv1r.v v0, v6
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v24, v8, v24, v0
; CHECK-NEXT:    bltu a3, a2, .LBB11_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a3, 128
; CHECK-NEXT:  .LBB11_2:
; CHECK-NEXT:    vmv1r.v v0, v7
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    vmv8r.v v16, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %v = call <256 x i8> @llvm.vp.select.v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i32 %evl)
  ret <256 x i8> %v
}

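; Same split as select_v256i8, but the constant EVL of 129 lets the two
; lengths fold at compile time to 128 (vsetvli with a2 = 128) and 1
; (vsetivli zero, 1).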
define <256 x i8> @select_evl_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c) {
; CHECK-LABEL: select_evl_v256i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    li a3, 24
; CHECK-NEXT:    mul a2, a2, a3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 4
; CHECK-NEXT:    add a2, sp, a2
; CHECK-NEXT:    addi a2, a2, 16
; CHECK-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
; CHECK-NEXT:    li a2, 128
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v16, (a0)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    addi a0, a1, 128
; CHECK-NEXT:    vle8.v v24, (a0)
; CHECK-NEXT:    vle8.v v16, (a1)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v24, v24, v16, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    vmv8r.v v16, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 24
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %v = call <256 x i8> @llvm.vp.select.v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i32 129)
  ret <256 x i8> %v
}

declare <2 x i16> @llvm.vp.select.v2i16(<2 x i1>, <2 x i16>, <2 x i16>, i32)

define <2 x i16> @select_v2i16(<2 x i1> %a, <2 x i16> %b, <2 x i16> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.select.v2i16(<2 x i1> %a, <2 x i16> %b, <2 x i16> %c, i32 %evl)
  ret <2 x i16> %v
}

declare <4 x i16> @llvm.vp.select.v4i16(<4 x i1>, <4 x i16>, <4 x i16>, i32)

define <4 x i16> @select_v4i16(<4 x i1> %a, <4 x i16> %b, <4 x i16> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.select.v4i16(<4 x i1> %a, <4 x i16> %b, <4 x i16> %c, i32 %evl)
  ret <4 x i16> %v
}

declare <8 x i16> @llvm.vp.select.v8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32)

define <8 x i16> @select_v8i16(<8 x i1> %a, <8 x i16> %b, <8 x i16> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.select.v8i16(<8 x i1> %a, <8 x i16> %b, <8 x i16> %c, i32 %evl)
  ret <8 x i16> %v
}

declare <16 x i16> @llvm.vp.select.v16i16(<16 x i1>, <16 x i16>, <16 x i16>, i32)

define <16 x i16> @select_v16i16(<16 x i1> %a, <16 x i16> %b, <16 x i16> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.select.v16i16(<16 x i1> %a, <16 x i16> %b, <16 x i16> %c, i32 %evl)
  ret <16 x i16> %v
}

declare <2 x i32> @llvm.vp.select.v2i32(<2 x i1>, <2 x i32>, <2 x i32>, i32)

define <2 x i32> @select_v2i32(<2 x i1> %a, <2 x i32> %b, <2 x i32> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.select.v2i32(<2 x i1> %a, <2 x i32> %b, <2 x i32> %c, i32 %evl)
  ret <2 x i32> %v
}

declare <4 x i32> @llvm.vp.select.v4i32(<4 x i1>, <4 x i32>, <4 x i32>, i32)

define <4 x i32> @select_v4i32(<4 x i1> %a, <4 x i32> %b, <4 x i32> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.select.v4i32(<4 x i1> %a, <4 x i32> %b, <4 x i32> %c, i32 %evl)
  ret <4 x i32> %v
}

declare <8 x i32> @llvm.vp.select.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32)

define <8 x i32> @select_v8i32(<8 x i1> %a, <8 x i32> %b, <8 x i32> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %a, <8 x i32> %b, <8 x i32> %c, i32 %evl)
  ret <8 x i32> %v
}

declare <16 x i32> @llvm.vp.select.v16i32(<16 x i1>, <16 x i32>, <16 x i32>, i32)

define <16 x i32> @select_v16i32(<16 x i1> %a, <16 x i32> %b, <16 x i32> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.select.v16i32(<16 x i1> %a, <16 x i32> %b, <16 x i32> %c, i32 %evl)
  ret <16 x i32> %v
}

declare <2 x i64> @llvm.vp.select.v2i64(<2 x i1>, <2 x i64>, <2 x i64>, i32)

define <2 x i64> @select_v2i64(<2 x i1> %a, <2 x i64> %b, <2 x i64> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.select.v2i64(<2 x i1> %a, <2 x i64> %b, <2 x i64> %c, i32 %evl)
  ret <2 x i64> %v
}

declare <4 x i64> @llvm.vp.select.v4i64(<4 x i1>, <4 x i64>, <4 x i64>, i32)

define <4 x i64> @select_v4i64(<4 x i1> %a, <4 x i64> %b, <4 x i64> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.select.v4i64(<4 x i1> %a, <4 x i64> %b, <4 x i64> %c, i32 %evl)
  ret <4 x i64> %v
}

declare <8 x i64> @llvm.vp.select.v8i64(<8 x i1>, <8 x i64>, <8 x i64>, i32)

define <8 x i64> @select_v8i64(<8 x i1> %a, <8 x i64> %b, <8 x i64> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.select.v8i64(<8 x i1> %a, <8 x i64> %b, <8 x i64> %c, i32 %evl)
  ret <8 x i64> %v
}

declare <16 x i64> @llvm.vp.select.v16i64(<16 x i1>, <16 x i64>, <16 x i64>, i32)

define <16 x i64> @select_v16i64(<16 x i1> %a, <16 x i64> %b, <16 x i64> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.select.v16i64(<16 x i1> %a, <16 x i64> %b, <16 x i64> %c, i32 %evl)
  ret <16 x i64> %v
}

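; v32i64 is likewise split into two 16-element m8 halves, with the same
; min/saturating-subtract EVL split; the mask bits for the upper half are
; extracted with vslidedown.vi by 2 bytes (16 bits).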
declare <32 x i64> @llvm.vp.select.v32i64(<32 x i1>, <32 x i64>, <32 x i64>, i32)

define <32 x i64> @select_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v32i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v16, (a1)
; CHECK-NEXT:    vle64.v v24, (a0)
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    bltu a2, a1, .LBB25_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a0, 16
; CHECK-NEXT:  .LBB25_2:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v24, v8, v0
; CHECK-NEXT:    addi a0, a2, -16
; CHECK-NEXT:    sltu a1, a2, a0
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v0, v0, 2
; CHECK-NEXT:    and a0, a1, a0
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %v = call <32 x i64> @llvm.vp.select.v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32 %evl)
  ret <32 x i64> %v
}

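; With a constant EVL of 17, the split lengths fold to 16 and 1 at compile
; time (the two vsetivli immediates below).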
define <32 x i64> @select_evl_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c) {
; CHECK-LABEL: select_evl_v32i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    li a2, 24
; CHECK-NEXT:    mul a1, a1, a2
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 4
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v16, (a1)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v24, v0, 2
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 24
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %v = call <32 x i64> @llvm.vp.select.v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32 17)
  ret <32 x i64> %v
}

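; The floating-point cases below are shared by the zvfh and zvfhmin RUN lines
; (all four use the common CHECK prefix): a select never operates on the
; element values themselves, so vmerge.vvm at e16 suffices even without full
; zvfh support.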
declare <2 x half> @llvm.vp.select.v2f16(<2 x i1>, <2 x half>, <2 x half>, i32)

define <2 x half> @select_v2f16(<2 x i1> %a, <2 x half> %b, <2 x half> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <2 x half> @llvm.vp.select.v2f16(<2 x i1> %a, <2 x half> %b, <2 x half> %c, i32 %evl)
  ret <2 x half> %v
}

declare <4 x half> @llvm.vp.select.v4f16(<4 x i1>, <4 x half>, <4 x half>, i32)

define <4 x half> @select_v4f16(<4 x i1> %a, <4 x half> %b, <4 x half> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <4 x half> @llvm.vp.select.v4f16(<4 x i1> %a, <4 x half> %b, <4 x half> %c, i32 %evl)
  ret <4 x half> %v
}

declare <8 x half> @llvm.vp.select.v8f16(<8 x i1>, <8 x half>, <8 x half>, i32)

define <8 x half> @select_v8f16(<8 x i1> %a, <8 x half> %b, <8 x half> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <8 x half> @llvm.vp.select.v8f16(<8 x i1> %a, <8 x half> %b, <8 x half> %c, i32 %evl)
  ret <8 x half> %v
}

declare <16 x half> @llvm.vp.select.v16f16(<16 x i1>, <16 x half>, <16 x half>, i32)

define <16 x half> @select_v16f16(<16 x i1> %a, <16 x half> %b, <16 x half> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = call <16 x half> @llvm.vp.select.v16f16(<16 x i1> %a, <16 x half> %b, <16 x half> %c, i32 %evl)
  ret <16 x half> %v
}

declare <2 x float> @llvm.vp.select.v2f32(<2 x i1>, <2 x float>, <2 x float>, i32)

define <2 x float> @select_v2f32(<2 x i1> %a, <2 x float> %b, <2 x float> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <2 x float> @llvm.vp.select.v2f32(<2 x i1> %a, <2 x float> %b, <2 x float> %c, i32 %evl)
  ret <2 x float> %v
}

declare <4 x float> @llvm.vp.select.v4f32(<4 x i1>, <4 x float>, <4 x float>, i32)

define <4 x float> @select_v4f32(<4 x i1> %a, <4 x float> %b, <4 x float> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %a, <4 x float> %b, <4 x float> %c, i32 %evl)
  ret <4 x float> %v
}

declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32)

define <8 x float> @select_v8f32(<8 x i1> %a, <8 x float> %b, <8 x float> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %a, <8 x float> %b, <8 x float> %c, i32 %evl)
  ret <8 x float> %v
}

declare <16 x float> @llvm.vp.select.v16f32(<16 x i1>, <16 x float>, <16 x float>, i32)

define <16 x float> @select_v16f32(<16 x i1> %a, <16 x float> %b, <16 x float> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %v = call <16 x float> @llvm.vp.select.v16f32(<16 x i1> %a, <16 x float> %b, <16 x float> %c, i32 %evl)
  ret <16 x float> %v
}

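; v64f32 is split into two 32-element m8 halves with the same min/saturating-
; subtract EVL split as above; the upper mask half is extracted with
; vslidedown.vi by 4 bytes (32 bits).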
declare <64 x float> @llvm.vp.select.v64f32(<64 x i1>, <64 x float>, <64 x float>, i32)

define <64 x float> @select_v64f32(<64 x i1> %a, <64 x float> %b, <64 x float> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v64f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v16, (a1)
; CHECK-NEXT:    vle32.v v24, (a0)
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    bltu a2, a3, .LBB35_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:  .LBB35_2:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v24, v8, v0
; CHECK-NEXT:    addi a0, a2, -32
; CHECK-NEXT:    sltu a1, a2, a0
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vi v0, v0, 4
; CHECK-NEXT:    and a0, a1, a0
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v16, v16, v24, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %v = call <64 x float> @llvm.vp.select.v64f32(<64 x i1> %a, <64 x float> %b, <64 x float> %c, i32 %evl)
  ret <64 x float> %v
}

declare <2 x double> @llvm.vp.select.v2f64(<2 x i1>, <2 x double>, <2 x double>, i32)

define <2 x double> @select_v2f64(<2 x i1> %a, <2 x double> %b, <2 x double> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = call <2 x double> @llvm.vp.select.v2f64(<2 x i1> %a, <2 x double> %b, <2 x double> %c, i32 %evl)
  ret <2 x double> %v
}

declare <4 x double> @llvm.vp.select.v4f64(<4 x i1>, <4 x double>, <4 x double>, i32)

define <4 x double> @select_v4f64(<4 x i1> %a, <4 x double> %b, <4 x double> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = call <4 x double> @llvm.vp.select.v4f64(<4 x i1> %a, <4 x double> %b, <4 x double> %c, i32 %evl)
  ret <4 x double> %v
}

declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32)

define <8 x double> @select_v8f64(<8 x i1> %a, <8 x double> %b, <8 x double> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %v = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %a, <8 x double> %b, <8 x double> %c, i32 %evl)
  ret <8 x double> %v
}

declare <16 x double> @llvm.vp.select.v16f64(<16 x i1>, <16 x double>, <16 x double>, i32)

define <16 x double> @select_v16f64(<16 x i1> %a, <16 x double> %b, <16 x double> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %v = call <16 x double> @llvm.vp.select.v16f64(<16 x i1> %a, <16 x double> %b, <16 x double> %c, i32 %evl)
  ret <16 x double> %v
}