; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s

; Test that we can remove trivially-undef VP operations of various kinds.

; VP memory operations (load/gather/store/scatter) fold to nothing when the
; explicit vector length is 0 or the mask is all-false; each test expects a
; bare return with no vector instructions emitted.
declare <4 x i32> @llvm.vp.load.v4i32.p0(ptr, <4 x i1>, i32)

define <4 x i32> @vload_v4i32_zero_evl(ptr %ptr, <4 x i1> %m) {
; CHECK-LABEL: vload_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.load.v4i32.p0(ptr %ptr, <4 x i1> %m, i32 0)
  ret <4 x i32> %v
}

define <4 x i32> @vload_v4i32_false_mask(ptr %ptr, i32 %evl) {
; CHECK-LABEL: vload_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.load.v4i32.p0(ptr %ptr, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x i32> %v
}

declare <4 x i32> @llvm.vp.gather.v4i32.v4p0(<4 x ptr>, <4 x i1>, i32)

define <4 x i32> @vgather_v4i32_v4i32_zero_evl(<4 x ptr> %ptrs, <4 x i1> %m) {
; CHECK-LABEL: vgather_v4i32_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.gather.v4i32.v4p0(<4 x ptr> %ptrs, <4 x i1> %m, i32 0)
  ret <4 x i32> %v
}

define <4 x i32> @vgather_v4i32_v4i32_false_mask(<4 x ptr> %ptrs, i32 %evl) {
; CHECK-LABEL: vgather_v4i32_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.gather.v4i32.v4p0(<4 x ptr> %ptrs, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x i32> %v
}

declare void @llvm.vp.store.v4i32.p0(<4 x i32>, ptr, <4 x i1>, i32)

define void @vstore_v4i32_zero_evl(<4 x i32> %val, ptr %ptr, <4 x i1> %m) {
; CHECK-LABEL: vstore_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  call void @llvm.vp.store.v4i32.p0(<4 x i32> %val, ptr %ptr, <4 x i1> %m, i32 0)
  ret void
}

define void @vstore_v4i32_false_mask(<4 x i32> %val, ptr %ptr, i32 %evl) {
; CHECK-LABEL: vstore_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  call void @llvm.vp.store.v4i32.p0(<4 x i32> %val, ptr %ptr, <4 x i1> zeroinitializer, i32 %evl)
  ret void
}

declare void @llvm.vp.scatter.v4i32.v4p0(<4 x i32>, <4 x ptr>, <4 x i1>, i32)

define void @vscatter_v4i32_zero_evl(<4 x i32> %val, <4 x ptr> %ptrs, <4 x i1> %m) {
; CHECK-LABEL: vscatter_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  call void @llvm.vp.scatter.v4i32.v4p0(<4 x i32> %val, <4 x ptr> %ptrs, <4 x i1> %m, i32 0)
  ret void
}

define void @vscatter_v4i32_false_mask(<4 x i32> %val, <4 x ptr> %ptrs, i32 %evl) {
; CHECK-LABEL: vscatter_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  call void @llvm.vp.scatter.v4i32.v4p0(<4 x i32> %val, <4 x ptr> %ptrs, <4 x i1> zeroinitializer, i32 %evl)
  ret void
}

; Integer binary VP operations: with EVL 0 or an all-false mask the result is
; trivially undef, so each function should lower to a bare return.
declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define <4 x i32> @vadd_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) {
; CHECK-LABEL: vadd_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0)
  ret <4 x i32> %s
}

define <4 x i32> @vadd_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) {
; CHECK-LABEL: vadd_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x i32> %s
}

declare <4 x i32> @llvm.vp.and.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define <4 x i32> @vand_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) {
; CHECK-LABEL: vand_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0)
  ret <4 x i32> %s
}

define <4 x i32> @vand_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) {
; CHECK-LABEL: vand_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x i32> %s
}

declare <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define <4 x i32> @vlshr_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) {
; CHECK-LABEL: vlshr_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0)
  ret <4 x i32> %s
}

define <4 x i32> @vlshr_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) {
; CHECK-LABEL: vlshr_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x i32> %s
}

declare <4 x i32> @llvm.vp.mul.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define <4 x i32> @vmul_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) {
; CHECK-LABEL: vmul_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.mul.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0)
  ret <4 x i32> %s
}

define <4 x i32> @vmul_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) {
; CHECK-LABEL: vmul_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.mul.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x i32> %s
}

declare <4 x i32> @llvm.vp.or.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define <4 x i32> @vor_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) {
; CHECK-LABEL: vor_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0)
  ret <4 x i32> %s
}

define <4 x i32> @vor_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) {
; CHECK-LABEL: vor_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x i32> %s
}

declare <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define <4 x i32> @vsdiv_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) {
; CHECK-LABEL: vsdiv_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0)
  ret <4 x i32> %s
}

define <4 x i32> @vsdiv_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) {
; CHECK-LABEL: vsdiv_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x i32> %s
}

declare <4 x i32> @llvm.vp.srem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define <4 x i32> @vsrem_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) {
; CHECK-LABEL: vsrem_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0)
  ret <4 x i32> %s
}

define <4 x i32> @vsrem_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) {
; CHECK-LABEL: vsrem_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x i32> %s
}

declare <4 x i32> @llvm.vp.sub.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define <4 x i32> @vsub_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) {
; CHECK-LABEL: vsub_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0)
  ret <4 x i32> %s
}

define <4 x i32> @vsub_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) {
; CHECK-LABEL: vsub_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x i32> %s
}

declare <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define <4 x i32> @vudiv_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) {
; CHECK-LABEL: vudiv_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0)
  ret <4 x i32> %s
}

define <4 x i32> @vudiv_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) {
; CHECK-LABEL: vudiv_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x i32> %s
}

declare <4 x i32> @llvm.vp.urem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define <4 x i32> @vurem_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) {
; CHECK-LABEL: vurem_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0)
  ret <4 x i32> %s
}

define <4 x i32> @vurem_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) {
; CHECK-LABEL: vurem_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x i32> %s
}

declare <4 x i32> @llvm.vp.xor.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define <4 x i32> @vxor_v4i32_zero_evl(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m) {
; CHECK-LABEL: vxor_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 0)
  ret <4 x i32> %s
}

define <4 x i32> @vxor_v4i32_false_mask(<4 x i32> %va, <4 x i32> %vb, i32 %evl) {
; CHECK-LABEL: vxor_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x i32> %s
}

; Floating-point binary VP operations: same trivially-undef folds as the
; integer cases above (EVL 0 or all-false mask lowers to a bare return).
declare <4 x float> @llvm.vp.fadd.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)

define <4 x float> @vfadd_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) {
; CHECK-LABEL: vfadd_v4f32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 0)
  ret <4 x float> %s
}

define <4 x float> @vfadd_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 %evl) {
; CHECK-LABEL: vfadd_v4f32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x float> %s
}

declare <4 x float> @llvm.vp.fsub.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)

define <4 x float> @vfsub_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) {
; CHECK-LABEL: vfsub_v4f32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 0)
  ret <4 x float> %s
}

define <4 x float> @vfsub_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 %evl) {
; CHECK-LABEL: vfsub_v4f32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x float> %s
}

declare <4 x float> @llvm.vp.fmul.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)

define <4 x float> @vfmul_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) {
; CHECK-LABEL: vfmul_v4f32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 0)
  ret <4 x float> %s
}

define <4 x float> @vfmul_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 %evl) {
; CHECK-LABEL: vfmul_v4f32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x float> %s
}

declare <4 x float> @llvm.vp.fdiv.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)

define <4 x float> @vfdiv_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) {
; CHECK-LABEL: vfdiv_v4f32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 0)
  ret <4 x float> %s
}

define <4 x float> @vfdiv_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 %evl) {
; CHECK-LABEL: vfdiv_v4f32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x float> %s
}

declare <4 x float> @llvm.vp.frem.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)

define <4 x float> @vfrem_v4f32_zero_evl(<4 x float> %va, <4 x float> %vb, <4 x i1> %m) {
; CHECK-LABEL: vfrem_v4f32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x float> @llvm.vp.frem.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 0)
  ret <4 x float> %s
}

define <4 x float> @vfrem_v4f32_false_mask(<4 x float> %va, <4 x float> %vb, i32 %evl) {
; CHECK-LABEL: vfrem_v4f32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call <4 x float> @llvm.vp.frem.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> zeroinitializer, i32 %evl)
  ret <4 x float> %s
}

; Integer VP reductions: with EVL 0 or an all-false mask the reduction is
; trivially undef, so each function should lower to a bare return.
declare i32 @llvm.vp.reduce.add.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define i32 @vreduce_add_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_add_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.add.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0)
  ret i32 %s
}

define i32 @vreduce_add_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) {
; CHECK-LABEL: vreduce_add_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.add.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret i32 %s
}

declare i32 @llvm.vp.reduce.mul.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define i32 @vreduce_mul_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_mul_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.mul.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0)
  ret i32 %s
}

define i32 @vreduce_mul_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) {
; CHECK-LABEL: vreduce_mul_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.mul.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret i32 %s
}

declare i32 @llvm.vp.reduce.and.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define i32 @vreduce_and_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_and_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.and.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0)
  ret i32 %s
}

define i32 @vreduce_and_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) {
; CHECK-LABEL: vreduce_and_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.and.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret i32 %s
}

declare i32 @llvm.vp.reduce.or.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define i32 @vreduce_or_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_or_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.or.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0)
  ret i32 %s
}

define i32 @vreduce_or_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) {
; CHECK-LABEL: vreduce_or_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.or.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret i32 %s
}

declare i32 @llvm.vp.reduce.xor.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define i32 @vreduce_xor_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_xor_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.xor.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0)
  ret i32 %s
}

define i32 @vreduce_xor_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) {
; CHECK-LABEL: vreduce_xor_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.xor.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret i32 %s
}

declare i32 @llvm.vp.reduce.smax.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define i32 @vreduce_smax_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_smax_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.smax.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0)
  ret i32 %s
}

define i32 @vreduce_smax_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) {
; CHECK-LABEL: vreduce_smax_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.smax.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret i32 %s
}

declare i32 @llvm.vp.reduce.smin.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define i32 @vreduce_smin_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_smin_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.smin.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0)
  ret i32 %s
}

define i32 @vreduce_smin_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) {
; CHECK-LABEL: vreduce_smin_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.smin.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret i32 %s
}

declare i32 @llvm.vp.reduce.umax.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define i32 @vreduce_umax_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_umax_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.umax.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0)
  ret i32 %s
}

define i32 @vreduce_umax_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) {
; CHECK-LABEL: vreduce_umax_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.umax.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret i32 %s
}

declare i32 @llvm.vp.reduce.umin.v4i32(i32, <4 x i32>, <4 x i1>, i32)

define i32 @vreduce_umin_v4i32_zero_evl(i32 %start, <4 x i32> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_umin_v4i32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.umin.v4i32(i32 %start, <4 x i32> %val, <4 x i1> %m, i32 0)
  ret i32 %s
}

define i32 @vreduce_umin_v4i32_false_mask(i32 %start, <4 x i32> %val, i32 %evl) {
; CHECK-LABEL: vreduce_umin_v4i32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call i32 @llvm.vp.reduce.umin.v4i32(i32 %start, <4 x i32> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret i32 %s
}

; FP add/mul VP reductions, in both the ordered (sequential) form and the
; reassociable form (the `reassoc` fast-math flag); all fold to a bare return
; when EVL is 0 or the mask is all-false.
declare float @llvm.vp.reduce.fadd.v4f32(float, <4 x float>, <4 x i1>, i32)

define float @vreduce_seq_fadd_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_seq_fadd_v4f32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call float @llvm.vp.reduce.fadd.v4f32(float %start, <4 x float> %val, <4 x i1> %m, i32 0)
  ret float %s
}

define float @vreduce_seq_fadd_v4f32_false_mask(float %start, <4 x float> %val, i32 %evl) {
; CHECK-LABEL: vreduce_seq_fadd_v4f32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call float @llvm.vp.reduce.fadd.v4f32(float %start, <4 x float> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret float %s
}

define float @vreduce_fadd_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_fadd_v4f32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call reassoc float @llvm.vp.reduce.fadd.v4f32(float %start, <4 x float> %val, <4 x i1> %m, i32 0)
  ret float %s
}

define float @vreduce_fadd_v4f32_false_mask(float %start, <4 x float> %val, i32 %evl) {
; CHECK-LABEL: vreduce_fadd_v4f32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call reassoc float @llvm.vp.reduce.fadd.v4f32(float %start, <4 x float> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret float %s
}

declare float @llvm.vp.reduce.fmul.v4f32(float, <4 x float>, <4 x i1>, i32)

define float @vreduce_seq_fmul_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_seq_fmul_v4f32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call float @llvm.vp.reduce.fmul.v4f32(float %start, <4 x float> %val, <4 x i1> %m, i32 0)
  ret float %s
}

define float @vreduce_seq_fmul_v4f32_false_mask(float %start, <4 x float> %val, i32 %evl) {
; CHECK-LABEL: vreduce_seq_fmul_v4f32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call float @llvm.vp.reduce.fmul.v4f32(float %start, <4 x float> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret float %s
}

define float @vreduce_fmul_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_fmul_v4f32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call reassoc float @llvm.vp.reduce.fmul.v4f32(float %start, <4 x float> %val, <4 x i1> %m, i32 0)
  ret float %s
}

define float @vreduce_fmul_v4f32_false_mask(float %start, <4 x float> %val, i32 %evl) {
; CHECK-LABEL: vreduce_fmul_v4f32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call reassoc float @llvm.vp.reduce.fmul.v4f32(float %start, <4 x float> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret float %s
}

; FP min/max VP reductions (fmin/fmax and the NaN-propagating
; fminimum/fmaximum variants); all fold to a bare return when EVL is 0 or the
; mask is all-false.
declare float @llvm.vp.reduce.fmin.v4f32(float, <4 x float>, <4 x i1>, i32)

define float @vreduce_fmin_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_fmin_v4f32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call float @llvm.vp.reduce.fmin.v4f32(float %start, <4 x float> %val, <4 x i1> %m, i32 0)
  ret float %s
}

define float @vreduce_fmin_v4f32_false_mask(float %start, <4 x float> %val, i32 %evl) {
; CHECK-LABEL: vreduce_fmin_v4f32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call float @llvm.vp.reduce.fmin.v4f32(float %start, <4 x float> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret float %s
}

declare float @llvm.vp.reduce.fmax.v4f32(float, <4 x float>, <4 x i1>, i32)

define float @vreduce_fmax_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_fmax_v4f32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call float @llvm.vp.reduce.fmax.v4f32(float %start, <4 x float> %val, <4 x i1> %m, i32 0)
  ret float %s
}

define float @vreduce_fmax_v4f32_false_mask(float %start, <4 x float> %val, i32 %evl) {
; CHECK-LABEL: vreduce_fmax_v4f32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call float @llvm.vp.reduce.fmax.v4f32(float %start, <4 x float> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret float %s
}

define float @vreduce_fminimum_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_fminimum_v4f32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call float @llvm.vp.reduce.fminimum.v4f32(float %start, <4 x float> %val, <4 x i1> %m, i32 0)
  ret float %s
}

define float @vreduce_fminimum_v4f32_false_mask(float %start, <4 x float> %val, i32 %evl) {
; CHECK-LABEL: vreduce_fminimum_v4f32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call float @llvm.vp.reduce.fminimum.v4f32(float %start, <4 x float> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret float %s
}

define float @vreduce_fmaximum_v4f32_zero_evl(float %start, <4 x float> %val, <4 x i1> %m) {
; CHECK-LABEL: vreduce_fmaximum_v4f32_zero_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call float @llvm.vp.reduce.fmaximum.v4f32(float %start, <4 x float> %val, <4 x i1> %m, i32 0)
  ret float %s
}

define float @vreduce_fmaximum_v4f32_false_mask(float %start, <4 x float> %val, i32 %evl) {
; CHECK-LABEL: vreduce_fmaximum_v4f32_false_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %s = call float @llvm.vp.reduce.fmaximum.v4f32(float %start, <4 x float> %val, <4 x i1> zeroinitializer, i32 %evl)
  ret float %s
}
