; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+m -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK

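; The @llvm.vp.abs lowering checked throughout this file follows one pattern
; for every element type and vector length: the V extension has no vector
; integer abs instruction, so |x| is computed as max(x, 0 - x), i.e. a
; vrsub.vi to negate followed by a vmax.vv. The EVL operand is applied via
; vsetvli, and the mask (when present) via v0.t.
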
declare <2 x i8> @llvm.vp.abs.v2i8(<2 x i8>, i1 immarg, <2 x i1>, i32)

define <2 x i8> @vp_abs_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> %va, i1 false, <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vp_abs_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i8> %v
}

declare <4 x i8> @llvm.vp.abs.v4i8(<4 x i8>, i1 immarg, <4 x i1>, i32)

define <4 x i8> @vp_abs_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> %va, i1 false, <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vp_abs_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i8> %v
}

declare <8 x i8> @llvm.vp.abs.v8i8(<8 x i8>, i1 immarg, <8 x i1>, i32)

define <8 x i8> @vp_abs_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> %va, i1 false, <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vp_abs_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i8> %v
}

declare <16 x i8> @llvm.vp.abs.v16i8(<16 x i8>, i1 immarg, <16 x i1>, i32)

define <16 x i8> @vp_abs_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> %va, i1 false, <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vp_abs_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i8> %v
}

declare <2 x i16> @llvm.vp.abs.v2i16(<2 x i16>, i1 immarg, <2 x i1>, i32)

define <2 x i16> @vp_abs_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.abs.v2i16(<2 x i16> %va, i1 false, <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vp_abs_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.abs.v2i16(<2 x i16> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i16> %v
}

declare <4 x i16> @llvm.vp.abs.v4i16(<4 x i16>, i1 immarg, <4 x i1>, i32)

define <4 x i16> @vp_abs_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.abs.v4i16(<4 x i16> %va, i1 false, <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vp_abs_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.abs.v4i16(<4 x i16> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i16> %v
}

declare <8 x i16> @llvm.vp.abs.v8i16(<8 x i16>, i1 immarg, <8 x i1>, i32)

define <8 x i16> @vp_abs_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.abs.v8i16(<8 x i16> %va, i1 false, <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vp_abs_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.abs.v8i16(<8 x i16> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i16> %v
}

declare <16 x i16> @llvm.vp.abs.v16i16(<16 x i16>, i1 immarg, <16 x i1>, i32)

define <16 x i16> @vp_abs_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.abs.v16i16(<16 x i16> %va, i1 false, <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vp_abs_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.abs.v16i16(<16 x i16> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i16> %v
}

declare <2 x i32> @llvm.vp.abs.v2i32(<2 x i32>, i1 immarg, <2 x i1>, i32)

define <2 x i32> @vp_abs_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.abs.v2i32(<2 x i32> %va, i1 false, <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vp_abs_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.abs.v2i32(<2 x i32> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i32> %v
}

declare <4 x i32> @llvm.vp.abs.v4i32(<4 x i32>, i1 immarg, <4 x i1>, i32)

define <4 x i32> @vp_abs_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %va, i1 false, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vp_abs_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i32> %v
}

declare <8 x i32> @llvm.vp.abs.v8i32(<8 x i32>, i1 immarg, <8 x i1>, i32)

define <8 x i32> @vp_abs_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.abs.v8i32(<8 x i32> %va, i1 false, <8 x i1> %m, i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vp_abs_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.abs.v8i32(<8 x i32> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i32> %v
}

declare <16 x i32> @llvm.vp.abs.v16i32(<16 x i32>, i1 immarg, <16 x i1>, i32)

define <16 x i32> @vp_abs_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.abs.v16i32(<16 x i32> %va, i1 false, <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vp_abs_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.abs.v16i32(<16 x i32> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i32> %v
}

declare <2 x i64> @llvm.vp.abs.v2i64(<2 x i64>, i1 immarg, <2 x i1>, i32)

define <2 x i64> @vp_abs_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> %va, i1 false, <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vp_abs_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i64> %v
}

declare <4 x i64> @llvm.vp.abs.v4i64(<4 x i64>, i1 immarg, <4 x i1>, i32)

define <4 x i64> @vp_abs_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> %va, i1 false, <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vp_abs_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i64> %v
}

declare <8 x i64> @llvm.vp.abs.v8i64(<8 x i64>, i1 immarg, <8 x i1>, i32)

define <8 x i64> @vp_abs_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> %va, i1 false, <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vp_abs_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i64> %v
}

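; <15 x i64> is not a power of two; it is handled identically to <16 x i64>
; (presumably legalized to the containing power-of-two type), occupying a
; full m8 register group.
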
declare <15 x i64> @llvm.vp.abs.v15i64(<15 x i64>, i1 immarg, <15 x i1>, i32)

define <15 x i64> @vp_abs_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v15i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <15 x i64> @llvm.vp.abs.v15i64(<15 x i64> %va, i1 false, <15 x i1> %m, i32 %evl)
  ret <15 x i64> %v
}

define <15 x i64> @vp_abs_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v15i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <15 x i64> @llvm.vp.abs.v15i64(<15 x i64> %va, i1 false, <15 x i1> splat (i1 true), i32 %evl)
  ret <15 x i64> %v
}

declare <16 x i64> @llvm.vp.abs.v16i64(<16 x i64>, i1 immarg, <16 x i1>, i32)

define <16 x i64> @vp_abs_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> %va, i1 false, <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vp_abs_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v16i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i64> %v
}

declare <32 x i64> @llvm.vp.abs.v32i64(<32 x i64>, i1 immarg, <32 x i1>, i32)

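; A <32 x i64> vector needs two m8 register groups (v8-v15 and v16-v23), so
; the operation is split into two halves of 16 elements. The first half runs
; with EVL min(evl, 16); the second half's EVL is computed branchlessly as
; max(evl - 16, 0) via the sltu/addi/and sequence. In the masked version,
; vslidedown.vi extracts the upper 16 mask bits into v7 for the second half.
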
define <32 x i64> @vp_abs_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v32i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 16
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v7, v0, 2
; CHECK-NEXT:    mv a1, a0
; CHECK-NEXT:    bltu a0, a2, .LBB34_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:  .LBB34_2:
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v24, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v24, v0.t
; CHECK-NEXT:    addi a1, a0, -16
; CHECK-NEXT:    sltu a0, a0, a1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    and a0, a0, a1
; CHECK-NEXT:    vmv1r.v v0, v7
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v24, v16, 0, v0.t
; CHECK-NEXT:    vmax.vv v16, v16, v24, v0.t
; CHECK-NEXT:    ret
  %v = call <32 x i64> @llvm.vp.abs.v32i64(<32 x i64> %va, i1 false, <32 x i1> %m, i32 %evl)
  ret <32 x i64> %v
}

define <32 x i64> @vp_abs_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v32i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 16
; CHECK-NEXT:    mv a1, a0
; CHECK-NEXT:    bltu a0, a2, .LBB35_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:  .LBB35_2:
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v24, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v24
; CHECK-NEXT:    addi a1, a0, -16
; CHECK-NEXT:    sltu a0, a0, a1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    and a0, a0, a1
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v24, v16, 0
; CHECK-NEXT:    vmax.vv v16, v16, v24
; CHECK-NEXT:    ret
  %v = call <32 x i64> @llvm.vp.abs.v32i64(<32 x i64> %va, i1 false, <32 x i1> splat (i1 true), i32 %evl)
  ret <32 x i64> %v
}
