; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+m -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
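; llvm.vp.abs is lowered without a dedicated abs instruction: the checks below
; show a vrsub.vi (0 - x) followed by vmax.vv of the original and negated
; values, with the mask applied through v0.t in the masked variants.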

declare <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8>, i1 immarg, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @vp_abs_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vp_abs_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

declare <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8>, i1 immarg, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vp_abs_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vp_abs_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

declare <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8>, i1 immarg, <vscale x 4 x i1>, i32)

define <vscale x 4 x i8> @vp_abs_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vp_abs_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

declare <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8>, i1 immarg, <vscale x 8 x i1>, i32)

define <vscale x 8 x i8> @vp_abs_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vp_abs_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

declare <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8>, i1 immarg, <vscale x 16 x i1>, i32)

define <vscale x 16 x i8> @vp_abs_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vp_abs_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

declare <vscale x 32 x i8> @llvm.vp.abs.nxv32i8(<vscale x 32 x i8>, i1 immarg, <vscale x 32 x i1>, i32)

define <vscale x 32 x i8> @vp_abs_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.abs.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vp_abs_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.abs.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

declare <vscale x 64 x i8> @llvm.vp.abs.nxv64i8(<vscale x 64 x i8>, i1 immarg, <vscale x 64 x i1>, i32)

define <vscale x 64 x i8> @vp_abs_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.abs.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vp_abs_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.abs.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

declare <vscale x 1 x i16> @llvm.vp.abs.nxv1i16(<vscale x 1 x i16>, i1 immarg, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vp_abs_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.abs.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vp_abs_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.abs.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

declare <vscale x 2 x i16> @llvm.vp.abs.nxv2i16(<vscale x 2 x i16>, i1 immarg, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vp_abs_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.abs.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vp_abs_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.abs.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

declare <vscale x 4 x i16> @llvm.vp.abs.nxv4i16(<vscale x 4 x i16>, i1 immarg, <vscale x 4 x i1>, i32)

define <vscale x 4 x i16> @vp_abs_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.abs.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vp_abs_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.abs.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

declare <vscale x 8 x i16> @llvm.vp.abs.nxv8i16(<vscale x 8 x i16>, i1 immarg, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @vp_abs_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.abs.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vp_abs_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.abs.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

declare <vscale x 16 x i16> @llvm.vp.abs.nxv16i16(<vscale x 16 x i16>, i1 immarg, <vscale x 16 x i1>, i32)

define <vscale x 16 x i16> @vp_abs_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.abs.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vp_abs_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.abs.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

declare <vscale x 32 x i16> @llvm.vp.abs.nxv32i16(<vscale x 32 x i16>, i1 immarg, <vscale x 32 x i1>, i32)

define <vscale x 32 x i16> @vp_abs_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.abs.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vp_abs_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.abs.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}

declare <vscale x 1 x i32> @llvm.vp.abs.nxv1i32(<vscale x 1 x i32>, i1 immarg, <vscale x 1 x i1>, i32)

define <vscale x 1 x i32> @vp_abs_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.abs.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vp_abs_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.abs.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}

declare <vscale x 2 x i32> @llvm.vp.abs.nxv2i32(<vscale x 2 x i32>, i1 immarg, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vp_abs_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.abs.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vp_abs_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.abs.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}

declare <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32>, i1 immarg, <vscale x 4 x i1>, i32)

define <vscale x 4 x i32> @vp_abs_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vp_abs_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}

declare <vscale x 8 x i32> @llvm.vp.abs.nxv8i32(<vscale x 8 x i32>, i1 immarg, <vscale x 8 x i1>, i32)

define <vscale x 8 x i32> @vp_abs_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.abs.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vp_abs_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.abs.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}

declare <vscale x 16 x i32> @llvm.vp.abs.nxv16i32(<vscale x 16 x i32>, i1 immarg, <vscale x 16 x i1>, i32)

define <vscale x 16 x i32> @vp_abs_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.abs.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vp_abs_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.abs.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}

declare <vscale x 1 x i64> @llvm.vp.abs.nxv1i64(<vscale x 1 x i64>, i1 immarg, <vscale x 1 x i1>, i32)

define <vscale x 1 x i64> @vp_abs_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.abs.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vp_abs_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv1i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.abs.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

declare <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64>, i1 immarg, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vp_abs_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vp_abs_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

declare <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64>, i1 immarg, <vscale x 4 x i1>, i32)

define <vscale x 4 x i64> @vp_abs_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vp_abs_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

declare <vscale x 7 x i64> @llvm.vp.abs.nxv7i64(<vscale x 7 x i64>, i1 immarg, <vscale x 7 x i1>, i32)

define <vscale x 7 x i64> @vp_abs_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv7i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 7 x i64> @llvm.vp.abs.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> %m, i32 %evl)
  ret <vscale x 7 x i64> %v
}

define <vscale x 7 x i64> @vp_abs_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv7i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 7 x i64> @llvm.vp.abs.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 7 x i64> %v
}

declare <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64>, i1 immarg, <vscale x 8 x i1>, i32)

define <vscale x 8 x i64> @vp_abs_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vp_abs_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}

declare <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64>, i1 immarg, <vscale x 16 x i1>, i32)

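; nxv16i64 does not fit in a single LMUL=8 register group, so the checks below
; show the operation split into two halves: the EVL for the high half is
; clamped with sub/sltu, the mask is shifted down with vslidedown.vx, and the
; masked variant spills and reloads a register group through the stack.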
define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 4
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 3
; CHECK-NEXT:    sub a3, a0, a1
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    sltu a2, a0, a3
; CHECK-NEXT:    addi a2, a2, -1
; CHECK-NEXT:    and a2, a2, a3
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v16, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v16, v8, v0.t
; CHECK-NEXT:    addi a2, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT:    bltu a0, a1, .LBB46_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB46_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i64> %v
}

define <vscale x 16 x i64> @vp_abs_nxv16i64_unmasked(<vscale x 16 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv16i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v24, v16, 0
; CHECK-NEXT:    vmax.vv v16, v16, v24
; CHECK-NEXT:    bltu a0, a1, .LBB47_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB47_2:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v24, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v24
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i64> %v
}