; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
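
; This file exercises llvm.vp.fabs.* on fixed-length vectors. Each element
; type is tested both masked and unmasked (the unmasked form passes an
; all-true splat mask and drops the v0.t operand). With +zvfh, f16 fabs
; selects vfabs.v directly; with +zvfhmin there is no f16 arithmetic, so
; fabs is instead lowered to an integer vand.vx that clears the sign bit:
; "lui a1, 8" materializes 0x8000 and "addi a1, a1, -1" turns it into the
; 0x7fff mask.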

declare <2 x half> @llvm.vp.fabs.v2f16(<2 x half>, <2 x i1>, i32)

define <2 x half> @vfabs_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_v2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_v2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <2 x half> @llvm.vp.fabs.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
  ret <2 x half> %v
}

define <2 x half> @vfabs_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_v2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_v2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <2 x half> @llvm.vp.fabs.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x half> %v
}

declare <4 x half> @llvm.vp.fabs.v4f16(<4 x half>, <4 x i1>, i32)

define <4 x half> @vfabs_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_v4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_v4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <4 x half> @llvm.vp.fabs.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
  ret <4 x half> %v
}

define <4 x half> @vfabs_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_v4f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_v4f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <4 x half> @llvm.vp.fabs.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x half> %v
}

declare <8 x half> @llvm.vp.fabs.v8f16(<8 x half>, <8 x i1>, i32)

define <8 x half> @vfabs_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_v8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_v8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <8 x half> @llvm.vp.fabs.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
  ret <8 x half> %v
}

define <8 x half> @vfabs_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_v8f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_v8f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <8 x half> @llvm.vp.fabs.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x half> %v
}

declare <16 x half> @llvm.vp.fabs.v16f16(<16 x half>, <16 x i1>, i32)

define <16 x half> @vfabs_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_v16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_v16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <16 x half> @llvm.vp.fabs.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
  ret <16 x half> %v
}

define <16 x half> @vfabs_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_v16f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_v16f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <16 x half> @llvm.vp.fabs.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x half> %v
}

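; For f32 and f64 element types, vfabs.v is available under both Zvfh and
; Zvfhmin, so all four RUN lines produce identical code and share the
; CHECK prefix below.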
declare <2 x float> @llvm.vp.fabs.v2f32(<2 x float>, <2 x i1>, i32)

define <2 x float> @vfabs_vv_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x float> @llvm.vp.fabs.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
  ret <2 x float> %v
}

define <2 x float> @vfabs_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <2 x float> @llvm.vp.fabs.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x float> %v
}

declare <4 x float> @llvm.vp.fabs.v4f32(<4 x float>, <4 x i1>, i32)

define <4 x float> @vfabs_vv_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x float> @llvm.vp.fabs.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
  ret <4 x float> %v
}

define <4 x float> @vfabs_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v4f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <4 x float> @llvm.vp.fabs.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x float> %v
}

declare <8 x float> @llvm.vp.fabs.v8f32(<8 x float>, <8 x i1>, i32)

define <8 x float> @vfabs_vv_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x float> @llvm.vp.fabs.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
  ret <8 x float> %v
}

define <8 x float> @vfabs_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v8f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <8 x float> @llvm.vp.fabs.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x float> %v
}

declare <16 x float> @llvm.vp.fabs.v16f32(<16 x float>, <16 x i1>, i32)

define <16 x float> @vfabs_vv_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x float> @llvm.vp.fabs.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
  ret <16 x float> %v
}

define <16 x float> @vfabs_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v16f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <16 x float> @llvm.vp.fabs.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x float> %v
}

declare <2 x double> @llvm.vp.fabs.v2f64(<2 x double>, <2 x i1>, i32)

define <2 x double> @vfabs_vv_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x double> @llvm.vp.fabs.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
  ret <2 x double> %v
}

define <2 x double> @vfabs_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <2 x double> @llvm.vp.fabs.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x double> %v
}

declare <4 x double> @llvm.vp.fabs.v4f64(<4 x double>, <4 x i1>, i32)

define <4 x double> @vfabs_vv_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x double> @llvm.vp.fabs.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
  ret <4 x double> %v
}

define <4 x double> @vfabs_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v4f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <4 x double> @llvm.vp.fabs.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x double> %v
}

declare <8 x double> @llvm.vp.fabs.v8f64(<8 x double>, <8 x i1>, i32)

define <8 x double> @vfabs_vv_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x double> @llvm.vp.fabs.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
  ret <8 x double> %v
}

define <8 x double> @vfabs_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v8f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <8 x double> @llvm.vp.fabs.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x double> %v
}

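; v15f64 is a non-power-of-two type. It still fits in a single m8 register
; group (16 x f64 at the minimum VLEN of 128 implied by +v), so it lowers
; to a single vsetvli/vfabs pair just like v16f64.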
declare <15 x double> @llvm.vp.fabs.v15f64(<15 x double>, <15 x i1>, i32)

define <15 x double> @vfabs_vv_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v15f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <15 x double> @llvm.vp.fabs.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
  ret <15 x double> %v
}

define <15 x double> @vfabs_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v15f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <15 x double> @llvm.vp.fabs.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
  ret <15 x double> %v
}

declare <16 x double> @llvm.vp.fabs.v16f64(<16 x double>, <16 x i1>, i32)

define <16 x double> @vfabs_vv_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x double> @llvm.vp.fabs.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
  ret <16 x double> %v
}

define <16 x double> @vfabs_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v16f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <16 x double> @llvm.vp.fabs.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x double> %v
}

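; v32f64 exceeds a single m8 register group, so the operation is split into
; two m8 halves. The first half runs on min(evl, 16) elements, computed with
; a branch; the second half's EVL is max(evl - 16, 0), computed branchlessly:
; sltu yields 1 exactly when evl - 16 wraps, the following addi turns that
; into an all-zeros or all-ones mask, and the and selects either 0 or
; evl - 16. In the masked form, the upper 16 mask bits are first slid down
; with vslidedown.vi (2 bytes at e8) and later moved into v0 for the second
; vfabs.v.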
declare <32 x double> @llvm.vp.fabs.v32f64(<32 x double>, <32 x i1>, i32)

define <32 x double> @vfabs_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v32f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 16
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v24, v0, 2
; CHECK-NEXT:    mv a1, a0
; CHECK-NEXT:    bltu a0, a2, .LBB26_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:  .LBB26_2:
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    addi a1, a0, -16
; CHECK-NEXT:    sltu a0, a0, a1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    and a0, a0, a1
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v16, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <32 x double> @llvm.vp.fabs.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
  ret <32 x double> %v
}

define <32 x double> @vfabs_vv_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v32f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 16
; CHECK-NEXT:    mv a1, a0
; CHECK-NEXT:    bltu a0, a2, .LBB27_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:  .LBB27_2:
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    addi a1, a0, -16
; CHECK-NEXT:    sltu a0, a0, a1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    and a0, a0, a1
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v16, v16
; CHECK-NEXT:    ret
  %v = call <32 x double> @llvm.vp.fabs.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
  ret <32 x double> %v
}
