; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

declare <2 x half> @llvm.vp.fneg.v2f16(<2 x half>, <2 x i1>, i32)

define <2 x half> @vfneg_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_v2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_v2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
  ret <2 x half> %v
}

define <2 x half> @vfneg_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_v2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_v2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> %va, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x half> %v
}

declare <4 x half> @llvm.vp.fneg.v4f16(<4 x half>, <4 x i1>, i32)

define <4 x half> @vfneg_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_v4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_v4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
  ret <4 x half> %v
}

define <4 x half> @vfneg_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_v4f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_v4f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> %va, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x half> %v
}

declare <8 x half> @llvm.vp.fneg.v8f16(<8 x half>, <8 x i1>, i32)

define <8 x half> @vfneg_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_v8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_v8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
  ret <8 x half> %v
}

define <8 x half> @vfneg_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_v8f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_v8f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> %va, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x half> %v
}

declare <16 x half> @llvm.vp.fneg.v16f16(<16 x half>, <16 x i1>, i32)

define <16 x half> @vfneg_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_v16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_v16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
  ret <16 x half> %v
}

define <16 x half> @vfneg_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_v16f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_v16f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> %va, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x half> %v
}

declare <2 x float> @llvm.vp.fneg.v2f32(<2 x float>, <2 x i1>, i32)

define <2 x float> @vfneg_vv_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
  ret <2 x float> %v
}

define <2 x float> @vfneg_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %va, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x float> %v
}

declare <4 x float> @llvm.vp.fneg.v4f32(<4 x float>, <4 x i1>, i32)

define <4 x float> @vfneg_vv_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
  ret <4 x float> %v
}

define <4 x float> @vfneg_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v4f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %va, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x float> %v
}

declare <8 x float> @llvm.vp.fneg.v8f32(<8 x float>, <8 x i1>, i32)

define <8 x float> @vfneg_vv_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
  ret <8 x float> %v
}

define <8 x float> @vfneg_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v8f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %va, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x float> %v
}

declare <16 x float> @llvm.vp.fneg.v16f32(<16 x float>, <16 x i1>, i32)

define <16 x float> @vfneg_vv_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
  ret <16 x float> %v
}

define <16 x float> @vfneg_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v16f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %va, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x float> %v
}

declare <2 x double> @llvm.vp.fneg.v2f64(<2 x double>, <2 x i1>, i32)

define <2 x double> @vfneg_vv_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
  ret <2 x double> %v
}

define <2 x double> @vfneg_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x double> %v
}

declare <4 x double> @llvm.vp.fneg.v4f64(<4 x double>, <4 x i1>, i32)

define <4 x double> @vfneg_vv_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
  ret <4 x double> %v
}

define <4 x double> @vfneg_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v4f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x double> %v
}

declare <8 x double> @llvm.vp.fneg.v8f64(<8 x double>, <8 x i1>, i32)

define <8 x double> @vfneg_vv_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
  ret <8 x double> %v
}

define <8 x double> @vfneg_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v8f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x double> %v
}

declare <15 x double> @llvm.vp.fneg.v15f64(<15 x double>, <15 x i1>, i32)

define <15 x double> @vfneg_vv_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v15f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <15 x double> @llvm.vp.fneg.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
  ret <15 x double> %v
}

define <15 x double> @vfneg_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v15f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <15 x double> @llvm.vp.fneg.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
  ret <15 x double> %v
}

declare <16 x double> @llvm.vp.fneg.v16f64(<16 x double>, <16 x i1>, i32)

define <16 x double> @vfneg_vv_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x double> @llvm.vp.fneg.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
  ret <16 x double> %v
}

define <16 x double> @vfneg_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v16f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <16 x double> @llvm.vp.fneg.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x double> %v
}

declare <32 x double> @llvm.vp.fneg.v32f64(<32 x double>, <32 x i1>, i32)

define <32 x double> @vfneg_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v32f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 16
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v24, v0, 2
; CHECK-NEXT:    mv a1, a0
; CHECK-NEXT:    bltu a0, a2, .LBB26_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:  .LBB26_2:
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    addi a1, a0, -16
; CHECK-NEXT:    sltu a0, a0, a1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    and a0, a0, a1
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v16, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <32 x double> @llvm.vp.fneg.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
  ret <32 x double> %v
}

define <32 x double> @vfneg_vv_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v32f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 16
; CHECK-NEXT:    mv a1, a0
; CHECK-NEXT:    bltu a0, a2, .LBB27_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:  .LBB27_2:
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    addi a1, a0, -16
; CHECK-NEXT:    sltu a0, a0, a1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    and a0, a0, a1
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v16, v16
; CHECK-NEXT:    ret
  %v = call <32 x double> @llvm.vp.fneg.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
  ret <32 x double> %v
}