xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll (revision 9122c5235ec85ce0c0ad337e862b006e7b349d84)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zvfh,+v -target-abi=ilp32d \
3; RUN:   -verify-machineinstrs < %s | FileCheck %s
4; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zvfh,+v -target-abi=lp64d \
5; RUN:   -verify-machineinstrs < %s | FileCheck %s
6
7declare <2 x half> @llvm.vp.copysign.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)
8
; Masked vp.copysign on <2 x half>: expected to lower to one masked
; vfsgnj.vv (v0.t) under vsetvli e16/mf4 with AVL taken from %evl (a0).
9define <2 x half> @vfsgnj_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
10; CHECK-LABEL: vfsgnj_vv_v2f16:
11; CHECK:       # %bb.0:
12; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
13; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
14; CHECK-NEXT:    ret
15  %v = call <2 x half> @llvm.vp.copysign.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
16  ret <2 x half> %v
17}
18
; Same op with an all-ones (splat true) mask: the mask operand folds away
; and an unmasked vfsgnj.vv is emitted at e16/mf4.
19define <2 x half> @vfsgnj_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 zeroext %evl) {
20; CHECK-LABEL: vfsgnj_vv_v2f16_unmasked:
21; CHECK:       # %bb.0:
22; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
23; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
24; CHECK-NEXT:    ret
25  %v = call <2 x half> @llvm.vp.copysign.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> splat (i1 true), i32 %evl)
26  ret <2 x half> %v
27}
28
29declare <4 x half> @llvm.vp.copysign.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
30
; Masked vp.copysign on <4 x half>: single masked vfsgnj.vv at e16/mf2.
31define <4 x half> @vfsgnj_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
32; CHECK-LABEL: vfsgnj_vv_v4f16:
33; CHECK:       # %bb.0:
34; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
35; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
36; CHECK-NEXT:    ret
37  %v = call <4 x half> @llvm.vp.copysign.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
38  ret <4 x half> %v
39}
40
; All-ones mask variant on <4 x half>: unmasked vfsgnj.vv at e16/mf2.
41define <4 x half> @vfsgnj_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 zeroext %evl) {
42; CHECK-LABEL: vfsgnj_vv_v4f16_unmasked:
43; CHECK:       # %bb.0:
44; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
45; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
46; CHECK-NEXT:    ret
47  %v = call <4 x half> @llvm.vp.copysign.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> splat (i1 true), i32 %evl)
48  ret <4 x half> %v
49}
50
51declare <8 x half> @llvm.vp.copysign.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)
52
; Masked vp.copysign on <8 x half>: single masked vfsgnj.vv at e16/m1.
53define <8 x half> @vfsgnj_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
54; CHECK-LABEL: vfsgnj_vv_v8f16:
55; CHECK:       # %bb.0:
56; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
57; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
58; CHECK-NEXT:    ret
59  %v = call <8 x half> @llvm.vp.copysign.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
60  ret <8 x half> %v
61}
62
; All-ones mask variant on <8 x half>: unmasked vfsgnj.vv at e16/m1.
63define <8 x half> @vfsgnj_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 zeroext %evl) {
64; CHECK-LABEL: vfsgnj_vv_v8f16_unmasked:
65; CHECK:       # %bb.0:
66; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
67; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
68; CHECK-NEXT:    ret
69  %v = call <8 x half> @llvm.vp.copysign.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> splat (i1 true), i32 %evl)
70  ret <8 x half> %v
71}
72
73declare <16 x half> @llvm.vp.copysign.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32)
74
; Masked vp.copysign on <16 x half>: masked vfsgnj.vv at e16/m2 (operands
; now occupy register groups: v8..v9 and v10..v11).
75define <16 x half> @vfsgnj_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
76; CHECK-LABEL: vfsgnj_vv_v16f16:
77; CHECK:       # %bb.0:
78; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
79; CHECK-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
80; CHECK-NEXT:    ret
81  %v = call <16 x half> @llvm.vp.copysign.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
82  ret <16 x half> %v
83}
84
; All-ones mask variant on <16 x half>: unmasked vfsgnj.vv at e16/m2.
85define <16 x half> @vfsgnj_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i32 zeroext %evl) {
86; CHECK-LABEL: vfsgnj_vv_v16f16_unmasked:
87; CHECK:       # %bb.0:
88; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
89; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
90; CHECK-NEXT:    ret
91  %v = call <16 x half> @llvm.vp.copysign.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> splat (i1 true), i32 %evl)
92  ret <16 x half> %v
93}
94
95declare <2 x float> @llvm.vp.copysign.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32)
96
; Masked vp.copysign on <2 x float>: masked vfsgnj.vv at e32/mf2.
97define <2 x float> @vfsgnj_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
98; CHECK-LABEL: vfsgnj_vv_v2f32:
99; CHECK:       # %bb.0:
100; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
101; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
102; CHECK-NEXT:    ret
103  %v = call <2 x float> @llvm.vp.copysign.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
104  ret <2 x float> %v
105}
106
; All-ones mask variant on <2 x float>: unmasked vfsgnj.vv at e32/mf2.
107define <2 x float> @vfsgnj_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i32 zeroext %evl) {
108; CHECK-LABEL: vfsgnj_vv_v2f32_unmasked:
109; CHECK:       # %bb.0:
110; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
111; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
112; CHECK-NEXT:    ret
113  %v = call <2 x float> @llvm.vp.copysign.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> splat (i1 true), i32 %evl)
114  ret <2 x float> %v
115}
116
117declare <4 x float> @llvm.vp.copysign.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)
118
; Masked vp.copysign on <4 x float>: masked vfsgnj.vv at e32/m1.
119define <4 x float> @vfsgnj_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
120; CHECK-LABEL: vfsgnj_vv_v4f32:
121; CHECK:       # %bb.0:
122; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
123; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
124; CHECK-NEXT:    ret
125  %v = call <4 x float> @llvm.vp.copysign.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
126  ret <4 x float> %v
127}
128
; All-ones mask variant on <4 x float>: unmasked vfsgnj.vv at e32/m1.
129define <4 x float> @vfsgnj_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i32 zeroext %evl) {
130; CHECK-LABEL: vfsgnj_vv_v4f32_unmasked:
131; CHECK:       # %bb.0:
132; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
133; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
134; CHECK-NEXT:    ret
135  %v = call <4 x float> @llvm.vp.copysign.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> splat (i1 true), i32 %evl)
136  ret <4 x float> %v
137}
138
139declare <8 x float> @llvm.vp.copysign.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32)
140
; Masked vp.copysign on <8 x float>: masked vfsgnj.vv at e32/m2.
141define <8 x float> @vfsgnj_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
142; CHECK-LABEL: vfsgnj_vv_v8f32:
143; CHECK:       # %bb.0:
144; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
145; CHECK-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
146; CHECK-NEXT:    ret
147  %v = call <8 x float> @llvm.vp.copysign.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
148  ret <8 x float> %v
149}
150
; All-ones mask variant on <8 x float>: unmasked vfsgnj.vv at e32/m2.
151define <8 x float> @vfsgnj_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i32 zeroext %evl) {
152; CHECK-LABEL: vfsgnj_vv_v8f32_unmasked:
153; CHECK:       # %bb.0:
154; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
155; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
156; CHECK-NEXT:    ret
157  %v = call <8 x float> @llvm.vp.copysign.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> splat (i1 true), i32 %evl)
158  ret <8 x float> %v
159}
160
161declare <16 x float> @llvm.vp.copysign.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32)
162
; Masked vp.copysign on <16 x float>: masked vfsgnj.vv at e32/m4
; (operands in 4-register groups v8..v11 and v12..v15).
163define <16 x float> @vfsgnj_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
164; CHECK-LABEL: vfsgnj_vv_v16f32:
165; CHECK:       # %bb.0:
166; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
167; CHECK-NEXT:    vfsgnj.vv v8, v8, v12, v0.t
168; CHECK-NEXT:    ret
169  %v = call <16 x float> @llvm.vp.copysign.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
170  ret <16 x float> %v
171}
172
; All-ones mask variant on <16 x float>: unmasked vfsgnj.vv at e32/m4.
173define <16 x float> @vfsgnj_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb, i32 zeroext %evl) {
174; CHECK-LABEL: vfsgnj_vv_v16f32_unmasked:
175; CHECK:       # %bb.0:
176; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
177; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
178; CHECK-NEXT:    ret
179  %v = call <16 x float> @llvm.vp.copysign.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> splat (i1 true), i32 %evl)
180  ret <16 x float> %v
181}
182
183declare <2 x double> @llvm.vp.copysign.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32)
184
; Masked vp.copysign on <2 x double>: masked vfsgnj.vv at e64/m1.
185define <2 x double> @vfsgnj_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
186; CHECK-LABEL: vfsgnj_vv_v2f64:
187; CHECK:       # %bb.0:
188; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
189; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
190; CHECK-NEXT:    ret
191  %v = call <2 x double> @llvm.vp.copysign.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
192  ret <2 x double> %v
193}
194
; All-ones mask variant on <2 x double>: unmasked vfsgnj.vv at e64/m1.
195define <2 x double> @vfsgnj_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, i32 zeroext %evl) {
196; CHECK-LABEL: vfsgnj_vv_v2f64_unmasked:
197; CHECK:       # %bb.0:
198; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
199; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
200; CHECK-NEXT:    ret
201  %v = call <2 x double> @llvm.vp.copysign.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> splat (i1 true), i32 %evl)
202  ret <2 x double> %v
203}
204
205declare <4 x double> @llvm.vp.copysign.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32)
206
; Masked vp.copysign on <4 x double>: masked vfsgnj.vv at e64/m2.
207define <4 x double> @vfsgnj_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
208; CHECK-LABEL: vfsgnj_vv_v4f64:
209; CHECK:       # %bb.0:
210; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
211; CHECK-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
212; CHECK-NEXT:    ret
213  %v = call <4 x double> @llvm.vp.copysign.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
214  ret <4 x double> %v
215}
216
; All-ones mask variant on <4 x double>: unmasked vfsgnj.vv at e64/m2.
217define <4 x double> @vfsgnj_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, i32 zeroext %evl) {
218; CHECK-LABEL: vfsgnj_vv_v4f64_unmasked:
219; CHECK:       # %bb.0:
220; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
221; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
222; CHECK-NEXT:    ret
223  %v = call <4 x double> @llvm.vp.copysign.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> splat (i1 true), i32 %evl)
224  ret <4 x double> %v
225}
226
227declare <8 x double> @llvm.vp.copysign.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32)
228
; Masked vp.copysign on <8 x double>: masked vfsgnj.vv at e64/m4.
229define <8 x double> @vfsgnj_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
230; CHECK-LABEL: vfsgnj_vv_v8f64:
231; CHECK:       # %bb.0:
232; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
233; CHECK-NEXT:    vfsgnj.vv v8, v8, v12, v0.t
234; CHECK-NEXT:    ret
235  %v = call <8 x double> @llvm.vp.copysign.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
236  ret <8 x double> %v
237}
238
; All-ones mask variant on <8 x double>: unmasked vfsgnj.vv at e64/m4.
239define <8 x double> @vfsgnj_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, i32 zeroext %evl) {
240; CHECK-LABEL: vfsgnj_vv_v8f64_unmasked:
241; CHECK:       # %bb.0:
242; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
243; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
244; CHECK-NEXT:    ret
245  %v = call <8 x double> @llvm.vp.copysign.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> splat (i1 true), i32 %evl)
246  ret <8 x double> %v
247}
248
249declare <15 x double> @llvm.vp.copysign.v15f64(<15 x double>, <15 x double>, <15 x i1>, i32)
250
; Non-power-of-two element count: <15 x double> still fits one m8 register
; group, so a single masked vfsgnj.vv at e64/m8 suffices (no split).
251define <15 x double> @vfsgnj_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) {
252; CHECK-LABEL: vfsgnj_vv_v15f64:
253; CHECK:       # %bb.0:
254; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
255; CHECK-NEXT:    vfsgnj.vv v8, v8, v16, v0.t
256; CHECK-NEXT:    ret
257  %v = call <15 x double> @llvm.vp.copysign.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
258  ret <15 x double> %v
259}
260
; All-ones mask variant on <15 x double>: unmasked vfsgnj.vv at e64/m8.
261define <15 x double> @vfsgnj_vv_v15f64_unmasked(<15 x double> %va, <15 x double> %vb, i32 zeroext %evl) {
262; CHECK-LABEL: vfsgnj_vv_v15f64_unmasked:
263; CHECK:       # %bb.0:
264; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
265; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
266; CHECK-NEXT:    ret
267  %v = call <15 x double> @llvm.vp.copysign.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> splat (i1 true), i32 %evl)
268  ret <15 x double> %v
269}
270
271declare <16 x double> @llvm.vp.copysign.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32)
272
; Masked vp.copysign on <16 x double>: largest single-group case; one
; masked vfsgnj.vv at e64/m8 (v8..v15 and v16..v23).
273define <16 x double> @vfsgnj_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) {
274; CHECK-LABEL: vfsgnj_vv_v16f64:
275; CHECK:       # %bb.0:
276; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
277; CHECK-NEXT:    vfsgnj.vv v8, v8, v16, v0.t
278; CHECK-NEXT:    ret
279  %v = call <16 x double> @llvm.vp.copysign.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
280  ret <16 x double> %v
281}
282
; All-ones mask variant on <16 x double>: unmasked vfsgnj.vv at e64/m8.
283define <16 x double> @vfsgnj_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %vb, i32 zeroext %evl) {
284; CHECK-LABEL: vfsgnj_vv_v16f64_unmasked:
285; CHECK:       # %bb.0:
286; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
287; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
288; CHECK-NEXT:    ret
289  %v = call <16 x double> @llvm.vp.copysign.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> splat (i1 true), i32 %evl)
290  ret <16 x double> %v
291}
292
293declare <32 x double> @llvm.vp.copysign.v32f64(<32 x double>, <32 x double>, <32 x i1>, i32)
294
; <32 x double> exceeds one m8 register group, so the operation is split
; into two 16-element halves. The CHECK body shows: %vb's high half arrives
; on the stack (loaded via vle64.v from a0/a0+128) and v16 is spilled to
; make room; the mask is split with vslidedown.vi (low bits in v0, high
; bits in v24); the first vfsgnj.vv runs with EVL min(%evl, 16), the second
; with the branch-free remainder max(%evl - 16, 0) computed by the
; addi/sltu/addi/and sequence.
295define <32 x double> @vfsgnj_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) {
296; CHECK-LABEL: vfsgnj_vv_v32f64:
297; CHECK:       # %bb.0:
298; CHECK-NEXT:    addi sp, sp, -16
299; CHECK-NEXT:    .cfi_def_cfa_offset 16
300; CHECK-NEXT:    csrr a1, vlenb
301; CHECK-NEXT:    slli a1, a1, 4
302; CHECK-NEXT:    sub sp, sp, a1
303; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
304; CHECK-NEXT:    csrr a1, vlenb
305; CHECK-NEXT:    slli a1, a1, 3
306; CHECK-NEXT:    add a1, sp, a1
307; CHECK-NEXT:    addi a1, a1, 16
308; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
309; CHECK-NEXT:    addi a1, a0, 128
310; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
311; CHECK-NEXT:    vle64.v v16, (a1)
312; CHECK-NEXT:    addi a1, sp, 16
313; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
314; CHECK-NEXT:    vle64.v v16, (a0)
315; CHECK-NEXT:    li a1, 16
316; CHECK-NEXT:    mv a0, a2
317; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
318; CHECK-NEXT:    vslidedown.vi v24, v0, 2
319; CHECK-NEXT:    bltu a2, a1, .LBB26_2
320; CHECK-NEXT:  # %bb.1:
321; CHECK-NEXT:    li a0, 16
322; CHECK-NEXT:  .LBB26_2:
323; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
324; CHECK-NEXT:    vfsgnj.vv v8, v8, v16, v0.t
325; CHECK-NEXT:    addi a0, a2, -16
326; CHECK-NEXT:    sltu a1, a2, a0
327; CHECK-NEXT:    addi a1, a1, -1
328; CHECK-NEXT:    and a0, a1, a0
329; CHECK-NEXT:    vmv1r.v v0, v24
330; CHECK-NEXT:    csrr a1, vlenb
331; CHECK-NEXT:    slli a1, a1, 3
332; CHECK-NEXT:    add a1, sp, a1
333; CHECK-NEXT:    addi a1, a1, 16
334; CHECK-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
335; CHECK-NEXT:    addi a1, sp, 16
336; CHECK-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
337; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
338; CHECK-NEXT:    vfsgnj.vv v16, v16, v24, v0.t
339; CHECK-NEXT:    csrr a0, vlenb
340; CHECK-NEXT:    slli a0, a0, 4
341; CHECK-NEXT:    add sp, sp, a0
342; CHECK-NEXT:    .cfi_def_cfa sp, 16
343; CHECK-NEXT:    addi sp, sp, 16
344; CHECK-NEXT:    .cfi_def_cfa_offset 0
345; CHECK-NEXT:    ret
346  %v = call <32 x double> @llvm.vp.copysign.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
347  ret <32 x double> %v
348}
349
; Unmasked <32 x double> split: with no mask to carry, no spills are
; needed — both halves of %vb are loaded from the stack (a0, a0+128) and
; two unmasked vfsgnj.vv ops run, the first with EVL min(%evl, 16) and the
; second with the branch-free remainder max(%evl - 16, 0).
350define <32 x double> @vfsgnj_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %vb, i32 zeroext %evl) {
351; CHECK-LABEL: vfsgnj_vv_v32f64_unmasked:
352; CHECK:       # %bb.0:
353; CHECK-NEXT:    addi a1, a0, 128
354; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
355; CHECK-NEXT:    vle64.v v24, (a1)
356; CHECK-NEXT:    vle64.v v0, (a0)
357; CHECK-NEXT:    li a1, 16
358; CHECK-NEXT:    mv a0, a2
359; CHECK-NEXT:    bltu a2, a1, .LBB27_2
360; CHECK-NEXT:  # %bb.1:
361; CHECK-NEXT:    li a0, 16
362; CHECK-NEXT:  .LBB27_2:
363; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
364; CHECK-NEXT:    vfsgnj.vv v8, v8, v0
365; CHECK-NEXT:    addi a0, a2, -16
366; CHECK-NEXT:    sltu a1, a2, a0
367; CHECK-NEXT:    addi a1, a1, -1
368; CHECK-NEXT:    and a0, a1, a0
369; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
370; CHECK-NEXT:    vfsgnj.vv v16, v16, v24
371; CHECK-NEXT:    ret
372  %v = call <32 x double> @llvm.vp.copysign.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> splat (i1 true), i32 %evl)
373  ret <32 x double> %v
374}
375