; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

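; Tests for the llvm.vp.copysign.* intrinsics. With Zvfh the f16 cases select
; vfsgnj.vv directly; with only Zvfhmin they are expanded to integer mask/or
; operations on the raw half bits.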
declare <vscale x 1 x half> @llvm.vp.copysign.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, i32)

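; In the ZVFHMIN expansion below, lui a1, 8 materializes 0x8000 (the f16 sign
; bit), which masks the sign out of %vb; addi a1, a1, -1 turns it into 0x7fff,
; which masks the magnitude out of %va; the final vor.vv merges the two.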
define <vscale x 1 x half> @vfsgnj_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv1f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv1f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vand.vx v9, v9, a1, v0.t
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    vor.vv v8, v8, v9, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 1 x half> @llvm.vp.copysign.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x half> %v
}

define <vscale x 1 x half> @vfsgnj_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv1f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv1f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vand.vx v9, v9, a1
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    vor.vv v8, v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 1 x half> @llvm.vp.copysign.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x half> %v
}

declare <vscale x 2 x half> @llvm.vp.copysign.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x half> @vfsgnj_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vand.vx v9, v9, a1, v0.t
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    vor.vv v8, v8, v9, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.copysign.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x half> %v
}

define <vscale x 2 x half> @vfsgnj_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vand.vx v9, v9, a1
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    vor.vv v8, v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.copysign.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x half> %v
}

declare <vscale x 4 x half> @llvm.vp.copysign.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x i1>, i32)

define <vscale x 4 x half> @vfsgnj_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vand.vx v9, v9, a1, v0.t
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    vor.vv v8, v8, v9, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 4 x half> @llvm.vp.copysign.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x half> %v
}

define <vscale x 4 x half> @vfsgnj_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv4f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv4f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vand.vx v9, v9, a1
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    vor.vv v8, v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 4 x half> @llvm.vp.copysign.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x half> %v
}

declare <vscale x 8 x half> @llvm.vp.copysign.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, i32)

define <vscale x 8 x half> @vfsgnj_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vand.vx v10, v10, a1, v0.t
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    vor.vv v8, v8, v10, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 8 x half> @llvm.vp.copysign.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x half> %v
}

define <vscale x 8 x half> @vfsgnj_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv8f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v10
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv8f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vand.vx v10, v10, a1
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    vor.vv v8, v8, v10
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 8 x half> @llvm.vp.copysign.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x half> %v
}

declare <vscale x 16 x half> @llvm.vp.copysign.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x i1>, i32)

define <vscale x 16 x half> @vfsgnj_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v12, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vand.vx v12, v12, a1, v0.t
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    vor.vv v8, v8, v12, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 16 x half> @llvm.vp.copysign.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x half> %v
}

define <vscale x 16 x half> @vfsgnj_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv16f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v12
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv16f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vand.vx v12, v12, a1
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    vor.vv v8, v8, v12
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 16 x half> @llvm.vp.copysign.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x half> %v
}

declare <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x i1>, i32)

define <vscale x 32 x half> @vfsgnj_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv32f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v16, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv32f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vand.vx v16, v16, a1, v0.t
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    vor.vv v8, v8, v16, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x half> %v
}

define <vscale x 32 x half> @vfsgnj_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv32f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v16
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv32f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vand.vx v16, v16, a1
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    vor.vv v8, v8, v16
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x half> %v
}

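; For f32 and f64 element types vfsgnj.vv is available in both configurations,
; so the ZVFH and ZVFHMIN runs share the common CHECK prefix below.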
declare <vscale x 1 x float> @llvm.vp.copysign.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32)

define <vscale x 1 x float> @vfsgnj_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x float> @llvm.vp.copysign.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x float> %v
}

define <vscale x 1 x float> @vfsgnj_vv_nxv1f32_unmasked(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv1f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x float> @llvm.vp.copysign.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x float> %v
}

declare <vscale x 2 x float> @llvm.vp.copysign.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @vfsgnj_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.vp.copysign.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %v
}

define <vscale x 2 x float> @vfsgnj_vv_nxv2f32_unmasked(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.vp.copysign.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x float> %v
}

declare <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, i32)

define <vscale x 4 x float> @vfsgnj_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x float> %v
}

define <vscale x 4 x float> @vfsgnj_vv_nxv4f32_unmasked(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv4f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x float> %v
}

declare <vscale x 8 x float> @llvm.vp.copysign.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x i1>, i32)

define <vscale x 8 x float> @vfsgnj_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x float> @llvm.vp.copysign.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x float> %v
}

define <vscale x 8 x float> @vfsgnj_vv_nxv8f32_unmasked(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv8f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x float> @llvm.vp.copysign.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x float> %v
}

declare <vscale x 16 x float> @llvm.vp.copysign.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x i1>, i32)

define <vscale x 16 x float> @vfsgnj_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x float> @llvm.vp.copysign.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x float> %v
}

define <vscale x 16 x float> @vfsgnj_vv_nxv16f32_unmasked(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv16f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x float> @llvm.vp.copysign.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x float> %v
}

declare <vscale x 1 x double> @llvm.vp.copysign.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i32)

define <vscale x 1 x double> @vfsgnj_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x double> @llvm.vp.copysign.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x double> %v
}

define <vscale x 1 x double> @vfsgnj_vv_nxv1f64_unmasked(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv1f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x double> @llvm.vp.copysign.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x double> %v
}

declare <vscale x 2 x double> @llvm.vp.copysign.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x double> @vfsgnj_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x double> @llvm.vp.copysign.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x double> %v
}

define <vscale x 2 x double> @vfsgnj_vv_nxv2f64_unmasked(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x double> @llvm.vp.copysign.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x double> %v
}

declare <vscale x 4 x double> @llvm.vp.copysign.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x i1>, i32)

define <vscale x 4 x double> @vfsgnj_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x double> @llvm.vp.copysign.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x double> %v
}

define <vscale x 4 x double> @vfsgnj_vv_nxv4f64_unmasked(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv4f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x double> @llvm.vp.copysign.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x double> %v
}

declare <vscale x 8 x double> @llvm.vp.copysign.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32)

define <vscale x 8 x double> @vfsgnj_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x double> @llvm.vp.copysign.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @vfsgnj_vv_nxv8f64_unmasked(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv8f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x double> @llvm.vp.copysign.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x double> %v
}