xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll (revision 1cb599835ccf7ee8b2d1d5a7f3107e19a26fc6f5)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v \
3; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
4; RUN:     --check-prefixes=CHECK,ZVFH
5; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v \
6; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
7; RUN:     --check-prefixes=CHECK,ZVFH
8; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \
9; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
10; RUN:     --check-prefixes=CHECK,ZVFHMIN
11; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \
12; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
13; RUN:     --check-prefixes=CHECK,ZVFHMIN
14
; fabs on <vscale x 1 x bfloat> (mf4). There is no bf16 vfabs.v, so under every
; RUN config the sign bit is cleared with an integer vand.vx against 0x7fff
; (lui 8 => 0x8000; addi -1 => 0x7fff).
15define <vscale x 1 x bfloat> @nxv1bf16(<vscale x 1 x bfloat> %v) {
16; CHECK-LABEL: nxv1bf16:
17; CHECK:       # %bb.0:
18; CHECK-NEXT:    lui a0, 8
19; CHECK-NEXT:    addi a0, a0, -1
20; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
21; CHECK-NEXT:    vand.vx v8, v8, a0
22; CHECK-NEXT:    ret
23  %r = call <vscale x 1 x bfloat> @llvm.fabs.nxv1bf16(<vscale x 1 x bfloat> %v)
24  ret <vscale x 1 x bfloat> %r
25}
26
; Same bf16 sign-bit-masking lowering as nxv1bf16, at LMUL mf2.
27define <vscale x 2 x bfloat> @nxv2bf16(<vscale x 2 x bfloat> %v) {
28; CHECK-LABEL: nxv2bf16:
29; CHECK:       # %bb.0:
30; CHECK-NEXT:    lui a0, 8
31; CHECK-NEXT:    addi a0, a0, -1
32; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
33; CHECK-NEXT:    vand.vx v8, v8, a0
34; CHECK-NEXT:    ret
35  %r = call <vscale x 2 x bfloat> @llvm.fabs.nxv2bf16(<vscale x 2 x bfloat> %v)
36  ret <vscale x 2 x bfloat> %r
37}
38
; Same bf16 sign-bit-masking lowering as nxv1bf16, at LMUL m1.
39define <vscale x 4 x bfloat> @nxv4bf16(<vscale x 4 x bfloat> %v) {
40; CHECK-LABEL: nxv4bf16:
41; CHECK:       # %bb.0:
42; CHECK-NEXT:    lui a0, 8
43; CHECK-NEXT:    addi a0, a0, -1
44; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
45; CHECK-NEXT:    vand.vx v8, v8, a0
46; CHECK-NEXT:    ret
47  %r = call <vscale x 4 x bfloat> @llvm.fabs.nxv4bf16(<vscale x 4 x bfloat> %v)
48  ret <vscale x 4 x bfloat> %r
49}
50
; Same bf16 sign-bit-masking lowering as nxv1bf16, at LMUL m2.
51define <vscale x 8 x bfloat> @nxv8bf16(<vscale x 8 x bfloat> %v) {
52; CHECK-LABEL: nxv8bf16:
53; CHECK:       # %bb.0:
54; CHECK-NEXT:    lui a0, 8
55; CHECK-NEXT:    addi a0, a0, -1
56; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
57; CHECK-NEXT:    vand.vx v8, v8, a0
58; CHECK-NEXT:    ret
59  %r = call <vscale x 8 x bfloat> @llvm.fabs.nxv8bf16(<vscale x 8 x bfloat> %v)
60  ret <vscale x 8 x bfloat> %r
61}
62
; Same bf16 sign-bit-masking lowering as nxv1bf16, at LMUL m4.
63define <vscale x 16 x bfloat> @nxv16bf16(<vscale x 16 x bfloat> %v) {
64; CHECK-LABEL: nxv16bf16:
65; CHECK:       # %bb.0:
66; CHECK-NEXT:    lui a0, 8
67; CHECK-NEXT:    addi a0, a0, -1
68; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
69; CHECK-NEXT:    vand.vx v8, v8, a0
70; CHECK-NEXT:    ret
71  %r = call <vscale x 16 x bfloat> @llvm.fabs.nxv16bf16(<vscale x 16 x bfloat> %v)
72  ret <vscale x 16 x bfloat> %r
73}
74
; Same bf16 sign-bit-masking lowering as nxv1bf16, at the largest LMUL (m8).
75define <vscale x 32 x bfloat> @nxv32bf16(<vscale x 32 x bfloat> %v) {
76; CHECK-LABEL: nxv32bf16:
77; CHECK:       # %bb.0:
78; CHECK-NEXT:    lui a0, 8
79; CHECK-NEXT:    addi a0, a0, -1
80; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
81; CHECK-NEXT:    vand.vx v8, v8, a0
82; CHECK-NEXT:    ret
83  %r = call <vscale x 32 x bfloat> @llvm.fabs.nxv32bf16(<vscale x 32 x bfloat> %v)
84  ret <vscale x 32 x bfloat> %r
85}
86
87declare <vscale x 1 x half> @llvm.fabs.nxv1f16(<vscale x 1 x half>)
88
; f16 fabs at mf4. With Zvfh the single vfabs.v instruction is emitted;
; with only Zvfhmin (no vector f16 arithmetic) the sign bit is cleared via an
; integer vand.vx against 0x7fff instead.
89define <vscale x 1 x half> @vfabs_nxv1f16(<vscale x 1 x half> %v) {
90; ZVFH-LABEL: vfabs_nxv1f16:
91; ZVFH:       # %bb.0:
92; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
93; ZVFH-NEXT:    vfabs.v v8, v8
94; ZVFH-NEXT:    ret
95;
96; ZVFHMIN-LABEL: vfabs_nxv1f16:
97; ZVFHMIN:       # %bb.0:
98; ZVFHMIN-NEXT:    lui a0, 8
99; ZVFHMIN-NEXT:    addi a0, a0, -1
100; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
101; ZVFHMIN-NEXT:    vand.vx v8, v8, a0
102; ZVFHMIN-NEXT:    ret
103  %r = call <vscale x 1 x half> @llvm.fabs.nxv1f16(<vscale x 1 x half> %v)
104  ret <vscale x 1 x half> %r
105}
106
107declare <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half>)
108
; f16 fabs at mf2; Zvfh -> vfabs.v, Zvfhmin -> integer sign-bit mask.
109define <vscale x 2 x half> @vfabs_nxv2f16(<vscale x 2 x half> %v) {
110; ZVFH-LABEL: vfabs_nxv2f16:
111; ZVFH:       # %bb.0:
112; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
113; ZVFH-NEXT:    vfabs.v v8, v8
114; ZVFH-NEXT:    ret
115;
116; ZVFHMIN-LABEL: vfabs_nxv2f16:
117; ZVFHMIN:       # %bb.0:
118; ZVFHMIN-NEXT:    lui a0, 8
119; ZVFHMIN-NEXT:    addi a0, a0, -1
120; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
121; ZVFHMIN-NEXT:    vand.vx v8, v8, a0
122; ZVFHMIN-NEXT:    ret
123  %r = call <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half> %v)
124  ret <vscale x 2 x half> %r
125}
126
127declare <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half>)
128
; f16 fabs at m1; Zvfh -> vfabs.v, Zvfhmin -> integer sign-bit mask.
129define <vscale x 4 x half> @vfabs_nxv4f16(<vscale x 4 x half> %v) {
130; ZVFH-LABEL: vfabs_nxv4f16:
131; ZVFH:       # %bb.0:
132; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
133; ZVFH-NEXT:    vfabs.v v8, v8
134; ZVFH-NEXT:    ret
135;
136; ZVFHMIN-LABEL: vfabs_nxv4f16:
137; ZVFHMIN:       # %bb.0:
138; ZVFHMIN-NEXT:    lui a0, 8
139; ZVFHMIN-NEXT:    addi a0, a0, -1
140; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
141; ZVFHMIN-NEXT:    vand.vx v8, v8, a0
142; ZVFHMIN-NEXT:    ret
143  %r = call <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half> %v)
144  ret <vscale x 4 x half> %r
145}
146
147declare <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half>)
148
; f16 fabs at m2; Zvfh -> vfabs.v, Zvfhmin -> integer sign-bit mask.
149define <vscale x 8 x half> @vfabs_nxv8f16(<vscale x 8 x half> %v) {
150; ZVFH-LABEL: vfabs_nxv8f16:
151; ZVFH:       # %bb.0:
152; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
153; ZVFH-NEXT:    vfabs.v v8, v8
154; ZVFH-NEXT:    ret
155;
156; ZVFHMIN-LABEL: vfabs_nxv8f16:
157; ZVFHMIN:       # %bb.0:
158; ZVFHMIN-NEXT:    lui a0, 8
159; ZVFHMIN-NEXT:    addi a0, a0, -1
160; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
161; ZVFHMIN-NEXT:    vand.vx v8, v8, a0
162; ZVFHMIN-NEXT:    ret
163  %r = call <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half> %v)
164  ret <vscale x 8 x half> %r
165}
166
167declare <vscale x 16 x half> @llvm.fabs.nxv16f16(<vscale x 16 x half>)
168
; f16 fabs at m4; Zvfh -> vfabs.v, Zvfhmin -> integer sign-bit mask.
169define <vscale x 16 x half> @vfabs_nxv16f16(<vscale x 16 x half> %v) {
170; ZVFH-LABEL: vfabs_nxv16f16:
171; ZVFH:       # %bb.0:
172; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
173; ZVFH-NEXT:    vfabs.v v8, v8
174; ZVFH-NEXT:    ret
175;
176; ZVFHMIN-LABEL: vfabs_nxv16f16:
177; ZVFHMIN:       # %bb.0:
178; ZVFHMIN-NEXT:    lui a0, 8
179; ZVFHMIN-NEXT:    addi a0, a0, -1
180; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
181; ZVFHMIN-NEXT:    vand.vx v8, v8, a0
182; ZVFHMIN-NEXT:    ret
183  %r = call <vscale x 16 x half> @llvm.fabs.nxv16f16(<vscale x 16 x half> %v)
184  ret <vscale x 16 x half> %r
185}
186
187declare <vscale x 32 x half> @llvm.fabs.nxv32f16(<vscale x 32 x half>)
188
; f16 fabs at the largest LMUL (m8); Zvfh -> vfabs.v, Zvfhmin -> integer
; sign-bit mask.
189define <vscale x 32 x half> @vfabs_nxv32f16(<vscale x 32 x half> %v) {
190; ZVFH-LABEL: vfabs_nxv32f16:
191; ZVFH:       # %bb.0:
192; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
193; ZVFH-NEXT:    vfabs.v v8, v8
194; ZVFH-NEXT:    ret
195;
196; ZVFHMIN-LABEL: vfabs_nxv32f16:
197; ZVFHMIN:       # %bb.0:
198; ZVFHMIN-NEXT:    lui a0, 8
199; ZVFHMIN-NEXT:    addi a0, a0, -1
200; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
201; ZVFHMIN-NEXT:    vand.vx v8, v8, a0
202; ZVFHMIN-NEXT:    ret
203  %r = call <vscale x 32 x half> @llvm.fabs.nxv32f16(<vscale x 32 x half> %v)
204  ret <vscale x 32 x half> %r
205}
206
207declare <vscale x 1 x float> @llvm.fabs.nxv1f32(<vscale x 1 x float>)
208
; f32 fabs at mf2. All RUN configs include V's f32 support, so every config
; emits the single vfabs.v instruction (shared CHECK prefix).
209define <vscale x 1 x float> @vfabs_nxv1f32(<vscale x 1 x float> %v) {
210; CHECK-LABEL: vfabs_nxv1f32:
211; CHECK:       # %bb.0:
212; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
213; CHECK-NEXT:    vfabs.v v8, v8
214; CHECK-NEXT:    ret
215  %r = call <vscale x 1 x float> @llvm.fabs.nxv1f32(<vscale x 1 x float> %v)
216  ret <vscale x 1 x float> %r
217}
218
219declare <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float>)
220
; f32 fabs at m1 -> single vfabs.v under all configs.
221define <vscale x 2 x float> @vfabs_nxv2f32(<vscale x 2 x float> %v) {
222; CHECK-LABEL: vfabs_nxv2f32:
223; CHECK:       # %bb.0:
224; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
225; CHECK-NEXT:    vfabs.v v8, v8
226; CHECK-NEXT:    ret
227  %r = call <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float> %v)
228  ret <vscale x 2 x float> %r
229}
230
231declare <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float>)
232
; f32 fabs at m2 -> single vfabs.v under all configs.
233define <vscale x 4 x float> @vfabs_nxv4f32(<vscale x 4 x float> %v) {
234; CHECK-LABEL: vfabs_nxv4f32:
235; CHECK:       # %bb.0:
236; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
237; CHECK-NEXT:    vfabs.v v8, v8
238; CHECK-NEXT:    ret
239  %r = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> %v)
240  ret <vscale x 4 x float> %r
241}
242
243declare <vscale x 8 x float> @llvm.fabs.nxv8f32(<vscale x 8 x float>)
244
; f32 fabs at m4 -> single vfabs.v under all configs.
245define <vscale x 8 x float> @vfabs_nxv8f32(<vscale x 8 x float> %v) {
246; CHECK-LABEL: vfabs_nxv8f32:
247; CHECK:       # %bb.0:
248; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
249; CHECK-NEXT:    vfabs.v v8, v8
250; CHECK-NEXT:    ret
251  %r = call <vscale x 8 x float> @llvm.fabs.nxv8f32(<vscale x 8 x float> %v)
252  ret <vscale x 8 x float> %r
253}
254
255declare <vscale x 16 x float> @llvm.fabs.nxv16f32(<vscale x 16 x float>)
256
; f32 fabs at the largest LMUL (m8) -> single vfabs.v under all configs.
257define <vscale x 16 x float> @vfabs_nxv16f32(<vscale x 16 x float> %v) {
258; CHECK-LABEL: vfabs_nxv16f32:
259; CHECK:       # %bb.0:
260; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
261; CHECK-NEXT:    vfabs.v v8, v8
262; CHECK-NEXT:    ret
263  %r = call <vscale x 16 x float> @llvm.fabs.nxv16f32(<vscale x 16 x float> %v)
264  ret <vscale x 16 x float> %r
265}
266
267declare <vscale x 1 x double> @llvm.fabs.nxv1f64(<vscale x 1 x double>)
268
; f64 fabs at m1. All RUN configs include +d and V's f64 support, so every
; config emits the single vfabs.v instruction (shared CHECK prefix).
269define <vscale x 1 x double> @vfabs_nxv1f64(<vscale x 1 x double> %v) {
270; CHECK-LABEL: vfabs_nxv1f64:
271; CHECK:       # %bb.0:
272; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
273; CHECK-NEXT:    vfabs.v v8, v8
274; CHECK-NEXT:    ret
275  %r = call <vscale x 1 x double> @llvm.fabs.nxv1f64(<vscale x 1 x double> %v)
276  ret <vscale x 1 x double> %r
277}
278
279declare <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double>)
280
; f64 fabs at m2 -> single vfabs.v under all configs.
281define <vscale x 2 x double> @vfabs_nxv2f64(<vscale x 2 x double> %v) {
282; CHECK-LABEL: vfabs_nxv2f64:
283; CHECK:       # %bb.0:
284; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
285; CHECK-NEXT:    vfabs.v v8, v8
286; CHECK-NEXT:    ret
287  %r = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> %v)
288  ret <vscale x 2 x double> %r
289}
290
291declare <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double>)
292
; f64 fabs at m4 -> single vfabs.v under all configs.
293define <vscale x 4 x double> @vfabs_nxv4f64(<vscale x 4 x double> %v) {
294; CHECK-LABEL: vfabs_nxv4f64:
295; CHECK:       # %bb.0:
296; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
297; CHECK-NEXT:    vfabs.v v8, v8
298; CHECK-NEXT:    ret
299  %r = call <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double> %v)
300  ret <vscale x 4 x double> %r
301}
302
303declare <vscale x 8 x double> @llvm.fabs.nxv8f64(<vscale x 8 x double>)
304
; f64 fabs at the largest LMUL (m8) -> single vfabs.v under all configs.
305define <vscale x 8 x double> @vfabs_nxv8f64(<vscale x 8 x double> %v) {
306; CHECK-LABEL: vfabs_nxv8f64:
307; CHECK:       # %bb.0:
308; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
309; CHECK-NEXT:    vfabs.v v8, v8
310; CHECK-NEXT:    ret
311  %r = call <vscale x 8 x double> @llvm.fabs.nxv8f64(<vscale x 8 x double> %v)
312  ret <vscale x 8 x double> %r
313}
314