; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll (revision 1cb599835ccf7ee8b2d1d5a7f3107e19a26fc6f5)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s

; Masked vp.is.fpclass with mask 3 (fcSNan|fcQNan = any NaN).
; Lowered to a masked vfclass.v; 768 = 0x300 selects the two vfclass NaN bits.
define <vscale x 2 x i1> @isnan_nxv2f16(<vscale x 2 x half> %x, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: isnan_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8, v0.t
; CHECK-NEXT:    li a0, 768
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f16(<vscale x 2 x half> %x, i32 3, <vscale x 2 x i1> %m, i32 %evl)  ; nan
  ret <vscale x 2 x i1> %1
}

; All-true-mask variant of the mask-3 (any NaN) test: vfclass.v is emitted
; without a v0.t mask operand.
define <vscale x 2 x i1> @isnan_nxv2f16_unmasked(<vscale x 2 x half> %x, i32 zeroext %evl) {
; CHECK-LABEL: isnan_nxv2f16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    li a0, 768
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f16(<vscale x 2 x half> %x, i32 3, <vscale x 2 x i1> splat (i1 true), i32 %evl)  ; nan
  ret <vscale x 2 x i1> %1
}

; Mask 639 = 0x27f: every fp class except positive subnormal and positive
; normal (1023 - 639 = 0x180). The corresponding vfclass bitmask is 927.
define <vscale x 2 x i1> @isnan_nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: isnan_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8, v0.t
; CHECK-NEXT:    li a0, 927
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f32(<vscale x 2 x float> %x, i32 639, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i1> %1
}

; All-true-mask variant of the mask-639 (0x27f) test: unmasked vfclass.v.
define <vscale x 2 x i1> @isnan_nxv2f32_unmasked(<vscale x 2 x float> %x, i32 zeroext %evl) {
; CHECK-LABEL: isnan_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    li a0, 927
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f32(<vscale x 2 x float> %x, i32 639, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i1> %1
}

; LMUL=2 variant of the any-NaN (mask 3) test; vfclass result ANDed with
; 768 = 0x300 (the two NaN bits) and compared against zero.
define <vscale x 4 x i1> @isnan_nxv4f32(<vscale x 4 x float> %x, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: isnan_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8, v0.t
; CHECK-NEXT:    li a0, 768
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f32(<vscale x 4 x float> %x, i32 3, <vscale x 4 x i1> %m, i32 %evl)  ; nan
  ret <vscale x 4 x i1> %1
}

; All-true-mask, LMUL=2 variant of the any-NaN (mask 3) test.
define <vscale x 4 x i1> @isnan_nxv4f32_unmasked(<vscale x 4 x float> %x, i32 zeroext %evl) {
; CHECK-LABEL: isnan_nxv4f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    li a0, 768
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f32(<vscale x 4 x float> %x, i32 3, <vscale x 4 x i1> splat (i1 true), i32 %evl)  ; nan
  ret <vscale x 4 x i1> %1
}

; NOTE(review): despite the "isnan" name, mask 2 is fcQNan only (quiet NaN),
; so a single vfclass bit (512 = 0x200) can be tested with vmseq instead of
; an and+compare pair.
define <vscale x 8 x i1> @isnan_nxv8f32(<vscale x 8 x float> %x,  <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: isnan_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8, v0.t
; CHECK-NEXT:    li a0, 512
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmseq.vx v0, v8, a0
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f32(<vscale x 8 x float> %x, i32 2, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i1> %1
}

; All-true-mask variant of the fcQNan (mask 2) single-bit test.
define <vscale x 8 x i1> @isnan_nxv8f32_unmasked(<vscale x 8 x float> %x, i32 zeroext %evl) {
; CHECK-LABEL: isnan_nxv8f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    li a0, 512
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmseq.vx v0, v8, a0
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f32(<vscale x 8 x float> %x, i32 2, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i1> %1
}

; NOTE(review): mask 1 is fcSNan only (signaling NaN); single vfclass bit
; (256 = 0x100) tested directly with vmseq.
define <vscale x 16 x i1> @isnan_nxv16f32(<vscale x 16 x float> %x, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: isnan_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8, v0.t
; CHECK-NEXT:    li a0, 256
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmseq.vx v0, v8, a0
; CHECK-NEXT:    ret
  %1 = call <vscale x 16 x i1> @llvm.vp.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 1, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i1> %1
}

; All-true-mask variant of the fcSNan (mask 1) single-bit test.
define <vscale x 16 x i1> @isnan_nxv16f32_unmasked(<vscale x 16 x float> %x, i32 zeroext %evl) {
; CHECK-LABEL: isnan_nxv16f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    li a0, 256
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmseq.vx v0, v8, a0
; CHECK-NEXT:    ret
  %1 = call <vscale x 16 x i1> @llvm.vp.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 1, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i1> %1
}

; NOTE(review): despite the "isnormal" name, mask 516 = 0x204 is
; fcPosInf|fcNegInf ("inf"), as the trailing comment says. The two vfclass
; inf bits are 128|1 = 129, tested with and+compare-against-zero.
define <vscale x 2 x i1> @isnormal_nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: isnormal_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8, v0.t
; CHECK-NEXT:    li a0, 129
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f64(<vscale x 2 x double> %x, i32 516, <vscale x 2 x i1> %m, i32 %evl) ; 0x204 = "inf"
  ret <vscale x 2 x i1> %1
}

; All-true-mask variant of the ±inf (mask 0x204) test: unmasked vfclass.v.
define <vscale x 2 x i1> @isnormal_nxv2f64_unmasked(<vscale x 2 x double> %x, i32 zeroext %evl) {
; CHECK-LABEL: isnormal_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    li a0, 129
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f64(<vscale x 2 x double> %x, i32 516, <vscale x 2 x i1> splat (i1 true), i32 %evl) ; 0x204 = "inf"
  ret <vscale x 2 x i1> %1
}

; Mask 512 = 0x200 = fcPosInf only; single vfclass bit (128) tested with vmseq.
define <vscale x 4 x i1> @isposinf_nxv4f64(<vscale x 4 x double> %x, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: isposinf_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8, v0.t
; CHECK-NEXT:    li a0, 128
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmseq.vx v0, v8, a0
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f64(<vscale x 4 x double> %x, i32 512, <vscale x 4 x i1> %m, i32 %evl) ; 0x200 = "+inf"
  ret <vscale x 4 x i1> %1
}

; All-true-mask variant of the fcPosInf (mask 0x200) single-bit test.
define <vscale x 4 x i1> @isposinf_nxv4f64_unmasked(<vscale x 4 x double> %x, i32 zeroext %evl) {
; CHECK-LABEL: isposinf_nxv4f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    li a0, 128
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmseq.vx v0, v8, a0
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f64(<vscale x 4 x double> %x, i32 512, <vscale x 4 x i1> splat (i1 true), i32 %evl) ; 0x200 = "+inf"
  ret <vscale x 4 x i1> %1
}

; Mask 4 = fcNegInf only; the vfclass -inf bit is bit 0, so the compare
; immediate fits in vmseq.vi and no li is needed.
define <vscale x 8 x i1> @isneginf_nxv8f64(<vscale x 8 x double> %x, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: isneginf_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8, v0.t
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmseq.vi v0, v8, 1
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f64(<vscale x 8 x double> %x, i32 4, <vscale x 8 x i1> %m, i32 %evl) ; "-inf"
  ret <vscale x 8 x i1> %1
}

; All-true-mask variant of the fcNegInf (mask 4) test: unmasked vfclass.v,
; compare immediate 1 via vmseq.vi.
define <vscale x 8 x i1> @isneginf_nxv8f64_unmasked(<vscale x 8 x double> %x, i32 zeroext %evl) {
; CHECK-LABEL: isneginf_nxv8f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfclass.v v8, v8
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmseq.vi v0, v8, 1
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f64(<vscale x 8 x double> %x, i32 4, <vscale x 8 x i1> splat (i1 true), i32 %evl) ; "-inf"
  ret <vscale x 8 x i1> %1
}


; Declarations for every llvm.vp.is.fpclass overload exercised above.
declare <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f16(<vscale x 2 x half>, i32, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f32(<vscale x 2 x float>, i32, <vscale x 2 x i1>, i32)
declare <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f32(<vscale x 4 x float>, i32, <vscale x 4 x i1>, i32)
declare <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f32(<vscale x 8 x float>, i32, <vscale x 8 x i1>, i32)
declare <vscale x 16 x i1> @llvm.vp.is.fpclass.nxv16f32(<vscale x 16 x float>, i32, <vscale x 16 x i1>, i32)
declare <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f64(<vscale x 2 x double>, i32, <vscale x 2 x i1>, i32)
declare <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f64(<vscale x 4 x double>, i32, <vscale x 4 x i1>, i32)
declare <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f64(<vscale x 8 x double>, i32, <vscale x 8 x i1>, i32)