// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +fullfp16 \
// RUN: -disable-O0-optnone \
// RUN: -emit-llvm -o - %s | opt -S -passes=mem2reg \
// RUN: | FileCheck --check-prefixes=COMMON,COMMONIR,UNCONSTRAINED %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +fullfp16 \
// RUN: -disable-O0-optnone \
// RUN: -ffp-exception-behavior=strict -emit-llvm -o - %s | opt -S -passes=mem2reg \
// RUN: | FileCheck --check-prefixes=COMMON,COMMONIR,CONSTRAINED %s

// REQUIRES: aarch64-registered-target

#include <arm_fp16.h>

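// Check prefixes:
//   UNCONSTRAINED - default FP environment (first RUN line).
//   CONSTRAINED   - -ffp-exception-behavior=strict (second RUN line); the
//                   operations lower to llvm.experimental.constrained.* calls.
//   COMMON/COMMONIR - labels and IR that are identical in both modes.
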
// COMMON-LABEL: test_vceqzh_f16
// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp oeq half %a, 0xH0000
// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half 0xH0000, metadata !"oeq", metadata !"fpexcept.strict")
// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// COMMONIR:       ret i16 [[TMP2]]
uint16_t test_vceqzh_f16(float16_t a) {
  return vceqzh_f16(a);
}

// COMMON-LABEL: test_vcgezh_f16
// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp oge half %a, 0xH0000
// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half 0xH0000, metadata !"oge", metadata !"fpexcept.strict")
// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// COMMONIR:       ret i16 [[TMP2]]
uint16_t test_vcgezh_f16(float16_t a) {
  return vcgezh_f16(a);
}

// COMMON-LABEL: test_vcgtzh_f16
// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp ogt half %a, 0xH0000
// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half 0xH0000, metadata !"ogt", metadata !"fpexcept.strict")
// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// COMMONIR:       ret i16 [[TMP2]]
uint16_t test_vcgtzh_f16(float16_t a) {
  return vcgtzh_f16(a);
}

// COMMON-LABEL: test_vclezh_f16
// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp ole half %a, 0xH0000
// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half 0xH0000, metadata !"ole", metadata !"fpexcept.strict")
// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// COMMONIR:       ret i16 [[TMP2]]
uint16_t test_vclezh_f16(float16_t a) {
  return vclezh_f16(a);
}

// COMMON-LABEL: test_vcltzh_f16
// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp olt half %a, 0xH0000
// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half 0xH0000, metadata !"olt", metadata !"fpexcept.strict")
// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// COMMONIR:       ret i16 [[TMP2]]
uint16_t test_vcltzh_f16(float16_t a) {
  return vcltzh_f16(a);
}

// COMMON-LABEL: test_vcvth_f16_s16
// UNCONSTRAINED:  [[VCVT:%.*]] = sitofp i16 %a to half
// CONSTRAINED:    [[VCVT:%.*]] = call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR:       ret half [[VCVT]]
float16_t test_vcvth_f16_s16 (int16_t a) {
  return vcvth_f16_s16(a);
}

// COMMON-LABEL: test_vcvth_f16_s32
// UNCONSTRAINED:  [[VCVT:%.*]] = sitofp i32 %a to half
// CONSTRAINED:    [[VCVT:%.*]] = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR:       ret half [[VCVT]]
float16_t test_vcvth_f16_s32 (int32_t a) {
  return vcvth_f16_s32(a);
}

// COMMON-LABEL: test_vcvth_f16_s64
// UNCONSTRAINED:  [[VCVT:%.*]] = sitofp i64 %a to half
// CONSTRAINED:    [[VCVT:%.*]] = call half @llvm.experimental.constrained.sitofp.f16.i64(i64 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR:       ret half [[VCVT]]
float16_t test_vcvth_f16_s64 (int64_t a) {
  return vcvth_f16_s64(a);
}

// COMMON-LABEL: test_vcvth_f16_u16
// UNCONSTRAINED:  [[VCVT:%.*]] = uitofp i16 %a to half
// CONSTRAINED:    [[VCVT:%.*]] = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR:       ret half [[VCVT]]
float16_t test_vcvth_f16_u16 (uint16_t a) {
  return vcvth_f16_u16(a);
}

// COMMON-LABEL: test_vcvth_f16_u32
// UNCONSTRAINED:  [[VCVT:%.*]] = uitofp i32 %a to half
// CONSTRAINED:    [[VCVT:%.*]] = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR:       ret half [[VCVT]]
float16_t test_vcvth_f16_u32 (uint32_t a) {
  return vcvth_f16_u32(a);
}

// COMMON-LABEL: test_vcvth_f16_u64
// UNCONSTRAINED:  [[VCVT:%.*]] = uitofp i64 %a to half
// CONSTRAINED:    [[VCVT:%.*]] = call half @llvm.experimental.constrained.uitofp.f16.i64(i64 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR:       ret half [[VCVT]]
float16_t test_vcvth_f16_u64 (uint64_t a) {
  return vcvth_f16_u64(a);
}

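// The FP16-to-integer conversions below use the AArch64 fcvtzs/fcvtzu
// intrinsics in both modes, so only the shared COMMONIR checks apply.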
// COMMON-LABEL: test_vcvth_s16_f16
// COMMONIR:       [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtzs.i32.f16(half %a)
// COMMONIR:       [[TRUNC:%.*]] = trunc i32 [[VCVT]] to i16
// COMMONIR:       ret i16 [[TRUNC]]
int16_t test_vcvth_s16_f16 (float16_t a) {
  return vcvth_s16_f16(a);
}

// COMMON-LABEL: test_vcvth_s32_f16
// COMMONIR:       [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtzs.i32.f16(half %a)
// COMMONIR:       ret i32 [[VCVT]]
int32_t test_vcvth_s32_f16 (float16_t a) {
  return vcvth_s32_f16(a);
}

// COMMON-LABEL: test_vcvth_s64_f16
// COMMONIR:       [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtzs.i64.f16(half %a)
// COMMONIR:       ret i64 [[VCVT]]
int64_t test_vcvth_s64_f16 (float16_t a) {
  return vcvth_s64_f16(a);
}

// COMMON-LABEL: test_vcvth_u16_f16
// COMMONIR:       [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtzu.i32.f16(half %a)
// COMMONIR:       [[TRUNC:%.*]] = trunc i32 [[VCVT]] to i16
// COMMONIR:       ret i16 [[TRUNC]]
uint16_t test_vcvth_u16_f16 (float16_t a) {
  return vcvth_u16_f16(a);
}

// COMMON-LABEL: test_vcvth_u32_f16
// COMMONIR:       [[VCVT:%.*]] = call i32 @llvm.aarch64.neon.fcvtzu.i32.f16(half %a)
// COMMONIR:       ret i32 [[VCVT]]
uint32_t test_vcvth_u32_f16 (float16_t a) {
  return vcvth_u32_f16(a);
}

// COMMON-LABEL: test_vcvth_u64_f16
// COMMONIR:       [[VCVT:%.*]] = call i64 @llvm.aarch64.neon.fcvtzu.i64.f16(half %a)
// COMMONIR:       ret i64 [[VCVT]]
uint64_t test_vcvth_u64_f16 (float16_t a) {
  return vcvth_u64_f16(a);
}

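// Rounding and square-root operations: under strict exception behavior these
// lower to the corresponding llvm.experimental.constrained.* intrinsics. The
// directed-rounding forms (trunc, round, floor, ceil) take only the exception
// metadata; nearbyint, rint, and sqrt also take a rounding-mode argument.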
// COMMON-LABEL: test_vrndh_f16
// UNCONSTRAINED:  [[RND:%.*]] = call half @llvm.trunc.f16(half %a)
// CONSTRAINED:    [[RND:%.*]] = call half @llvm.experimental.constrained.trunc.f16(half %a, metadata !"fpexcept.strict")
// COMMONIR:       ret half [[RND]]
float16_t test_vrndh_f16(float16_t a) {
  return vrndh_f16(a);
}

// COMMON-LABEL: test_vrndah_f16
// UNCONSTRAINED:  [[RND:%.*]] = call half @llvm.round.f16(half %a)
// CONSTRAINED:    [[RND:%.*]] = call half @llvm.experimental.constrained.round.f16(half %a, metadata !"fpexcept.strict")
// COMMONIR:       ret half [[RND]]
float16_t test_vrndah_f16(float16_t a) {
  return vrndah_f16(a);
}

// COMMON-LABEL: test_vrndih_f16
// UNCONSTRAINED:  [[RND:%.*]] = call half @llvm.nearbyint.f16(half %a)
// CONSTRAINED:    [[RND:%.*]] = call half @llvm.experimental.constrained.nearbyint.f16(half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR:       ret half [[RND]]
float16_t test_vrndih_f16(float16_t a) {
  return vrndih_f16(a);
}

// COMMON-LABEL: test_vrndmh_f16
// UNCONSTRAINED:  [[RND:%.*]] = call half @llvm.floor.f16(half %a)
// CONSTRAINED:    [[RND:%.*]] = call half @llvm.experimental.constrained.floor.f16(half %a, metadata !"fpexcept.strict")
// COMMONIR:       ret half [[RND]]
float16_t test_vrndmh_f16(float16_t a) {
  return vrndmh_f16(a);
}

// COMMON-LABEL: test_vrndph_f16
// UNCONSTRAINED:  [[RND:%.*]] = call half @llvm.ceil.f16(half %a)
// CONSTRAINED:    [[RND:%.*]] = call half @llvm.experimental.constrained.ceil.f16(half %a, metadata !"fpexcept.strict")
// COMMONIR:       ret half [[RND]]
float16_t test_vrndph_f16(float16_t a) {
  return vrndph_f16(a);
}

// COMMON-LABEL: test_vrndxh_f16
// UNCONSTRAINED:  [[RND:%.*]] = call half @llvm.rint.f16(half %a)
// CONSTRAINED:    [[RND:%.*]] = call half @llvm.experimental.constrained.rint.f16(half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR:       ret half [[RND]]
float16_t test_vrndxh_f16(float16_t a) {
  return vrndxh_f16(a);
}

// COMMON-LABEL: test_vsqrth_f16
// UNCONSTRAINED:  [[SQR:%.*]] = call half @llvm.sqrt.f16(half %a)
// CONSTRAINED:    [[SQR:%.*]] = call half @llvm.experimental.constrained.sqrt.f16(half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR:       ret half [[SQR]]
float16_t test_vsqrth_f16(float16_t a) {
  return vsqrth_f16(a);
}

// COMMON-LABEL: test_vaddh_f16
// UNCONSTRAINED:  [[ADD:%.*]] = fadd half %a, %b
// CONSTRAINED:    [[ADD:%.*]] = call half @llvm.experimental.constrained.fadd.f16(half %a, half %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR:       ret half [[ADD]]
float16_t test_vaddh_f16(float16_t a, float16_t b) {
  return vaddh_f16(a, b);
}

// COMMON-LABEL: test_vceqh_f16
// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp oeq half %a, %b
// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"oeq", metadata !"fpexcept.strict")
// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// COMMONIR:       ret i16 [[TMP2]]
uint16_t test_vceqh_f16(float16_t a, float16_t b) {
  return vceqh_f16(a, b);
}

// COMMON-LABEL: test_vcgeh_f16
// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp oge half %a, %b
// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"oge", metadata !"fpexcept.strict")
// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// COMMONIR:       ret i16 [[TMP2]]
uint16_t test_vcgeh_f16(float16_t a, float16_t b) {
  return vcgeh_f16(a, b);
}

// COMMON-LABEL: test_vcgth_f16
// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp ogt half %a, %b
// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ogt", metadata !"fpexcept.strict")
// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// COMMONIR:       ret i16 [[TMP2]]
uint16_t test_vcgth_f16(float16_t a, float16_t b) {
  return vcgth_f16(a, b);
}

// COMMON-LABEL: test_vcleh_f16
// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp ole half %a, %b
// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ole", metadata !"fpexcept.strict")
// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// COMMONIR:       ret i16 [[TMP2]]
uint16_t test_vcleh_f16(float16_t a, float16_t b) {
  return vcleh_f16(a, b);
}

// COMMON-LABEL: test_vclth_f16
// UNCONSTRAINED:  [[TMP1:%.*]] = fcmp olt half %a, %b
// CONSTRAINED:    [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"olt", metadata !"fpexcept.strict")
// COMMONIR:       [[TMP2:%.*]] = sext i1 [[TMP1]] to i16
// COMMONIR:       ret i16 [[TMP2]]
uint16_t test_vclth_f16(float16_t a, float16_t b) {
  return vclth_f16(a, b);
}

// COMMON-LABEL: test_vdivh_f16
// UNCONSTRAINED:  [[DIV:%.*]] = fdiv half %a, %b
// CONSTRAINED:    [[DIV:%.*]] = call half @llvm.experimental.constrained.fdiv.f16(half %a, half %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR:       ret half [[DIV]]
float16_t test_vdivh_f16(float16_t a, float16_t b) {
  return vdivh_f16(a, b);
}

// COMMON-LABEL: test_vmulh_f16
// UNCONSTRAINED:  [[MUL:%.*]] = fmul half %a, %b
// CONSTRAINED:    [[MUL:%.*]] = call half @llvm.experimental.constrained.fmul.f16(half %a, half %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR:       ret half [[MUL]]
float16_t test_vmulh_f16(float16_t a, float16_t b) {
  return vmulh_f16(a, b);
}

// COMMON-LABEL: test_vsubh_f16
// UNCONSTRAINED:  [[SUB:%.*]] = fsub half %a, %b
// CONSTRAINED:    [[SUB:%.*]] = call half @llvm.experimental.constrained.fsub.f16(half %a, half %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR:       ret half [[SUB]]
float16_t test_vsubh_f16(float16_t a, float16_t b) {
  return vsubh_f16(a, b);
}

// COMMON-LABEL: test_vfmah_f16
// UNCONSTRAINED:  [[FMA:%.*]] = call half @llvm.fma.f16(half %b, half %c, half %a)
// CONSTRAINED:    [[FMA:%.*]] = call half @llvm.experimental.constrained.fma.f16(half %b, half %c, half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR:       ret half [[FMA]]
float16_t test_vfmah_f16(float16_t a, float16_t b, float16_t c) {
  return vfmah_f16(a, b, c);
}

// COMMON-LABEL: test_vfmsh_f16
// COMMONIR:       [[SUB:%.*]] = fneg half %b
// UNCONSTRAINED:  [[ADD:%.*]] = call half @llvm.fma.f16(half [[SUB]], half %c, half %a)
// CONSTRAINED:    [[ADD:%.*]] = call half @llvm.experimental.constrained.fma.f16(half [[SUB]], half %c, half %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR:       ret half [[ADD]]
float16_t test_vfmsh_f16(float16_t a, float16_t b, float16_t c) {
  return vfmsh_f16(a, b, c);
}