/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm/asm.h>

RCSID("$NetBSD: vfpdf.S,v 1.5 2020/12/02 14:20:20 wiz Exp $")

/*
 * This file provides soft-float-compatible routines that use VFP
 * instructions to do the actual work.  This should give near hard-float
 * performance while remaining link-compatible with soft-float code.
 *
 * This file implements the double-precision floating-point routines.
 */

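/*
 * Illustrative example (an assumption about the toolchain, not taken
 * from this file): under a soft-float ABI the compiler passes and
 * returns doubles in core register pairs, so code such as
 *
 *	double add(double a, double b) { return a + b; }
 *
 * built with, e.g., GCC's -mfloat-abi=soft on an EABI system becomes a
 * call to __aeabi_dadd with 'a' in r0/r1, 'b' in r2/r3, and the result
 * in r0/r1.  The routines below satisfy that contract while doing the
 * actual arithmetic in VFP registers.
 */
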
.fpu vfp

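/*
 * "vmov dN, rLo, rHi" loads its first core register into the low half
 * of dN and its second into the high half.  Under the AAPCS a double
 * held in a core register pair has its low word in the lower-numbered
 * register on little-endian but in the higher-numbered register on
 * big-endian, hence the swapped operand order in the big-endian
 * variants below.
 */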
#ifdef	__ARMEL__
#define	vmov_arg0	vmov	d0, r0, r1
#define	vmov_arg1	vmov	d1, r2, r3
#define	vmov_ret	vmov	r0, r1, d0
#else
#define	vmov_arg0	vmov	d0, r1, r0
#define	vmov_arg1	vmov	d1, r3, r2
#define	vmov_ret	vmov	r1, r0, d0
#endif
#define	vmov_args	vmov_arg0; vmov_arg1

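/*
 * Under the ARM EABI the run-time helpers use the __aeabi_* names from
 * the ARM RTABI, so remap the traditional libgcc entry-point names;
 * with these defines, ENTRY(__adddf3) below actually emits
 * __aeabi_dadd, and so on.
 */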
#ifdef __ARM_EABI__
#define	__adddf3	__aeabi_dadd
#define	__divdf3	__aeabi_ddiv
#define	__muldf3	__aeabi_dmul
#define	__subdf3	__aeabi_dsub
#define	__negdf2	__aeabi_dneg
#define	__extendsfdf2	__aeabi_f2d
#define	__fixdfsi	__aeabi_d2iz
#define	__fixunsdfsi	__aeabi_d2uiz
#define	__floatsidf	__aeabi_i2d
#define	__floatunsidf	__aeabi_ui2d
#endif

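/*
 * Each arithmetic wrapper follows the same pattern: move the soft-float
 * arguments from core registers into d0/d1, apply one VFP instruction,
 * and move the result back into r0/r1 for the caller.
 */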
ENTRY(__adddf3)
	vmov_args
	vadd.f64	d0, d0, d1
	vmov_ret
	RET
END(__adddf3)

ENTRY(__subdf3)
	vmov_args
	vsub.f64	d0, d0, d1
	vmov_ret
	RET
END(__subdf3)

#ifdef __ARM_EABI__
ENTRY(__aeabi_drsub)
	vmov_args
	vsub.f64	d0, d1, d0
	vmov_ret
	RET
END(__aeabi_drsub)
#endif

ENTRY(__muldf3)
	vmov_args
	vmul.f64	d0, d0, d1
	vmov_ret
	RET
END(__muldf3)

ENTRY(__divdf3)
	vmov_args
	vdiv.f64	d0, d0, d1
	vmov_ret
	RET
END(__divdf3)

ENTRY(__negdf2)
	vmov_arg0
	vneg.f64	d0, d0
	vmov_ret
	RET
END(__negdf2)

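/* Widening from single to double precision is exact, so one vcvt suffices. */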
ENTRY(__extendsfdf2)
	vmov		s0, r0
	vcvt.f64.f32	d0, s0
	vmov_ret
	RET
END(__extendsfdf2)

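/*
 * vcvt.s32.f64 and vcvt.u32.f64 round toward zero by default, matching
 * C cast semantics and the trailing "z" in the __aeabi_d2iz and
 * __aeabi_d2uiz names.
 */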
ENTRY(__fixdfsi)
	vmov_arg0
	vcvt.s32.f64	s0, d0
	vmov		r0, s0
	RET
END(__fixdfsi)

ENTRY(__fixunsdfsi)
	vmov_arg0
	vcvt.u32.f64	s0, d0
	vmov		r0, s0
	RET
END(__fixunsdfsi)

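/*
 * Every 32-bit integer fits in a double's 53-bit significand, so these
 * conversions are always exact.
 */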
ENTRY(__floatsidf)
	vmov		s0, r0
	vcvt.f64.s32	d0, s0
	vmov_ret
	RET
END(__floatsidf)

ENTRY(__floatunsidf)
	vmov		s0, r0
	vcvt.f64.u32	d0, s0
	vmov_ret
	RET
END(__floatunsidf)

/*
 * Effect of a floating-point comparison on the condition flags:
 *      N Z C V
 * EQ = 0 1 1 0
 * LT = 1 0 0 0
 * GT = 0 0 1 0
 * UN = 0 0 1 1
 */
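/*
 * The condition codes used below read those flags directly: "cs"
 * (C set) covers EQ, GT, and UN, which is why __aeabi_dcmplt clears r0
 * with movcs for "(a >= b) or unordered", while "ls" (C clear or Z set)
 * covers exactly EQ and LT, giving __aeabi_dcmple its "(a <= b)" test.
 */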
#ifdef __ARM_EABI__
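/*
 * The __aeabi_cd* routines return their result in the condition flags
 * rather than in r0, and the caller branches on them directly.  Note
 * that cdcmpeq uses vcmp, which stays quiet on quiet NaNs, while the
 * cdcmple variants use vcmpe, which raises the invalid-operation
 * exception if either operand is any kind of NaN.
 */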
ENTRY(__aeabi_cdcmpeq)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cdcmpeq)

ENTRY(__aeabi_cdcmple)
	vmov_args
	vcmpe.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cdcmple)

ENTRY(__aeabi_cdrcmple)
	vmov_args
	vcmpe.f64	d1, d0
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cdrcmple)

ENTRY(__aeabi_dcmpeq)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #1		/* (a == b) */
	movne		r0, #0		/* (a != b) or unordered */
	RET
END(__aeabi_dcmpeq)

ENTRY(__aeabi_dcmplt)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movlt		r0, #1		/* (a < b) */
	movcs		r0, #0		/* (a >= b) or unordered */
	RET
END(__aeabi_dcmplt)

ENTRY(__aeabi_dcmple)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movls		r0, #1		/* (a <= b) */
	movhi		r0, #0		/* (a > b) or unordered */
	RET
END(__aeabi_dcmple)

ENTRY(__aeabi_dcmpge)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movge		r0, #1		/* (a >= b) */
	movlt		r0, #0		/* (a < b) or unordered */
	RET
END(__aeabi_dcmpge)

ENTRY(__aeabi_dcmpgt)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a <= b) or unordered */
	RET
END(__aeabi_dcmpgt)

ENTRY(__aeabi_dcmpun)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* (isnan(a) || isnan(b)) */
	movvc		r0, #0		/* !isnan(a) && !isnan(b) */
	RET
END(__aeabi_dcmpun)

#else
/* N set if the compare result is < */
/* Z set if the compare result is = */
/* C set if the compare result is =, >, or unordered */
/* V set if the compare result is unordered */

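/*
 * Each STRONG_ALIAS below folds two libgcc comparison entry points into
 * one body: for ordered operands, __eqdf2/__nedf2 return zero iff
 * a == b, __gedf2/__ltdf2 return a negative value iff a < b, and
 * __gtdf2/__ledf2 return a positive value iff a > b, so callers that
 * only test the sign or zeroness of the result can share a single
 * implementation.
 */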
STRONG_ALIAS(__eqdf2, __nedf2)
ENTRY(__nedf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #0		/* !(a == b) */
	movne		r0, #1		/* !(a == b) */
	RET
END(__nedf2)

STRONG_ALIAS(__gedf2, __ltdf2)
ENTRY(__ltdf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	mvnmi		r0, #0		/* -(a < b) */
	movpl		r0, #0		/* -(a < b) */
	RET
END(__ltdf2)

STRONG_ALIAS(__gtdf2, __ledf2)
ENTRY(__ledf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a > b) */
	RET
END(__ledf2)

ENTRY(__unorddf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* isnan(a) || isnan(b) */
	movvc		r0, #0		/* isnan(a) || isnan(b) */
	RET
END(__unorddf2)
#endif /* !__ARM_EABI__ */