/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm/asm.h>

RCSID("$NetBSD: vfpdf.S,v 1.2 2013/06/23 06:19:55 matt Exp $")

/*
 * This file provides softfloat compatible routines which use VFP instructions
 * to do the actual work.  This should give near hard-float performance while
 * being compatible with soft-float code.
 *
 * This file implements the double precision floating point routines.
 */
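
/*
 * Under the soft-float ABI a double travels in a pair of core registers:
 * the first argument in r0/r1, the second in r2/r3, and the result back in
 * r0/r1.  For example, a function like
 *
 *	double dadd(double a, double b) { return a + b; }
 *
 * compiled for the soft-float ABI ends up calling __adddf3 (or
 * __aeabi_dadd on EABI targets) with exactly that register layout.  Each
 * routine below only has to shuffle the core registers into VFP registers,
 * issue a VFP instruction or two, and shuffle the result back.
 */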

#ifdef	__ARMEL__
#define	vmov_arg0	vmov	d0, r0, r1
#define	vmov_arg1	vmov	d1, r2, r3
#define	vmov_ret	vmov	r0, r1, d0
#else
#define	vmov_arg0	vmov	d0, r1, r0
#define	vmov_arg1	vmov	d1, r3, r2
#define	vmov_ret	vmov	r1, r0, d0
#endif
#define	vmov_args	vmov_arg0; vmov_arg1
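
/*
 * vmov d0, r0, r1 writes r0 into the low word of d0 and r1 into the high
 * word.  Under the soft-float ABI a big-endian target keeps the high word
 * of a double in the lower-numbered core register, so the operand order of
 * the register pair is swapped in that case.
 */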

#ifdef __ARM_EABI__
#define	__adddf3	__aeabi_dadd
#define	__divdf3	__aeabi_ddiv
#define	__muldf3	__aeabi_dmul
#define	__subdf3	__aeabi_dsub
#define	__negdf2	__aeabi_dneg
#define	__extendsfdf2	__aeabi_f2d
#define	__fixdfsi	__aeabi_d2iz
#define	__fixunsdfsi	__aeabi_d2uiz
#define	__floatsidf	__aeabi_i2d
#define	__floatunsidf	__aeabi_ui2d
#endif
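
/*
 * When building for the ARM EABI, the defines above map each libgcc-style
 * entry point onto its RTABI (__aeabi_*) name, so the implementations
 * below serve both naming conventions from a single body.
 */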

ENTRY(__adddf3)
	vmov_args
	vadd.f64	d0, d0, d1
	vmov_ret
	RET
END(__adddf3)

ENTRY(__subdf3)
	vmov_args
	vsub.f64	d0, d0, d1
	vmov_ret
	RET
END(__subdf3)

#ifdef __ARM_EABI__
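/* __aeabi_drsub(a, b) is the reversed subtraction: it returns b - a. */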
ENTRY(__aeabi_drsub)
	vmov_args
	vsub.f64	d0, d1, d0
	vmov_ret
	RET
END(__aeabi_drsub)
#endif

ENTRY(__muldf3)
	vmov_args
	vmul.f64	d0, d0, d1
	vmov_ret
	RET
END(__muldf3)

ENTRY(__divdf3)
	vmov_args
	vdiv.f64	d0, d0, d1
	vmov_ret
	RET
END(__divdf3)

ENTRY(__negdf2)
	vmov_arg0
	vneg.f64	d0, d0
	vmov_ret
	RET
END(__negdf2)

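/*
 * The single-precision argument arrives in r0; only the double-precision
 * result needs the endian-aware vmov_ret.
 */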
ENTRY(__extendsfdf2)
	vmov		s0, r0
	vcvt.f64.f32	d0, s0
	vmov_ret
	RET
END(__extendsfdf2)

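/*
 * vcvt.s32.f64 and vcvt.u32.f64 round toward zero by default, which is the
 * truncating behaviour __fixdfsi/__fixunsdfsi (alias __aeabi_d2iz and
 * __aeabi_d2uiz) are expected to provide.
 */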
ENTRY(__fixdfsi)
	vmov_arg0
	vcvt.s32.f64	s0, d0
	vmov		r0, s0
	RET
END(__fixdfsi)

ENTRY(__fixunsdfsi)
	vmov_arg0
	vcvt.u32.f64	s0, d0
	vmov		r0, s0
	RET
END(__fixunsdfsi)

ENTRY(__floatsidf)
	vmov		s0, r0
	vcvt.f64.s32	d0, s0
	vmov_ret
	RET
END(__floatsidf)

ENTRY(__floatunsidf)
	vmov		s0, r0
	vcvt.f64.u32	d0, s0
	vmov_ret
	RET
END(__floatunsidf)

/*
 * Effect of a floating point comparison on the condition flags.
 *      N Z C V
 * EQ = 0 1 1 0
 * LT = 1 0 0 0
 * GT = 0 0 1 0
 * UN = 0 0 1 1
 */
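
/*
 * The __aeabi_cdcmp* routines return their result in these flags and the
 * caller branches on them directly; __aeabi_cdrcmple compares its operands
 * in reverse order.  The __aeabi_dcmp* routines below instead fold the
 * flags into 0 or 1 in r0, with the condition codes chosen so that an
 * unordered comparison yields 0 for every predicate except
 * __aeabi_dcmpun.  Where a condition also matches the unordered flag
 * pattern (e.g. "lt" in __aeabi_dcmplt), the later conditional move keyed
 * on C overrides the result with 0.  Note that vcmpe, used by the cdcmple
 * variants, raises the Invalid Operation exception for any NaN operand,
 * while vcmp does so only for signaling NaNs.
 */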
#ifdef __ARM_EABI__
ENTRY(__aeabi_cdcmpeq)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cdcmpeq)

ENTRY(__aeabi_cdcmple)
	vmov_args
	vcmpe.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cdcmple)

ENTRY(__aeabi_cdrcmple)
	vmov_args
	vcmpe.f64	d1, d0
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cdrcmple)

ENTRY(__aeabi_dcmpeq)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #1		/* (a == b) */
	movne		r0, #0		/* (a != b) or unordered */
	RET
END(__aeabi_dcmpeq)

ENTRY(__aeabi_dcmplt)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movlt		r0, #1		/* (a < b) */
	movcs		r0, #0		/* (a >= b) or unordered */
	RET
END(__aeabi_dcmplt)

ENTRY(__aeabi_dcmple)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movls		r0, #1		/* (a <= b) */
	movhi		r0, #0		/* (a > b) or unordered */
	RET
END(__aeabi_dcmple)

ENTRY(__aeabi_dcmpge)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movge		r0, #1		/* (a >= b) */
	movlt		r0, #0		/* (a < b) or unordered */
	RET
END(__aeabi_dcmpge)

ENTRY(__aeabi_dcmpgt)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a <= b) or unordered */
	RET
END(__aeabi_dcmpgt)

ENTRY(__aeabi_dcmpun)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* (isnan(a) || isnan(b)) */
	movvc		r0, #0		/* !isnan(a) && !isnan(b) */
	RET
END(__aeabi_dcmpun)

#else
/* N set if the compare result is "less than" */
/* Z set if the compare result is "equal" */
/* C set if the compare result is "greater than or equal" or unordered */
/* V set if the compare result is unordered */
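
/*
 * These are the libgcc-style comparison entry points: callers only test
 * the sign or zero-ness of the result (__nedf2 != 0, __ltdf2 < 0,
 * __ledf2 <= 0, ...), which is why each STRONG_ALIAS pair can share a
 * single body -- for ordered operands the value returned satisfies both
 * callers' tests.
 */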

STRONG_ALIAS(__eqdf2, __nedf2)
ENTRY(__nedf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #0		/* !(a == b) */
	movne		r0, #1		/* !(a == b) */
	RET
END(__nedf2)

STRONG_ALIAS(__gedf2, __ltdf2)
ENTRY(__ltdf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	mvnmi		r0, #0		/* -(a < b) */
	movpl		r0, #0		/* -(a < b) */
	RET
END(__ltdf2)

STRONG_ALIAS(__gtdf2, __ledf2)
ENTRY(__ledf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a > b) */
	RET
END(__ledf2)

ENTRY(__unorddf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* isnan(a) || isnan(b) */
	movvc		r0, #0		/* isnan(a) || isnan(b) */
	RET
END(__unorddf2)
#endif /* !__ARM_EABI__ */