/*	$NetBSD: fpu_implode.c,v 1.8 2003/10/23 15:07:30 kleink Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_implode.c	8.1 (Berkeley) 6/11/93
 */

/*
 * FPU subroutines: `implode' internal format numbers into the machine's
 * `packed binary' format.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fpu_implode.c,v 1.8 2003/10/23 15:07:30 kleink Exp $");

#include <sys/types.h>
#include <sys/systm.h>

#include <machine/ieee.h>
#include <machine/reg.h>

#include "fpu_emulate.h"
#include "fpu_arith.h"

/* Conversion from internal format -- note asymmetry. */
static u_int	fpu_ftoi __P((struct fpemu *fe, struct fpn *fp));
static u_int	fpu_ftos __P((struct fpemu *fe, struct fpn *fp));
static u_int	fpu_ftod __P((struct fpemu *fe, struct fpn *fp, u_int *));
static u_int	fpu_ftox __P((struct fpemu *fe, struct fpn *fp, u_int *));

/*
 * Round a number (algorithm from Motorola MC68882 manual, modified for
 * our internal format).  Set inexact exception if rounding is required.
 * Return true iff we rounded up.
 *
 * After rounding, we discard the guard and round bits by shifting right
 * 2 bits (a la fpu_shr(), but we do not bother with fp->fp_sticky).
 * This saves effort later.
 *
 * Note that we may leave the value 2.0 in fp->fp_mant; it is the caller's
 * responsibility to fix this if necessary.
 */
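/*
 * Worked example (illustrative), using the two guard/round bits kept
 * below (gr == m2 & 3): in round-to-nearest mode, gr == 2 with sticky
 * clear is an exact halfway case, so we round up only if the surviving
 * low bit (m2 & 1 after the shift) would otherwise be odd, keeping the
 * result even; gr == 1 (below halfway) rounds down, while gr == 3 or
 * gr == 2 with sticky bits set (above halfway) rounds up.
 */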
int
fpu_round(register struct fpemu *fe, register struct fpn *fp)
{
	register u_int m0, m1, m2;
	register int gr, s;

	m0 = fp->fp_mant[0];
	m1 = fp->fp_mant[1];
	m2 = fp->fp_mant[2];
	gr = m2 & 3;
	s = fp->fp_sticky;

	/* mant >>= FP_NG */
	m2 = (m2 >> FP_NG) | (m1 << (32 - FP_NG));
	m1 = (m1 >> FP_NG) | (m0 << (32 - FP_NG));
	m0 >>= FP_NG;

	if ((gr | s) == 0)	/* result is exact: no rounding needed */
		goto rounddown;

	fe->fe_fpsr |= FPSR_INEX2;	/* inexact */

	/* Go to rounddown to round down; break to round up. */
	switch (fe->fe_fpcr & FPCR_ROUND) {

	case FPCR_NEAR:
	default:
		/*
		 * Round only if guard is set (gr & 2).  If guard is set,
		 * but round & sticky both clear, then we want to round
		 * but have a tie, so round to even, i.e., add 1 iff odd.
		 */
		if ((gr & 2) == 0)
			goto rounddown;
		if ((gr & 1) || fp->fp_sticky || (m2 & 1))
			break;
		goto rounddown;

	case FPCR_ZERO:
		/* Round towards zero, i.e., down. */
		goto rounddown;

	case FPCR_MINF:
		/* Round towards -Inf: up if negative, down if positive. */
		if (fp->fp_sign)
			break;
		goto rounddown;

	case FPCR_PINF:
		/* Round towards +Inf: up if positive, down otherwise. */
		if (!fp->fp_sign)
			break;
		goto rounddown;
	}

	/* Bump low bit of mantissa, with carry. */
	if (++m2 == 0 && ++m1 == 0)
		m0++;
	fp->fp_sticky = 0;
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	return (1);

rounddown:
	fp->fp_sticky = 0;
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	return (0);
}

/*
 * For overflow: return true if overflow is to go to +/-Inf, according
 * to the sign of the overflowing result.  If false, overflow is to go
 * to the largest magnitude value instead.
 */
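/*
 * E.g., a positive single-precision overflow becomes +Inf in
 * round-to-nearest and round-toward-+Inf modes, but the largest finite
 * value (0x7f7fffff) in round-toward-zero and round-toward--Inf modes.
 */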
static int
toinf(struct fpemu *fe, int sign)
{
	int inf;

	/* look at rounding direction */
	switch (fe->fe_fpcr & FPCR_ROUND) {

	default:
	case FPCR_NEAR:		/* the nearest value is always Inf */
		inf = 1;
		break;

	case FPCR_ZERO:		/* toward 0 => never towards Inf */
		inf = 0;
		break;

	case FPCR_PINF:		/* toward +Inf iff positive */
		inf = (sign == 0);
		break;

	case FPCR_MINF:		/* toward -Inf iff negative */
		inf = sign;
		break;
	}
	return (inf);
}

/*
 * fpn -> int (int value returned as return value).
 *
 * N.B.: the SPARC original of this routine always rounded towards zero
 * (a peculiarity of that instruction set); this version instead rounds
 * via fpu_round() according to the current FPCR mode, as the m68881/2 do.
 */
static u_int
fpu_ftoi(fe, fp)
	struct fpemu *fe;
	register struct fpn *fp;
{
	register u_int i;
	register int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {

	case FPC_ZERO:
		return (0);

	case FPC_NUM:
		/*
		 * If the value is >= 2^32 (i.e., exp >= 32), overflow.
		 * Otherwise shift the value right into the last mantissa
		 * word (this will not exceed 0xffffffff), keeping FP_NG
		 * guard bits and folding everything below them into the
		 * sticky bit, then round via fpu_round() according to
		 * the current rounding mode.  If the result is
		 * > 0x80000000, or is positive and equals 0x80000000,
		 * overflow; otherwise the last fraction word is the
		 * result.
		 */
		if ((exp = fp->fp_exp) >= 32)
			break;
		/*
		 * NB: the following includes exp < 0 cases.  The m68881/2
		 * do not underflow when converting to integer, so the
		 * nonzero return (bits shifted out, accumulated in
		 * fp_sticky) is deliberately ignored; fpu_round() will
		 * raise inexact as needed.
		 */
		(void) fpu_shr(fp, FP_NMANT - 1 - FP_NG - exp);
		fpu_round(fe, fp);
		i = fp->fp_mant[2];
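		/*
		 * Since sign is 0 or 1 here, the limit below is
		 * 0x80000000 for positive results (2^31 does not fit in
		 * a signed int) but 0x80000001 for negative ones, so
		 * -2^31 itself is still representable.
		 */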
		if (i >= ((u_int)0x80000000 + sign))
			break;
		return (sign ? -i : i);

	default:		/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_fpsr = (fe->fe_fpsr & ~FPSR_INEX2) | FPSR_OPERR;
	return (0x7fffffff + sign);
}

/*
 * fpn -> single (32 bit single returned as return value).
 * We assume <= 29 bits in a single-precision fraction (1.f part).
 */
static u_int
fpu_ftos(fe, fp)
	struct fpemu *fe;
	register struct fpn *fp;
{
	register u_int sign = fp->fp_sign << 31;
	register int exp;

#define	SNG_EXP(e)	((e) << SNG_FRACBITS)	/* makes e an exponent */
#define	SNG_MASK	(SNG_EXP(1) - 1)	/* mask for fraction */
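/* With SNG_FRACBITS == 23: SNG_EXP(1) == 0x00800000 (the implied-1 bit
   position) and SNG_MASK == 0x007fffff. */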

	/* Take care of non-numbers first. */
	if (ISNAN(fp)) {
		/*
		 * Preserve upper bits of NaN, per SPARC V8 appendix N.
		 * Note that fp->fp_mant[0] has the quiet bit set,
		 * even if it is classified as a signalling NaN.
		 */
		(void) fpu_shr(fp, FP_NMANT - 1 - SNG_FRACBITS);
		exp = SNG_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp))
		return (sign | SNG_EXP(SNG_EXP_INFNAN));
	if (ISZERO(fp))
		return (sign);

	/*
	 * Normals (including subnormals).  Drop all the fraction bits
	 * (including the explicit ``implied'' 1 bit) down into the
	 * single-precision range.  If the number is subnormal, move
	 * the ``implied'' 1 into the explicit range as well, and shift
	 * right to introduce leading zeroes.  Rounding then acts
	 * differently for normals and subnormals: the largest subnormal
	 * may round to the smallest normal (1.0 x 2^minexp), or may
	 * remain subnormal.  In the latter case, signal an underflow
	 * if the result was inexact or if underflow traps are enabled.
	 *
	 * Rounding a normal, on the other hand, always produces another
	 * normal (although either way the result might be too big for
	 * single precision, and cause an overflow).  If rounding a
	 * normal produces 2.0 in the fraction, we need not adjust that
	 * fraction at all, since both 1.0 and 2.0 are zero under the
	 * fraction mask.
	 *
	 * Note that the guard and round bits vanish from the number after
	 * rounding.
	 */
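	/*
	 * Concretely (illustrative): the largest subnormal has fraction
	 * 0x7fffff; when rounding bumps it, fp_mant[2] becomes
	 * SNG_EXP(1) == 0x00800000 and the result is exactly the
	 * smallest normal.  In the normal path, a carry to 2.0 leaves
	 * fp_mant[2] == SNG_EXP(2) == 0x01000000, which is zero under
	 * SNG_MASK, so only the exponent needs bumping.
	 */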
	if ((exp = fp->fp_exp + SNG_EXP_BIAS) <= 0) {	/* subnormal */
		fe->fe_fpsr |= FPSR_UNFL;
		/* -NG for g,r; -SNG_FRACBITS-exp for fraction */
		(void) fpu_shr(fp, FP_NMANT - FP_NG - SNG_FRACBITS - exp);
		if (fpu_round(fe, fp) && fp->fp_mant[2] == SNG_EXP(1))
			return (sign | SNG_EXP(1) | 0);
		if (fe->fe_fpsr & FPSR_INEX2)
			/* mc68881/2 don't underflow when converting */
			fe->fe_fpsr |= FPSR_UNFL;
		return (sign | SNG_EXP(0) | fp->fp_mant[2]);
	}
	/* -FP_NG for g,r; -1 for implied 1; -SNG_FRACBITS for fraction */
	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - SNG_FRACBITS);
#ifdef DIAGNOSTIC
	if ((fp->fp_mant[2] & SNG_EXP(1 << FP_NG)) == 0)
		panic("fpu_ftos");
#endif
	if (fpu_round(fe, fp) && fp->fp_mant[2] == SNG_EXP(2))
		exp++;
	if (exp >= SNG_EXP_INFNAN) {
		/* overflow to inf or to max single */
		fe->fe_fpsr |= FPSR_OPERR | FPSR_INEX2 | FPSR_OVFL;
		if (toinf(fe, sign))
			return (sign | SNG_EXP(SNG_EXP_INFNAN));
		return (sign | SNG_EXP(SNG_EXP_INFNAN - 1) | SNG_MASK);
	}
done:
	/* phew, made it */
	return (sign | SNG_EXP(exp) | (fp->fp_mant[2] & SNG_MASK));
}

/*
 * fpn -> double (32 bit high-order result returned; 32-bit low order result
 * left in res[1]).  Assumes <= 61 bits in double precision fraction.
 *
 * This code mimics fpu_ftos; see it for comments.
 */
static u_int
fpu_ftod(fe, fp, res)
	struct fpemu *fe;
	register struct fpn *fp;
	u_int *res;
{
	register u_int sign = fp->fp_sign << 31;
	register int exp;

#define	DBL_EXP(e)	((e) << (DBL_FRACBITS & 31))
#define	DBL_MASK	(DBL_EXP(1) - 1)
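/* With DBL_FRACBITS == 52, the shift is 52 & 31 == 20 within the
   high-order word: DBL_EXP(1) == 0x00100000 and DBL_MASK == 0x000fffff;
   the low-order 32 fraction bits travel via res[1]. */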

	if (ISNAN(fp)) {
		(void) fpu_shr(fp, FP_NMANT - 1 - DBL_FRACBITS);
		exp = DBL_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp)) {
		sign |= DBL_EXP(DBL_EXP_INFNAN);
		res[1] = 0;
		return (sign);
	}
	if (ISZERO(fp)) {
		res[1] = 0;
		return (sign);
	}

	if ((exp = fp->fp_exp + DBL_EXP_BIAS) <= 0) {
		fe->fe_fpsr |= FPSR_UNFL;
		(void) fpu_shr(fp, FP_NMANT - FP_NG - DBL_FRACBITS - exp);
		if (fpu_round(fe, fp) && fp->fp_mant[1] == DBL_EXP(1)) {
			res[1] = 0;
			return (sign | DBL_EXP(1) | 0);
		}
		if (fe->fe_fpsr & FPSR_INEX2)
			/* mc68881/2 don't underflow when converting */
			fe->fe_fpsr |= FPSR_UNFL;
		exp = 0;
		goto done;
	}
	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - DBL_FRACBITS);
	if (fpu_round(fe, fp) && fp->fp_mant[1] == DBL_EXP(2))
		exp++;
	if (exp >= DBL_EXP_INFNAN) {
		fe->fe_fpsr |= FPSR_OPERR | FPSR_INEX2 | FPSR_OVFL;
		if (toinf(fe, sign)) {
			res[1] = 0;
			return (sign | DBL_EXP(DBL_EXP_INFNAN) | 0);
		}
		res[1] = ~0;
		return (sign | DBL_EXP(DBL_EXP_INFNAN) | DBL_MASK);
	}
done:
	res[1] = fp->fp_mant[2];
	return (sign | DBL_EXP(exp) | (fp->fp_mant[1] & DBL_MASK));
}

/*
 * fpn -> 68k extended (32 bit high-order result returned; two 32-bit low
 * order result left in res[1] & res[2]).  Assumes == 64 bits in extended
 * precision fraction.
 *
 * This code mimics fpu_ftos; see it for comments.
 */
static u_int
fpu_ftox(fe, fp, res)
	struct fpemu *fe;
	register struct fpn *fp;
	u_int *res;
{
	register u_int sign = fp->fp_sign << 31;
	register int exp;

#define	EXT_EXP(e)	((e) << 16)
/*
 * on m68k extended prec, significand does not share the same long
 * word with exponent
 */
#define	EXT_MASK	0
#define EXT_EXPLICIT1	(1UL << (63 & 31))
#define EXT_EXPLICIT2	(1UL << (64 & 31))
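/*
 * The 64-bit extended significand has an explicit integer bit and lives
 * entirely in res[1]/res[2], so no fraction bits share the exponent
 * word (EXT_MASK == 0).  EXT_EXPLICIT1 is significand bit 63 (the
 * integer bit, bit 31 of fp_mant[1]); EXT_EXPLICIT2 is bit 64 (bit 0 of
 * fp_mant[0]), the carry-out seen when rounding yields 2.0.
 */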

	if (ISNAN(fp)) {
		(void) fpu_shr(fp, FP_NMANT - EXT_FRACBITS);
		exp = EXT_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp)) {
		sign |= EXT_EXP(EXT_EXP_INFNAN);
		res[1] = res[2] = 0;
		return (sign);
	}
	if (ISZERO(fp)) {
		res[1] = res[2] = 0;
		return (sign);
	}

	if ((exp = fp->fp_exp + EXT_EXP_BIAS) < 0) {
		fe->fe_fpsr |= FPSR_UNFL;
		/* Note the < 0 above (not <= 0 as in fpu_ftos/fpu_ftod):
		   exp == 0 does not necessarily mean a denormal in the
		   extended format, which has an explicit integer bit */
		(void) fpu_shr(fp, FP_NMANT - FP_NG - EXT_FRACBITS - exp);
		if (fpu_round(fe, fp) && fp->fp_mant[1] == EXT_EXPLICIT1) {
			res[1] = res[2] = 0;
			return (sign | EXT_EXP(1) | 0);
		}
		if (fe->fe_fpsr & FPSR_INEX2)
			/* mc68881/2 don't underflow */
			fe->fe_fpsr |= FPSR_UNFL;
		exp = 0;
		goto done;
	}
#if (FP_NMANT - FP_NG - EXT_FRACBITS) > 0
	(void) fpu_shr(fp, FP_NMANT - FP_NG - EXT_FRACBITS);
#endif
	if (fpu_round(fe, fp) && fp->fp_mant[0] == EXT_EXPLICIT2)
		exp++;
	if (exp >= EXT_EXP_INFNAN) {
		fe->fe_fpsr |= FPSR_OPERR | FPSR_INEX2 | FPSR_OVFL;
		if (toinf(fe, sign)) {
			res[1] = res[2] = 0;
			return (sign | EXT_EXP(EXT_EXP_INFNAN) | 0);
		}
		res[1] = res[2] = ~0;
		return (sign | EXT_EXP(EXT_EXP_INFNAN) | EXT_MASK);
	}
done:
	res[1] = fp->fp_mant[1];
	res[2] = fp->fp_mant[2];
	return (sign | EXT_EXP(exp));
}

/*
 * Implode an fpn, writing the result into the given space.
 */
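/*
 * E.g. (illustrative), a double result occupies two words of space[]:
 * fpu_ftod() returns the sign/exponent/high-fraction word, stored in
 * space[0] below, and deposits the low fraction word in space[1]; the
 * extended form similarly fills space[1] and space[2].
 */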
void
fpu_implode(fe, fp, type, space)
	struct fpemu *fe;
	register struct fpn *fp;
	int type;
	register u_int *space;
{
	/* XXX Don't delete exceptions set here: fe->fe_fpsr &= ~FPSR_EXCP; */

	switch (type) {
	case FTYPE_LNG:
		space[0] = fpu_ftoi(fe, fp);
		break;

	case FTYPE_SNG:
		space[0] = fpu_ftos(fe, fp);
		break;

	case FTYPE_DBL:
		space[0] = fpu_ftod(fe, fp, space);
		break;

	case FTYPE_EXT:
		/* funky rounding precision options ?? */
		space[0] = fpu_ftox(fe, fp, space);
		break;

	default:
		panic("fpu_implode");
	}
}