xref: /netbsd-src/sys/arch/powerpc/fpu/fpu_implode.c (revision 8f5a4424de12726e89aef4f82f75ded907f22012)
1*8f5a4424Srin /*	$NetBSD: fpu_implode.c,v 1.24 2022/09/14 05:55:08 rin Exp $ */
218b2f7e6Ssimonb 
318b2f7e6Ssimonb /*
418b2f7e6Ssimonb  * Copyright (c) 1992, 1993
518b2f7e6Ssimonb  *	The Regents of the University of California.  All rights reserved.
618b2f7e6Ssimonb  *
718b2f7e6Ssimonb  * This software was developed by the Computer Systems Engineering group
818b2f7e6Ssimonb  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
918b2f7e6Ssimonb  * contributed to Berkeley.
1018b2f7e6Ssimonb  *
1118b2f7e6Ssimonb  * All advertising materials mentioning features or use of this software
1218b2f7e6Ssimonb  * must display the following acknowledgement:
1318b2f7e6Ssimonb  *	This product includes software developed by the University of
1418b2f7e6Ssimonb  *	California, Lawrence Berkeley Laboratory.
1518b2f7e6Ssimonb  *
1618b2f7e6Ssimonb  * Redistribution and use in source and binary forms, with or without
1718b2f7e6Ssimonb  * modification, are permitted provided that the following conditions
1818b2f7e6Ssimonb  * are met:
1918b2f7e6Ssimonb  * 1. Redistributions of source code must retain the above copyright
2018b2f7e6Ssimonb  *    notice, this list of conditions and the following disclaimer.
2118b2f7e6Ssimonb  * 2. Redistributions in binary form must reproduce the above copyright
2218b2f7e6Ssimonb  *    notice, this list of conditions and the following disclaimer in the
2318b2f7e6Ssimonb  *    documentation and/or other materials provided with the distribution.
24aad01611Sagc  * 3. Neither the name of the University nor the names of its contributors
2518b2f7e6Ssimonb  *    may be used to endorse or promote products derived from this software
2618b2f7e6Ssimonb  *    without specific prior written permission.
2718b2f7e6Ssimonb  *
2818b2f7e6Ssimonb  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
2918b2f7e6Ssimonb  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
3018b2f7e6Ssimonb  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
3118b2f7e6Ssimonb  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
3218b2f7e6Ssimonb  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
3318b2f7e6Ssimonb  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
3418b2f7e6Ssimonb  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
3518b2f7e6Ssimonb  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
3618b2f7e6Ssimonb  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
3718b2f7e6Ssimonb  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
3818b2f7e6Ssimonb  * SUCH DAMAGE.
3918b2f7e6Ssimonb  *
4018b2f7e6Ssimonb  *	@(#)fpu_implode.c	8.1 (Berkeley) 6/11/93
4118b2f7e6Ssimonb  */
4218b2f7e6Ssimonb 
4318b2f7e6Ssimonb /*
4418b2f7e6Ssimonb  * FPU subroutines: `implode' internal format numbers into the machine's
4518b2f7e6Ssimonb  * `packed binary' format.
4618b2f7e6Ssimonb  */
4718b2f7e6Ssimonb 
48ed517291Slukem #include <sys/cdefs.h>
49*8f5a4424Srin __KERNEL_RCSID(0, "$NetBSD: fpu_implode.c,v 1.24 2022/09/14 05:55:08 rin Exp $");
50ed517291Slukem 
5118b2f7e6Ssimonb #include <sys/types.h>
5218b2f7e6Ssimonb #include <sys/systm.h>
5318b2f7e6Ssimonb 
5418b2f7e6Ssimonb #include <powerpc/instr.h>
5518b2f7e6Ssimonb #include <machine/fpu.h>
5606f65540Srin #include <machine/ieee.h>
5706f65540Srin #include <machine/reg.h>
5818b2f7e6Ssimonb 
5918b2f7e6Ssimonb #include <powerpc/fpu/fpu_arith.h>
6018b2f7e6Ssimonb #include <powerpc/fpu/fpu_emu.h>
6118b2f7e6Ssimonb #include <powerpc/fpu/fpu_extern.h>
6218b2f7e6Ssimonb 
6341cf117bSrin static int round(struct fpemu *, struct fpn *, int *);
6418b2f7e6Ssimonb static int toinf(struct fpemu *, int);
652acd85f5Srin static int round_int(struct fpn *, int *, int, int, int);
6618b2f7e6Ssimonb 
6741cf117bSrin static u_int fpu_ftoi(struct fpemu *, struct fpn *, int *, int);
6841cf117bSrin static uint64_t fpu_ftox(struct fpemu *, struct fpn *, int *, int);
6941cf117bSrin static u_int fpu_ftos(struct fpemu *, struct fpn *, int *);
7041cf117bSrin static uint64_t fpu_ftod(struct fpemu *, struct fpn *, int *);
7162f3100bSrin 
7218b2f7e6Ssimonb /*
7318b2f7e6Ssimonb  * Round a number (algorithm from Motorola MC68882 manual, modified for
7418b2f7e6Ssimonb  * our internal format).  Set inexact exception if rounding is required.
7518b2f7e6Ssimonb  * Return true iff we rounded up.
7618b2f7e6Ssimonb  *
7718b2f7e6Ssimonb  * After rounding, we discard the guard and round bits by shifting right
7818b2f7e6Ssimonb  * 2 bits (a la fpu_shr(), but we do not bother with fp->fp_sticky).
7918b2f7e6Ssimonb  * This saves effort later.
8018b2f7e6Ssimonb  *
8118b2f7e6Ssimonb  * Note that we may leave the value 2.0 in fp->fp_mant; it is the caller's
8218b2f7e6Ssimonb  * responsibility to fix this if necessary.
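8218b2f7e6Ssimonb  *
8218b2f7e6Ssimonb  * FPSCR_FI (inexact) and FPSCR_FR (fraction rounded) are reported
8218b2f7e6Ssimonb  * through *cx rather than written to the FPSCR directly; the caller
8218b2f7e6Ssimonb  * decides whether to commit them.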
8318b2f7e6Ssimonb  */
8418b2f7e6Ssimonb static int
8541cf117bSrin round(struct fpemu *fe, struct fpn *fp, int *cx)
8618b2f7e6Ssimonb {
8718b2f7e6Ssimonb 	u_int m0, m1, m2, m3;
8818b2f7e6Ssimonb 	int gr, s;
8918b2f7e6Ssimonb 	FPU_DECL_CARRY;
9018b2f7e6Ssimonb 
9118b2f7e6Ssimonb 	m0 = fp->fp_mant[0];
9218b2f7e6Ssimonb 	m1 = fp->fp_mant[1];
9318b2f7e6Ssimonb 	m2 = fp->fp_mant[2];
9418b2f7e6Ssimonb 	m3 = fp->fp_mant[3];
9518b2f7e6Ssimonb 	gr = m3 & 3;
9618b2f7e6Ssimonb 	s = fp->fp_sticky;
9718b2f7e6Ssimonb 
9818b2f7e6Ssimonb 	/* mant >>= FP_NG */
9918b2f7e6Ssimonb 	m3 = (m3 >> FP_NG) | (m2 << (32 - FP_NG));
10018b2f7e6Ssimonb 	m2 = (m2 >> FP_NG) | (m1 << (32 - FP_NG));
10118b2f7e6Ssimonb 	m1 = (m1 >> FP_NG) | (m0 << (32 - FP_NG));
10218b2f7e6Ssimonb 	m0 >>= FP_NG;
10318b2f7e6Ssimonb 
10418b2f7e6Ssimonb 	if ((gr | s) == 0)	/* result is exact: no rounding needed */
10518b2f7e6Ssimonb 		goto rounddown;
10618b2f7e6Ssimonb 
10741cf117bSrin 	*cx |= FPSCR_FI;	/* inexact */
10818b2f7e6Ssimonb 
10918b2f7e6Ssimonb 	/* Go to rounddown to round down; break to round up. */
11018b2f7e6Ssimonb 	switch ((fe->fe_fpscr) & FPSCR_RN) {
11118b2f7e6Ssimonb 
11218b2f7e6Ssimonb 	case FSR_RD_RN:
11318b2f7e6Ssimonb 	default:
11418b2f7e6Ssimonb 		/*
11518b2f7e6Ssimonb 		 * Round only if guard is set (gr & 2).  If guard is set,
11618b2f7e6Ssimonb 		 * but round & sticky both clear, then we want to round
11718b2f7e6Ssimonb 		 * but have a tie, so round to even, i.e., add 1 iff odd.
11818b2f7e6Ssimonb 		 */
11918b2f7e6Ssimonb 		if ((gr & 2) == 0)
12018b2f7e6Ssimonb 			goto rounddown;
12118b2f7e6Ssimonb 		if ((gr & 1) || fp->fp_sticky || (m3 & 1))
12218b2f7e6Ssimonb 			break;
12318b2f7e6Ssimonb 		goto rounddown;
12418b2f7e6Ssimonb 
12518b2f7e6Ssimonb 	case FSR_RD_RZ:
12618b2f7e6Ssimonb 		/* Round towards zero, i.e., down. */
12718b2f7e6Ssimonb 		goto rounddown;
12818b2f7e6Ssimonb 
12918b2f7e6Ssimonb 	case FSR_RD_RM:
13018b2f7e6Ssimonb 		/* Round towards -Inf: up if negative, down if positive. */
13118b2f7e6Ssimonb 		if (fp->fp_sign)
13218b2f7e6Ssimonb 			break;
13318b2f7e6Ssimonb 		goto rounddown;
13418b2f7e6Ssimonb 
13518b2f7e6Ssimonb 	case FSR_RD_RP:
13618b2f7e6Ssimonb 		/* Round towards +Inf: up if positive, down otherwise. */
13718b2f7e6Ssimonb 		if (!fp->fp_sign)
13818b2f7e6Ssimonb 			break;
13918b2f7e6Ssimonb 		goto rounddown;
14018b2f7e6Ssimonb 	}
14118b2f7e6Ssimonb 
14218b2f7e6Ssimonb 	/* Bump low bit of mantissa, with carry. */
14341cf117bSrin 	*cx |= FPSCR_FR;
14418b2f7e6Ssimonb 
14518b2f7e6Ssimonb 	FPU_ADDS(m3, m3, 1);
14618b2f7e6Ssimonb 	FPU_ADDCS(m2, m2, 0);
14718b2f7e6Ssimonb 	FPU_ADDCS(m1, m1, 0);
14818b2f7e6Ssimonb 	FPU_ADDC(m0, m0, 0);
14918b2f7e6Ssimonb 	fp->fp_mant[0] = m0;
15018b2f7e6Ssimonb 	fp->fp_mant[1] = m1;
15118b2f7e6Ssimonb 	fp->fp_mant[2] = m2;
15218b2f7e6Ssimonb 	fp->fp_mant[3] = m3;
15318b2f7e6Ssimonb 	return (1);
15418b2f7e6Ssimonb 
15518b2f7e6Ssimonb rounddown:
15618b2f7e6Ssimonb 	fp->fp_mant[0] = m0;
15718b2f7e6Ssimonb 	fp->fp_mant[1] = m1;
15818b2f7e6Ssimonb 	fp->fp_mant[2] = m2;
15918b2f7e6Ssimonb 	fp->fp_mant[3] = m3;
16018b2f7e6Ssimonb 	return (0);
16118b2f7e6Ssimonb }
16218b2f7e6Ssimonb 
16318b2f7e6Ssimonb /*
16418b2f7e6Ssimonb  * For overflow: return true if overflow is to go to +/-Inf, according
16518b2f7e6Ssimonb  * to the sign of the overflowing result.  If false, overflow is to go
16618b2f7e6Ssimonb  * to the largest magnitude value instead.
16718b2f7e6Ssimonb  */
16818b2f7e6Ssimonb static int
16918b2f7e6Ssimonb toinf(struct fpemu *fe, int sign)
17018b2f7e6Ssimonb {
17118b2f7e6Ssimonb 	int inf;
17218b2f7e6Ssimonb 
17318b2f7e6Ssimonb 	/* look at rounding direction */
17418b2f7e6Ssimonb 	switch ((fe->fe_fpscr) & FPSCR_RN) {
17518b2f7e6Ssimonb 
17618b2f7e6Ssimonb 	default:
17718b2f7e6Ssimonb 	case FSR_RD_RN:		/* the nearest value is always Inf */
17818b2f7e6Ssimonb 		inf = 1;
17918b2f7e6Ssimonb 		break;
18018b2f7e6Ssimonb 
18118b2f7e6Ssimonb 	case FSR_RD_RZ:		/* toward 0 => never towards Inf */
18218b2f7e6Ssimonb 		inf = 0;
18318b2f7e6Ssimonb 		break;
18418b2f7e6Ssimonb 
18518b2f7e6Ssimonb 	case FSR_RD_RP:		/* toward +Inf iff positive */
18618b2f7e6Ssimonb 		inf = sign == 0;
18718b2f7e6Ssimonb 		break;
18818b2f7e6Ssimonb 
18918b2f7e6Ssimonb 	case FSR_RD_RM:		/* toward -Inf iff negative */
19018b2f7e6Ssimonb 		inf = sign;
19118b2f7e6Ssimonb 		break;
19218b2f7e6Ssimonb 	}
19318b2f7e6Ssimonb 	return (inf);
19418b2f7e6Ssimonb }
19518b2f7e6Ssimonb 
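/*
 * Round the integer result of a conversion.  On entry the caller has
 * shifted the value so that bit 31 of fp_mant[3] is the first discarded
 * (guard) bit; the remaining bits of fp_mant[3] and fp_sticky act as
 * round/sticky.  If any discarded bit is set, raise FPSCR_FI in *cx.
 * Return 1 (and raise FPSCR_FR) if the integer must be incremented
 * under rounding mode rn; "odd" is the integer's low bit, used to
 * break round-to-nearest ties.
 */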
1962acd85f5Srin static int
1972acd85f5Srin round_int(struct fpn *fp, int *cx, int rn, int sign, int odd)
1982acd85f5Srin {
1992acd85f5Srin 	int g, rs;
2002acd85f5Srin 
2012acd85f5Srin 	g =   fp->fp_mant[3] & 0x80000000;
2022acd85f5Srin 	rs = (fp->fp_mant[3] & 0x7fffffff) | fp->fp_sticky;
2032acd85f5Srin 
2042acd85f5Srin 	if ((g | rs) == 0)
2052acd85f5Srin 		return 0;	/* exact */
2062acd85f5Srin 
20741cf117bSrin 	*cx |= FPSCR_FI;
2082acd85f5Srin 
2092acd85f5Srin 	switch (rn) {
2102acd85f5Srin 	case FSR_RD_RN:
2112acd85f5Srin 		if (g && (rs | odd))
2122acd85f5Srin 			break;
2132acd85f5Srin 		return 0;
2142acd85f5Srin 	case FSR_RD_RZ:
2152acd85f5Srin 		return 0;
2162acd85f5Srin 	case FSR_RD_RP:
2172acd85f5Srin 		if (!sign)
2182acd85f5Srin 			break;
2192acd85f5Srin 		return 0;
2202acd85f5Srin 	case FSR_RD_RM:
2212acd85f5Srin 		if (sign)
2222acd85f5Srin 			break;
2232acd85f5Srin 		return 0;
2242acd85f5Srin 	}
2252acd85f5Srin 
2262acd85f5Srin 	*cx |= FPSCR_FR;
2272acd85f5Srin 	return 1;
2282acd85f5Srin }
2292acd85f5Srin 
23018b2f7e6Ssimonb /*
23118b2f7e6Ssimonb  * fpn -> int (int value returned as return value).
23218b2f7e6Ssimonb  */
23362f3100bSrin static u_int
23441cf117bSrin fpu_ftoi(struct fpemu *fe, struct fpn *fp, int *cx, int rn)
23518b2f7e6Ssimonb {
23618b2f7e6Ssimonb 	u_int i;
23741cf117bSrin 	int sign, exp, tmp_cx;
23818b2f7e6Ssimonb 
23918b2f7e6Ssimonb 	sign = fp->fp_sign;
24018b2f7e6Ssimonb 	switch (fp->fp_class) {
2412acd85f5Srin 	case FPC_SNAN:
24241cf117bSrin 		*cx |= FPSCR_VXSNAN;
2432acd85f5Srin 		/* FALLTHROUGH */
2442acd85f5Srin 	case FPC_QNAN:
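		/*
		 * Force the sign so that the overflow path below returns
		 * 0x80000000, the result used for NaN conversions.
		 */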
2452acd85f5Srin 		sign = 1;
2462acd85f5Srin 		break;
24718b2f7e6Ssimonb 
24818b2f7e6Ssimonb 	case FPC_ZERO:
24918b2f7e6Ssimonb 		return (0);
25018b2f7e6Ssimonb 
25118b2f7e6Ssimonb 	case FPC_NUM:
25218b2f7e6Ssimonb 		/*
25318b2f7e6Ssimonb 		 * If the value is >= 2^32, overflow.  Otherwise shift it right
25418b2f7e6Ssimonb 		 * so that the integer part lands in the third mantissa word
25518b2f7e6Ssimonb 		 * (it cannot exceed 0xffffffff), leaving the discarded bits as
25618b2f7e6Ssimonb 		 * guard, round and sticky.  Then round according to the current
25718b2f7e6Ssimonb 		 * rounding mode (see round_int()), which also raises the inexact
25818b2f7e6Ssimonb 		 * exception if any bit was discarded.  If the result is
25918b2f7e6Ssimonb 		 * > 0x80000000, or is positive and equals 0x80000000, overflow;
26018b2f7e6Ssimonb 		 * otherwise that word is the result.
26118b2f7e6Ssimonb 		 */
26218b2f7e6Ssimonb 		if ((exp = fp->fp_exp) >= 32)
26318b2f7e6Ssimonb 			break;
26418b2f7e6Ssimonb 		/* NB: the following includes exp < 0 cases */
2652acd85f5Srin 		(void)fpu_shr(fp, FP_NMANT - 32 - 1 - exp);
2662a0258e4Srin 		i = fp->fp_mant[2];
26741cf117bSrin 		tmp_cx = 0;
26841cf117bSrin 		i += round_int(fp, &tmp_cx, rn, sign, i & 1);
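		/* Positive results are limited to 0x7fffffff, negative to 0x80000000. */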
26918b2f7e6Ssimonb 		if (i >= ((u_int)0x80000000 + sign))
27018b2f7e6Ssimonb 			break;
27141cf117bSrin 		*cx |= tmp_cx;
27218b2f7e6Ssimonb 		return (sign ? -i : i);
27318b2f7e6Ssimonb 
2742acd85f5Srin 	case FPC_INF:
27518b2f7e6Ssimonb 		break;
27618b2f7e6Ssimonb 	}
27718b2f7e6Ssimonb 	/* overflow: replace any inexact exception with invalid */
27841cf117bSrin 	*cx |= FPSCR_VXCVI;
27918b2f7e6Ssimonb 	return (0x7fffffff + sign);
28018b2f7e6Ssimonb }
28118b2f7e6Ssimonb 
28218b2f7e6Ssimonb /*
28318b2f7e6Ssimonb  * fpn -> extended (64-bit) int (whole value returned as return value).
28418b2f7e6Ssimonb  */
28562f3100bSrin static uint64_t
28641cf117bSrin fpu_ftox(struct fpemu *fe, struct fpn *fp, int *cx, int rn)
28718b2f7e6Ssimonb {
2887a813560Srin 	uint64_t i;
28941cf117bSrin 	int sign, exp, tmp_cx;
29018b2f7e6Ssimonb 
29118b2f7e6Ssimonb 	sign = fp->fp_sign;
29218b2f7e6Ssimonb 	switch (fp->fp_class) {
2932acd85f5Srin 	case FPC_SNAN:
29441cf117bSrin 		*cx |= FPSCR_VXSNAN;
2952acd85f5Srin 		/* FALLTHROUGH */
2962acd85f5Srin 	case FPC_QNAN:
2972acd85f5Srin 		sign = 1;
2982acd85f5Srin 		break;
29918b2f7e6Ssimonb 
30018b2f7e6Ssimonb 	case FPC_ZERO:
30118b2f7e6Ssimonb 		return (0);
30218b2f7e6Ssimonb 
30318b2f7e6Ssimonb 	case FPC_NUM:
30418b2f7e6Ssimonb 		/*
30518b2f7e6Ssimonb 		 * If the value is >= 2^64, overflow.  Otherwise shift it right
30618b2f7e6Ssimonb 		 * so that the integer part lands in the second and third mantissa
30718b2f7e6Ssimonb 		 * words (it cannot exceed 0xffffffffffffffff), leaving the
30818b2f7e6Ssimonb 		 * discarded bits as guard, round and sticky.  Then round according
30918b2f7e6Ssimonb 		 * to the current rounding mode (see round_int()), which also raises
31018b2f7e6Ssimonb 		 * the inexact exception if any bit was discarded.  If the result is
31118b2f7e6Ssimonb 		 * > 0x8000000000000000, or is positive and equals 0x8000000000000000,
31218b2f7e6Ssimonb 		 * overflow; otherwise those words are the result.
31318b2f7e6Ssimonb 		 */
31418b2f7e6Ssimonb 		if ((exp = fp->fp_exp) >= 64)
31518b2f7e6Ssimonb 			break;
31618b2f7e6Ssimonb 		/* NB: the following includes exp < 0 cases */
3172acd85f5Srin 		(void)fpu_shr(fp, FP_NMANT - 32 - 1 - exp);
3182a0258e4Srin 		i = ((uint64_t)fp->fp_mant[1] << 32) | fp->fp_mant[2];
31941cf117bSrin 		tmp_cx = 0;
32041cf117bSrin 		i += round_int(fp, &tmp_cx, rn, sign, i & 1);
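		/*
		 * Positive results are limited to 0x7fffffffffffffff,
		 * negative to 0x8000000000000000.
		 */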
3217a813560Srin 		if (i >= ((uint64_t)0x8000000000000000LL + sign))
32218b2f7e6Ssimonb 			break;
32341cf117bSrin 		*cx |= tmp_cx;
32418b2f7e6Ssimonb 		return (sign ? -i : i);
32518b2f7e6Ssimonb 
3262acd85f5Srin 	case FPC_INF:
32718b2f7e6Ssimonb 		break;
32818b2f7e6Ssimonb 	}
32918b2f7e6Ssimonb 	/* overflow: replace any inexact exception with invalid */
33041cf117bSrin 	*cx |= FPSCR_VXCVI;
33118b2f7e6Ssimonb 	return (0x7fffffffffffffffLL + sign);
33218b2f7e6Ssimonb }
33318b2f7e6Ssimonb 
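/*
 * Map the sign of a finite, non-zero result to its FPSCR FPRF class
 * bit: FPSCR_FL (less than) if negative, FPSCR_FG (greater than) if
 * positive.
 */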
3345ef7ca99Srin #define	FPRF_SIGN(sign)	((sign) ? FPSCR_FL : FPSCR_FG)
3355ef7ca99Srin 
33618b2f7e6Ssimonb /*
33718b2f7e6Ssimonb  * fpn -> single (32 bit single returned as return value).
33818b2f7e6Ssimonb  * We assume <= 29 bits in a single-precision fraction (1.f part).
33918b2f7e6Ssimonb  */
34062f3100bSrin static u_int
34141cf117bSrin fpu_ftos(struct fpemu *fe, struct fpn *fp, int *cx)
34218b2f7e6Ssimonb {
34318b2f7e6Ssimonb 	u_int sign = fp->fp_sign << 31;
34418b2f7e6Ssimonb 	int exp;
34518b2f7e6Ssimonb 
34618b2f7e6Ssimonb #define	SNG_EXP(e)	((e) << SNG_FRACBITS)	/* makes e an exponent */
34718b2f7e6Ssimonb #define	SNG_MASK	(SNG_EXP(1) - 1)	/* mask for fraction */
34818b2f7e6Ssimonb 
34918b2f7e6Ssimonb 	/* Take care of non-numbers first. */
35018b2f7e6Ssimonb 	if (ISNAN(fp)) {
35141cf117bSrin 		*cx |= FPSCR_C | FPSCR_FU;
35218b2f7e6Ssimonb 		/*
35318b2f7e6Ssimonb 		 * Preserve upper bits of NaN, per SPARC V8 appendix N.
35418b2f7e6Ssimonb 		 * Note that fp->fp_mant[0] has the quiet bit set,
35518b2f7e6Ssimonb 		 * even if it is classified as a signalling NaN.
35618b2f7e6Ssimonb 		 */
35718b2f7e6Ssimonb 		(void) fpu_shr(fp, FP_NMANT - 1 - SNG_FRACBITS);
35818b2f7e6Ssimonb 		exp = SNG_EXP_INFNAN;
35918b2f7e6Ssimonb 		goto done;
36018b2f7e6Ssimonb 	}
3615ef7ca99Srin 	if (ISINF(fp)) {
36241cf117bSrin 		*cx |= FPRF_SIGN(sign) | FPSCR_FU;
36318b2f7e6Ssimonb 		return (sign | SNG_EXP(SNG_EXP_INFNAN));
3645ef7ca99Srin 	}
3655ef7ca99Srin 	if (ISZERO(fp)) {
36641cf117bSrin 		*cx |= FPSCR_FE;
3675ef7ca99Srin 		if (sign)
36841cf117bSrin 			*cx |= FPSCR_C;
36918b2f7e6Ssimonb 		return (sign);
3705ef7ca99Srin 	}
37118b2f7e6Ssimonb 
37218b2f7e6Ssimonb 	/*
37318b2f7e6Ssimonb 	 * Normals (including subnormals).  Drop all the fraction bits
37418b2f7e6Ssimonb 	 * (including the explicit ``implied'' 1 bit) down into the
37518b2f7e6Ssimonb 	 * single-precision range.  If the number is subnormal, move
37618b2f7e6Ssimonb 	 * the ``implied'' 1 into the explicit range as well, and shift
37718b2f7e6Ssimonb 	 * right to introduce leading zeroes.  Rounding then acts
37818b2f7e6Ssimonb 	 * differently for normals and subnormals: the largest subnormal
37918b2f7e6Ssimonb 	 * may round to the smallest normal (1.0 x 2^minexp), or may
38018b2f7e6Ssimonb 	 * remain subnormal.  In the latter case, signal an underflow
38118b2f7e6Ssimonb 	 * if the result was inexact or if underflow traps are enabled.
38218b2f7e6Ssimonb 	 *
38318b2f7e6Ssimonb 	 * Rounding a normal, on the other hand, always produces another
38418b2f7e6Ssimonb 	 * normal (although either way the result might be too big for
38518b2f7e6Ssimonb 	 * single precision, and cause an overflow).  If rounding a
38618b2f7e6Ssimonb 	 * normal produces 2.0 in the fraction, we need not adjust that
38718b2f7e6Ssimonb 	 * fraction at all, since both 1.0 and 2.0 are zero under the
38818b2f7e6Ssimonb 	 * fraction mask.
38918b2f7e6Ssimonb 	 *
39018b2f7e6Ssimonb 	 * Note that the guard and round bits vanish from the number after
39118b2f7e6Ssimonb 	 * rounding.
39218b2f7e6Ssimonb 	 */
39318b2f7e6Ssimonb 	if ((exp = fp->fp_exp + SNG_EXP_BIAS) <= 0) {	/* subnormal */
39418b2f7e6Ssimonb 		/* -NG for g,r; -SNG_FRACBITS-exp for fraction */
39518b2f7e6Ssimonb 		(void) fpu_shr(fp, FP_NMANT - FP_NG - SNG_FRACBITS - exp);
39641cf117bSrin 		if (round(fe, fp, cx) && fp->fp_mant[3] == SNG_EXP(1)) {
39741cf117bSrin 			*cx |= FPRF_SIGN(sign);
39818b2f7e6Ssimonb 			return (sign | SNG_EXP(1) | 0);
3995ef7ca99Srin 		}
400810c1964Srin 		if (*cx & FPSCR_FI) {
40141cf117bSrin 			*cx |= FPSCR_UX;
402810c1964Srin 			if (fp->fp_mant[3] == 0) {
403810c1964Srin 				*cx |= FPSCR_FE;
404810c1964Srin 				return sign;
405810c1964Srin 			}
406810c1964Srin 		}
407810c1964Srin 		*cx |= FPSCR_C | FPRF_SIGN(sign);
40818b2f7e6Ssimonb 		return (sign | SNG_EXP(0) | fp->fp_mant[3]);
40918b2f7e6Ssimonb 	}
41018b2f7e6Ssimonb 	/* -FP_NG for g,r; -1 for implied 1; -SNG_FRACBITS for fraction */
41118b2f7e6Ssimonb 	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - SNG_FRACBITS);
41218b2f7e6Ssimonb #ifdef DIAGNOSTIC
41318b2f7e6Ssimonb 	if ((fp->fp_mant[3] & SNG_EXP(1 << FP_NG)) == 0)
41418b2f7e6Ssimonb 		panic("fpu_ftos");
41518b2f7e6Ssimonb #endif
41641cf117bSrin 	if (round(fe, fp, cx) && fp->fp_mant[3] == SNG_EXP(2))
41718b2f7e6Ssimonb 		exp++;
41818b2f7e6Ssimonb 	if (exp >= SNG_EXP_INFNAN) {
419*8f5a4424Srin 		*cx |= FPSCR_OX | FPSCR_FI;
42018b2f7e6Ssimonb 		/* overflow to inf or to max single */
4215ef7ca99Srin 		if (toinf(fe, sign)) {
42241cf117bSrin 			*cx |= FPRF_SIGN(sign) | FPSCR_FU;
42318b2f7e6Ssimonb 			return (sign | SNG_EXP(SNG_EXP_INFNAN));
4245ef7ca99Srin 		}
42541cf117bSrin 		*cx |= FPRF_SIGN(sign);
42618b2f7e6Ssimonb 		return (sign | SNG_EXP(SNG_EXP_INFNAN - 1) | SNG_MASK);
42718b2f7e6Ssimonb 	}
42841cf117bSrin 	*cx |= FPRF_SIGN(sign);
42918b2f7e6Ssimonb done:
43018b2f7e6Ssimonb 	/* phew, made it */
43118b2f7e6Ssimonb 	return (sign | SNG_EXP(exp) | (fp->fp_mant[3] & SNG_MASK));
43218b2f7e6Ssimonb }
43318b2f7e6Ssimonb 
43418b2f7e6Ssimonb /*
435f7b84308Srin  * fpn -> double.  Assumes <= 61 bits in double precision fraction.
43618b2f7e6Ssimonb  *
43718b2f7e6Ssimonb  * This code mimics fpu_ftos; see it for comments.
43818b2f7e6Ssimonb  */
439f7b84308Srin static uint64_t
44041cf117bSrin fpu_ftod(struct fpemu *fe, struct fpn *fp, int *cx)
44118b2f7e6Ssimonb {
44218b2f7e6Ssimonb 	u_int sign = fp->fp_sign << 31;
44318b2f7e6Ssimonb 	int exp;
44418b2f7e6Ssimonb 
44518b2f7e6Ssimonb #define	DBL_EXP(e)	((e) << (DBL_FRACBITS & 31))
44618b2f7e6Ssimonb #define	DBL_MASK	(DBL_EXP(1) - 1)
447f7b84308Srin #define	HI_WORD(i)	((uint64_t)(i) << 32)
448f7b84308Srin #define	LO_WORD(i)	((uint32_t)(i))
44918b2f7e6Ssimonb 
45018b2f7e6Ssimonb 	if (ISNAN(fp)) {
45141cf117bSrin 		*cx |= FPSCR_C | FPSCR_FU;
45218b2f7e6Ssimonb 		(void) fpu_shr(fp, FP_NMANT - 1 - DBL_FRACBITS);
45318b2f7e6Ssimonb 		exp = DBL_EXP_INFNAN;
45418b2f7e6Ssimonb 		goto done;
45518b2f7e6Ssimonb 	}
45618b2f7e6Ssimonb 	if (ISINF(fp)) {
45741cf117bSrin 		*cx |= FPRF_SIGN(sign) | FPSCR_FU;
458b8d99318Srin 		return HI_WORD(sign | DBL_EXP(DBL_EXP_INFNAN));
45918b2f7e6Ssimonb 	}
46018b2f7e6Ssimonb 	if (ISZERO(fp)) {
46141cf117bSrin 		*cx |= FPSCR_FE;
4625ef7ca99Srin 		if (sign)
46341cf117bSrin 			*cx |= FPSCR_C;
464b8d99318Srin 		return HI_WORD(sign);
46518b2f7e6Ssimonb 	}
46618b2f7e6Ssimonb 
46718b2f7e6Ssimonb 	if ((exp = fp->fp_exp + DBL_EXP_BIAS) <= 0) {
46818b2f7e6Ssimonb 		(void) fpu_shr(fp, FP_NMANT - FP_NG - DBL_FRACBITS - exp);
46941cf117bSrin 		if (round(fe, fp, cx) && fp->fp_mant[2] == DBL_EXP(1)) {
47041cf117bSrin 			*cx |= FPRF_SIGN(sign);
471f7b84308Srin 			return HI_WORD(sign | DBL_EXP(1) | 0);
47218b2f7e6Ssimonb 		}
473810c1964Srin 		if (*cx & FPSCR_FI) {
47441cf117bSrin 			*cx |= FPSCR_UX;
475810c1964Srin 			if ((fp->fp_mant[2] & DBL_MASK) == 0 &&
476810c1964Srin 			     fp->fp_mant[3] == 0) {
477810c1964Srin 				*cx |= FPSCR_FE;
478810c1964Srin 				return HI_WORD(sign);
479810c1964Srin 			}
480810c1964Srin 		}
481810c1964Srin 		*cx |= FPSCR_C | FPRF_SIGN(sign);
48218b2f7e6Ssimonb 		exp = 0;
48318b2f7e6Ssimonb 		goto done;
48418b2f7e6Ssimonb 	}
48518b2f7e6Ssimonb 	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - DBL_FRACBITS);
48641cf117bSrin 	if (round(fe, fp, cx) && fp->fp_mant[2] == DBL_EXP(2))
48718b2f7e6Ssimonb 		exp++;
48818b2f7e6Ssimonb 	if (exp >= DBL_EXP_INFNAN) {
489*8f5a4424Srin 		*cx |= FPSCR_OX | FPSCR_FI;
49041cf117bSrin 		/* overflow to inf or to max double */
49118b2f7e6Ssimonb 		if (toinf(fe, sign)) {
49241cf117bSrin 			*cx |= FPRF_SIGN(sign) | FPSCR_FU;
493f7b84308Srin 			return HI_WORD(sign | DBL_EXP(DBL_EXP_INFNAN) | 0);
49418b2f7e6Ssimonb 		}
49541cf117bSrin 		*cx |= FPRF_SIGN(sign);
496dbd91bbfSrin 		return HI_WORD(sign | DBL_EXP(DBL_EXP_INFNAN - 1) | DBL_MASK) |
497f7b84308Srin 		       LO_WORD(~0);
49818b2f7e6Ssimonb 	}
49941cf117bSrin 	*cx |= FPRF_SIGN(sign);
50018b2f7e6Ssimonb done:
501f7b84308Srin 	return HI_WORD(sign | DBL_EXP(exp) | (fp->fp_mant[2] & DBL_MASK)) |
502f7b84308Srin 	       LO_WORD(fp->fp_mant[3]);
50318b2f7e6Ssimonb }
50418b2f7e6Ssimonb 
50518b2f7e6Ssimonb /*
50618b2f7e6Ssimonb  * Implode an fpn, writing the result into the given space.
50718b2f7e6Ssimonb  */
50818b2f7e6Ssimonb void
509e41cdfd4Srin fpu_implode(struct fpemu *fe, struct fpn *fp, int type, uint64_t *p)
51018b2f7e6Ssimonb {
511e41cdfd4Srin 	u_int *hi, *lo;
51241cf117bSrin 	int cx, rn;
51341cf117bSrin 	bool fpscr;
5142a0258e4Srin 
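	/*
	 * View the 64-bit destination as two 32-bit words, most
	 * significant first (big-endian word order): hi gets the upper
	 * half, lo the lower.
	 */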
515e41cdfd4Srin 	hi = (u_int *)p;
516e41cdfd4Srin 	lo = hi + 1;
517e41cdfd4Srin 
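	/*
	 * FTYPE_RD_RZ forces truncation (round toward zero) regardless of
	 * FPSCR[RN]; FTYPE_FPSCR marks operations whose status bits are
	 * committed to the FPSCR below.  The flag bits are then stripped
	 * to leave the bare result type.
	 */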
5182a0258e4Srin 	if (type & FTYPE_RD_RZ)
5192a0258e4Srin 		rn = FSR_RD_RZ;
5202a0258e4Srin 	else
5212a0258e4Srin 		rn = fe->fe_fpscr & FPSCR_RN;
52241cf117bSrin 	fpscr = type & FTYPE_FPSCR;
5235ef7ca99Srin 	type &= ~FTYPE_FLAG_MASK;
52418b2f7e6Ssimonb 
52541cf117bSrin 	cx = 0;
52618b2f7e6Ssimonb 	switch (type) {
52718b2f7e6Ssimonb 
52818b2f7e6Ssimonb 	case FTYPE_LNG:
5295ef7ca99Srin 		/* FPRF is undefined. */
53041cf117bSrin 		*p = fpu_ftox(fe, fp, &cx, rn);
53102b5fd10Srin 		DPRINTF(FPE_REG, ("fpu_implode: long %x %x\n", *hi, *lo));
53218b2f7e6Ssimonb 		break;
53318b2f7e6Ssimonb 
53418b2f7e6Ssimonb 	case FTYPE_INT:
5355ef7ca99Srin 		/* FPRF is undefined. */
536e41cdfd4Srin 		*hi = 0;
53741cf117bSrin 		*lo = fpu_ftoi(fe, fp, &cx, rn);
53802b5fd10Srin 		DPRINTF(FPE_REG, ("fpu_implode: int %x\n", *lo));
53918b2f7e6Ssimonb 		break;
54018b2f7e6Ssimonb 
54118b2f7e6Ssimonb 	case FTYPE_SNG:
54241cf117bSrin 		*hi = fpu_ftos(fe, fp, &cx);
543cc4d5728Srin 		*lo = 0;
54402b5fd10Srin 		DPRINTF(FPE_REG, ("fpu_implode: single %x\n", *hi));
54518b2f7e6Ssimonb 		break;
54618b2f7e6Ssimonb 
54718b2f7e6Ssimonb 	case FTYPE_DBL:
54841cf117bSrin 		*p = fpu_ftod(fe, fp, &cx);
54902b5fd10Srin 		DPRINTF(FPE_REG, ("fpu_implode: double %x %x\n", *hi, *lo));
5506b9809a8Srin 		break;
55118b2f7e6Ssimonb 
55218b2f7e6Ssimonb 	default:
55318b2f7e6Ssimonb 		panic("fpu_implode: invalid type %d", type);
55418b2f7e6Ssimonb 	}
55541cf117bSrin 
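	/*
	 * Commit the accumulated status only if this operation updates
	 * the FPSCR: clear the previous FR/FI/FPRF in the emulated FPSCR,
	 * accumulate the new bits into fe_cx, and set the sticky inexact
	 * exception (FPSCR_XX) if the result was inexact.
	 */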
55641cf117bSrin 	if (fpscr) {
55741cf117bSrin 		fe->fe_fpscr &= ~(FPSCR_FR | FPSCR_FI | FPSCR_FPRF);
55841cf117bSrin 		fe->fe_cx |= cx;
55941cf117bSrin 		if (cx & FPSCR_FI)
56041cf117bSrin 			fe->fe_cx |= FPSCR_XX;
56141cf117bSrin 	}
56218b2f7e6Ssimonb }
563