xref: /netbsd-src/sys/arch/sparc/fpu/fpu_explode.c (revision 481fca6e59249d8ffcf24fef7cfbe7b131bfb080)
1 /*	$NetBSD: fpu_explode.c,v 1.4 2000/06/18 06:54:17 mrg Exp $ */
2 
3 /*
4  * Copyright (c) 1992, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This software was developed by the Computer Systems Engineering group
8  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9  * contributed to Berkeley.
10  *
11  * All advertising materials mentioning features or use of this software
12  * must display the following acknowledgement:
13  *	This product includes software developed by the University of
14  *	California, Lawrence Berkeley Laboratory.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *	This product includes software developed by the University of
27  *	California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  *
44  *	@(#)fpu_explode.c	8.1 (Berkeley) 6/11/93
45  */
46 
47 /*
48  * FPU subroutines: `explode' the machine's `packed binary' format numbers
49  * into our internal format.
50  */
51 
52 #include <sys/types.h>
53 #include <sys/systm.h>
54 
55 #include <machine/ieee.h>
56 #include <machine/instr.h>
57 #include <machine/reg.h>
58 
59 #include <sparc/fpu/fpu_arith.h>
60 #include <sparc/fpu/fpu_emu.h>
61 #include <sparc/fpu/fpu_extern.h>
62 
63 /*
64  * N.B.: in all of the following, we assume the FP format is
65  *
66  *	---------------------------
67  *	| s | exponent | fraction |
68  *	---------------------------
69  *
70  * (which represents -1**s * 1.fraction * 2**exponent), so that the
71  * sign bit is way at the top (bit 31), the exponent is next, and
72  * then the remaining bits mark the fraction.  A zero exponent means
73  * zero or denormalized (0.fraction rather than 1.fraction), and the
74  * maximum possible exponent, 2bias+1, signals inf (fraction==0) or NaN.
75  *
76  * Since the sign bit is always the topmost bit---this holds even for
77  * integers---we set that outside all the *tof functions.  Each function
78  * returns the class code for the new number (but note that we use
79  * FPC_QNAN for all NaNs; fpu_explode will fix this if appropriate).
80  */
81 
82 /*
83  * int -> fpn.
84  */
85 int
86 fpu_itof(fp, i)
87 	register struct fpn *fp;
88 	register u_int i;
89 {
90 
91 	if (i == 0)
92 		return (FPC_ZERO);
93 	/*
94 	 * The value FP_1 represents 2^FP_LG, so set the exponent
95 	 * there and let normalization fix it up.  Convert negative
96 	 * numbers to sign-and-magnitude.  Note that this relies on
97 	 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
98 	 */
99 	fp->fp_exp = FP_LG;
100 	fp->fp_mant[0] = (int)i < 0 ? -i : i;
101 	fp->fp_mant[1] = 0;
102 	fp->fp_mant[2] = 0;
103 	fp->fp_mant[3] = 0;
104 	fpu_norm(fp);
105 	return (FPC_NUM);
106 }
107 
#ifdef SUN4U
/*
 * 64-bit int -> fpn.
 */
int
fpu_xitof(fp, i)
	register struct fpn *fp;
	register u_int64_t i;
{
	u_int64_t m;

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * The value FP_1 represents 2^FP_LG, so set the exponent
	 * there and let normalization fix it up.  Convert negative
	 * numbers to sign-and-magnitude.  Note that this relies on
	 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
	 */
	fp->fp_exp = FP_LG2;
	m = (int64_t)i < 0 ? -i : i;
	/*
	 * Store the magnitude as two explicit 32-bit words (high word
	 * first, matching the big-endian layout the old code relied on).
	 * The previous `*(int64_t *)fp->fp_mant' pun violated strict
	 * aliasing and assumed 64-bit alignment of fp_mant.
	 */
	fp->fp_mant[0] = (u_int)(m >> 32);
	fp->fp_mant[1] = (u_int)m;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	fpu_norm(fp);
	return (FPC_NUM);
}
#endif /* SUN4U */
134 
/* Bit mask covering the low `nbits' bits; nbits must be < 32. */
#define	mask(nbits) ((1 << (nbits)) - 1)

/*
 * All external floating formats convert to internal in the same manner,
 * as defined here.  Note that only normals get an implied 1.0 inserted.
 *
 * Arguments: `exp' is the biased exponent field, `expbias' the format's
 * exponent bias, `allfrac' the OR of every fraction bit (tested only
 * against zero), and f0..f3 the fraction pre-shifted into fp_mant[]
 * word positions.  The caller's `fp' is referenced directly.
 *
 * The three cases are: exp == 0 -> zero or denormal (0.fraction,
 * normalized via fpu_norm()); exp == max (2*bias+1) -> Inf or NaN
 * (all NaNs classed FPC_QNAN here; fpu_explode() refines to SNAN);
 * otherwise a normal number with the implied FP_1 bit inserted.
 *
 * The macro expands to return statements, so it must be the final
 * statement of the function that invokes it.
 */
#define	FP_TOF(exp, expbias, allfrac, f0, f1, f2, f3) \
	if (exp == 0) { \
		if (allfrac == 0) \
			return (FPC_ZERO); \
		fp->fp_exp = 1 - expbias; \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		fpu_norm(fp); \
		return (FPC_NUM); \
	} \
	if (exp == (2 * expbias + 1)) { \
		if (allfrac == 0) \
			return (FPC_INF); \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		return (FPC_QNAN); \
	} \
	fp->fp_exp = exp - expbias; \
	fp->fp_mant[0] = FP_1 | f0; \
	fp->fp_mant[1] = f1; \
	fp->fp_mant[2] = f2; \
	fp->fp_mant[3] = f3; \
	return (FPC_NUM)
168 
169 /*
170  * 32-bit single precision -> fpn.
171  * We assume a single occupies at most (64-FP_LG) bits in the internal
172  * format: i.e., needs at most fp_mant[0] and fp_mant[1].
173  */
174 int
175 fpu_stof(fp, i)
176 	register struct fpn *fp;
177 	register u_int i;
178 {
179 	register int exp;
180 	register u_int frac, f0, f1;
181 #define SNG_SHIFT (SNG_FRACBITS - FP_LG)
182 
183 	exp = (i >> (32 - 1 - SNG_EXPBITS)) & mask(SNG_EXPBITS);
184 	frac = i & mask(SNG_FRACBITS);
185 	f0 = frac >> SNG_SHIFT;
186 	f1 = frac << (32 - SNG_SHIFT);
187 	FP_TOF(exp, SNG_EXP_BIAS, frac, f0, f1, 0, 0);
188 }
189 
190 /*
191  * 64-bit double -> fpn.
192  * We assume this uses at most (96-FP_LG) bits.
193  */
194 int
195 fpu_dtof(fp, i, j)
196 	register struct fpn *fp;
197 	register u_int i, j;
198 {
199 	register int exp;
200 	register u_int frac, f0, f1, f2;
201 #define DBL_SHIFT (DBL_FRACBITS - 32 - FP_LG)
202 
203 	exp = (i >> (32 - 1 - DBL_EXPBITS)) & mask(DBL_EXPBITS);
204 	frac = i & mask(DBL_FRACBITS - 32);
205 	f0 = frac >> DBL_SHIFT;
206 	f1 = (frac << (32 - DBL_SHIFT)) | (j >> DBL_SHIFT);
207 	f2 = j << (32 - DBL_SHIFT);
208 	frac |= j;
209 	FP_TOF(exp, DBL_EXP_BIAS, frac, f0, f1, f2, 0);
210 }
211 
/*
 * 128-bit extended -> fpn.
 */
int
fpu_xtof(fp, i, j, k, l)
	register struct fpn *fp;
	register u_int i, j, k, l;
{
	register int exp;
	register u_int frac, f0, f1, f2, f3;
#define EXT_SHIFT (-(EXT_FRACBITS - 3 * 32 - FP_LG))	/* left shift! */

	/*
	 * The extended fraction has MORE low-order bits than fit below
	 * FP_LG, so here the fraction is shifted LEFT by EXT_SHIFT
	 * (the negated, hence positive, deficit) rather than right as
	 * in fpu_stof()/fpu_dtof(); each word takes its own high bits
	 * plus the top (32 - EXT_SHIFT) bits of the next lower word.
	 */
	exp = (i >> (32 - 1 - EXT_EXPBITS)) & mask(EXT_EXPBITS);
	frac = i & mask(EXT_FRACBITS - 3 * 32);
	f0 = (frac << EXT_SHIFT) | (j >> (32 - EXT_SHIFT));
	f1 = (j << EXT_SHIFT) | (k >> (32 - EXT_SHIFT));
	f2 = (k << EXT_SHIFT) | (l >> (32 - EXT_SHIFT));
	f3 = l << EXT_SHIFT;
	/* OR in the low words only for FP_TOF's all-bits-zero test. */
	frac |= j | k | l;
	FP_TOF(exp, EXT_EXP_BIAS, frac, f0, f1, f2, f3);
}
236 
/*
 * Explode the contents of a register / regpair / regquad.
 * If the input is a signalling NaN, an NV (invalid) exception
 * will be set.  (Note that nothing but NV can occur until ALU
 * operations are performed.)
 */
void
fpu_explode(fe, fp, type, reg)
	register struct fpemu *fe;
	register struct fpn *fp;
	int type, reg;
{
	register u_int s, *space;
#ifdef SUN4U
	u_int64_t l, *xspace;

	/*
	 * View the even-aligned register pair as one 64-bit word for
	 * FTYPE_LNG operands (fetched unconditionally; only used in
	 * that case).  NOTE(review): this assumes fs_regs is aligned
	 * for 64-bit access -- confirm against the fpstate layout.
	 */
	xspace = (u_int64_t *)&fe->fe_fpstate->fs_regs[reg & ~1];
	l = xspace[0];
#endif /* SUN4U */
	space = &fe->fe_fpstate->fs_regs[reg];
	s = space[0];
	/* The sign bit is the top bit of the first word in every format. */
	fp->fp_sign = s >> 31;
	fp->fp_sticky = 0;
	/* After the switch, `s' is reused to hold the FPC_* class code. */
	switch (type) {
#ifdef SUN4U
	case FTYPE_LNG:
		s = fpu_xitof(fp, l);
		break;
#endif /* SUN4U */

	case FTYPE_INT:
		s = fpu_itof(fp, s);
		break;

	case FTYPE_SNG:
		s = fpu_stof(fp, s);
		break;

	case FTYPE_DBL:
		s = fpu_dtof(fp, s, space[1]);
		break;

	case FTYPE_EXT:
		s = fpu_xtof(fp, s, space[1], space[2], space[3]);
		break;

	default:
		panic("fpu_explode");
	}
	if (s == FPC_QNAN && (fp->fp_mant[0] & FP_QUIETBIT) == 0) {
		/*
		 * Input is a signalling NaN.  All operations that return
		 * an input NaN operand put it through a ``NaN conversion'',
		 * which basically just means ``turn on the quiet bit''.
		 * We do this here so that all NaNs internally look quiet
		 * (we can tell signalling ones by their class).
		 */
		fp->fp_mant[0] |= FP_QUIETBIT;
		fe->fe_cx = FSR_NV;	/* assert invalid operand */
		s = FPC_SNAN;
	}
	fp->fp_class = s;
}
300