xref: /openbsd-src/sys/arch/sparc64/fpu/fpu_explode.c (revision 91f110e064cd7c194e59e019b83bb7496c1c84d4)
1 /*	$OpenBSD: fpu_explode.c,v 1.4 2006/06/21 19:24:38 jason Exp $	*/
2 /*	$NetBSD: fpu_explode.c,v 1.5 2000/08/03 18:32:08 eeh Exp $ */
3 
4 /*
5  * Copyright (c) 1992, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This software was developed by the Computer Systems Engineering group
9  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
10  * contributed to Berkeley.
11  *
12  * All advertising materials mentioning features or use of this software
13  * must display the following acknowledgement:
14  *	This product includes software developed by the University of
15  *	California, Lawrence Berkeley Laboratory.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions
19  * are met:
20  * 1. Redistributions of source code must retain the above copyright
21  *    notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *    notice, this list of conditions and the following disclaimer in the
24  *    documentation and/or other materials provided with the distribution.
25  * 3. Neither the name of the University nor the names of its contributors
26  *    may be used to endorse or promote products derived from this software
27  *    without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  *
41  *	@(#)fpu_explode.c	8.1 (Berkeley) 6/11/93
42  */
43 
44 /*
45  * FPU subroutines: `explode' the machine's `packed binary' format numbers
46  * into our internal format.
47  */
48 
49 #include <sys/types.h>
50 #include <sys/systm.h>
51 
52 #include <machine/ieee.h>
53 #include <machine/instr.h>
54 #include <machine/reg.h>
55 
56 #include <sparc64/fpu/fpu_arith.h>
57 #include <sparc64/fpu/fpu_emu.h>
58 #include <sparc64/fpu/fpu_extern.h>
59 
60 /*
61  * N.B.: in all of the following, we assume the FP format is
62  *
63  *	---------------------------
64  *	| s | exponent | fraction |
65  *	---------------------------
66  *
67  * (which represents -1**s * 1.fraction * 2**exponent), so that the
68  * sign bit is way at the top (bit 31), the exponent is next, and
69  * then the remaining bits mark the fraction.  A zero exponent means
70  * zero or denormalized (0.fraction rather than 1.fraction), and the
71  * maximum possible exponent, 2bias+1, signals inf (fraction==0) or NaN.
72  *
73  * Since the sign bit is always the topmost bit---this holds even for
74  * integers---we set that outside all the *tof functions.  Each function
75  * returns the class code for the new number (but note that we use
76  * FPC_QNAN for all NaNs; fpu_explode will fix this if appropriate).
77  */
78 
79 /*
80  * int -> fpn.
81  */
82 int
83 fpu_itof(fp, i)
84 	register struct fpn *fp;
85 	register u_int i;
86 {
87 
88 	if (i == 0)
89 		return (FPC_ZERO);
90 	/*
91 	 * The value FP_1 represents 2^FP_LG, so set the exponent
92 	 * there and let normalization fix it up.  Convert negative
93 	 * numbers to sign-and-magnitude.  Note that this relies on
94 	 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
95 	 */
96 	fp->fp_exp = FP_LG;
97 	fp->fp_mant[0] = (int)i < 0 ? -i : i;
98 	fp->fp_mant[1] = 0;
99 	fp->fp_mant[2] = 0;
100 	fp->fp_mant[3] = 0;
101 	fpu_norm(fp);
102 	return (FPC_NUM);
103 }
104 
105 /*
106  * 64-bit int -> fpn.
107  */
108 int
109 fpu_xtof(fp, i)
110 	register struct fpn *fp;
111 	register u_int64_t i;
112 {
113 	if (i == 0)
114 		return (FPC_ZERO);
115 
116 	/*
117 	 * The value FP_1 represents 2^FP_LG, so set the exponent
118 	 * there and let normalization fix it up.  Convert negative
119 	 * numbers to sign-and-magnitude.  Note that this relies on
120 	 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
121 	 */
122 	fp->fp_exp = FP_LG2;
123 	*((int64_t*)fp->fp_mant) = (int64_t)i < 0 ? -i : i;
124 	fp->fp_mant[2] = 0;
125 	fp->fp_mant[3] = 0;
126 	fpu_norm(fp);
127 	return (FPC_NUM);
128 }
129 
/* Low `nbits' bits set; long so it is usable on fraction field widths. */
#define	mask(nbits) ((1L << (nbits)) - 1)

/*
 * All external floating formats convert to internal in the same manner,
 * as defined here.  Note that only normals get an implied 1.0 inserted.
 *
 * Three cases, selected by the biased exponent field:
 *   exp == 0:         zero (all fraction bits clear) or denormal --
 *                     denormals get exponent 1-bias and are cleaned up
 *                     by fpu_norm();
 *   exp == all ones:  infinity (fraction clear) or NaN -- NaNs are
 *                     reported as FPC_QNAN here; fpu_explode() demotes
 *                     signalling ones afterward;
 *   otherwise:        a normal number -- unbias the exponent and OR in
 *                     the implied leading FP_1 bit.
 * Expands to `return' statements, so it must be the last statement of
 * each *tof function.
 */
#define	FP_TOF(exp, expbias, allfrac, f0, f1, f2, f3) \
	if (exp == 0) { \
		if (allfrac == 0) \
			return (FPC_ZERO); \
		fp->fp_exp = 1 - expbias; \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		fpu_norm(fp); \
		return (FPC_NUM); \
	} \
	if (exp == (2 * expbias + 1)) { \
		if (allfrac == 0) \
			return (FPC_INF); \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		return (FPC_QNAN); \
	} \
	fp->fp_exp = exp - expbias; \
	fp->fp_mant[0] = FP_1 | f0; \
	fp->fp_mant[1] = f1; \
	fp->fp_mant[2] = f2; \
	fp->fp_mant[3] = f3; \
	return (FPC_NUM)
163 
164 /*
165  * 32-bit single precision -> fpn.
166  * We assume a single occupies at most (64-FP_LG) bits in the internal
167  * format: i.e., needs at most fp_mant[0] and fp_mant[1].
168  */
169 int
170 fpu_stof(fp, i)
171 	register struct fpn *fp;
172 	register u_int i;
173 {
174 	register int exp;
175 	register u_int frac, f0, f1;
176 #define SNG_SHIFT (SNG_FRACBITS - FP_LG)
177 
178 	exp = (i >> (32 - 1 - SNG_EXPBITS)) & mask(SNG_EXPBITS);
179 	frac = i & mask(SNG_FRACBITS);
180 	f0 = frac >> SNG_SHIFT;
181 	f1 = frac << (32 - SNG_SHIFT);
182 	FP_TOF(exp, SNG_EXP_BIAS, frac, f0, f1, 0, 0);
183 }
184 
185 /*
186  * 64-bit double -> fpn.
187  * We assume this uses at most (96-FP_LG) bits.
188  */
189 int
190 fpu_dtof(fp, i, j)
191 	register struct fpn *fp;
192 	register u_int i, j;
193 {
194 	register int exp;
195 	register u_int frac, f0, f1, f2;
196 #define DBL_SHIFT (DBL_FRACBITS - 32 - FP_LG)
197 
198 	exp = (i >> (32 - 1 - DBL_EXPBITS)) & mask(DBL_EXPBITS);
199 	frac = i & mask(DBL_FRACBITS - 32);
200 	f0 = frac >> DBL_SHIFT;
201 	f1 = (frac << (32 - DBL_SHIFT)) | (j >> DBL_SHIFT);
202 	f2 = j << (32 - DBL_SHIFT);
203 	frac |= j;
204 	FP_TOF(exp, DBL_EXP_BIAS, frac, f0, f1, f2, 0);
205 }
206 
207 /*
208  * 128-bit extended -> fpn.
209  */
210 int
211 fpu_qtof(fp, i, j, k, l)
212 	register struct fpn *fp;
213 	register u_int i, j, k, l;
214 {
215 	register int exp;
216 	register u_int frac, f0, f1, f2, f3;
217 #define EXT_SHIFT (-(EXT_FRACBITS - 3 * 32 - FP_LG))	/* left shift! */
218 
219 	/*
220 	 * Note that ext and fpn `line up', hence no shifting needed.
221 	 */
222 	exp = (i >> (32 - 1 - EXT_EXPBITS)) & mask(EXT_EXPBITS);
223 	frac = i & mask(EXT_FRACBITS - 3 * 32);
224 	f0 = (frac << EXT_SHIFT) | (j >> (32 - EXT_SHIFT));
225 	f1 = (j << EXT_SHIFT) | (k >> (32 - EXT_SHIFT));
226 	f2 = (k << EXT_SHIFT) | (l >> (32 - EXT_SHIFT));
227 	f3 = l << EXT_SHIFT;
228 	frac |= j | k | l;
229 	FP_TOF(exp, EXT_EXP_BIAS, frac, f0, f1, f2, f3);
230 }
231 
/*
 * Explode the contents of a register / regpair / regquad.
 * If the input is a signalling NaN, an NV (invalid) exception
 * will be set.  (Note that nothing but NV can occur until ALU
 * operations are performed.)
 */
void
fpu_explode(fe, fp, type, reg)
	register struct fpemu *fe;
	register struct fpn *fp;
	int type, reg;
{
	register u_int s, *space;
	u_int64_t l, *xspace;

	/*
	 * Fetch the operand two ways up front: `l' is the 64-bit value
	 * at the even-aligned register pair (reg & ~1, used by the LNG
	 * case), `s' the single 32-bit word at `reg'.  The switch below
	 * picks whichever view the operand type needs.
	 * NOTE(review): the u_int64_t cast assumes fs_regs pairs are
	 * 64-bit aligned -- confirm against struct fpstate64's layout.
	 */
	xspace = (u_int64_t *)&fe->fe_fpstate->fs_regs[reg & ~1];
	l = xspace[0];
	space = &fe->fe_fpstate->fs_regs[reg];
	s = space[0];
	/*
	 * The sign is the topmost bit of the first word for every type
	 * handled here (see the format note at the top of this file),
	 * so it is set once instead of in each *tof function.
	 */
	fp->fp_sign = s >> 31;
	fp->fp_sticky = 0;
	DPRINTF(FPE_INSN, ("fpu_explode: "));
	/* Convert to internal format; `s' is reused as the class code. */
	switch (type) {
	case FTYPE_LNG:
		DPRINTF(FPE_INSN, ("LNG: %llx", l));
		s = fpu_xtof(fp, l);
		break;

	case FTYPE_INT:
		DPRINTF(FPE_INSN, ("INT: %x", s));
		s = fpu_itof(fp, s);
		break;

	case FTYPE_SNG:
		DPRINTF(FPE_INSN, ("SNG: %x", s));
		s = fpu_stof(fp, s);
		break;

	case FTYPE_DBL:
		DPRINTF(FPE_INSN, ("DBL: %x %x", s, space[1]));
		s = fpu_dtof(fp, s, space[1]);
		break;

	case FTYPE_EXT:
		DPRINTF(FPE_INSN, ("EXT: %x %x %x %x", s, space[1],
		    space[2], space[3]));
		s = fpu_qtof(fp, s, space[1], space[2], space[3]);
		break;

	default:
		panic("fpu_explode");
	}
	DPRINTF(FPE_INSN, ("\n"));

	if (s == FPC_QNAN && (fp->fp_mant[0] & FP_QUIETBIT) == 0) {
		/*
		 * Input is a signalling NaN.  All operations that return
		 * an input NaN operand put it through a ``NaN conversion'',
		 * which basically just means ``turn on the quiet bit''.
		 * We do this here so that all NaNs internally look quiet
		 * (we can tell signalling ones by their class).
		 */
		fp->fp_mant[0] |= FP_QUIETBIT;
		fe->fe_cx = FSR_NV;	/* assert invalid operand */
		s = FPC_SNAN;
	}
	fp->fp_class = s;
	DPRINTF(FPE_REG, ("fpu_explode: %%%c%d => ", (type == FTYPE_LNG) ? 'x' :
	    ((type == FTYPE_INT) ? 'i' :
		((type == FTYPE_SNG) ? 's' :
		    ((type == FTYPE_DBL) ? 'd' :
			((type == FTYPE_EXT) ? 'q' : '?')))),
	    reg));
	DUMPFPN(FPE_REG, fp);
	DPRINTF(FPE_REG, ("\n"));
}
308