/*	$OpenBSD: fpu_explode.c,v 1.10 2019/06/21 17:00:58 jca Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_explode.c	8.1 (Berkeley) 6/11/93
 *	$NetBSD: fpu_explode.c,v 1.5 2000/08/03 18:32:08 eeh Exp $
 */

/*
 * FPU subroutines: `explode' the machine's `packed binary' format numbers
 * into our internal format.
 */

#include <sys/param.h>

#include <machine/frame.h>
#include <machine/fsr.h>
#include <machine/ieee.h>
#include <machine/instr.h>

#include "fpu_arith.h"
#include "fpu_emu.h"
#include "fpu_extern.h"
#include "fpu_reg.h"

/*
 * N.B.: in all of the following, we assume the FP format is
 *
 *	---------------------------
 *	| s | exponent | fraction |
 *	---------------------------
 *
 * (which represents -1**s * 1.fraction * 2**exponent), so that the
 * sign bit is way at the top (bit 31), the exponent is next, and
 * then the remaining bits mark the fraction.  A zero exponent means
 * zero or denormalized (0.fraction rather than 1.fraction), and the
 * maximum possible exponent, 2bias+1, signals inf (fraction==0) or NaN.
 *
 * Since the sign bit is always the topmost bit---this holds even for
 * integers---we set that outside all the *tof functions.  Each function
 * returns the class code for the new number (but note that we use
 * FPC_QNAN for all NaNs; fpu_explode will fix this if appropriate).
 */
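
/*
 * Worked example (added for illustration; not part of the original
 * comment block).  With the single-precision layout of 1 sign bit,
 * 8 exponent bits (bias 127) and 23 fraction bits, the rules above
 * give, for the packed word 0xC0A00000:
 *
 *	0xC0A00000 = 1 | 10000001 | 01000000000000000000000
 *	           = -1.01(binary) * 2^(129-127) = -1.25 * 4 = -5.0
 *
 * An all-zero exponent with a nonzero fraction (e.g. 0x00000001) is a
 * denormal, 0x7F800000 (maximum exponent, zero fraction) is +inf, and
 * 0x7FC00000 (maximum exponent, nonzero fraction) is a NaN.
 */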

/*
 * int -> fpn.
 */
int
__fpu_itof(fp, i)
        struct fpn *fp;
        u_int i;
{

        if (i == 0)
                return (FPC_ZERO);
        /*
         * The value FP_1 represents 2^FP_LG, so set the exponent
         * there and let normalization fix it up.  Convert negative
         * numbers to sign-and-magnitude.  Note that this relies on
         * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
         */
        fp->fp_exp = FP_LG;
        fp->fp_mant[0] = (fp->fp_sign && (int)i < 0) ? -i : i;
        fp->fp_mant[1] = 0;
        fp->fp_mant[2] = 0;
        fp->fp_mant[3] = 0;
        __fpu_norm(fp);
        return (FPC_NUM);
}

/*
 * uint -> fpn.
 */
int
__fpu_uitof(fp, i)
        struct fpn *fp;
        u_int i;
{

        if (i == 0)
                return (FPC_ZERO);
        /*
         * The value FP_1 represents 2^FP_LG, so set the exponent
         * there and let normalization fix it up.
         * Note that this relies on fpu_norm()'s handling of
         * `supernormals'; see fpu_subr.c.
         */
        fp->fp_exp = FP_LG;
        fp->fp_mant[0] = i;
        fp->fp_mant[1] = 0;
        fp->fp_mant[2] = 0;
        fp->fp_mant[3] = 0;
        __fpu_norm(fp);
        return (FPC_NUM);
}

/*
 * 64-bit int -> fpn.
 */
int
__fpu_xtof(fp, i)
        struct fpn *fp;
        u_int64_t i;
{

        if (i == 0)
                return (FPC_ZERO);
        /*
         * The value FP_1 represents 2^FP_LG, but a 64-bit value spans
         * fp_mant[0..1], so set the exponent at FP_LG2 and let
         * normalization fix it up.  Convert negative numbers to
         * sign-and-magnitude.  Note that this relies on fpu_norm()'s
         * handling of `supernormals'; see fpu_subr.c.
         */
        fp->fp_exp = FP_LG2;
        i = (fp->fp_sign && (int64_t)i < 0) ? -i : i;
        fp->fp_mant[0] = (i >> 32) & 0xffffffff;
        fp->fp_mant[1] = (i >> 0) & 0xffffffff;
        fp->fp_mant[2] = 0;
        fp->fp_mant[3] = 0;
        __fpu_norm(fp);
        return (FPC_NUM);
}

/*
 * 64-bit uint -> fpn.
 */
int
__fpu_uxtof(fp, i)
        struct fpn *fp;
        u_int64_t i;
{

        if (i == 0)
                return (FPC_ZERO);
        /*
         * The value FP_1 represents 2^FP_LG, but a 64-bit value spans
         * fp_mant[0..1], so set the exponent at FP_LG2 and let
         * normalization fix it up.
         * Note that this relies on fpu_norm()'s handling of
         * `supernormals'; see fpu_subr.c.
         */
        fp->fp_exp = FP_LG2;
        fp->fp_mant[0] = (i >> 32) & 0xffffffff;
        fp->fp_mant[1] = (i >> 0) & 0xffffffff;
        fp->fp_mant[2] = 0;
        fp->fp_mant[3] = 0;
        __fpu_norm(fp);
        return (FPC_NUM);
}

#define mask(nbits)     ((1L << (nbits)) - 1)

/*
 * All external floating formats convert to internal in the same manner,
 * as defined here.  Note that only normals get an implied 1.0 inserted.
 */
#define FP_TOF(exp, expbias, allfrac, f0, f1, f2, f3) \
        if (exp == 0) { \
                if (allfrac == 0) \
                        return (FPC_ZERO); \
                fp->fp_exp = 1 - expbias; \
                fp->fp_mant[0] = f0; \
                fp->fp_mant[1] = f1; \
                fp->fp_mant[2] = f2; \
                fp->fp_mant[3] = f3; \
                __fpu_norm(fp); \
                return (FPC_NUM); \
        } \
        if (exp == (2 * expbias + 1)) { \
                if (allfrac == 0) \
                        return (FPC_INF); \
                fp->fp_mant[0] = f0; \
                fp->fp_mant[1] = f1; \
                fp->fp_mant[2] = f2; \
                fp->fp_mant[3] = f3; \
                return (FPC_QNAN); \
        } \
        fp->fp_exp = exp - expbias; \
        fp->fp_mant[0] = FP_1 | f0; \
        fp->fp_mant[1] = f1; \
        fp->fp_mant[2] = f2; \
        fp->fp_mant[3] = f3; \
        return (FPC_NUM)
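
/*
 * Worked example (added for illustration, in terms of the symbolic
 * constants used here).  For single precision, expbias is SNG_EXP_BIAS
 * (127) and f0/f1 hold the 23 fraction bits split at FP_LG, as
 * __fpu_stof() below computes them.  The packed value 0x3FC00000
 * (exponent field 127, fraction MSB set) takes the final branch:
 * fp_exp = 0 and fp_mant[0] = FP_1 | (1 << (FP_LG - 1)), i.e.
 * 1.1 (binary) * 2^0 = 1.5.  The packed value 0x00000001 (exponent
 * field 0, fraction 1) takes the first branch as a denormal and is
 * fixed up by __fpu_norm(); 0x7F800000 returns FPC_INF and 0x7FC00000
 * returns FPC_QNAN.
 */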

/*
 * 32-bit single precision -> fpn.
 * We assume a single occupies at most (64-FP_LG) bits in the internal
 * format: i.e., needs at most fp_mant[0] and fp_mant[1].
 */
int
__fpu_stof(fp, i)
        struct fpn *fp;
        u_int i;
{
        int exp;
        u_int frac, f0, f1;
#define SNG_SHIFT (SNG_FRACBITS - FP_LG)

        exp = (i >> (32 - 1 - SNG_EXPBITS)) & mask(SNG_EXPBITS);
        frac = i & mask(SNG_FRACBITS);
        f0 = frac >> SNG_SHIFT;
        f1 = frac << (32 - SNG_SHIFT);
        FP_TOF(exp, SNG_EXP_BIAS, frac, f0, f1, 0, 0);
}

/*
 * 64-bit double -> fpn.
 * We assume this uses at most (96-FP_LG) bits.
 */
int
__fpu_dtof(fp, i, j)
        struct fpn *fp;
        u_int i, j;
{
        int exp;
        u_int frac, f0, f1, f2;
#define DBL_SHIFT (DBL_FRACBITS - 32 - FP_LG)

        exp = (i >> (32 - 1 - DBL_EXPBITS)) & mask(DBL_EXPBITS);
        frac = i & mask(DBL_FRACBITS - 32);
        f0 = frac >> DBL_SHIFT;
        f1 = (frac << (32 - DBL_SHIFT)) | (j >> DBL_SHIFT);
        f2 = j << (32 - DBL_SHIFT);
        frac |= j;
        FP_TOF(exp, DBL_EXP_BIAS, frac, f0, f1, f2, 0);
}
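
/*
 * Illustrative sketch (added; kept under #if 0 so it is never compiled):
 * how a caller hands a packed 64-bit double to __fpu_dtof().  The sign
 * is peeled off first, as the block comment near the top notes, and the
 * two 32-bit halves become the i/j arguments.  This mirrors the
 * FTYPE_DBL case of __fpu_explode() below on this big-endian target;
 * the helper name is made up.
 */
#if 0
static int
example_dtof(fp, d)
        struct fpn *fp;
        u_int64_t d;
{
        u_int hi = (u_int)(d >> 32);    /* sign | exponent | high fraction */
        u_int lo = (u_int)d;            /* low 32 fraction bits */

        fp->fp_sign = hi >> 31;         /* sign handled outside the *tof routines */
        fp->fp_sticky = 0;
        return (__fpu_dtof(fp, hi, lo));
}
#endif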

/*
 * 128-bit extended -> fpn.
 */
int
__fpu_qtof(fp, i, j, k, l)
        struct fpn *fp;
        u_int i, j, k, l;
{
        int exp;
        u_int frac, f0, f1, f2, f3;
#define EXT_SHIFT (-(EXT_FRACBITS - 3 * 32 - FP_LG))    /* left shift! */

        /*
         * The extended fraction nearly `lines up' with the internal
         * format; everything just gets nudged left by EXT_SHIFT bits.
         */
        exp = (i >> (32 - 1 - EXT_EXPBITS)) & mask(EXT_EXPBITS);
        frac = i & mask(EXT_FRACBITS - 3 * 32);
        f0 = (frac << EXT_SHIFT) | (j >> (32 - EXT_SHIFT));
        f1 = (j << EXT_SHIFT) | (k >> (32 - EXT_SHIFT));
        f2 = (k << EXT_SHIFT) | (l >> (32 - EXT_SHIFT));
        f3 = l << EXT_SHIFT;
        frac |= j | k | l;
        FP_TOF(exp, EXT_EXP_BIAS, frac, f0, f1, f2, f3);
}

#if 0 /* __fpu_explode is unused */
/*
 * Explode the contents of a register / regpair / regquad.
 * If the input is a signalling NaN, an NV (invalid) exception
 * will be set.  (Note that nothing but NV can occur until ALU
 * operations are performed.)
 */
void
__fpu_explode(fe, fp, type, reg)
        struct fpemu *fe;
        struct fpn *fp;
        int type, reg;
{
        u_int32_t s = 0/* XXX gcc */, *sp;
        u_int64_t l[2];

        if (type == FTYPE_LNG || type == FTYPE_DBL || type == FTYPE_EXT) {
                l[0] = __fpu_getreg64(reg & ~1);
                sp = (u_int32_t *)l;
                fp->fp_sign = sp[0] >> 31;
                fp->fp_sticky = 0;
                switch (type) {
                case FTYPE_LNG:
                        s = __fpu_xtof(fp, l[0]);
                        break;
                case FTYPE_DBL:
                        s = __fpu_dtof(fp, sp[0], sp[1]);
                        break;
                case FTYPE_EXT:
                        l[1] = __fpu_getreg64((reg & ~1) + 2);
                        s = __fpu_qtof(fp, sp[0], sp[1], sp[2], sp[3]);
                        break;
                default:
#ifdef DIAGNOSTIC
                        __utrap_panic("fpu_explode");
#endif
                        break;
                }
        } else {
#ifdef DIAGNOSTIC
                if (type != FTYPE_SNG)
                        __utrap_panic("fpu_explode");
#endif
                s = __fpu_getreg32(reg);
                fp->fp_sign = s >> 31;
                fp->fp_sticky = 0;
                s = __fpu_stof(fp, s);
        }

        if (s == FPC_QNAN && (fp->fp_mant[0] & FP_QUIETBIT) == 0) {
                /*
                 * Input is a signalling NaN.  All operations that return
                 * an input NaN operand put it through a ``NaN conversion'',
                 * which basically just means ``turn on the quiet bit''.
                 * We do this here so that all NaNs internally look quiet
                 * (we can tell signalling ones by their class).
                 */
                fp->fp_mant[0] |= FP_QUIETBIT;
                fe->fe_cx = FSR_NV;     /* assert invalid operand */
                s = FPC_SNAN;
        }
        fp->fp_class = s;
        DPRINTF(FPE_REG, ("fpu_explode: %%%c%d => ", (type == FTYPE_LNG) ? 'x' :
            ((type == FTYPE_INT) ? 'i' :
            ((type == FTYPE_SNG) ? 's' :
            ((type == FTYPE_DBL) ? 'd' :
            ((type == FTYPE_EXT) ? 'q' : '?')))),
            reg));
        DUMPFPN(FPE_REG, fp);
        DPRINTF(FPE_REG, ("\n"));
}
#endif
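
/*
 * Added note for illustration: with the single-precision layout, the
 * packed value 0x7FC00000 (fraction MSB set) comes out of __fpu_stof()
 * with FP_QUIETBIT already set and stays FPC_QNAN, while 0x7F800001
 * (fraction MSB clear) is a signalling NaN: the quieting code above
 * would set FP_QUIETBIT, record FSR_NV in fe_cx, and reclassify it
 * as FPC_SNAN.
 */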