/*	$NetBSD: modf.S,v 1.7 2013/09/12 15:36:16 joerg Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Header: modf.s,v 1.3 92/06/20 00:00:54 torek Exp
 */

/*
 * SPARC (V8) assembly, GNU as syntax.  PICCY_SET and ENTRY/RCSID are
 * macros supplied by <machine/asm.h> — presumably PICCY_SET loads a
 * PIC-relative address into its second argument using the third as a
 * scratch/link register; confirm against the NetBSD sparc asm.h.
 */
#include <machine/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
	.asciz "@(#)modf.s	8.1 (Berkeley) 6/4/93"
#else
	RCSID("$NetBSD: modf.S,v 1.7 2013/09/12 15:36:16 joerg Exp $")
#endif
#endif /* LIBC_SCCS and not lint */

#include <machine/fsr.h>

/*
 * double modf(double val, double *iptr)
 *
 * Returns the fractional part of `val', storing the integer part of
 * `val' in *iptr.  Both *iptr and the return value have the same sign
 * as `val'.
 *
 * Method:
 *
 * We use the fpu's normalization hardware to compute the integer portion
 * of the double precision argument.  Sun IEEE double precision numbers
 * have 52 bits of mantissa, 11 bits of exponent, and one bit of sign,
 * with the sign occupying bit 31 of word 0, and the exponent bits 30:20
 * of word 0.  Thus, values >= 2^52 are by definition integers.
 *
 * If we take a value that is in the range [+0..2^52) and add 2^52, all
 * of the fractional bits fall out and all of the integer bits are summed
 * with 2^52.  If we then subtract 2^52, we get those integer bits back.
 * This must be done with rounding set to `towards 0' or `towards -inf'.
 * `Toward -inf' fails when the value is 0 (we get -0 back)....
 *
 * Note that this method will work anywhere, but is machine dependent in
 * various aspects.
 *
 * Stack usage:
 *	4@[%fp - 4]	saved %fsr
 *	4@[%fp - 8]	new %fsr with rounding set to `towards 0'
 *	8@[%fp - 16]	space for moving between %i and %f registers
 * Register usage:
 *	%i0:%i1	double val; (two 32-bit halves, per the 32-bit SPARC ABI)
 *	%l0	scratch
 *	%l1	sign bit (0x80000000)
 *	%i2	double *iptr;
 *	%f2:f3	`magic number' 2^52, in fpu registers
 *	%f4:f5	double v, in fpu registers
 *	%l3	FSR rounding-direction field mask (FSR_RD)
 *	%l4	FSR `round towards zero' value, shifted into place
 *	%l5	caller's rounding mode, later the FSR value to restore
 *	%l6	scratch for FSR editing
 *	%g1	scratch for OR-ing the sign bit back in
 *	%o7	scratch/link register consumed by PICCY_SET (PIC builds)
 */

	.align	8
Lmagic:
	.word	0x43300000	! sign = 0, exponent = 52 + 1023, mantissa = 0
	.word	0		! (i.e., .double 0r4503599627370496e+00)

L0:
	.word	0		! 0.0
	.word	0

ENTRY(modf)
	save	%sp, -64-16, %sp	! new window: 64-byte save area + 16 bytes locals

	/*
	 * First, compute v = abs(val) by clearing sign bit,
	 * and then set up the fpu registers.  This would be
	 * much easier if we could do alu operations on fpu registers!
	 * The only path between %i and %f registers is through memory,
	 * hence the stores/loads via [%fp - 16].
	 */
	sethi	%hi(0x80000000), %l1	! sign bit
	andn	%i0, %l1, %l0		! high word of val, sign cleared
	st	%l0, [%fp - 16]		! stash |val| high word for fpu load
#ifdef __PIC__
	PICCY_SET(Lmagic, %l0, %o7)	! %l0 = &Lmagic, PIC-safe
	ldd	[%l0], %f2		! %f2:f3 = 2^52
#else
	sethi	%hi(Lmagic), %l0
	ldd	[%l0 + %lo(Lmagic)], %f2	! %f2:f3 = 2^52
#endif
	st	%i1, [%fp - 12]		! low word of val (sign lives in high word only)
	ldd	[%fp - 16], %f4		! %f4:f5 = v

	/*
	 * Is %f4:f5 >= %f2:f3 ?  If so, it is all integer bits.
	 * It is probably less, though.
	 */
	fcmped	%f4, %f2
	nop				! fpop2 delay (insn between fcmp and fbranch)
	fbuge	Lbig			! if >= (or unordered, i.e. NaN), go out
	 nop				! branch delay slot

	/*
	 * v < 2^52, so add 2^52, then subtract 2^52, but do it all
	 * with rounding set towards zero.  We leave any enabled
	 * traps enabled, but change the rounding mode.  This might
	 * not be so good.  Oh well....
	 * (%fsr can only be moved through memory, hence the st/ld pairs.)
	 */
	st	%fsr, [%fp - 4]		! %l5 = current FSR mode
	set	FSR_RD, %l3		! %l3 = rounding direction mask
	ld	[%fp - 4], %l5
	set	FSR_RD_RZ << FSR_RD_SHIFT, %l4
	andn	%l5, %l3, %l6		! clear the RD field...
	or	%l6, %l4, %l6		! round towards zero, please
	and	%l5, %l3, %l5		! save original rounding mode
	st	%l6, [%fp - 8]
	ld	[%fp - 8], %fsr		! install round-to-zero mode

	faddd	%f4, %f2, %f4		! %f4:f5 += 2^52 (fraction bits shift out)
	fsubd	%f4, %f2, %f4		! %f4:f5 -= 2^52 (integer bits come back)

	/*
	 * Restore %fsr, but leave exceptions accrued.
	 * (The aexc bits accumulated by the faddd/fsubd above must survive,
	 * so merge the saved RD field into the current FSR rather than
	 * blindly restoring the old value.)
	 */
	st	%fsr, [%fp - 4]
	ld	[%fp - 4], %l6
	andn	%l6, %l3, %l6		! %l6 = %fsr & ~FSR_RD;
	or	%l5, %l6, %l5		! %l5 |= %l6;
	st	%l5, [%fp - 4]
	ld	[%fp - 4], %fsr		! restore %fsr, leaving accrued stuff

	/*
	 * Now insert the original sign in %f4:f5.
	 * This is a lot of work, so it is conditional here.
	 * (Needed so that e.g. modf(-0.5) yields *iptr == -0.0, keeping
	 * the sign guarantee in the function contract above.)
	 */
	btst	%l1, %i0		! was val negative?
	be	1f			! no: integer part already has sign 0
	 nop				! branch delay slot
	st	%f4, [%fp - 16]		! move %f4 high word to memory...
	ld	[%fp - 16], %g1		! ...into an alu register,
	or	%l1, %g1, %g1		! set the sign bit,
	st	%g1, [%fp - 16]		! and push it back
	ld	[%fp - 16], %f4		! %f4:f5 = -(integer part)
1:

	/*
	 * The value in %f4:f5 is now the integer portion of the original
	 * argument.  We need to store this in *ival (%i2), subtract it
	 * from the original value argument (%i0:i1), and return the result.
	 */
	std	%f4, [%i2]		! *ival = %f4:f5;
	std	%i0, [%fp - 16]
	ldd	[%fp - 16], %f0		! %f0:f1 = val;
	fsubd	%f0, %f4, %f0		! %f0:f1 -= %f4:f5; (return value in %f0:f1)
	ret
	 restore			! delay slot: pop register window

Lbig:
	/*
	 * We get here if the original comparison of %f4:f5 (v) to
	 * %f2:f3 (2^52) came out `greater or unordered'.  In this
	 * case the integer part is the original value, and the
	 * fractional part is 0.
	 * (val is already in %f0:f1: the double argument arrives in
	 * %i0:%i1/%f0:f1 via the std/ldd above only on the small path;
	 * here %f0:f1 still holds whatever the caller's ABI placed there —
	 * NOTE(review): this relies on val having been loaded into %f0:f1
	 * before entry on this path; confirm against the sparc ABI.)
	 */
#ifdef __PIC__
	PICCY_SET(L0, %l0, %o7)		! %l0 = &L0, PIC-safe
	std	%f0, [%i2]		! *ival = val;
	ldd	[%l0], %f0		! return 0.0;
#else
	sethi	%hi(L0), %l0
	std	%f0, [%i2]		! *ival = val;
	ldd	[%l0 + %lo(L0)], %f0	! return 0.0;
#endif
	ret
	 restore			! delay slot: pop register window