/* s_modfl.c -- long double version of s_modf.c.
 * Conversion to IEEE quad long double by Jakub Jelinek, jj@ultra.linux.cz.
 */

/*
 * ====================================================
 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
 *
 * Developed at SunPro, a Sun Microsystems, Inc. business.
 * Permission to use, copy, modify, and distribute this
 * software is freely granted, provided that this notice
 * is preserved.
 * ====================================================
 */

#if defined(LIBM_SCCS) && !defined(lint)
static char rcsid[] = "NetBSD: ";
#endif

/*
 * modfq(__float128 x, __float128 *iptr)
 * return fraction part of x, and return x's integral part in *iptr.
 * Method:
 *    Bit twiddling.
 *
 * Exception:
 *    No exception.
 */
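
/* Illustrative usage sketch (not part of the original source): modfq splits
   a value into fractional and integral parts, both carrying the sign of x.

     __float128 ip;
     __float128 frac = modfq (3.5Q, &ip);    // frac == 0.5Q,  ip == 3.0Q
     frac = modfq (-3.5Q, &ip);              // frac == -0.5Q, ip == -3.0Q

   The 'Q' literal suffix assumes a compiler with __float128 support
   (e.g. GCC with libquadmath).  */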

#include "quadmath-imp.h"

static const __float128 one = 1.0;

__float128 modfq(__float128 x, __float128 *iptr)
{
    int64_t i0,i1,j0;
    uint64_t i;
    GET_FLT128_WORDS64(i0,i1,x);
    j0 = ((i0>>48)&0x7fff)-0x3fff;    /* exponent of x */
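    /* Added note (not in the original source): in IEEE binary128 the high
       word i0 holds the sign, the 15-bit exponent and the top 48 mantissa
       bits, while the low word i1 holds the remaining 64 mantissa bits.
       The unbiased exponent j0 therefore selects one of three cases:
       j0 < 48 (integer part determined by the high word alone),
       48 <= j0 <= 111 (fraction bits extend into the low word),
       j0 > 111 (no fraction bits at all, or x is Inf/NaN).  */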
    if(j0<48) {            /* integer part in high x */
        if(j0<0) {            /* |x|<1 */
            /* *iptr = +-0 */
            SET_FLT128_WORDS64(*iptr,i0&0x8000000000000000ULL,0);
            return x;
        } else {
            i = (0x0000ffffffffffffLL)>>j0;
            if(((i0&i)|i1)==0) {        /* x is integral */
                *iptr = x;
                /* return +-0 */
                SET_FLT128_WORDS64(x,i0&0x8000000000000000ULL,0);
                return x;
            } else {
                SET_FLT128_WORDS64(*iptr,i0&(~i),0);
                return x - *iptr;
            }
        }
    } else if (j0>111) {        /* no fraction part */
        *iptr = x*one;
        /* We must handle NaNs separately: for a NaN argument return x*one
           (a NaN; the multiplication also quiets a signaling NaN) instead
           of a signed zero.  */
        if (j0 == 0x4000 && ((i0 & 0x0000ffffffffffffLL) | i1))
            return x*one;
        /* return +-0 */
        SET_FLT128_WORDS64(x,i0&0x8000000000000000ULL,0);
        return x;
    } else {            /* fraction part in low x */
        i = -1ULL>>(j0-48);
        if((i1&i)==0) {        /* x is integral */
            *iptr = x;
            /* return +-0 */
            SET_FLT128_WORDS64(x,i0&0x8000000000000000ULL,0);
            return x;
        } else {
            SET_FLT128_WORDS64(*iptr,i0,i1&(~i));
            return x - *iptr;
        }
    }
}