/* This is a stripped down version of floatlib.c.  It supplies only those
   functions which exist in libgcc, but for which there are no assembly
   language versions in m68k/lb1sf68.S.

   It also includes simplistic support for extended floats (by working in
   double precision).  You must compile this file again with -DEXTFLOAT
   to get this support.  */
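
/* For example, a build might compile this file twice, along the lines of
   the sketch below (illustrative only; the real rules, flags, and output
   names come from the libgcc build machinery, and the compiler and object
   file names here are assumptions):

     m68k-elf-gcc -c fpgnulib.c -o fpgnulib.o              (single/double helpers)
     m68k-elf-gcc -c -DEXTFLOAT fpgnulib.c -o xfgnulib.o   (extended-float helpers)  */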

/*
** gnulib support for software floating point.
** Copyright (C) 1991 by Pipeline Associates, Inc. All rights reserved.
** Permission is granted to do *anything* you want with this file,
** commercial or otherwise, provided this message remains intact. So there!
** I would appreciate receiving any updates/patches/changes that anyone
** makes, and am willing to be the repository for said changes (am I
** making a big mistake?).
**
** Pat Wood
** Pipeline Associates, Inc.
** pipeline!phw@motown.com or
** sun!pipeline!phw or
** uunet!motown!pipeline!phw
**
** 05/01/91 -- V1.0 -- first release to gcc mailing lists
** 05/04/91 -- V1.1 -- added float and double prototypes and return values
**                  -- fixed problems with adding and subtracting zero
**                  -- fixed rounding in truncdfsf2
**                  -- fixed SWAP define and tested on 386
*/

/*
** The following are routines that replace the gnulib soft floating point
** routines that are called automatically when -msoft-float is selected.
** They support single and double precision IEEE format, with provisions
** for byte-swapped machines (tested on 386).  Some of the double-precision
** routines work at full precision, but most of the hard ones simply punt
** and call the single precision routines, producing a loss of accuracy.
** long long support is not assumed or included.
** Overall accuracy is close to IEEE (actually 68882) for single-precision
** arithmetic.  I think there may still be a 1 in 1000 chance of a bit
** being rounded the wrong way during a multiply.  I'm not fussy enough to
** bother with it, but if anyone is, knock yourself out.
**
** Efficiency has only been addressed where it was obvious that something
** would make a big difference.  Anyone who wants to do this right for
** best speed should go in and rewrite in assembler.
**
** I have tested this only on a 68030 workstation and 386/ix integrated
** in with -msoft-float.
*/

/* the following deal with IEEE single-precision numbers */
#define EXCESS 126L
#define SIGNBIT 0x80000000L
#define HIDDEN (1L << 23L)
#define SIGN(fp) ((fp) & SIGNBIT)
#define EXP(fp) (((fp) >> 23L) & 0xFF)
#define MANT(fp) (((fp) & 0x7FFFFFL) | HIDDEN)
#define PACK(s,e,m) ((s) | ((e) << 23L) | (m))
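
/* A worked example (not used by the code, assuming the usual IEEE-754
   single layout): 1.0f is the bit pattern 0x3F800000, so SIGN == 0,
   EXP == 127 and MANT == 0x00800000 with the hidden bit made explicit;
   the value is MANT * 2^(EXP - EXCESS - 24) = 2^23 * 2^-23 = 1.0.  */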

/* the following deal with IEEE double-precision numbers */
#define EXCESSD 1022L
#define HIDDEND (1L << 20L)
#define EXPDBITS 11
#define EXPDMASK 0x7FFL
#define EXPD(fp) (((fp.l.upper) >> 20L) & 0x7FFL)
#define SIGND(fp) ((fp.l.upper) & SIGNBIT)
#define MANTD(fp) (((((fp.l.upper) & 0xFFFFF) | HIDDEND) << 10) | \
                   (fp.l.lower >> 22))
#define MANTDMASK 0xFFFFFL /* mask of upper part */
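
/* A worked example (not used by the code): 1.0 has upper == 0x3FF00000 and
   lower == 0, so EXPD == 1023 and MANTD == 0x40000000, i.e. the top 31 of
   the 53 significand bits (the low 22 bits of fp.l.lower are dropped);
   the value is approximately MANTD * 2^(EXPD - EXCESSD - 31)
   = 2^30 * 2^-30 = 1.0.  */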

/* the following deal with IEEE extended-precision numbers */
#define EXCESSX 16382L
#define HIDDENX (1L << 31L)
#define EXPXBITS 15
#define EXPXMASK 0x7FFF
#define EXPX(fp) (((fp.l.upper) >> 16) & EXPXMASK)
#define SIGNX(fp) ((fp.l.upper) & SIGNBIT)
#define MANTXMASK 0x7FFFFFFFL /* mask of upper part */
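
/* These macros assume the m68881-style 96-bit extended format: the upper
   long holds the sign and 15-bit exponent in its top 16 bits (the low 16
   bits are unused padding), and the middle and lower longs hold a 64-bit
   mantissa whose explicit integer bit is HIDDENX (bit 31 of the middle
   long).  */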

union double_long
{
  double d;
  struct {
      long upper;
      unsigned long lower;
    } l;
};

union float_long {
  float f;
  long l;
};

union long_double_long
{
  long double ld;
  struct
    {
      long upper;
      unsigned long middle;
      unsigned long lower;
    } l;
};
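
/* The unions above are used to inspect and build bit patterns, e.g.:

     union float_long fl;
     fl.f = a1;
     exp = EXP (fl.l);

   The struct layouts put the most significant word first, matching the
   big-endian word order of the m68k targets this stripped-down copy is
   built for.  */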

#ifndef EXTFLOAT

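/* Return nonzero if either single-precision argument is a NaN (exponent
   all ones and a nonzero mantissa), zero otherwise.  */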
int
__unordsf2(float a, float b)
{
  union float_long fl;

  fl.f = a;
  if (EXP(fl.l) == EXP(~0u) && (MANT(fl.l) & ~HIDDEN) != 0)
    return 1;
  fl.f = b;
  if (EXP(fl.l) == EXP(~0u) && (MANT(fl.l) & ~HIDDEN) != 0)
    return 1;
  return 0;
}

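/* Return nonzero if either double-precision argument is a NaN, zero
   otherwise.  */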
int
__unorddf2(double a, double b)
{
  union double_long dl;

  dl.d = a;
  if (EXPD(dl) == EXPDMASK
      && ((dl.l.upper & MANTDMASK) != 0 || dl.l.lower != 0))
    return 1;
  dl.d = b;
  if (EXPD(dl) == EXPDMASK
      && ((dl.l.upper & MANTDMASK) != 0 || dl.l.lower != 0))
    return 1;
  return 0;
}

/* convert unsigned int to double */
double
__floatunsidf (unsigned long a1)
{
  long exp = 32 + EXCESSD;
  union double_long dl;

  if (!a1)
    {
      dl.l.upper = dl.l.lower = 0;
      return dl.d;
    }

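  /* Normalize: shift a1 left until its most significant bit (bit 31) is
     set, adjusting the exponent so the value is preserved; bit 31 then
     becomes the implicit leading 1 of the double.  */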
  while (a1 < 0x2000000L)
    {
      a1 <<= 4;
      exp -= 4;
    }

  while (a1 < 0x80000000L)
    {
      a1 <<= 1;
      exp--;
    }

  /* pack up and go home */
  dl.l.upper = exp << 20L;
  dl.l.upper |= (a1 >> 11L) & ~HIDDEND;
  dl.l.lower = a1 << 21L;

  return dl.d;
}

/* convert int to double */
double
__floatsidf (long a1)
{
  long sign = 0, exp = 31 + EXCESSD;
  union double_long dl;

  if (!a1)
    {
      dl.l.upper = dl.l.lower = 0;
      return dl.d;
    }

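  /* Work on the absolute value.  LONG_MIN (0x80000000) has no positive
     counterpart, so it stays negative after the negation and is handled
     by returning -2^31 directly.  */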
  if (a1 < 0)
    {
      sign = SIGNBIT;
      a1 = (long)-(unsigned long)a1;
      if (a1 < 0)
        {
          dl.l.upper = SIGNBIT | ((32 + EXCESSD) << 20L);
          dl.l.lower = 0;
          return dl.d;
        }
    }

  while (a1 < 0x1000000L)
    {
      a1 <<= 4;
      exp -= 4;
    }

  while (a1 < 0x40000000L)
    {
      a1 <<= 1;
      exp--;
    }

  /* pack up and go home */
  dl.l.upper = sign;
  dl.l.upper |= exp << 20L;
  dl.l.upper |= (a1 >> 10L) & ~HIDDEND;
  dl.l.lower = a1 << 22L;

  return dl.d;
}

/* convert unsigned int to float */
float
__floatunsisf (unsigned long l)
{
  double foo = __floatunsidf (l);
  return foo;
}

/* convert int to float */
float
__floatsisf (long l)
{
  double foo = __floatsidf (l);
  return foo;
}

/* convert float to double */
double
__extendsfdf2 (float a1)
{
  register union float_long fl1;
  register union double_long dl;
  register long exp;
  register long mant;

  fl1.f = a1;

  dl.l.upper = SIGN (fl1.l);
  if ((fl1.l & ~SIGNBIT) == 0)
    {
      dl.l.lower = 0;
      return dl.d;
    }

  exp = EXP(fl1.l);
  mant = MANT (fl1.l) & ~HIDDEN;
  if (exp == 0)
    {
      /* Denormal. */
      exp = 1;
      while (!(mant & HIDDEN))
        {
          mant <<= 1;
          exp--;
        }
      mant &= ~HIDDEN;
    }
  exp = exp - EXCESS + EXCESSD;
  dl.l.upper |= exp << 20;
  dl.l.upper |= mant >> 3;
  dl.l.lower = mant << 29;

  return dl.d;
}

/* convert double to float */
float
__truncdfsf2 (double a1)
{
  register long exp;
  register long mant;
  register union float_long fl;
  register union double_long dl1;
  int sticky;
  int shift;

  dl1.d = a1;

  if ((dl1.l.upper & ~SIGNBIT) == 0 && !dl1.l.lower)
    {
      fl.l = SIGND(dl1);
      return fl.f;
    }

  exp = EXPD (dl1) - EXCESSD + EXCESS;

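  /* Gather every bit that will not fit in the single-precision result:
     the low 22 bits of the lower word (not included in MANTD) plus the
     low 6 bits shifted out of the mantissa below.  Together with the
     round bit (mant & 1) and the result's LSB (mant & 2) this implements
     round-to-nearest, ties-to-even.  */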
  sticky = dl1.l.lower & ((1 << 22) - 1);
  mant = MANTD (dl1);
  /* shift double mantissa 6 bits so we can round */
  sticky |= mant & ((1 << 6) - 1);
  mant >>= 6;

  /* Check for underflow and denormals. */
  if (exp <= 0)
    {
      if (exp < -24)
        {
          sticky |= mant;
          mant = 0;
        }
      else
        {
          sticky |= mant & ((1 << (1 - exp)) - 1);
          mant >>= 1 - exp;
        }
      exp = 0;
    }

  /* now round */
  shift = 1;
  if ((mant & 1) && (sticky || (mant & 2)))
    {
      int rounding = exp ? 2 : 1;

      mant += 1;

      /* did the round overflow? */
      if (mant >= (HIDDEN << rounding))
        {
          exp++;
          shift = rounding;
        }
    }
  /* shift down */
  mant >>= shift;

  mant &= ~HIDDEN;

  /* pack up and go home */
  fl.l = PACK (SIGND (dl1), exp, mant);
  return (fl.f);
}

/* convert double to int */
long
__fixdfsi (double a1)
{
  register union double_long dl1;
  register long exp;
  register long l;

  dl1.d = a1;

  if (!dl1.l.upper && !dl1.l.lower)
    return 0;

  exp = EXPD (dl1) - EXCESSD - 31;
  l = MANTD (dl1);

  if (exp > 0)
    {
      /* Overflow: clamp to the most negative or most positive
         representable integer.  */
      return SIGND (dl1) ? 0x80000000L : 0x7fffffffL;
    }

  if (exp <= -32)
    return 0;

  /* shift down until exp = 0 */
  if (exp < 0)
    l >>= -exp;

  return (SIGND (dl1) ? -l : l);
}

/* convert float to int */
long
__fixsfsi (float a1)
{
  double foo = a1;
  return __fixdfsi (foo);
}

#else /* EXTFLOAT */

/* We do not need these routines for coldfire, as it has no extended
   float format.  */
#if !defined (__mcoldfire__)

/* Primitive extended precision floating point support.

   We assume all numbers are normalized, don't do any rounding, etc.  */

/* Prototypes for the above in case we use them.  */
double __floatunsidf (unsigned long);
double __floatsidf (long);
float __floatsisf (long);
double __extendsfdf2 (float);
float __truncdfsf2 (double);
long __fixdfsi (double);
long __fixsfsi (float);
long __cmpdf2 (double, double);

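/* Return nonzero if either extended-precision argument is a NaN, zero
   otherwise.  */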
int
__unordxf2(long double a, long double b)
{
  union long_double_long ldl;

  ldl.ld = a;
  if (EXPX(ldl) == EXPXMASK
      && ((ldl.l.middle & MANTXMASK) != 0 || ldl.l.lower != 0))
    return 1;
  ldl.ld = b;
  if (EXPX(ldl) == EXPXMASK
      && ((ldl.l.middle & MANTXMASK) != 0 || ldl.l.lower != 0))
    return 1;
  return 0;
}

/* convert double to long double */
long double
__extenddfxf2 (double d)
{
  register union double_long dl;
  register union long_double_long ldl;
  register long exp;

  dl.d = d;
  /*printf ("dfxf in: %g\n", d);*/

  ldl.l.upper = SIGND (dl);
  if ((dl.l.upper & ~SIGNBIT) == 0 && !dl.l.lower)
    {
      ldl.l.middle = 0;
      ldl.l.lower = 0;
      return ldl.ld;
    }

  exp = EXPD (dl) - EXCESSD + EXCESSX;
  ldl.l.upper |= exp << 16;
  ldl.l.middle = HIDDENX;
  /* 31-20: # mantissa bits in ldl.l.middle - # mantissa bits in dl.l.upper */
  ldl.l.middle |= (dl.l.upper & MANTDMASK) << (31 - 20);
  /* 1+20: explicit-integer-bit + # mantissa bits in dl.l.upper */
  ldl.l.middle |= dl.l.lower >> (1 + 20);
  /* 32 - 21: # bits of dl.l.lower in ldl.l.middle */
  ldl.l.lower = dl.l.lower << (32 - 21);

  /*printf ("dfxf out: %s\n", dumpxf (ldl.ld));*/
  return ldl.ld;
}

/* convert long double to double */
double
__truncxfdf2 (long double ld)
{
  register long exp;
  register union double_long dl;
  register union long_double_long ldl;

  ldl.ld = ld;
  /*printf ("xfdf in: %s\n", dumpxf (ld));*/

  dl.l.upper = SIGNX (ldl);
  if ((ldl.l.upper & ~SIGNBIT) == 0 && !ldl.l.middle && !ldl.l.lower)
    {
      dl.l.lower = 0;
      return dl.d;
    }

  exp = EXPX (ldl) - EXCESSX + EXCESSD;
  /* ??? quick and dirty: keep `exp' sane by clamping values that would
     overflow to the largest finite double exponent.  */
  if (exp >= EXPDMASK)
    exp = EXPDMASK - 1;
  dl.l.upper |= exp << (32 - (EXPDBITS + 1));
  /* +1-1: add one for sign bit, but take one off for explicit-integer-bit */
  dl.l.upper |= (ldl.l.middle & MANTXMASK) >> (EXPDBITS + 1 - 1);
  dl.l.lower = (ldl.l.middle & MANTXMASK) << (32 - (EXPDBITS + 1 - 1));
  dl.l.lower |= ldl.l.lower >> (EXPDBITS + 1 - 1);

  /*printf ("xfdf out: %g\n", dl.d);*/
  return dl.d;
}

/* convert a float to a long double */
long double
__extendsfxf2 (float f)
{
  long double foo = __extenddfxf2 (__extendsfdf2 (f));
  return foo;
}

/* convert a long double to a float */
float
__truncxfsf2 (long double ld)
{
  float foo = __truncdfsf2 (__truncxfdf2 (ld));
  return foo;
}

/* convert an int to a long double */
long double
__floatsixf (long l)
{
  double foo = __floatsidf (l);
  return foo;
}

/* convert an unsigned int to a long double */
long double
__floatunsixf (unsigned long l)
{
  double foo = __floatunsidf (l);
  return foo;
}

/* convert a long double to an int */
long
__fixxfsi (long double ld)
{
  long foo = __fixdfsi ((double) ld);
  return foo;
}

/* The remaining routines provide crude math support by working in
   double precision.  */

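/* The arithmetic helpers below therefore round and overflow as double,
   not as extended precision.  The comparison helpers all return the
   three-way result of __cmpdf2; the compiler compares the value returned
   by __eqxf2, __ltxf2, etc. against zero, so the shared result is
   sufficient (NaN operands get whatever __cmpdf2 gives them).  */
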
long double
__addxf3 (long double x1, long double x2)
{
  return (double) x1 + (double) x2;
}

long double
__subxf3 (long double x1, long double x2)
{
  return (double) x1 - (double) x2;
}

long double
__mulxf3 (long double x1, long double x2)
{
  return (double) x1 * (double) x2;
}

long double
__divxf3 (long double x1, long double x2)
{
  return (double) x1 / (double) x2;
}

long double
__negxf2 (long double x1)
{
  return - (double) x1;
}

long
__cmpxf2 (long double x1, long double x2)
{
  return __cmpdf2 ((double) x1, (double) x2);
}

long
__eqxf2 (long double x1, long double x2)
{
  return __cmpdf2 ((double) x1, (double) x2);
}

long
__nexf2 (long double x1, long double x2)
{
  return __cmpdf2 ((double) x1, (double) x2);
}

long
__ltxf2 (long double x1, long double x2)
{
  return __cmpdf2 ((double) x1, (double) x2);
}

long
__lexf2 (long double x1, long double x2)
{
  return __cmpdf2 ((double) x1, (double) x2);
}

long
__gtxf2 (long double x1, long double x2)
{
  return __cmpdf2 ((double) x1, (double) x2);
}

long
__gexf2 (long double x1, long double x2)
{
  return __cmpdf2 ((double) x1, (double) x2);
}

#endif /* !__mcoldfire__ */
#endif /* EXTFLOAT */