/*
 * Copyright (c) 1985 Regents of the University of California.
 *
 * Use and reproduction of this software are granted in accordance with
 * the terms and conditions specified in the Berkeley Software License
 * Agreement (in particular, this entails acknowledgement of the programs'
 * source, and inclusion of this notice) with the additional understanding
 * that all recipients should regard themselves as participants in an
 * ongoing research project and hence should feel obligated to report
 * their experiences (good or bad) with these elementary function codes,
 * using "sendbug 4bsd-bugs@BERKELEY", to the authors.
 */

#ifndef lint
static char sccsid[] =
"@(#)support.c	1.1 (Berkeley) 5/23/85; 1.4 (ucb.elefunt) 11/02/86";
#endif	/* not lint */

/*
 * Some IEEE standard p754 recommended functions and remainder and sqrt for
 * supporting the C elementary functions.
 ******************************************************************************
 * WARNING:
 *      These codes are developed (in double) to support the C elementary
 * functions temporarily.  They are not universal, and some of them are very
 * slow (in particular, drem and sqrt are extremely inefficient).  Each
 * computer system should have its own implementation of these functions,
 * written in its own assembler.
 ******************************************************************************
 *
 * IEEE p754 required operations:
 *     drem(x,p)
 *              returns  x REM p  =  x - [x/p]*p , where [x/p] is the integer
 *              nearest x/p; in the halfway case, choose the even one.
 *     sqrt(x)
 *              returns the square root of x correctly rounded according to
 *              the rounding mode.
 *
 * IEEE p754 recommended functions:
 * (a) copysign(x,y)
 *              returns x with the sign of y.
 * (b) scalb(x,N)
 *              returns  x * (2**N), for integer values N.
 * (c) logb(x)
 *              returns the unbiased exponent of x, a signed integer in
 *              double precision, except that logb(0) is -INF, logb(INF)
 *              is +INF, and logb(NAN) is that NAN.
 * (d) finite(x)
 *              returns the value TRUE if -INF < x < +INF and returns
 *              FALSE otherwise.
 *
 *
 * CODED IN C BY K.C. NG, 11/25/84;
 * REVISED BY K.C. NG on 1/22/85, 2/13/85, 3/24/85.
 */


#ifdef VAX      /* VAX D format */
static unsigned short msign=0x7fff , mexp =0x7f80 ;
static short  prep1=57, gap=7, bias=129            ;
static double novf=1.7E38, nunf=3.0E-39, zero=0.0  ;
#else           /* IEEE double format */
static unsigned short msign=0x7fff, mexp =0x7ff0   ;
static short prep1=54, gap=4, bias=1023            ;
static double novf=1.7E308, nunf=3.0E-308, zero=0.0;
#endif

double scalb(x,N)
double x; int N;
{
        int k;
        double scalb();

#ifdef NATIONAL
        unsigned short *px=(unsigned short *) &x + 3;
#else           /* VAX, SUN, ZILOG */
        unsigned short *px=(unsigned short *) &x;
#endif

        if( x == zero )  return(x);

#ifdef VAX
        if( (k= *px & mexp ) != ~msign ) {
            if( N<-260) return(nunf*nunf); else if(N>260) return(novf+novf);
#else           /* IEEE */
        if( (k= *px & mexp ) != mexp ) {
            if( N<-2100) return(nunf*nunf); else if(N>2100) return(novf+novf);
            if( k == 0 ) {
                 x *= scalb(1.0,(int)prep1);  N -= prep1; return(scalb(x,N));}
#endif

            if((k = (k>>gap)+ N) > 0 )
                if( k < (mexp>>gap) ) *px = (*px&~mexp) | (k<<gap);
                else x=novf+novf;               /* overflow */
            else
                if( k > -prep1 )
                        /* gradual underflow */
                        {*px=(*px&~mexp)|(short)(1<<gap); x *= scalb(1.0,k-1);}
                else
                        return(nunf*nunf);
            }
        return(x);
}


double copysign(x,y)
double x,y;
{
#ifdef NATIONAL
        unsigned short  *px=(unsigned short *) &x+3,
                        *py=(unsigned short *) &y+3;
#else           /* VAX, SUN, ZILOG */
        unsigned short  *px=(unsigned short *) &x,
                        *py=(unsigned short *) &y;
#endif

#ifdef VAX
        if ( (*px & mexp) == 0 ) return(x);
#endif

        *px = ( *px & msign ) | ( *py & ~msign );
        return(x);
}

double logb(x)
double x;
{

#ifdef NATIONAL
        short *px=(short *) &x+3, k;
#else           /* VAX, SUN, ZILOG */
        short *px=(short *) &x, k;
#endif

#ifdef VAX
        return (int)(((*px&mexp)>>gap)-bias);
#else           /* IEEE */
        if( (k= *px & mexp ) != mexp )
            if ( k != 0 )
                return ( (k>>gap) - bias );
            else if( x != zero)
                return ( -1022.0 );
            else
                return(-(1.0/zero));
        else if(x != x)
            return(x);
        else
            {*px &= msign; return(x);}
#endif
}

finite(x)
double x;
{
#ifdef VAX
        return(1);
#else           /* IEEE */
#ifdef NATIONAL
        return( (*((short *) &x+3 ) & mexp ) != mexp );
#else           /* SUN, ZILOG */
        return( (*((short *) &x ) & mexp ) != mexp );
#endif
#endif
}
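/*
 * Worked example (editorial note, not in the original source): with the
 * IEEE double constants above, and on machines where the first short of a
 * double holds the sign and exponent (the SUN/ZILOG case), the most
 * significant 16 bits of 8.0 are 0x4020.  logb() extracts the biased
 * exponent as
 *
 *      (0x4020 & mexp) >> gap  =  0x4020 >> 4  =  0x402  =  1026,
 *
 * and 1026 - bias = 1026 - 1023 = 3, so logb(8.0) = 3.  scalb() adjusts
 * the same field: scalb(8.0,2) rewrites the biased exponent as 1028,
 * giving 32.0.
 */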
double drem(x,p)
double x,p;
{
        short sign;
        double hp,dp,tmp,drem(),scalb();
        unsigned short  k;
#ifdef NATIONAL
        unsigned short
              *px=(unsigned short *) &x  +3,
              *pp=(unsigned short *) &p  +3,
              *pd=(unsigned short *) &dp +3,
              *pt=(unsigned short *) &tmp+3;
#else           /* VAX, SUN, ZILOG */
        unsigned short
              *px=(unsigned short *) &x  ,
              *pp=(unsigned short *) &p  ,
              *pd=(unsigned short *) &dp ,
              *pt=(unsigned short *) &tmp;
#endif

        *pp &= msign ;

#ifdef VAX
        if( ( *px & mexp ) == ~msign )
#else           /* IEEE */
        if( ( *px & mexp ) == mexp )
#endif
                return  (x-p)-(x-p);    /* create nan if x is inf */
        if(p==zero) return zero/zero;
#ifdef VAX
        if( ( *pp & mexp ) == ~msign )
#else           /* IEEE */
        if( ( *pp & mexp ) == mexp )
#endif
                { if (p != p) return p; else return x;}
        else  if ( ((*pp & mexp)>>gap) <= 1 )
                /* subnormal p, or almost subnormal p */
                { double b; b=scalb(1.0,(int)prep1);
                  p *= b; x = drem(x,p); x *= b; return(drem(x,p)/b);}
        else  if ( p >= novf/2)
                { p /= 2 ; x /= 2; return(drem(x,p)*2);}
        else
                {
                dp=p+p; hp=p/2;
                sign= *px & ~msign ;
                *px &= msign       ;
                while ( x > dp )
                    {
                        k=(*px & mexp) - (*pd & mexp) ;
                        tmp = dp ;
                        *pt += k ;

#ifdef VAX
                        if( x < tmp ) *pt -= 128 ;
#else           /* IEEE */
                        if( x < tmp ) *pt -= 16 ;
#endif

                        x -= tmp ;
                    }
                if ( x > hp )
                    { x -= p ;  if ( x >= hp ) x -= p ; }

                *px = *px ^ sign;
                return( x);
                }
}
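/*
 * Worked example (editorial note, not in the original source): drem()
 * rounds the quotient to the nearest integer, and to the even integer on
 * ties.  Thus drem(5.0,2.0) = 5 - 2*2 = 1 (5/2 = 2.5 ties to the even
 * integer 2), while drem(7.0,2.0) = 7 - 4*2 = -1 (7/2 = 3.5 ties to the
 * even integer 4).
 */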
double sqrt(x)
double x;
{
        double q,s,b,r;
        double logb(),scalb();
        double t,zero=0.0;
        int m,n,i,finite();
#ifdef VAX
        int k=54;
#else           /* IEEE */
        int k=51;
#endif

    /* sqrt(NaN) is NaN, sqrt(+-0) = +-0 */
        if(x!=x||x==zero) return(x);

    /* sqrt(negative) is invalid */
        if(x<zero) return(zero/zero);

    /* sqrt(INF) is INF */
        if(!finite(x)) return(x);

    /* scale x to [1,4) */
        n=logb(x);
        x=scalb(x,-n);
        if((m=logb(x))!=0) x=scalb(x,-m);       /* subnormal number */
        m += n;
        n = m/2;
        if((n+n)!=m) {x *= 2; m -=1; n=m/2;}

    /* generate sqrt(x) bit by bit (accumulating in q) */
        q=1.0; s=4.0; x -= 1.0; r=1;
        for(i=1;i<=k;i++) {
            t=s+1; x *= 4; r /= 2;
            if(t<=x) {
                s=t+t+2, x -= t; q += r;}
            else
                s *= 2;
            }

    /* generate the last bit and determine the final rounding */
        r/=2; x *= 4;
        if(x==zero) goto end; 100+r;            /* trigger inexact flag */
        if(s<x) {
            q+=r; x -=s; s += 2; s *= 2; x *= 4;
            t = (x-s)-5;
            b=1.0+3*r/4;   if(b==1.0) goto end; /* b==1 : Round-to-zero */
            b=1.0+r/4;     if(b>1.0) t=1;       /* b>1 : Round-to-(+INF) */
            if(t>=0) q+=r; }                    /* else: Round-to-nearest */
        else {
            s *= 2; x *= 4;
            t = (x-s)-1;
            b=1.0+3*r/4;   if(b==1.0) goto end;
            b=1.0+r/4;     if(b>1.0) t=1;
            if(t>=0) q+=r; }

end:        return(scalb(q,n));
}
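/*
 * Editorial sketch, not part of the original library: sqrt() above produces
 * one bit of the root per loop iteration (the bit weight r halves each
 * time).  The same restoring, digit-by-digit idea is easier to follow on
 * unsigned integers; the name isqrt and the SUPPORT_DEMO guard are
 * assumptions for illustration only.
 */
#ifdef SUPPORT_DEMO
static unsigned long
isqrt(v)
unsigned long v;                        /* returns floor(sqrt(v)) */
{
        unsigned long q = 0;                            /* root so far */
        unsigned long bit = (unsigned long)1 << 30;     /* highest power of 4 */

        while (bit > v)
                bit >>= 2;
        while (bit != 0) {
                if (v >= q + bit) {     /* this root bit fits: keep it */
                        v -= q + bit;
                        q = (q >> 1) + bit;
                } else
                        q >>= 1;        /* this root bit is 0 */
                bit >>= 2;
        }
        return (q);
}
#endif  /* SUPPORT_DEMO */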
#if 0
/* DREM(X,Y)
 * RETURN X REM Y =X-N*Y, N=[X/Y] ROUNDED (ROUNDED TO EVEN IN THE HALF WAY CASE)
 * DOUBLE PRECISION (VAX D format 56 bits, IEEE DOUBLE 53 BITS)
 * INTENDED FOR ASSEMBLY LANGUAGE
 * CODED IN C BY K.C. NG, 3/23/85, 4/8/85.
 *
 * Warning: this code should not get compiled in unless ALL of
 * the following machine-dependent routines are supplied.
 *
 * Required machine dependent functions (not on a VAX):
 *      swapINX(i): save inexact flag and reset it to "i"
 *      swapENI(e): save inexact enable and reset it to "e"
 * (an illustrative software stand-in for these routines follows this block)
 */

double drem(x,y)
double x,y;
{

#ifdef NATIONAL         /* order of words in floating point number */
        static n0=3,n1=2,n2=1,n3=0;
#else           /* VAX, SUN, ZILOG */
        static n0=0,n1=1,n2=2,n3=3;
#endif

        static unsigned short mexp =0x7ff0, m25 =0x0190, m57 =0x0390;
        static double zero=0.0;
        double hy,y1,t,t1;
        short k;
        long n;
        int i,e;
        unsigned short  xexp,yexp, *px  =(unsigned short *) &x  ,
                        nx,nf,     *py  =(unsigned short *) &y  ,
                        sign,      *pt  =(unsigned short *) &t  ,
                                   *pt1 =(unsigned short *) &t1 ;

        xexp = px[n0] & mexp ;          /* exponent of x */
        yexp = py[n0] & mexp ;          /* exponent of y */
        sign = px[n0] &0x8000;          /* sign of x */

/* return NaN if x is NaN, or y is NaN, or x is INF, or y is zero */
        if(x!=x) return(x); if(y!=y) return(y);         /* x or y is NaN */
        if( xexp == mexp )   return(zero/zero);         /* x is INF */
        if(y==zero) return(y/y);

/* save the inexact flag and inexact enable in i and e respectively
 * and reset them to zero
 */
        i=swapINX(0);   e=swapENI(0);

/* subnormal number */
        nx=0;
        if(yexp==0) {t=1.0,pt[n0]+=m57; y*=t; nx=m57;}

/* if y is tiny (biased exponent <= 57), scale up y to y*2**57 */
        if( yexp <= m57 ) {py[n0]+=m57; nx+=m57; yexp+=m57;}

        nf=nx;
        py[n0] &= 0x7fff;
        px[n0] &= 0x7fff;

/* mask off the least significant 27 bits of y */
        t=y; pt[n3]=0; pt[n2]&=0xf800; y1=t;

/* LOOP: argument reduction on x whenever x > y */
loop:
        while ( x > y )
        {
            t=y;
            t1=y1;
            xexp=px[n0]&mexp;           /* exponent of x */
            k=xexp-yexp-m25;
            if(k>0)     /* if x/y >= 2**26, scale up y so that x/y < 2**26 */
                {pt[n0]+=k;pt1[n0]+=k;}
            n=x/t; x=(x-n*t1)-n*(t-t1);
        }
    /* end while (x > y) */

        if(nx!=0) {t=1.0; pt[n0]+=nx; x*=t; nx=0; goto loop;}

/* final adjustment */

        hy=y/2.0;
        if(x>hy||((x==hy)&&n%2==1)) x-=y;
        px[n0] ^= sign;
        if(nf!=0) { t=1.0; pt[n0]-=nf; x*=t;}

/* restore inexact flag and inexact enable */
        swapINX(i); swapENI(e);

        return(x);
}
#endif

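/*
 * Editorial sketch, not part of the original source: the interface that the
 * disabled block above (and newsqrt below) expects from the machine-dependent
 * routines.  A real port reads and rewrites the FPU status/control register;
 * this software stand-in only illustrates the swap semantics (return the old
 * value, install the new one).  The variable names and the SUPPORT_DEMO guard
 * are assumptions; addc()/subc() are not sketched here because they require
 * 64-bit integer arithmetic on the double representation.
 */
#ifdef SUPPORT_DEMO
static int sim_inexact, sim_inexact_enable, sim_rounding_mode;

static int
swapINX(i)                      /* swap the (simulated) inexact flag */
int i;
{
        int old = sim_inexact;  sim_inexact = i;  return(old);
}

static int
swapENI(e)                      /* swap the (simulated) inexact enable */
int e;
{
        int old = sim_inexact_enable;  sim_inexact_enable = e;  return(old);
}

static int
swapRM(r)                       /* swap the (simulated) rounding mode */
int r;
{
        int old = sim_rounding_mode;  sim_rounding_mode = r;  return(old);
}
#endif  /* SUPPORT_DEMO */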
#if 0
/* SQRT
 * RETURN CORRECTLY ROUNDED (ACCORDING TO THE ROUNDING MODE) SQRT
 * FOR IEEE DOUBLE PRECISION ONLY, INTENDED FOR ASSEMBLY LANGUAGE
 * CODED IN C BY K.C. NG, 3/22/85.
 *
 * Warning: this code should not get compiled in unless ALL of
 * the following machine-dependent routines are supplied.
 *
 * Required machine dependent functions:
 *      swapINX(i)  ...return the status of INEXACT flag and reset it to "i"
 *      swapRM(r)   ...return the current Rounding Mode and reset it to "r"
 *      swapENI(e)  ...return the status of inexact enable and reset it to "e"
 *      addc(t)     ...perform t=t+1 regarding t as a 64 bit unsigned integer
 *      subc(t)     ...perform t=t-1 regarding t as a 64 bit unsigned integer
 */

static unsigned long table[] = {
0, 1204, 3062, 5746, 9193, 13348, 18162, 23592, 29598, 36145, 43202, 50740,
58733, 67158, 75992, 85215, 83599, 71378, 60428, 50647, 41945, 34246, 27478,
21581, 16499, 12183, 8588, 5674, 3403, 1742, 661, 130, };

double newsqrt(x)
double x;
{
        double y,z,t,addc(),subc(),b54=134217728.*134217728.; /* b54=2**54 */
        long mx,scalx,mexp=0x7ff00000;
        int i,j,r,e,swapINX(),swapRM(),swapENI();
        unsigned long *py=(unsigned long *) &y   ,
                      *pt=(unsigned long *) &t   ,
                      *px=(unsigned long *) &x   ;
#ifdef NATIONAL         /* ordering of word in a floating point number */
        int n0=1, n1=0;
#else
        int n0=0, n1=1;
#endif
/* Rounding Mode:  RN ...round-to-nearest
 *                 RZ ...round-towards 0
 *                 RP ...round-towards +INF
 *                 RM ...round-towards -INF
 */
        int RN=0,RZ=1,RP=2,RM=3;/* machine dependent: work on a Zilog Z8070
                                 * and a National 32081 & 16081
                                 */

/* exceptions */
        if(x!=x||x==0.0) return(x);   /* sqrt(NaN) is NaN, sqrt(+-0) = +-0 */
        if(x<0) return((x-x)/(x-x));  /* sqrt(negative) is invalid */
        if((mx=px[n0]&mexp)==mexp) return(x);  /* sqrt(+INF) is +INF */

/* save, reset, initialize */
        e=swapENI(0);   /* ...save and reset the inexact enable */
        i=swapINX(0);   /* ...save INEXACT flag */
        r=swapRM(RN);   /* ...save and reset the Rounding Mode to RN */
        scalx=0;

/* subnormal number, scale up x to x*2**54 */
        if(mx==0) {x *= b54 ; scalx-=0x01b00000;}

/* scale x to avoid intermediate over/underflow:
 * if (x > 2**512) x=x/2**512; if (x < 2**-512) x=x*2**512 */
        if(mx>0x5ff00000) {px[n0] -= 0x20000000; scalx+= 0x10000000;}
        if(mx<0x1ff00000) {px[n0] += 0x20000000; scalx-= 0x10000000;}

/* magic initial approximation to almost 8 sig. bits */
        py[n0]=(px[n0]>>1)+0x1ff80000;
        py[n0]=py[n0]-table[(py[n0]>>15)&31];

/* Heron's rule once with correction to improve y to almost 18 sig. bits */
        t=x/y; y=y+t; py[n0]=py[n0]-0x00100006; py[n1]=0;

/* triple to almost 56 sig. bits; now y approx. sqrt(x) to within 1 ulp */
        t=y*y; z=t; pt[n0]+=0x00100000; t+=z; z=(x-z)*y;
        t=z/(t+x) ;  pt[n0]+=0x00100000; y+=t;

/* twiddle last bit to force y correctly rounded */
        swapRM(RZ);     /* ...set Rounding Mode to round-toward-zero */
        swapINX(0);     /* ...clear INEXACT flag */
        swapENI(e);     /* ...restore inexact enable status */
        t=x/y;          /* ...chopped quotient, possibly inexact */
        j=swapINX(i);   /* ...read and restore inexact flag */
        if(j==0) { if(t==y) goto end; else t=subc(t); }  /* ...t=t-ulp */
        b54+0.1;        /* ..trigger inexact flag, sqrt(x) is inexact */
        if(r==RN) t=addc(t);                    /* ...t=t+ulp */
        else if(r==RP) { t=addc(t);y=addc(y);}  /* ...t=t+ulp;y=y+ulp; */
        y=y+t;                          /* ...chopped sum */
        py[n0]=py[n0]-0x00100000;       /* ...correctly rounded sqrt(x) */
end:    py[n0]=py[n0]+scalx;            /* ...scale back y */
        swapRM(r);                      /* ...restore Rounding Mode */
        return(y);
}
#endif
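/*
 * Editorial sketch, not part of the original source: a tiny test driver
 * exercising the routines defined above.  The SUPPORT_DEMO guard is an
 * assumption for illustration only; the expected values in the comments
 * follow from the definitions in the header comment.
 */
#ifdef SUPPORT_DEMO
#include <stdio.h>

main()
{
        printf("copysign(3.0,-1.0) = %g\n", copysign(3.0,-1.0)); /* -3 */
        printf("scalb(1.5,4)       = %g\n", scalb(1.5,4));       /* 24 */
        printf("logb(10.0)         = %g\n", logb(10.0));         /* 3 */
        printf("finite(1.0)        = %d\n", finite(1.0));        /* 1 */
        printf("drem(5.0,2.0)      = %g\n", drem(5.0,2.0));      /* 1 */
        printf("drem(7.0,2.0)      = %g\n", drem(7.0,2.0));      /* -1 */
        printf("sqrt(2.0)          = %.17g\n", sqrt(2.0));
        return 0;
}
#endif  /* SUPPORT_DEMO */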