xref: /csrg-svn/lib/libm/vax/argred.s (revision 26924)
124565Szliu#
224565Szliu# Copyright (c) 1985 Regents of the University of California.
324565Szliu#
424565Szliu# Use and reproduction of this software are granted  in  accordance  with
524565Szliu# the terms and conditions specified in  the  Berkeley  Software  License
624565Szliu# Agreement (in particular, this entails acknowledgement of the programs'
724565Szliu# source, and inclusion of this notice) with the additional understanding
824565Szliu# that  all  recipients  should regard themselves as participants  in  an
924565Szliu# ongoing  research  project and hence should  feel  obligated  to report
1024565Szliu# their  experiences (good or bad) with these elementary function  codes,
1124565Szliu# using "sendbug 4bsd-bugs@BERKELEY", to the authors.
1224565Szliu#
1324728Selefunt	.data
1424728Selefunt	.align	2
1524728Selefunt_sccsid:
16*26924Szliu.asciz	"@(#)argred.s	1.1 (Berkeley) 8/21/85; 1.4 (ucb.elefunt) 03/21/86"
1724565Szliu
1824565Szliu#  libm$argred implements Bob Corbett's argument reduction and
1924565Szliu#  libm$sincos implements Peter Tang's double precision sin/cos.
2024565Szliu#
2124565Szliu#  Note: The two entry points libm$argred and libm$sincos are meant
2224565Szliu#        to be used only by _sin, _cos and _tan.
2324565Szliu#
2424565Szliu# method: true range reduction to [-pi/4,pi/4], P. Tang  &  B. Corbett
2524565Szliu# S. McDonald, April 4,  1985
2624565Szliu#
2724565Szliu	.globl	libm$argred
2824565Szliu	.globl	libm$sincos
2924565Szliu	.text
3024565Szliu	.align	1
3124565Szliu
3224565Szliulibm$argred:
3324565Szliu#
3424565Szliu#  Compare the argument with the largest possible that can
3524565Szliu#  be reduced by table lookup.  r3 := |x|  will be used in  table_lookup .
3624565Szliu#
3724565Szliu	movd	r0,r3		# r3/r4 = x (D-format input arrives in r0/r1)
3824565Szliu	bgeq	abs1		# skip negation if x is already non-negative
3924565Szliu	mnegd	r3,r3		# r3 = |x|
4024565Szliuabs1:
#
#  The cutoff below is  29 * pi/2 , the last entry of the  leading
#  table; anything smaller can be reduced by a single table lookup.
#
4124565Szliu	cmpd	r3,$0d+4.55530934770520019583e+01
4224565Szliu	blss	small_arg
4324565Szliu	jsb	trigred		# |x| >= 29*pi/2 : full Corbett reduction
4424565Szliu	rsb
4524565Szliusmall_arg:
4624565Szliu	jsb	table_lookup	# |x| < 29*pi/2 : reduce via the pi/2 tables
4724565Szliu	rsb
4824565Szliu#
4924565Szliu#  At this point,
5024565Szliu#	   r0  contains the quadrant number, 0, 1, 2, or 3;
5124565Szliu#	r2/r1  contains the reduced argument as a D-format number;
5224565Szliu#  	   r3  contains a F-format extension to the reduced argument;
5324565Szliu#          r4  contains a  0 or 1  corresponding to a  sin or cos  entry.
5424565Szliu#
5524565Szliulibm$sincos:
5624565Szliu#
5724565Szliu#  Compensate for a cosine entry by adding one to the quadrant number.
5824565Szliu#
5924565Szliu	addl2	r4,r0
6024565Szliu#
6124565Szliu#  Polyd clobbers  r5-r0 ;  save  X  in  r7/r6 .
6224565Szliu#  This can be avoided by rewriting  trigred .
6324565Szliu#
6424565Szliu	movd	r1,r6
6524565Szliu#
6624565Szliu#  Likewise, save  alpha  in  r8 .
6724565Szliu#  This can be avoided by rewriting  trigred .
6824565Szliu#
6924565Szliu	movf	r3,r8
7024565Szliu#
7124565Szliu#  Odd or even quadrant?  cosine if odd, sine otherwise.
7224565Szliu#  Save  floor(quadrant/2) in  r9  ; it determines the final sign.
7324565Szliu#
7424565Szliu	rotl	$-1,r0,r9	# rotate quadrant bit 0 into the sign bit of r9
7524565Szliu	blss	cosine		# odd quadrant: evaluate the cosine polynomial
7624565Szliusine:
7724565Szliu	muld2	r1,r1		# Xsq = X * X
78*26924Szliu	cmpw	$0x2480,r1	# [zl] Xsq > 2^-56?
79*26924Szliu	blss	1f		# [zl] yes, go ahead and do polyd
80*26924Szliu	clrq	r1		# [zl] work around 11/780 FPA polyd bug
81*26924Szliu1:
8224565Szliu	polyd	r1,$7,sin_coef	# Q = P(Xsq) , of deg 7
8324565Szliu	mulf3	$0f3.0,r8,r4	# beta = 3 * alpha
8424565Szliu	mulf2	r0,r4		# beta = Q * beta
8524565Szliu	addf2	r8,r4		# beta = alpha + beta
8624565Szliu	muld2	r6,r0		# S(X) = X * Q
8724565Szliu#	cvtfd	r4,r4		... r5 = 0 after a polyd.
8824565Szliu	addd2	r4,r0		# S(X) = beta + S(X)
8924565Szliu	addd2	r6,r0		# S(X) = X + S(X)
9024565Szliu	brb	done
9124565Szliucosine:
9224565Szliu	muld2	r6,r6		# Xsq = X * X
9324565Szliu	beql	zero_arg	# X == 0 : answer is exactly 1
9424565Szliu	mulf2	r1,r8		# beta = X * alpha
9524565Szliu	polyd	r6,$7,cos_coef	# Q = P'(Xsq) , of deg 7
9624565Szliu	subd3	r0,r8,r0	# beta = beta - Q
9724565Szliu	subw2	$0x80,r6	# Xsq = Xsq / 2  (decrement D-format exponent)
9824565Szliu	addd2	r0,r6		# Xsq = Xsq + beta
9924565Szliuzero_arg:
10024565Szliu	subd3	r6,$0d1.0,r0	# C(X) = 1 - Xsq
10124565Szliudone:
10224565Szliu	blbc	r9,even		# low bit of r9 = floor(quadrant/2) mod 2 : final sign
10324565Szliu	mnegd	r0,r0
10424565Szliueven:
10524565Szliu	rsb
10624565Szliu
10724565Szliu.data
10824565Szliu.align	2
10924565Szliu
#
#  Polynomial coefficients for Tang's sin/cos approximations on
#  [-pi/4,pi/4], stored highest degree first as  polyd  requires.
#
11024565Szliusin_coef:
11124565Szliu	.double	0d-7.53080332264191085773e-13	# s7 = 2^-29 -1.a7f2504ffc49f8..
11224565Szliu	.double	0d+1.60573519267703489121e-10	# s6 = 2^-21  1.611adaede473c8..
11324565Szliu	.double	0d-2.50520965150706067211e-08	# s5 = 2^-1a -1.ae644921ed8382..
11424565Szliu	.double	0d+2.75573191800593885716e-06	# s4 = 2^-13  1.71de3a4b884278..
11524565Szliu	.double	0d-1.98412698411850507950e-04	# s3 = 2^-0d -1.a01a01a0125e7d..
11624565Szliu	.double	0d+8.33333333333325688985e-03	# s2 = 2^-07  1.11111111110e50
11724565Szliu	.double	0d-1.66666666666666664354e-01	# s1 = 2^-03 -1.55555555555554
11824565Szliu	.double	0d+0.00000000000000000000e+00	# s0 = 0
11924565Szliu
12024565Szliucos_coef:
12124565Szliu	.double	0d-1.13006966202629430300e-11	# s7 = 2^-25 -1.8D9BA04D1374BE..
12224565Szliu	.double	0d+2.08746646574796004700e-09	# s6 = 2^-1D  1.1EE632650350BA..
12324565Szliu	.double	0d-2.75573073031284417300e-07	# s5 = 2^-16 -1.27E4F31411719E..
12424565Szliu	.double	0d+2.48015872682668025200e-05	# s4 = 2^-10  1.A01A0196B902E8..
12524565Szliu	.double	0d-1.38888888888464709200e-03	# s3 = 2^-0A -1.6C16C16C11FACE..
12624565Szliu	.double	0d+4.16666666666664761400e-02	# s2 = 2^-05  1.5555555555539E
12724565Szliu	.double	0d+0.00000000000000000000e+00	# s1 = 0
12824565Szliu	.double	0d+0.00000000000000000000e+00	# s0 = 0
12924565Szliu
13024565Szliu#
13124565Szliu#  Multiples of  pi/2  expressed as the sum of three doubles,
13224565Szliu#
13324565Szliu#  trailing:	n * pi/2 ,  n = 0, 1, 2, ..., 29
13424565Szliu#			trailing[n] ,
13524565Szliu#
13624565Szliu#  middle:	n * pi/2 ,  n = 0, 1, 2, ..., 29
13724565Szliu#			middle[n]   ,
13824565Szliu#
13924565Szliu#  leading:	n * pi/2 ,  n = 0, 1, 2, ..., 29
14024565Szliu#			leading[n]  ,
14124565Szliu#
14224565Szliu#	where
14324565Szliu#		leading[n]  := (n * pi/2)  rounded,
14424565Szliu#		middle[n]   := (n * pi/2  -  leading[n])  rounded,
14524565Szliu#		trailing[n] := (( n * pi/2 - leading[n]) - middle[n])  rounded .
14624565Szliu
#
#  Each of the three tables below has 30 entries; table_lookup
#  indexes them with a byte offset of  8*n ,  n = 0..29  (n never
#  exceeds 29 because of the  29*pi/2  cutoff in  libm$argred ).
#
14724565Szliutrailing:
14824565Szliu	.double	0d+0.00000000000000000000e+00	#  0 * pi/2  trailing
14924565Szliu	.double	0d+4.33590506506189049611e-35	#  1 * pi/2  trailing
15024565Szliu	.double	0d+8.67181013012378099223e-35	#  2 * pi/2  trailing
15124565Szliu	.double	0d+1.30077151951856714215e-34	#  3 * pi/2  trailing
15224565Szliu	.double	0d+1.73436202602475619845e-34	#  4 * pi/2  trailing
15324565Szliu	.double	0d-1.68390735624352669192e-34	#  5 * pi/2  trailing
15424565Szliu	.double	0d+2.60154303903713428430e-34	#  6 * pi/2  trailing
15524565Szliu	.double	0d-8.16726343231148352150e-35	#  7 * pi/2  trailing
15624565Szliu	.double	0d+3.46872405204951239689e-34	#  8 * pi/2  trailing
15724565Szliu	.double	0d+3.90231455855570147991e-34	#  9 * pi/2  trailing
15824565Szliu	.double	0d-3.36781471248705338384e-34	# 10 * pi/2  trailing
15924565Szliu	.double	0d-1.06379439835298071785e-33	# 11 * pi/2  trailing
16024565Szliu	.double	0d+5.20308607807426856861e-34	# 12 * pi/2  trailing
16124565Szliu	.double	0d+5.63667658458045770509e-34	# 13 * pi/2  trailing
16224565Szliu	.double	0d-1.63345268646229670430e-34	# 14 * pi/2  trailing
16324565Szliu	.double	0d-1.19986217995610764801e-34	# 15 * pi/2  trailing
16424565Szliu	.double	0d+6.93744810409902479378e-34	# 16 * pi/2  trailing
16524565Szliu	.double	0d-8.03640094449267300110e-34	# 17 * pi/2  trailing
16624565Szliu	.double	0d+7.80462911711140295982e-34	# 18 * pi/2  trailing
16724565Szliu	.double	0d-7.16921993148029483506e-34	# 19 * pi/2  trailing
16824565Szliu	.double	0d-6.73562942497410676769e-34	# 20 * pi/2  trailing
16924565Szliu	.double	0d-6.30203891846791677593e-34	# 21 * pi/2  trailing
17024565Szliu	.double	0d-2.12758879670596143570e-33	# 22 * pi/2  trailing
17124565Szliu	.double	0d+2.53800212047402350390e-33	# 23 * pi/2  trailing
17224565Szliu	.double	0d+1.04061721561485371372e-33	# 24 * pi/2  trailing
17324565Szliu	.double	0d+6.11729905311472319056e-32	# 25 * pi/2  trailing
17424565Szliu	.double	0d+1.12733531691609154102e-33	# 26 * pi/2  trailing
17524565Szliu	.double	0d-3.70049587943078297272e-34	# 27 * pi/2  trailing
17624565Szliu	.double	0d-3.26690537292459340860e-34	# 28 * pi/2  trailing
17724565Szliu	.double	0d-1.14812616507957271361e-34	# 29 * pi/2  trailing
17824565Szliu
17924565Szliumiddle:
18024565Szliu	.double	0d+0.00000000000000000000e+00	#  0 * pi/2  middle
18124565Szliu	.double	0d+5.72118872610983179676e-18	#  1 * pi/2  middle
18224565Szliu	.double	0d+1.14423774522196635935e-17	#  2 * pi/2  middle
18324565Szliu	.double	0d-3.83475850529283316309e-17	#  3 * pi/2  middle
18424565Szliu	.double	0d+2.28847549044393271871e-17	#  4 * pi/2  middle
18524565Szliu	.double	0d-2.69052076007086676522e-17	#  5 * pi/2  middle
18624565Szliu	.double	0d-7.66951701058566632618e-17	#  6 * pi/2  middle
18724565Szliu	.double	0d-1.54628301484890040587e-17	#  7 * pi/2  middle
18824565Szliu	.double	0d+4.57695098088786543741e-17	#  8 * pi/2  middle
18924565Szliu	.double	0d+1.07001849766246313192e-16	#  9 * pi/2  middle
19024565Szliu	.double	0d-5.38104152014173353044e-17	# 10 * pi/2  middle
19124565Szliu	.double	0d-2.14622680169080983801e-16	# 11 * pi/2  middle
19224565Szliu	.double	0d-1.53390340211713326524e-16	# 12 * pi/2  middle
19324565Szliu	.double	0d-9.21580002543456677056e-17	# 13 * pi/2  middle
19424565Szliu	.double	0d-3.09256602969780081173e-17	# 14 * pi/2  middle
19524565Szliu	.double	0d+3.03066796603896507006e-17	# 15 * pi/2  middle
19624565Szliu	.double	0d+9.15390196177573087482e-17	# 16 * pi/2  middle
19724565Szliu	.double	0d+1.52771359575124969107e-16	# 17 * pi/2  middle
19824565Szliu	.double	0d+2.14003699532492626384e-16	# 18 * pi/2  middle
19924565Szliu	.double	0d-1.68853170360202329427e-16	# 19 * pi/2  middle
20024565Szliu	.double	0d-1.07620830402834670609e-16	# 20 * pi/2  middle
20124565Szliu	.double	0d+3.97700719404595604379e-16	# 21 * pi/2  middle
20224565Szliu	.double	0d-4.29245360338161967602e-16	# 22 * pi/2  middle
20324565Szliu	.double	0d-3.68013020380794313406e-16	# 23 * pi/2  middle
20424565Szliu	.double	0d-3.06780680423426653047e-16	# 24 * pi/2  middle
20524565Szliu	.double	0d-2.45548340466059054318e-16	# 25 * pi/2  middle
20624565Szliu	.double	0d-1.84316000508691335411e-16	# 26 * pi/2  middle
20724565Szliu	.double	0d-1.23083660551323675053e-16	# 27 * pi/2  middle
20824565Szliu	.double	0d-6.18513205939560162346e-17	# 28 * pi/2  middle
20924565Szliu	.double	0d-6.18980636588357585202e-19	# 29 * pi/2  middle
21024565Szliu
21124565Szliuleading:
21224565Szliu	.double	0d+0.00000000000000000000e+00	#  0 * pi/2  leading
21324565Szliu	.double	0d+1.57079632679489661351e+00	#  1 * pi/2  leading
21424565Szliu	.double	0d+3.14159265358979322702e+00	#  2 * pi/2  leading
21524565Szliu	.double	0d+4.71238898038468989604e+00	#  3 * pi/2  leading
21624565Szliu	.double	0d+6.28318530717958645404e+00	#  4 * pi/2  leading
21724565Szliu	.double	0d+7.85398163397448312306e+00	#  5 * pi/2  leading
21824565Szliu	.double	0d+9.42477796076937979208e+00	#  6 * pi/2  leading
21924565Szliu	.double	0d+1.09955742875642763501e+01	#  7 * pi/2  leading
22024565Szliu	.double	0d+1.25663706143591729081e+01	#  8 * pi/2  leading
22124565Szliu	.double	0d+1.41371669411540694661e+01	#  9 * pi/2  leading
22224565Szliu	.double	0d+1.57079632679489662461e+01	# 10 * pi/2  leading
22324565Szliu	.double	0d+1.72787595947438630262e+01	# 11 * pi/2  leading
22424565Szliu	.double	0d+1.88495559215387595842e+01	# 12 * pi/2  leading
22524565Szliu	.double	0d+2.04203522483336561422e+01	# 13 * pi/2  leading
22624565Szliu	.double	0d+2.19911485751285527002e+01	# 14 * pi/2  leading
22724565Szliu	.double	0d+2.35619449019234492582e+01	# 15 * pi/2  leading
22824565Szliu	.double	0d+2.51327412287183458162e+01	# 16 * pi/2  leading
22924565Szliu	.double	0d+2.67035375555132423742e+01	# 17 * pi/2  leading
23024565Szliu	.double	0d+2.82743338823081389322e+01	# 18 * pi/2  leading
23124565Szliu	.double	0d+2.98451302091030359342e+01	# 19 * pi/2  leading
23224565Szliu	.double	0d+3.14159265358979324922e+01	# 20 * pi/2  leading
23324565Szliu	.double	0d+3.29867228626928286062e+01	# 21 * pi/2  leading
23424565Szliu	.double	0d+3.45575191894877260523e+01	# 22 * pi/2  leading
23524565Szliu	.double	0d+3.61283155162826226103e+01	# 23 * pi/2  leading
23624565Szliu	.double	0d+3.76991118430775191683e+01	# 24 * pi/2  leading
23724565Szliu	.double	0d+3.92699081698724157263e+01	# 25 * pi/2  leading
23824565Szliu	.double	0d+4.08407044966673122843e+01	# 26 * pi/2  leading
23924565Szliu	.double	0d+4.24115008234622088423e+01	# 27 * pi/2  leading
24024565Szliu	.double	0d+4.39822971502571054003e+01	# 28 * pi/2  leading
24124565Szliu	.double	0d+4.55530934770520019583e+01	# 29 * pi/2  leading
24224565Szliu
24324565SzliutwoOverPi:
24424565Szliu	.double	0d+6.36619772367581343076e-01
24524565Szliu	.text
24624565Szliu	.align	1
24724565Szliu
24824565Szliutable_lookup:
#
#  On entry  r3 = |x| ;  the caller's original (signed) argument
#  word is read from  4(ap)  below.  On exit  r0  holds the
#  quadrant number,  r2/r1  the reduced argument (D-format) and
#  r3  its F-format extension (zero on this path).
#
24924565Szliu	muld3	r3,twoOverPi,r0
25024565Szliu	cvtrdl	r0,r0			# n = nearest int to ((2/pi)*|x|) rnded
25124565Szliu	mull3	$8,r0,r5		# r5 = 8*n , byte offset of table entry n
25224565Szliu	subd2	leading(r5),r3		# p = (|x| - leading n*pi/2) exactly
25324565Szliu	subd3	middle(r5),r3,r1	# q = (p - middle  n*pi/2) rounded
25424565Szliu	subd2	r1,r3			# r = (p - q)
25524565Szliu	subd2	middle(r5),r3		# r =  r - middle  n*pi/2
25624565Szliu	subd2	trailing(r5),r3		# r =  r - trailing n*pi/2  rounded
25724565Szliu#
25824565Szliu#  If the original argument was negative,
25924565Szliu#  negate the reduced argument and
26024565Szliu#  adjust the octant/quadrant number.
26124565Szliu#
26224565Szliu	tstw	4(ap)			# sign word of original x in caller's frame
26324565Szliu	bgeq	abs2
26424565Szliu	mnegf	r1,r1
26524565Szliu	mnegf	r3,r3
26624565Szliu#	subb3	r0,$8,r0	...used for  pi/4  reduction -S.McD
26724565Szliu	subb3	r0,$4,r0	# quadrant := 4 - quadrant
26824565Szliuabs2:
26924565Szliu#
27024565Szliu#  Clear all unneeded octant/quadrant bits.
27124565Szliu#
27224565Szliu#	bicb2	$0xf8,r0	...used for  pi/4  reduction -S.McD
27324565Szliu	bicb2	$0xfc,r0
27424565Szliu	rsb
27524565Szliu#
27624565Szliu#						p.0
27724565Szliu	.text
27824565Szliu	.align	2
27924565Szliu#
28024565Szliu# Only 256 (actually 225) bits of 2/pi are needed for VAX double
28124565Szliu# precision; this was determined by enumerating all the nearest
28224565Szliu# machine integer multiples of pi/2 using continued fractions.
28324565Szliu# (8a8d3673775b7ff7 required the most bits.)		-S.McD
28424565Szliu#
#
#  The longwords below hold successive slices of the binary
#  expansion of  2/pi , least significant first;  trigred  starts
#  at  bits2opi  and works downward (subl2 $4,r3) through them,
#  so the table sits just below the label.  The two leading zero
#  longwords are presumably guard words for the deepest lookups
#  — TODO confirm against  trigred 's worst-case indexing.
#
28524565Szliu	.long	0
28624565Szliu	.long	0
28724565Szliu	.long	0xaef1586d
28824565Szliu	.long	0x9458eaf7
28924565Szliu	.long	0x10e4107f
29024565Szliu	.long	0xd8a5664f
29124565Szliu	.long	0x4d377036
29224565Szliu	.long	0x09d5f47d
29324565Szliu	.long	0x91054a7f
29424565Szliu	.long	0xbe60db93
29524565Szliubits2opi:
29624565Szliu	.long	0x00000028
29724565Szliu	.long	0
29824565Szliu#
29924565Szliu#  Note: wherever you see the word `octant', read `quadrant'.
30024565Szliu#  Currently this code is set up for  pi/2  argument reduction.
30124565Szliu#  By uncommenting/commenting the appropriate lines, it will
30224565Szliu#  also serve as a  pi/4  argument reduction code.
30324565Szliu#
30424565Szliu
30524565Szliu#						p.1
30624565Szliu#  Trigred  performs argument reduction
30724565Szliu#  for the trigonometric functions.  It
30824565Szliu#  takes one input argument, a D-format
30924565Szliu#  number in  r1/r0 .  The magnitude of
31024565Szliu#  the input argument must be greater
31124565Szliu#  than or equal to  1/2 .  Trigred produces
31224565Szliu#  three results:  the number of the octant
31324565Szliu#  occupied by the argument, the reduced
31424565Szliu#  argument, and an extension of the
31524565Szliu#  reduced argument.  The octant number is
31624565Szliu#  returned in  r0 .  The reduced argument
31724565Szliu#  is returned as a D-format number in
31824565Szliu#  r2/r1 .  An 8 bit extension of the
31924565Szliu#  reduced argument is returned as an
32024565Szliu#  F-format number in r3.
32124565Szliu#						p.2
32224565Szliutrigred:
32324565Szliu#
32424565Szliu#  Save the sign of the input argument.
32524565Szliu#
32624565Szliu	movw	r0,-(sp)
32724565Szliu#
32824565Szliu#  Extract the exponent field.
32924565Szliu#
33024565Szliu	extzv	$7,$7,r0,r2
33124565Szliu#
33224565Szliu#  Convert the fraction part of the input
33324565Szliu#  argument into a quadword integer.
33424565Szliu#
33524565Szliu	bicw2	$0xff80,r0
33624565Szliu	bisb2	$0x80,r0	# restore the hidden fraction bit -S.McD
33724565Szliu	rotl	$16,r0,r0
33824565Szliu	rotl	$16,r1,r1
33924565Szliu#
34024565Szliu#  If  r1  is negative, add  1  to  r0 .  This
34124565Szliu#  adjustment is made so that the two's
34224565Szliu#  complement multiplications done later
34324565Szliu#  will produce unsigned results.
34424565Szliu#
34524565Szliu	bgeq	posmid
34624565Szliu	incl	r0
34724565Szliuposmid:
34824565Szliu#						p.3
34924565Szliu#
35024565Szliu#  Set  r3  to the address of the first quadword
35124565Szliu#  used to obtain the needed portion of  2/pi .
35224565Szliu#  The address is longword aligned to ensure
35324565Szliu#  efficient access.
35424565Szliu#
35524565Szliu	ashl	$-3,r2,r3
35624565Szliu	bicb2	$3,r3
35724565Szliu	subl3	r3,$bits2opi,r3
35824565Szliu#
35924565Szliu#  Set  r2  to the size of the shift needed to
36024565Szliu#  obtain the correct portion of  2/pi .
36124565Szliu#
36224565Szliu	bicb2	$0xe0,r2
36324565Szliu#						p.4
36424565Szliu#
36524565Szliu#  Move the needed  128  bits of  2/pi  into
36624565Szliu#  r11 - r8 .  Adjust the numbers to allow
36724565Szliu#  for unsigned multiplication.
36824565Szliu#
36924565Szliu	ashq	r2,(r3),r10
37024565Szliu
37124565Szliu	subl2	$4,r3
37224565Szliu	ashq	r2,(r3),r9
37324565Szliu	bgeq	signoff1
37424565Szliu	incl	r11
37524565Szliusignoff1:
37624565Szliu	subl2	$4,r3
37724565Szliu	ashq	r2,(r3),r8
37824565Szliu	bgeq	signoff2
37924565Szliu	incl	r10
38024565Szliusignoff2:
38124565Szliu	subl2	$4,r3
38224565Szliu	ashq	r2,(r3),r7
38324565Szliu	bgeq	signoff3
38424565Szliu	incl	r9
38524565Szliusignoff3:
38624565Szliu#						p.5
38724565Szliu#
38824565Szliu#  Multiply the contents of  r0/r1  by the
38924565Szliu#  slice of  2/pi  in  r11 - r8 .
39024565Szliu#
39124565Szliu	emul	r0,r8,$0,r4
39224565Szliu	emul	r0,r9,r5,r5
39324565Szliu	emul	r0,r10,r6,r6
39424565Szliu
39524565Szliu	emul	r1,r8,$0,r7
39624565Szliu	emul	r1,r9,r8,r8
39724565Szliu	emul	r1,r10,r9,r9
39824565Szliu	emul	r1,r11,r10,r10
39924565Szliu
40024565Szliu	addl2	r4,r8
40124565Szliu	adwc	r5,r9
40224565Szliu	adwc	r6,r10
40324565Szliu#						p.6
40424565Szliu#
40524565Szliu#  If there are more than five leading zeros
40624565Szliu#  after the first two quotient bits or if there
40724565Szliu#  are more than five leading ones after the first
40824565Szliu#  two quotient bits, generate more fraction bits.
40924565Szliu#  Otherwise, branch to code to produce the result.
41024565Szliu#
41124565Szliu	bicl3	$0xc1ffffff,r10,r4
41224565Szliu	beql	more1
41324565Szliu	cmpl	$0x3e000000,r4
41424565Szliu	bneq	result
41524565Szliumore1:
41624565Szliu#						p.7
41724565Szliu#
41824565Szliu#  generate another  32  result bits.
41924565Szliu#
42024565Szliu	subl2	$4,r3
42124565Szliu	ashq	r2,(r3),r5
42224565Szliu	bgeq	signoff4
42324565Szliu
42424565Szliu	emul	r1,r6,$0,r4
42524565Szliu	addl2	r1,r5
42624565Szliu	emul	r0,r6,r5,r5
42724565Szliu	addl2	r0,r6
42824565Szliu	brb	addbits1
42924565Szliu
43024565Szliusignoff4:
43124565Szliu	emul	r1,r6,$0,r4
43224565Szliu	emul	r0,r6,r5,r5
43324565Szliu
43424565Szliuaddbits1:
43524565Szliu	addl2	r5,r7
43624565Szliu	adwc	r6,r8
43724565Szliu	adwc	$0,r9
43824565Szliu	adwc	$0,r10
43924565Szliu#						p.8
44024565Szliu#
44124565Szliu#  Check for massive cancellation.
44224565Szliu#
44324565Szliu	bicl3	$0xc0000000,r10,r6
44424565Szliu#	bneq	more2			-S.McD  Test was backwards
44524565Szliu	beql	more2
44624565Szliu	cmpl	$0x3fffffff,r6
44724565Szliu	bneq	result
44824565Szliumore2:
44924565Szliu#						p.9
45024565Szliu#
45124565Szliu#  If massive cancellation has occurred,
45224565Szliu#  generate another  24  result bits.
45324565Szliu#  Testing has shown there will always be
45424565Szliu#  enough bits after this point.
45524565Szliu#
45624565Szliu	subl2	$4,r3
45724565Szliu	ashq	r2,(r3),r5
45824565Szliu	bgeq	signoff5
45924565Szliu
46024565Szliu	emul	r0,r6,r4,r5
46124565Szliu	addl2	r0,r6
46224565Szliu	brb	addbits2
46324565Szliu
46424565Szliusignoff5:
46524565Szliu	emul	r0,r6,r4,r5
46624565Szliu
46724565Szliuaddbits2:
46824565Szliu	addl2	r6,r7
46924565Szliu	adwc	$0,r8
47024565Szliu	adwc	$0,r9
47124565Szliu	adwc	$0,r10
47224565Szliu#						p.10
47324565Szliu#
47424565Szliu#  The following code produces the reduced
47524565Szliu#  argument from the product bits contained
47624565Szliu#  in  r10 - r7 .
47724565Szliu#
47824565Szliuresult:
47924565Szliu#
48024565Szliu#  Extract the octant number from  r10 .
48124565Szliu#
48224565Szliu#	extzv	$29,$3,r10,r0	...used for  pi/4  reduction -S.McD
48324565Szliu	extzv	$30,$2,r10,r0
48424565Szliu#
48524565Szliu#  Clear the octant bits in  r10 .
48624565Szliu#
48724565Szliu#	bicl2	$0xe0000000,r10	...used for  pi/4  reduction -S.McD
48824565Szliu	bicl2	$0xc0000000,r10
48924565Szliu#
49024565Szliu#  Zero the sign flag.
49124565Szliu#
49224565Szliu	clrl	r5
49324565Szliu#						p.11
49424565Szliu#
49524565Szliu#  Check to see if the fraction is greater than
49624565Szliu#  or equal to one-half.  If it is, add one
49724565Szliu#  to the octant number, set the sign flag
49824565Szliu#  on, and replace the fraction with  1 minus
49924565Szliu#  the fraction.
50024565Szliu#
50124565Szliu#	bitl	$0x10000000,r10		...used for  pi/4  reduction -S.McD
50224565Szliu	bitl	$0x20000000,r10
50324565Szliu	beql	small
50424565Szliu	incl	r0
50524565Szliu	incl	r5
50624565Szliu#	subl3	r10,$0x1fffffff,r10	...used for  pi/4  reduction -S.McD
50724565Szliu	subl3	r10,$0x3fffffff,r10
50824565Szliu	mcoml	r9,r9
50924565Szliu	mcoml	r8,r8
51024565Szliu	mcoml	r7,r7
51124565Szliusmall:
51224565Szliu#						p.12
51324565Szliu#
51424565Szliu##  Test whether the first  29  bits of the ...used for  pi/4  reduction -S.McD
51524565Szliu#  Test whether the first  30  bits of the
51624565Szliu#  fraction are zero.
51724565Szliu#
51824565Szliu	tstl	r10
51924565Szliu	beql	tiny
52024565Szliu#
52124565Szliu#  Find the position of the first one bit in  r10 .
52224565Szliu#
52324565Szliu	cvtld	r10,r1
52424565Szliu	extzv	$7,$7,r1,r1
52524565Szliu#
52624565Szliu#  Compute the size of the shift needed.
52724565Szliu#
52824565Szliu	subl3	r1,$32,r6
52924565Szliu#
53024565Szliu#  Shift up the high order  64  bits of the
53124565Szliu#  product.
53224565Szliu#
53324565Szliu	ashq	r6,r9,r10
53424565Szliu	ashq	r6,r8,r9
53524565Szliu	brb	mult
53624565Szliu#						p.13
53724565Szliu#
53824565Szliu#  Test to see if the sign bit of  r9  is on.
53924565Szliu#
54024565Szliutiny:
54124565Szliu	tstl	r9
54224565Szliu	bgeq	tinier
54324565Szliu#
54424565Szliu#  If it is, shift the product bits up  32  bits.
54524565Szliu#
54624565Szliu	movl	$32,r6
54724565Szliu	movq	r8,r10
54824565Szliu	tstl	r10
54924565Szliu	brb	mult
55024565Szliu#						p.14
55124565Szliu#
55224565Szliu#  Test whether  r9  is zero.  It is probably
55324565Szliu#  impossible for both  r10  and  r9  to be
55424565Szliu#  zero, but until proven to be so, the test
55524565Szliu#  must be made.
55624565Szliu#
55724565Szliutinier:
55824565Szliu	beql	zero
55924565Szliu#
56024565Szliu#  Find the position of the first one bit in  r9 .
56124565Szliu#
56224565Szliu	cvtld	r9,r1
56324565Szliu	extzv	$7,$7,r1,r1
56424565Szliu#
56524565Szliu#  Compute the size of the shift needed.
56624565Szliu#
56724565Szliu	subl3	r1,$32,r1
56824565Szliu	addl3	$32,r1,r6
56924565Szliu#
57024565Szliu#  Shift up the high order  64  bits of the
57124565Szliu#  product.
57224565Szliu#
57324565Szliu	ashq	r1,r8,r10
57424565Szliu	ashq	r1,r7,r9
57524565Szliu	brb	mult
57624565Szliu#						p.15
57724565Szliu#
57824565Szliu#  The following code sets the reduced
57924565Szliu#  argument to zero.
58024565Szliu#
58124565Szliuzero:
58224565Szliu	clrl	r1
58324565Szliu	clrl	r2
58424565Szliu	clrl	r3
58524565Szliu	brw	return
58624565Szliu#						p.16
58724565Szliu#
58824565Szliu#  At this point,  r0  contains the octant number,
58924565Szliu#  r6  indicates the number of bits the fraction
59024565Szliu#  has been shifted,  r5  indicates the sign of
59124565Szliu#  the fraction,  r11/r10  contain the high order
59224565Szliu#  64  bits of the fraction, and the condition
59324565Szliu#  codes indicate whether the sign bit of  r10
59424565Szliu#  is on.  The following code multiplies the
59524565Szliu#  fraction by  pi/2 .
59624565Szliu#
59724565Szliumult:
59824565Szliu#
59924565Szliu#  Save  r11/r10  in  r4/r1 .		-S.McD
60024565Szliu	movl	r11,r4
60124565Szliu	movl	r10,r1
60224565Szliu#
60324565Szliu#  If the sign bit of  r10  is on, add  1  to  r11 .
60424565Szliu#
60524565Szliu	bgeq	signoff6
60624565Szliu	incl	r11
60724565Szliusignoff6:
60824565Szliu#						p.17
60924565Szliu#
61024565Szliu#  Move  pi/2  into  r3/r2 .
61124565Szliu#
61224565Szliu	movq	$0xc90fdaa22168c235,r2
61324565Szliu#
61424565Szliu#  Multiply the fraction by the portion of  pi/2
61524565Szliu#  in  r2 .
61624565Szliu#
61724565Szliu	emul	r2,r10,$0,r7
61824565Szliu	emul	r2,r11,r8,r7
61924565Szliu#
62024565Szliu#  Multiply the fraction by the portion of  pi/2
62124565Szliu#  in  r3 .
62224565Szliu	emul	r3,r10,$0,r9
62324565Szliu	emul	r3,r11,r10,r10
62424565Szliu#
62524565Szliu#  Add the product bits together.
62624565Szliu#
62724565Szliu	addl2	r7,r9
62824565Szliu	adwc	r8,r10
62924565Szliu	adwc	$0,r11
63024565Szliu#
63124565Szliu#  Compensate for not sign extending  r8  above.-S.McD
63224565Szliu#
63324565Szliu	tstl	r8
63424565Szliu	bgeq	signoff6a
63524565Szliu	decl	r11
63624565Szliusignoff6a:
63724565Szliu#
63824565Szliu#  Compensate for  r11/r10  being unsigned.	-S.McD
63924565Szliu#
64024565Szliu	addl2	r2,r10
64124565Szliu	adwc	r3,r11
64224565Szliu#
64324565Szliu#  Compensate for  r3/r2  being unsigned.	-S.McD
64424565Szliu#
64524565Szliu	addl2	r1,r10
64624565Szliu	adwc	r4,r11
64724565Szliu#						p.18
64824565Szliu#
64924565Szliu#  If the sign bit of  r11  is zero, shift the
65024565Szliu#  product bits up one bit and increment  r6 .
65124565Szliu#
65224565Szliu	blss	signon
65324565Szliu	incl	r6
65424565Szliu	ashq	$1,r10,r10
65524565Szliu	tstl	r9
65624565Szliu	bgeq	signoff7
65724565Szliu	incl	r10
65824565Szliusignoff7:
65924565Szliusignon:
66024565Szliu#						p.19
66124565Szliu#
66224565Szliu#  Shift the  56  most significant product
66324565Szliu#  bits into  r9/r8 .  The sign extension
66424565Szliu#  will be handled later.
66524565Szliu#
66624565Szliu	ashq	$-8,r10,r8
66724565Szliu#
66824565Szliu#  Convert the low order  8  bits of  r10
66924565Szliu#  into an F-format number.
67024565Szliu#
67124565Szliu	cvtbf	r10,r3
67224565Szliu#
67324565Szliu#  If the result of the conversion was
67424565Szliu#  negative, add  1  to  r9/r8 .
67524565Szliu#
67624565Szliu	bgeq	chop
67724565Szliu	incl	r8
67824565Szliu	adwc	$0,r9
67924565Szliu#
68024565Szliu#  If  r9  is now zero, branch to special
68124565Szliu#  code to handle that possibility.
68224565Szliu#
68324565Szliu	beql	carryout
68424565Szliuchop:
68524565Szliu#						p.20
68624565Szliu#
68724565Szliu#  Convert the number in  r9/r8  into
68824565Szliu#  D-format number in  r2/r1 .
68924565Szliu#
69024565Szliu	rotl	$16,r8,r2
69124565Szliu	rotl	$16,r9,r1
69224565Szliu#
69324565Szliu#  Set the exponent field to the appropriate
69424565Szliu#  value.  Note that the extra bits created by
69524565Szliu#  sign extension are now eliminated.
69624565Szliu#
69724565Szliu	subw3	r6,$131,r6
69824565Szliu	insv	r6,$7,$9,r1
69924565Szliu#
70024565Szliu#  Set the exponent field of the F-format
70124565Szliu#  number in  r3  to the appropriate value.
70224565Szliu#
70324565Szliu	tstf	r3
70424565Szliu	beql	return
70524565Szliu#	extzv	$7,$8,r3,r4	-S.McD
70624565Szliu	extzv	$7,$7,r3,r4
70724565Szliu	addw2	r4,r6
70824565Szliu#	subw2	$217,r6		-S.McD
70924565Szliu	subw2	$64,r6
71024565Szliu	insv	r6,$7,$8,r3
71124565Szliu	brb	return
71224565Szliu#						p.21
71324565Szliu#
71424565Szliu#  The following code generates the appropriate
71524565Szliu#  result for the unlikely possibility that
71624565Szliu#  rounding the number in  r9/r8  resulted in
71724565Szliu#  a carry out.
71824565Szliu#
71924565Szliucarryout:
72024565Szliu	clrl	r1
72124565Szliu	clrl	r2
72224565Szliu	subw3	r6,$132,r6
72324565Szliu	insv	r6,$7,$9,r1
72424565Szliu	tstf	r3
72524565Szliu	beql	return
72624565Szliu	extzv	$7,$8,r3,r4
72724565Szliu	addw2	r4,r6
72824565Szliu	subw2	$218,r6
72924565Szliu	insv	r6,$7,$8,r3
73024565Szliu#						p.22
73124565Szliu#
73224565Szliu#  The following code makes any needed
73324565Szliu#  adjustments to the signs of the
73424565Szliu#  results or to the octant number, and
73524565Szliu#  then returns.
73624565Szliu#
73724565Szliureturn:
73824565Szliu#
73924565Szliu#  Test if the fraction was greater than or
74024565Szliu#  equal to  1/2 .  If so, negate the reduced
74124565Szliu#  argument.
74224565Szliu#
74324565Szliu	blbc	r5,signoff8
74424565Szliu	mnegf	r1,r1
74524565Szliu	mnegf	r3,r3
74624565Szliusignoff8:
74724565Szliu#						p.23
74824565Szliu#
74924565Szliu#  If the original argument was negative,
75024565Szliu#  negate the reduced argument and
75124565Szliu#  adjust the octant number.
75224565Szliu#
75324565Szliu	tstw	(sp)+
75424565Szliu	bgeq	signoff9
75524565Szliu	mnegf	r1,r1
75624565Szliu	mnegf	r3,r3
75724565Szliu#	subb3	r0,$8,r0	...used for  pi/4  reduction -S.McD
75824565Szliu	subb3	r0,$4,r0
75924565Szliusignoff9:
76024565Szliu#
76124565Szliu#  Clear all unneeded octant bits.
76224565Szliu#
76324565Szliu#	bicb2	$0xf8,r0	...used for  pi/4  reduction -S.McD
76424565Szliu	bicb2	$0xfc,r0
76524565Szliu#
76624565Szliu#  Return.
76724565Szliu#
76824565Szliu	rsb
769