# Copyright (c) 1985 Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms are permitted
# provided that this notice is preserved and that due credit is given
# to the University of California at Berkeley. The name of the University
# may not be used to endorse or promote products derived from this
# software without specific prior written permission. This software
# is provided ``as is'' without express or implied warranty.
#
# All recipients should regard themselves as participants in an ongoing
# research project and hence should feel obligated to report their
# experiences (good or bad) with these elementary function codes, using
# the sendbug(8) program, to the authors.
#
#	@(#)argred.s	5.2 (Berkeley) 04/29/88
#
	.data
	.align	2
_sccsid:
.asciz	"@(#)argred.s	1.1 (Berkeley) 8/21/85; 5.2 (ucb.elefunt) 04/29/88"

#  libm$argred implements Bob Corbett's argument reduction and
#  libm$sincos implements Peter Tang's double precision sin/cos.
#
#  Note: The two entry points libm$argred and libm$sincos are meant
#        to be used only by _sin, _cos and _tan.
#
# method: true range reduction to [-pi/4,pi/4], P. Tang  &  B. Corbett
# S. McDonald, April 4,  1985
#
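#
#  For orientation, a rough C sketch of how a caller such as  _sin
#  would use these entry points (argred, sin_poly and cos_poly are
#  hypothetical C stand-ins, shown only to illustrate the control
#  flow, not the actual register interface; y_ext is ignored here
#  for brevity):
#
#	double sin_model(double x) {
#		int quad;		/* quadrant number, 0..3 */
#		double y, y_ext;	/* reduced argument and extension */
#		quad = argred(x, &y, &y_ext);	/* x = quad*(pi/2) + y + y_ext */
#		double r = (quad & 1) ? cos_poly(y) : sin_poly(y);
#		return (quad & 2) ? -r : r;	/* floor(quad/2) picks the sign */
#	}
#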
	.globl	libm$argred
	.globl	libm$sincos
	.text
	.align	1

libm$argred:
#
#  Compare the argument with the largest possible that can
#  be reduced by table lookup.  r3 := |x|  will be used in  table_lookup .
#
	movd	r0,r3
	bgeq	abs1
	mnegd	r3,r3
abs1:
	cmpd	r3,$0d+4.55530934770520019583e+01
	blss	small_arg
	jsb	trigred
	rsb
small_arg:
	jsb	table_lookup
	rsb
#
#  At this point,
#	   r0  contains the quadrant number, 0, 1, 2, or 3;
#	r2/r1  contains the reduced argument as a D-format number;
#  	   r3  contains an F-format extension to the reduced argument;
#          r4  contains a  0 or 1  corresponding to a  sin or cos  entry.
#
libm$sincos:
#
#  Compensate for a cosine entry by adding one to the quadrant number.
#
	addl2	r4,r0
#
#  Polyd clobbers  r5-r0 ;  save  X  in  r7/r6 .
#  This can be avoided by rewriting  trigred .
#
	movd	r1,r6
#
#  Likewise, save  alpha  in  r8 .
#  This can be avoided by rewriting  trigred .
#
	movf	r3,r8
#
#  Odd or even quadrant?  cosine if odd, sine otherwise.
#  Save  floor(quadrant/2) in  r9  ; it determines the final sign.
#
	rotl	$-1,r0,r9
	blss	cosine
sine:
	muld2	r1,r1		# Xsq = X * X
	cmpw	$0x2480,r1	# [zl] Xsq > 2^-56?
	blss	1f		# [zl] yes, go ahead and do polyd
	clrq	r1		# [zl] work around 11/780 FPA polyd bug
1:
	polyd	r1,$7,sin_coef	# Q = P(Xsq) , of deg 7
	mulf3	$0f3.0,r8,r4	# beta = 3 * alpha
	mulf2	r0,r4		# beta = Q * beta
	addf2	r8,r4		# beta = alpha + beta
	muld2	r6,r0		# S(X) = X * Q
#	cvtfd	r4,r4		... r5 = 0 after a polyd.
	addd2	r4,r0		# S(X) = beta + S(X)
	addd2	r6,r0		# S(X) = X + S(X)
	brb	done
cosine:
	muld2	r6,r6		# Xsq = X * X
	beql	zero_arg
	mulf2	r1,r8		# beta = X * alpha
	polyd	r6,$7,cos_coef	# Q = P'(Xsq) , of deg 7
	subd3	r0,r8,r0	# beta = beta - Q
	subw2	$0x80,r6	# Xsq = Xsq / 2
	addd2	r0,r6		# Xsq = Xsq + beta
zero_arg:
	subd3	r6,$0d1.0,r0	# C(X) = 1 - Xsq
done:
	blbc	r9,even
	mnegd	r0,r0
even:
	rsb
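#
#  The polyd instructions above evaluate a degree-7 polynomial in
#  Xsq = X*X  by Horner's rule, highest-order coefficient first.
#  A C sketch of the sine case (coefficients are the  sin_coef
#  table below; the alpha/beta terms in the code above add back the
#  low-order bits lost during argument reduction):
#
#	double sin_poly(double t) {		/* t = X*X */
#		static const double s[8] = {	/* s7 .. s0, as in sin_coef */
#			-7.53080332264191085773e-13,  1.60573519267703489121e-10,
#			-2.50520965150706067211e-08,  2.75573191800593885716e-06,
#			-1.98412698411850507950e-04,  8.33333333333325688985e-03,
#			-1.66666666666666664354e-01,  0.0 };
#		double q = 0.0;
#		for (int i = 0; i < 8; i++)
#			q = q * t + s[i];	/* Q = P(Xsq) */
#		return q;			/* then sin(X) ~ X + X*Q */
#	}
#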

.data
.align	2

sin_coef:
	.double	0d-7.53080332264191085773e-13	# s7 = 2^-29 -1.a7f2504ffc49f8..
	.double	0d+1.60573519267703489121e-10	# s6 = 2^-21  1.611adaede473c8..
	.double	0d-2.50520965150706067211e-08	# s5 = 2^-1a -1.ae644921ed8382..
	.double	0d+2.75573191800593885716e-06	# s4 = 2^-13  1.71de3a4b884278..
	.double	0d-1.98412698411850507950e-04	# s3 = 2^-0d -1.a01a01a0125e7d..
	.double	0d+8.33333333333325688985e-03	# s2 = 2^-07  1.11111111110e50
	.double	0d-1.66666666666666664354e-01	# s1 = 2^-03 -1.55555555555554
	.double	0d+0.00000000000000000000e+00	# s0 = 0

cos_coef:
	.double	0d-1.13006966202629430300e-11	# s7 = 2^-25 -1.8D9BA04D1374BE..
	.double	0d+2.08746646574796004700e-09	# s6 = 2^-1D  1.1EE632650350BA..
	.double	0d-2.75573073031284417300e-07	# s5 = 2^-16 -1.27E4F31411719E..
	.double	0d+2.48015872682668025200e-05	# s4 = 2^-10  1.A01A0196B902E8..
	.double	0d-1.38888888888464709200e-03	# s3 = 2^-0A -1.6C16C16C11FACE..
	.double	0d+4.16666666666664761400e-02	# s2 = 2^-05  1.5555555555539E
	.double	0d+0.00000000000000000000e+00	# s1 = 0
	.double	0d+0.00000000000000000000e+00	# s0 = 0

#
#  Multiples of  pi/2  expressed as the sum of three doubles,
#
#  trailing:	n * pi/2 ,  n = 0, 1, 2, ..., 29
#			trailing[n] ,
#
#  middle:	n * pi/2 ,  n = 0, 1, 2, ..., 29
#			middle[n]   ,
#
#  leading:	n * pi/2 ,  n = 0, 1, 2, ..., 29
#			leading[n]  ,
#
#	where
#		leading[n]  := (n * pi/2)  rounded,
#		middle[n]   := (n * pi/2  -  leading[n])  rounded,
#		trailing[n] := (( n * pi/2 - leading[n]) - middle[n])  rounded .
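#
#  A C sketch of how  table_lookup  (below) uses these tables, given
#  n = nearest integer to  |x|*(2/pi)  (a model of the three-step
#  reduction, not the VAX code).  Because  n*pi/2  is within  pi/4
#  of  |x| , the first subtraction cancels and is exact, so the
#  rounding errors of the two smaller pieces stay far below the
#  reduced argument:
#
#	double reduce(double x, int n, double *extension) {
#		double p = x - leading[n];	/* exact */
#		double q = p - middle[n];	/* reduced argument, rounded */
#		double r = (p - q) - middle[n];	/* bits lost in computing q */
#		*extension = r - trailing[n];	/* extension of the reduced arg */
#		return q;
#	}
#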

trailing:
	.double	0d+0.00000000000000000000e+00	#  0 * pi/2  trailing
	.double	0d+4.33590506506189049611e-35	#  1 * pi/2  trailing
	.double	0d+8.67181013012378099223e-35	#  2 * pi/2  trailing
	.double	0d+1.30077151951856714215e-34	#  3 * pi/2  trailing
	.double	0d+1.73436202602475619845e-34	#  4 * pi/2  trailing
	.double	0d-1.68390735624352669192e-34	#  5 * pi/2  trailing
	.double	0d+2.60154303903713428430e-34	#  6 * pi/2  trailing
	.double	0d-8.16726343231148352150e-35	#  7 * pi/2  trailing
	.double	0d+3.46872405204951239689e-34	#  8 * pi/2  trailing
	.double	0d+3.90231455855570147991e-34	#  9 * pi/2  trailing
	.double	0d-3.36781471248705338384e-34	# 10 * pi/2  trailing
	.double	0d-1.06379439835298071785e-33	# 11 * pi/2  trailing
	.double	0d+5.20308607807426856861e-34	# 12 * pi/2  trailing
	.double	0d+5.63667658458045770509e-34	# 13 * pi/2  trailing
	.double	0d-1.63345268646229670430e-34	# 14 * pi/2  trailing
	.double	0d-1.19986217995610764801e-34	# 15 * pi/2  trailing
	.double	0d+6.93744810409902479378e-34	# 16 * pi/2  trailing
	.double	0d-8.03640094449267300110e-34	# 17 * pi/2  trailing
	.double	0d+7.80462911711140295982e-34	# 18 * pi/2  trailing
	.double	0d-7.16921993148029483506e-34	# 19 * pi/2  trailing
	.double	0d-6.73562942497410676769e-34	# 20 * pi/2  trailing
	.double	0d-6.30203891846791677593e-34	# 21 * pi/2  trailing
	.double	0d-2.12758879670596143570e-33	# 22 * pi/2  trailing
	.double	0d+2.53800212047402350390e-33	# 23 * pi/2  trailing
	.double	0d+1.04061721561485371372e-33	# 24 * pi/2  trailing
	.double	0d+6.11729905311472319056e-32	# 25 * pi/2  trailing
	.double	0d+1.12733531691609154102e-33	# 26 * pi/2  trailing
	.double	0d-3.70049587943078297272e-34	# 27 * pi/2  trailing
	.double	0d-3.26690537292459340860e-34	# 28 * pi/2  trailing
	.double	0d-1.14812616507957271361e-34	# 29 * pi/2  trailing

middle:
	.double	0d+0.00000000000000000000e+00	#  0 * pi/2  middle
	.double	0d+5.72118872610983179676e-18	#  1 * pi/2  middle
	.double	0d+1.14423774522196635935e-17	#  2 * pi/2  middle
	.double	0d-3.83475850529283316309e-17	#  3 * pi/2  middle
	.double	0d+2.28847549044393271871e-17	#  4 * pi/2  middle
	.double	0d-2.69052076007086676522e-17	#  5 * pi/2  middle
	.double	0d-7.66951701058566632618e-17	#  6 * pi/2  middle
	.double	0d-1.54628301484890040587e-17	#  7 * pi/2  middle
	.double	0d+4.57695098088786543741e-17	#  8 * pi/2  middle
	.double	0d+1.07001849766246313192e-16	#  9 * pi/2  middle
	.double	0d-5.38104152014173353044e-17	# 10 * pi/2  middle
	.double	0d-2.14622680169080983801e-16	# 11 * pi/2  middle
	.double	0d-1.53390340211713326524e-16	# 12 * pi/2  middle
	.double	0d-9.21580002543456677056e-17	# 13 * pi/2  middle
	.double	0d-3.09256602969780081173e-17	# 14 * pi/2  middle
	.double	0d+3.03066796603896507006e-17	# 15 * pi/2  middle
	.double	0d+9.15390196177573087482e-17	# 16 * pi/2  middle
	.double	0d+1.52771359575124969107e-16	# 17 * pi/2  middle
	.double	0d+2.14003699532492626384e-16	# 18 * pi/2  middle
	.double	0d-1.68853170360202329427e-16	# 19 * pi/2  middle
	.double	0d-1.07620830402834670609e-16	# 20 * pi/2  middle
	.double	0d+3.97700719404595604379e-16	# 21 * pi/2  middle
	.double	0d-4.29245360338161967602e-16	# 22 * pi/2  middle
	.double	0d-3.68013020380794313406e-16	# 23 * pi/2  middle
	.double	0d-3.06780680423426653047e-16	# 24 * pi/2  middle
	.double	0d-2.45548340466059054318e-16	# 25 * pi/2  middle
	.double	0d-1.84316000508691335411e-16	# 26 * pi/2  middle
	.double	0d-1.23083660551323675053e-16	# 27 * pi/2  middle
	.double	0d-6.18513205939560162346e-17	# 28 * pi/2  middle
	.double	0d-6.18980636588357585202e-19	# 29 * pi/2  middle

leading:
	.double	0d+0.00000000000000000000e+00	#  0 * pi/2  leading
	.double	0d+1.57079632679489661351e+00	#  1 * pi/2  leading
	.double	0d+3.14159265358979322702e+00	#  2 * pi/2  leading
	.double	0d+4.71238898038468989604e+00	#  3 * pi/2  leading
	.double	0d+6.28318530717958645404e+00	#  4 * pi/2  leading
	.double	0d+7.85398163397448312306e+00	#  5 * pi/2  leading
	.double	0d+9.42477796076937979208e+00	#  6 * pi/2  leading
	.double	0d+1.09955742875642763501e+01	#  7 * pi/2  leading
	.double	0d+1.25663706143591729081e+01	#  8 * pi/2  leading
	.double	0d+1.41371669411540694661e+01	#  9 * pi/2  leading
	.double	0d+1.57079632679489662461e+01	# 10 * pi/2  leading
	.double	0d+1.72787595947438630262e+01	# 11 * pi/2  leading
	.double	0d+1.88495559215387595842e+01	# 12 * pi/2  leading
	.double	0d+2.04203522483336561422e+01	# 13 * pi/2  leading
	.double	0d+2.19911485751285527002e+01	# 14 * pi/2  leading
	.double	0d+2.35619449019234492582e+01	# 15 * pi/2  leading
	.double	0d+2.51327412287183458162e+01	# 16 * pi/2  leading
	.double	0d+2.67035375555132423742e+01	# 17 * pi/2  leading
	.double	0d+2.82743338823081389322e+01	# 18 * pi/2  leading
	.double	0d+2.98451302091030359342e+01	# 19 * pi/2  leading
	.double	0d+3.14159265358979324922e+01	# 20 * pi/2  leading
	.double	0d+3.29867228626928286062e+01	# 21 * pi/2  leading
	.double	0d+3.45575191894877260523e+01	# 22 * pi/2  leading
	.double	0d+3.61283155162826226103e+01	# 23 * pi/2  leading
	.double	0d+3.76991118430775191683e+01	# 24 * pi/2  leading
	.double	0d+3.92699081698724157263e+01	# 25 * pi/2  leading
	.double	0d+4.08407044966673122843e+01	# 26 * pi/2  leading
	.double	0d+4.24115008234622088423e+01	# 27 * pi/2  leading
	.double	0d+4.39822971502571054003e+01	# 28 * pi/2  leading
	.double	0d+4.55530934770520019583e+01	# 29 * pi/2  leading

twoOverPi:
	.double	0d+6.36619772367581343076e-01
	.text
	.align	1

table_lookup:
	muld3	r3,twoOverPi,r0
	cvtrdl	r0,r0			# n = nearest int to ((2/pi)*|x|) rnded
	mull3	$8,r0,r5
	subd2	leading(r5),r3		# p = (|x| - leading n*pi/2) exactly
	subd3	middle(r5),r3,r1	# q = (p - middle  n*pi/2) rounded
	subd2	r1,r3			# r = (p - q)
	subd2	middle(r5),r3		# r =  r - middle  n*pi/2
	subd2	trailing(r5),r3		# r =  r - trailing n*pi/2  rounded
#
#  If the original argument was negative,
#  negate the reduced argument and
#  adjust the octant/quadrant number.
#
	tstw	4(ap)
	bgeq	abs2
	mnegf	r1,r1
	mnegf	r3,r3
#	subb3	r0,$8,r0	...used for  pi/4  reduction -S.McD
	subb3	r0,$4,r0
abs2:
#
#  Clear all unneeded octant/quadrant bits.
#
#	bicb2	$0xf8,r0	...used for  pi/4  reduction -S.McD
	bicb2	$0xfc,r0
	rsb
#
#						p.0
	.text
	.align	2
#
# Only 256 (actually 225) bits of 2/pi are needed for VAX double
# precision; this was determined by enumerating all the nearest
# machine integer multiples of pi/2 using continued fractions.
# (8a8d3673775b7ff7 required the most bits.)		-S.McD
#
	.long	0
	.long	0
	.long	0xaef1586d
	.long	0x9458eaf7
	.long	0x10e4107f
	.long	0xd8a5664f
	.long	0x4d377036
	.long	0x09d5f47d
	.long	0x91054a7f
	.long	0xbe60db93
bits2opi:
	.long	0x00000028
	.long	0
#
#  Note: wherever you see the word `octant', read `quadrant'.
#  Currently this code is set up for  pi/2  argument reduction.
#  By uncommenting/commenting the appropriate lines, it will
#  also serve as a  pi/4  argument reduction code.
#

#						p.1
#  Trigred  performs argument reduction
#  for the trigonometric functions.  It
#  takes one input argument, a D-format
#  number in  r1/r0 .  The magnitude of
#  the input argument must be greater
#  than or equal to  1/2 .  Trigred produces
#  three results:  the number of the octant
#  occupied by the argument, the reduced
#  argument, and an extension of the
#  reduced argument.  The octant number is
#  returned in  r0 .  The reduced argument
#  is returned as a D-format number in
#  r2/r1 .  An 8 bit extension of the
#  reduced argument is returned as an
#  F-format number in r3.
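#
#  A behavioral model of trigred in C, ignoring the extended-
#  precision details (remquo is the C99 routine; M_PI_2 is only the
#  double nearest pi/2, so this model drifts for huge arguments,
#  whereas trigred carries 225+ bits of 2/pi):
#
#	#include <math.h>
#	void trigred_model(double x, int *quadrant, double *reduced) {
#		int n;
#		*reduced  = remquo(x, M_PI_2, &n);	/* roughly in [-pi/4, pi/4] */
#		*quadrant = n & 3;			/* quadrant number mod 4 */
#	}
#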
#						p.2
trigred:
#
#  Save the sign of the input argument.
#
	movw	r0,-(sp)
#
#  Extract the exponent field.
#
	extzv	$7,$7,r0,r2
#
#  Convert the fraction part of the input
#  argument into a quadword integer.
#
	bicw2	$0xff80,r0
	bisb2	$0x80,r0	# -S.McD
	rotl	$16,r0,r0
	rotl	$16,r1,r1
#
#  If  r1  is negative, add  1  to  r0 .  This
#  adjustment is made so that the two's
#  complement multiplications done later
#  will produce unsigned results.
#
	bgeq	posmid
	incl	r0
posmid:
#						p.3
#
#  Set  r3  to the address of the first quadword
#  used to obtain the needed portion of  2/pi .
#  The address is longword aligned to ensure
#  efficient access.
#
	ashl	$-3,r2,r3
	bicb2	$3,r3
	subl3	r3,$bits2opi,r3
#
#  Set  r2  to the size of the shift needed to
#  obtain the correct portion of  2/pi .
#
	bicb2	$0xe0,r2
#						p.4
#
#  Move the needed  128  bits of  2/pi  into
#  r11 - r8 .  Adjust the numbers to allow
#  for unsigned multiplication.
#
	ashq	r2,(r3),r10

	subl2	$4,r3
	ashq	r2,(r3),r9
	bgeq	signoff1
	incl	r11
signoff1:
	subl2	$4,r3
	ashq	r2,(r3),r8
	bgeq	signoff2
	incl	r10
signoff2:
	subl2	$4,r3
	ashq	r2,(r3),r7
	bgeq	signoff3
	incl	r9
signoff3:
#						p.5
#
#  Multiply the contents of  r0/r1  by the
#  slice of  2/pi  in  r11 - r8 .
#
	emul	r0,r8,$0,r4
	emul	r0,r9,r5,r5
	emul	r0,r10,r6,r6

	emul	r1,r8,$0,r7
	emul	r1,r9,r8,r8
	emul	r1,r10,r9,r9
	emul	r1,r11,r10,r10

	addl2	r4,r8
	adwc	r5,r9
	adwc	r6,r10
#						p.6
#
#  If there are more than five leading zeros
#  after the first two quotient bits or if there
#  are more than five leading ones after the first
#  two quotient bits, generate more fraction bits.
#  Otherwise, branch to code to produce the result.
#
	bicl3	$0xc1ffffff,r10,r4
	beql	more1
	cmpl	$0x3e000000,r4
	bneq	result
more1:
#						p.7
#
#  generate another  32  result bits.
#
	subl2	$4,r3
	ashq	r2,(r3),r5
	bgeq	signoff4

	emul	r1,r6,$0,r4
	addl2	r1,r5
	emul	r0,r6,r5,r5
	addl2	r0,r6
	brb	addbits1

signoff4:
	emul	r1,r6,$0,r4
	emul	r0,r6,r5,r5

addbits1:
	addl2	r5,r7
	adwc	r6,r8
	adwc	$0,r9
	adwc	$0,r10
#						p.8
#
#  Check for massive cancellation.
#
	bicl3	$0xc0000000,r10,r6
#	bneq	more2			-S.McD  Test was backwards
	beql	more2
	cmpl	$0x3fffffff,r6
	bneq	result
more2:
#						p.9
#
#  If massive cancellation has occurred,
#  generate another  24  result bits.
#  Testing has shown there will always be
#  enough bits after this point.
#
	subl2	$4,r3
	ashq	r2,(r3),r5
	bgeq	signoff5

	emul	r0,r6,r4,r5
	addl2	r0,r6
	brb	addbits2

signoff5:
	emul	r0,r6,r4,r5

addbits2:
	addl2	r6,r7
	adwc	$0,r8
	adwc	$0,r9
	adwc	$0,r10
#						p.10
#
#  The following code produces the reduced
#  argument from the product bits contained
#  in  r10 - r7 .
#
result:
#
#  Extract the octant number from  r10 .
#
#	extzv	$29,$3,r10,r0	...used for  pi/4  reduction -S.McD
	extzv	$30,$2,r10,r0
#
#  Clear the octant bits in  r10 .
#
#	bicl2	$0xe0000000,r10	...used for  pi/4  reduction -S.McD
	bicl2	$0xc0000000,r10
#
#  Zero the sign flag.
#
	clrl	r5
#						p.11
#
#  Check to see if the fraction is greater than
#  or equal to one-half.  If it is, add one
#  to the octant number, set the sign flag
#  on, and replace the fraction with  1 minus
#  the fraction.
#
#	bitl	$0x10000000,r10		...used for  pi/4  reduction -S.McD
	bitl	$0x20000000,r10
	beql	small
	incl	r0
	incl	r5
#	subl3	r10,$0x1fffffff,r10	...used for  pi/4  reduction -S.McD
	subl3	r10,$0x3fffffff,r10
	mcoml	r9,r9
	mcoml	r8,r8
	mcoml	r7,r7
small:
#						p.12
#
##  Test whether the first  29  bits of the ...used for  pi/4  reduction -S.McD
#  Test whether the first  30  bits of the
#  fraction are zero.
#
	tstl	r10
	beql	tiny
#
#  Find the position of the first one bit in  r10 .
#
	cvtld	r10,r1
	extzv	$7,$7,r1,r1
#
#  Compute the size of the shift needed.
#
	subl3	r1,$32,r6
#
#  Shift up the high order  64  bits of the
#  product.
#
	ashq	r6,r9,r10
	ashq	r6,r8,r9
	brb	mult
#						p.13
#
#  Test to see if the sign bit of  r9  is on.
#
tiny:
	tstl	r9
	bgeq	tinier
#
#  If it is, shift the product bits up  32  bits.
#
	movl	$32,r6
	movq	r8,r10
	tstl	r10
	brb	mult
#						p.14
#
#  Test whether  r9  is zero.  It is probably
#  impossible for both  r10  and  r9  to be
#  zero, but until proven to be so, the test
#  must be made.
#
tinier:
	beql	zero
#
#  Find the position of the first one bit in  r9 .
#
	cvtld	r9,r1
	extzv	$7,$7,r1,r1
#
#  Compute the size of the shift needed.
#
	subl3	r1,$32,r1
	addl3	$32,r1,r6
#
#  Shift up the high order  64  bits of the
#  product.
#
	ashq	r1,r8,r10
	ashq	r1,r7,r9
	brb	mult
#						p.15
#
#  The following code sets the reduced
#  argument to zero.
#
zero:
	clrl	r1
	clrl	r2
	clrl	r3
	brw	return
#						p.16
#
#  At this point,  r0  contains the octant number,
#  r6  indicates the number of bits the fraction
#  has been shifted,  r5  indicates the sign of
#  the fraction,  r11/r10  contain the high order
#  64  bits of the fraction, and the condition
#  codes indicate whether the sign bit of  r10
#  is on.  The following code multiplies the
#  fraction by  pi/2 .
#
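#
#  The emul sequence below is a 64 x 64 bit fixed-point multiply of
#  the fraction by  pi/2  (the quadword  0xc90fdaa22168c235  is
#  pi/2 * 2^63 , rounded).  A C sketch of the same step, assuming a
#  compiler with 128-bit integers:
#
#	#include <stdint.h>
#	uint64_t mul_by_pio2(uint64_t frac) {	/* frac: high 64 fraction bits */
#		const uint64_t PIO2_Q63 = 0xc90fdaa22168c235ULL; /* pi/2 * 2^63 */
#		unsigned __int128 p = (unsigned __int128)frac * PIO2_Q63;
#		return (uint64_t)(p >> 64);	/* high 64 bits of the product */
#	}
#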
mult:
#
#  Save  r11/r10  in  r4/r1 .		-S.McD
	movl	r11,r4
	movl	r10,r1
#
#  If the sign bit of  r10  is on, add  1  to  r11 .
#
	bgeq	signoff6
	incl	r11
signoff6:
#						p.17
#
#  Move  pi/2  into  r3/r2 .
#
	movq	$0xc90fdaa22168c235,r2
#
#  Multiply the fraction by the portion of  pi/2
#  in  r2 .
#
	emul	r2,r10,$0,r7
	emul	r2,r11,r8,r7
#
#  Multiply the fraction by the portion of  pi/2
#  in  r3 .
	emul	r3,r10,$0,r9
	emul	r3,r11,r10,r10
#
#  Add the product bits together.
#
	addl2	r7,r9
	adwc	r8,r10
	adwc	$0,r11
#
#  Compensate for not sign extending  r8  above.-S.McD
#
	tstl	r8
	bgeq	signoff6a
	decl	r11
signoff6a:
#
#  Compensate for  r11/r10  being unsigned.	-S.McD
#
	addl2	r2,r10
	adwc	r3,r11
#
#  Compensate for  r3/r2  being unsigned.	-S.McD
#
	addl2	r1,r10
	adwc	r4,r11
#						p.18
#
#  If the sign bit of  r11  is zero, shift the
#  product bits up one bit and increment  r6 .
#
	blss	signon
	incl	r6
	ashq	$1,r10,r10
	tstl	r9
	bgeq	signoff7
	incl	r10
signoff7:
signon:
#						p.19
#
#  Shift the  56  most significant product
#  bits into  r9/r8 .  The sign extension
#  will be handled later.
#
	ashq	$-8,r10,r8
#
#  Convert the low order  8  bits of  r10
#  into an F-format number.
#
	cvtbf	r10,r3
#
#  If the result of the conversion was
#  negative, add  1  to  r9/r8 .
#
	bgeq	chop
	incl	r8
	adwc	$0,r9
#
#  If  r9  is now zero, branch to special
#  code to handle that possibility.
#
	beql	carryout
chop:
#						p.20
#
#  Convert the number in  r9/r8  into
#  a D-format number in  r2/r1 .
#
	rotl	$16,r8,r2
	rotl	$16,r9,r1
#
#  Set the exponent field to the appropriate
#  value.  Note that the extra bits created by
#  sign extension are now eliminated.
#
	subw3	r6,$131,r6
	insv	r6,$7,$9,r1
#
#  Set the exponent field of the F-format
#  number in  r3  to the appropriate value.
#
	tstf	r3
	beql	return
#	extzv	$7,$8,r3,r4	-S.McD
	extzv	$7,$7,r3,r4
	addw2	r4,r6
#	subw2	$217,r6		-S.McD
	subw2	$64,r6
	insv	r6,$7,$8,r3
	brb	return
#						p.21
#
#  The following code generates the appropriate
#  result for the unlikely possibility that
#  rounding the number in  r9/r8  resulted in
#  a carry out.
#
carryout:
	clrl	r1
	clrl	r2
	subw3	r6,$132,r6
	insv	r6,$7,$9,r1
	tstf	r3
	beql	return
	extzv	$7,$8,r3,r4
	addw2	r4,r6
	subw2	$218,r6
	insv	r6,$7,$8,r3
#						p.22
#
#  The following code makes any needed
#  adjustments to the signs of the
#  results or to the octant number, and
#  then returns.
#
return:
#
#  Test if the fraction was greater than or
#  equal to  1/2 .  If so, negate the reduced
#  argument.
#
	blbc	r5,signoff8
	mnegf	r1,r1
	mnegf	r3,r3
signoff8:
#						p.23
#
#  If the original argument was negative,
#  negate the reduced argument and
#  adjust the octant number.
#
	tstw	(sp)+
	bgeq	signoff9
	mnegf	r1,r1
	mnegf	r3,r3
#	subb3	r0,$8,r0	...used for  pi/4  reduction -S.McD
	subb3	r0,$4,r0
signoff9:
#
#  Clear all unneeded octant bits.
#
#	bicb2	$0xf8,r0	...used for  pi/4  reduction -S.McD
	bicb2	$0xfc,r0
#
#  Return.
#
	rsb