# Copyright (c) 1985, 1993
#	The Regents of the University of California.  All rights reserved.
#
# %sccs.include.redist.sh%
#
#	@(#)argred.s	8.1 (Berkeley) 06/04/93
#
	.data
	.align	2
_sccsid:
.asciz	"@(#)argred.s	1.1 (Berkeley) 8/21/85; 8.1 (ucb.elefunt) 06/04/93"

#  libm$argred implements Bob Corbett's argument reduction and
#  libm$sincos implements Peter Tang's double precision sin/cos.
#
#  Note: The two entry points libm$argred and libm$sincos are meant
#        to be used only by _sin, _cos and _tan.
#
# method: true range reduction to [-pi/4,pi/4], P. Tang  &  B. Corbett
# S. McDonald, April 4,  1985
#
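#  As a rough C sketch of the intended call pattern (argred/sincos here
#  are hypothetical stand-ins for the assembly entry points, not a real
#  C interface), a caller such as _sin is expected to do:
#
#	double sin_sketch(double x)
#	{
#	    double y, alpha;                   /* reduced argument, extension */
#	    int quad = argred(x, &y, &alpha);  /* quadrant 0..3, |y| <= pi/4  */
#	    return sincos(quad, y, alpha, 0);  /* last arg: 0 = sin, 1 = cos  */
#	}
#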
	.globl	libm$argred
	.globl	libm$sincos
	.text
	.align	1

libm$argred:
#
#  Compare the argument with the largest possible that can
#  be reduced by table lookup.  r3 := |x|  will be used in  table_lookup .
#
	movd	r0,r3
	bgeq	abs1
	mnegd	r3,r3
abs1:
	cmpd	r3,$0d+4.55530934770520019583e+01
	blss	small_arg
	jsb	trigred
	rsb
small_arg:
	jsb	table_lookup
	rsb
#
#  At this point,
#	   r0  contains the quadrant number, 0, 1, 2, or 3;
#	r2/r1  contains the reduced argument as a D-format number;
#  	   r3  contains an F-format extension to the reduced argument;
#          r4  contains a  0 or 1  corresponding to a  sin or cos  entry.
#
libm$sincos:
#
#  Compensate for a cosine entry by adding one to the quadrant number.
#
	addl2	r4,r0
#
#  Polyd clobbers  r5-r0 ;  save  X  in  r7/r6 .
#  This can be avoided by rewriting  trigred .
#
	movd	r1,r6
#
#  Likewise, save  alpha  in  r8 .
#  This can be avoided by rewriting  trigred .
#
	movf	r3,r8
#
#  Odd or even quadrant?  cosine if odd, sine otherwise.
#  Save  floor(quadrant/2) in  r9  ; it determines the final sign.
#
	rotl	$-1,r0,r9
	blss	cosine
sine:
	muld2	r1,r1		# Xsq = X * X
	cmpw	$0x2480,r1	# [zl] Xsq > 2^-56?
	blss	1f		# [zl] yes, go ahead and do polyd
	clrq	r1		# [zl] work around 11/780 FPA polyd bug
1:
	polyd	r1,$7,sin_coef	# Q = P(Xsq) , of deg 7
	mulf3	$0f3.0,r8,r4	# beta = 3 * alpha
	mulf2	r0,r4		# beta = Q * beta
	addf2	r8,r4		# beta = alpha + beta
	muld2	r6,r0		# S(X) = X * Q
#	cvtfd	r4,r4		... r5 = 0 after a polyd.
	addd2	r4,r0		# S(X) = beta + S(X)
	addd2	r6,r0		# S(X) = X + S(X)
	brb	done
cosine:
	muld2	r6,r6		# Xsq = X * X
	beql	zero_arg
	mulf2	r1,r8		# beta = X * alpha
	polyd	r6,$7,cos_coef	# Q = P'(Xsq) , of deg 7
	subd3	r0,r8,r0	# beta = beta - Q
	subw2	$0x80,r6	# Xsq = Xsq / 2
	addd2	r0,r6		# Xsq = Xsq + beta
zero_arg:
	subd3	r6,$0d1.0,r0	# C(X) = 1 - Xsq
done:
	blbc	r9,even
	mnegd	r0,r0
even:
	rsb

.data
.align	2

sin_coef:
	.double	0d-7.53080332264191085773e-13	# s7 = 2^-29 -1.a7f2504ffc49f8..
	.double	0d+1.60573519267703489121e-10	# s6 = 2^-21  1.611adaede473c8..
	.double	0d-2.50520965150706067211e-08	# s5 = 2^-1a -1.ae644921ed8382..
	.double	0d+2.75573191800593885716e-06	# s4 = 2^-13  1.71de3a4b884278..
	.double	0d-1.98412698411850507950e-04	# s3 = 2^-0d -1.a01a01a0125e7d..
	.double	0d+8.33333333333325688985e-03	# s2 = 2^-07  1.11111111110e50
	.double	0d-1.66666666666666664354e-01	# s1 = 2^-03 -1.55555555555554
	.double	0d+0.00000000000000000000e+00	# s0 = 0

cos_coef:
	.double	0d-1.13006966202629430300e-11	# s7 = 2^-25 -1.8D9BA04D1374BE..
	.double	0d+2.08746646574796004700e-09	# s6 = 2^-1D  1.1EE632650350BA..
	.double	0d-2.75573073031284417300e-07	# s5 = 2^-16 -1.27E4F31411719E..
	.double	0d+2.48015872682668025200e-05	# s4 = 2^-10  1.A01A0196B902E8..
	.double	0d-1.38888888888464709200e-03	# s3 = 2^-0A -1.6C16C16C11FACE..
	.double	0d+4.16666666666664761400e-02	# s2 = 2^-05  1.5555555555539E
	.double	0d+0.00000000000000000000e+00	# s1 = 0
	.double	0d+0.00000000000000000000e+00	# s0 = 0

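#  A hedged C paraphrase of the sine/cosine kernels above (poly() plays
#  the role of the polyd instruction; sin_coef[]/cos_coef[] stand for the
#  eight constants in each table above, highest degree first; X is the
#  reduced argument and alpha its F-format extension):
#
#	static double poly(double x, const double c[], int deg)
#	{	/* Horner's rule, highest-degree coefficient first, like polyd */
#	    double r = c[0];
#	    for (int i = 1; i <= deg; i++)
#	        r = r * x + c[i];
#	    return r;
#	}
#
#	double sin_kernel(double X, double alpha)
#	{
#	    double Xsq = X * X;
#	    double Q = poly(Xsq, sin_coef, 7);       /* Q = P(Xsq)           */
#	    double beta = alpha + 3.0 * alpha * Q;   /* extension correction */
#	    return X + X * Q + beta;                 /* S(X)                 */
#	}
#
#	double cos_kernel(double X, double alpha)
#	{
#	    double Xsq = X * X;
#	    double Q = poly(Xsq, cos_coef, 7);       /* Q = P'(Xsq)          */
#	    return 1.0 - (Xsq / 2.0 + (X * alpha - Q));   /* C(X)            */
#	}
#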
#
#  Multiples of  pi/2  expressed as the sum of three doubles,
#
#  trailing:	n * pi/2 ,  n = 0, 1, 2, ..., 29
#			trailing[n] ,
#
#  middle:	n * pi/2 ,  n = 0, 1, 2, ..., 29
#			middle[n]   ,
#
#  leading:	n * pi/2 ,  n = 0, 1, 2, ..., 29
#			leading[n]  ,
#
#	where
#		leading[n]  := (n * pi/2)  rounded,
#		middle[n]   := (n * pi/2  -  leading[n])  rounded,
#		trailing[n] := (( n * pi/2 - leading[n]) - middle[n])  rounded .
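#
#  In outline (a hedged C sketch of the  table_lookup  routine further
#  below; round-to-nearest is assumed, and the negative-argument fixup
#  performed after the lookup is omitted):
#
#	int table_lookup_sketch(double absx, double *reduced, double *ext)
#	{
#	    int n = (int)(absx * twoOverPi + 0.5);   /* nearest multiple of pi/2  */
#	    double p = absx - leading[n];            /* exact: high bits cancel   */
#	    double q = p - middle[n];                /* reduced argument, rounded */
#	    *ext = ((p - q) - middle[n]) - trailing[n];   /* its small extension  */
#	    *reduced = q;
#	    return n & 3;                            /* quadrant number           */
#	}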

trailing:
	.double	0d+0.00000000000000000000e+00	#  0 * pi/2  trailing
	.double	0d+4.33590506506189049611e-35	#  1 * pi/2  trailing
	.double	0d+8.67181013012378099223e-35	#  2 * pi/2  trailing
	.double	0d+1.30077151951856714215e-34	#  3 * pi/2  trailing
	.double	0d+1.73436202602475619845e-34	#  4 * pi/2  trailing
	.double	0d-1.68390735624352669192e-34	#  5 * pi/2  trailing
	.double	0d+2.60154303903713428430e-34	#  6 * pi/2  trailing
	.double	0d-8.16726343231148352150e-35	#  7 * pi/2  trailing
	.double	0d+3.46872405204951239689e-34	#  8 * pi/2  trailing
	.double	0d+3.90231455855570147991e-34	#  9 * pi/2  trailing
	.double	0d-3.36781471248705338384e-34	# 10 * pi/2  trailing
	.double	0d-1.06379439835298071785e-33	# 11 * pi/2  trailing
	.double	0d+5.20308607807426856861e-34	# 12 * pi/2  trailing
	.double	0d+5.63667658458045770509e-34	# 13 * pi/2  trailing
	.double	0d-1.63345268646229670430e-34	# 14 * pi/2  trailing
	.double	0d-1.19986217995610764801e-34	# 15 * pi/2  trailing
	.double	0d+6.93744810409902479378e-34	# 16 * pi/2  trailing
	.double	0d-8.03640094449267300110e-34	# 17 * pi/2  trailing
	.double	0d+7.80462911711140295982e-34	# 18 * pi/2  trailing
	.double	0d-7.16921993148029483506e-34	# 19 * pi/2  trailing
	.double	0d-6.73562942497410676769e-34	# 20 * pi/2  trailing
	.double	0d-6.30203891846791677593e-34	# 21 * pi/2  trailing
	.double	0d-2.12758879670596143570e-33	# 22 * pi/2  trailing
	.double	0d+2.53800212047402350390e-33	# 23 * pi/2  trailing
	.double	0d+1.04061721561485371372e-33	# 24 * pi/2  trailing
	.double	0d+6.11729905311472319056e-32	# 25 * pi/2  trailing
	.double	0d+1.12733531691609154102e-33	# 26 * pi/2  trailing
	.double	0d-3.70049587943078297272e-34	# 27 * pi/2  trailing
	.double	0d-3.26690537292459340860e-34	# 28 * pi/2  trailing
	.double	0d-1.14812616507957271361e-34	# 29 * pi/2  trailing

middle:
	.double	0d+0.00000000000000000000e+00	#  0 * pi/2  middle
	.double	0d+5.72118872610983179676e-18	#  1 * pi/2  middle
	.double	0d+1.14423774522196635935e-17	#  2 * pi/2  middle
	.double	0d-3.83475850529283316309e-17	#  3 * pi/2  middle
	.double	0d+2.28847549044393271871e-17	#  4 * pi/2  middle
	.double	0d-2.69052076007086676522e-17	#  5 * pi/2  middle
	.double	0d-7.66951701058566632618e-17	#  6 * pi/2  middle
	.double	0d-1.54628301484890040587e-17	#  7 * pi/2  middle
	.double	0d+4.57695098088786543741e-17	#  8 * pi/2  middle
	.double	0d+1.07001849766246313192e-16	#  9 * pi/2  middle
	.double	0d-5.38104152014173353044e-17	# 10 * pi/2  middle
	.double	0d-2.14622680169080983801e-16	# 11 * pi/2  middle
	.double	0d-1.53390340211713326524e-16	# 12 * pi/2  middle
	.double	0d-9.21580002543456677056e-17	# 13 * pi/2  middle
	.double	0d-3.09256602969780081173e-17	# 14 * pi/2  middle
	.double	0d+3.03066796603896507006e-17	# 15 * pi/2  middle
	.double	0d+9.15390196177573087482e-17	# 16 * pi/2  middle
	.double	0d+1.52771359575124969107e-16	# 17 * pi/2  middle
	.double	0d+2.14003699532492626384e-16	# 18 * pi/2  middle
	.double	0d-1.68853170360202329427e-16	# 19 * pi/2  middle
	.double	0d-1.07620830402834670609e-16	# 20 * pi/2  middle
	.double	0d+3.97700719404595604379e-16	# 21 * pi/2  middle
	.double	0d-4.29245360338161967602e-16	# 22 * pi/2  middle
	.double	0d-3.68013020380794313406e-16	# 23 * pi/2  middle
	.double	0d-3.06780680423426653047e-16	# 24 * pi/2  middle
	.double	0d-2.45548340466059054318e-16	# 25 * pi/2  middle
	.double	0d-1.84316000508691335411e-16	# 26 * pi/2  middle
	.double	0d-1.23083660551323675053e-16	# 27 * pi/2  middle
	.double	0d-6.18513205939560162346e-17	# 28 * pi/2  middle
	.double	0d-6.18980636588357585202e-19	# 29 * pi/2  middle

leading:
	.double	0d+0.00000000000000000000e+00	#  0 * pi/2  leading
	.double	0d+1.57079632679489661351e+00	#  1 * pi/2  leading
	.double	0d+3.14159265358979322702e+00	#  2 * pi/2  leading
	.double	0d+4.71238898038468989604e+00	#  3 * pi/2  leading
	.double	0d+6.28318530717958645404e+00	#  4 * pi/2  leading
	.double	0d+7.85398163397448312306e+00	#  5 * pi/2  leading
	.double	0d+9.42477796076937979208e+00	#  6 * pi/2  leading
	.double	0d+1.09955742875642763501e+01	#  7 * pi/2  leading
	.double	0d+1.25663706143591729081e+01	#  8 * pi/2  leading
	.double	0d+1.41371669411540694661e+01	#  9 * pi/2  leading
	.double	0d+1.57079632679489662461e+01	# 10 * pi/2  leading
	.double	0d+1.72787595947438630262e+01	# 11 * pi/2  leading
	.double	0d+1.88495559215387595842e+01	# 12 * pi/2  leading
	.double	0d+2.04203522483336561422e+01	# 13 * pi/2  leading
	.double	0d+2.19911485751285527002e+01	# 14 * pi/2  leading
	.double	0d+2.35619449019234492582e+01	# 15 * pi/2  leading
	.double	0d+2.51327412287183458162e+01	# 16 * pi/2  leading
	.double	0d+2.67035375555132423742e+01	# 17 * pi/2  leading
	.double	0d+2.82743338823081389322e+01	# 18 * pi/2  leading
	.double	0d+2.98451302091030359342e+01	# 19 * pi/2  leading
	.double	0d+3.14159265358979324922e+01	# 20 * pi/2  leading
	.double	0d+3.29867228626928286062e+01	# 21 * pi/2  leading
	.double	0d+3.45575191894877260523e+01	# 22 * pi/2  leading
	.double	0d+3.61283155162826226103e+01	# 23 * pi/2  leading
	.double	0d+3.76991118430775191683e+01	# 24 * pi/2  leading
	.double	0d+3.92699081698724157263e+01	# 25 * pi/2  leading
	.double	0d+4.08407044966673122843e+01	# 26 * pi/2  leading
	.double	0d+4.24115008234622088423e+01	# 27 * pi/2  leading
	.double	0d+4.39822971502571054003e+01	# 28 * pi/2  leading
	.double	0d+4.55530934770520019583e+01	# 29 * pi/2  leading

twoOverPi:
	.double	0d+6.36619772367581343076e-01
	.text
	.align	1

table_lookup:
	muld3	r3,twoOverPi,r0
	cvtrdl	r0,r0			# n = nearest int to ((2/pi)*|x|) rnded
	mull3	$8,r0,r5
	subd2	leading(r5),r3		# p = (|x| - leading n*pi/2) exactly
	subd3	middle(r5),r3,r1	# q = (p - middle  n*pi/2) rounded
	subd2	r1,r3			# r = (p - q)
	subd2	middle(r5),r3		# r =  r - middle  n*pi/2
	subd2	trailing(r5),r3		# r =  r - trailing n*pi/2  rounded
#
#  If the original argument was negative,
#  negate the reduced argument and
#  adjust the octant/quadrant number.
#
	tstw	4(ap)
	bgeq	abs2
	mnegf	r1,r1
	mnegf	r3,r3
#	subb3	r0,$8,r0	...used for  pi/4  reduction -S.McD
	subb3	r0,$4,r0
abs2:
#
#  Clear all unneeded octant/quadrant bits.
#
#	bicb2	$0xf8,r0	...used for  pi/4  reduction -S.McD
	bicb2	$0xfc,r0
	rsb
#
#						p.0
	.text
	.align	2
#
# Only 256 (actually 225) bits of 2/pi are needed for VAX double
# precision; this was determined by enumerating all the nearest
# machine integer multiples of pi/2 using continued fractions.
# (8a8d3673775b7ff7 required the most bits.)		-S.McD
#
	.long	0
	.long	0
	.long	0xaef1586d
	.long	0x9458eaf7
	.long	0x10e4107f
	.long	0xd8a5664f
	.long	0x4d377036
	.long	0x09d5f47d
	.long	0x91054a7f
	.long	0xbe60db93
bits2opi:
	.long	0x00000028
	.long	0
#
#  Note: wherever you see the word `octant', read `quadrant'.
#  Currently this code is set up for  pi/2  argument reduction.
#  By uncommenting/commenting the appropriate lines, it will
#  also serve as a  pi/4  argument reduction code.
#

#						p.1
#  Trigred  performs argument reduction
#  for the trigonometric functions.  It
#  takes one input argument, a D-format
#  number in  r1/r0 .  The magnitude of
#  the input argument must be greater
#  than or equal to  1/2 .  Trigred produces
#  three results:  the number of the octant
#  occupied by the argument, the reduced
#  argument, and an extension of the
#  reduced argument.  The octant number is
#  returned in  r0 .  The reduced argument
#  is returned as a D-format number in
#  r2/r1 .  An 8 bit extension of the
#  reduced argument is returned as an
#  F-format number in r3.
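#
#  Conceptually (a hedged C sketch of what trigred computes; the real
#  code forms  |x|*(2/pi)  to well over 100 bits from the table of 2/pi
#  chunks above, and also returns the F-format extension of the reduced
#  argument, which this sketch omits):
#
#	#include <math.h>
#	int trigred_sketch(double absx, double *reduced)
#	{
#	    const double pi = 3.14159265358979323846;
#	    double t = absx * (2.0 / pi);     /* |x| measured in quadrants  */
#	    double q = floor(t);
#	    double f = t - q;                 /* fractional part, in [0,1)  */
#	    int oct = (int)fmod(q, 4.0);      /* "octant" (really quadrant) */
#	    if (f >= 0.5) {                   /* fold into [-1/4, 1/4)      */
#	        oct = (oct + 1) & 3;
#	        f -= 1.0;
#	    }
#	    *reduced = f * (pi / 2.0);        /* back to radians            */
#	    return oct;
#	}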
#						p.2
trigred:
#
#  Save the sign of the input argument.
#
	movw	r0,-(sp)
#
#  Extract the exponent field.
#
	extzv	$7,$7,r0,r2
#
#  Convert the fraction part of the input
#  argument into a quadword integer.
#
	bicw2	$0xff80,r0
	bisb2	$0x80,r0	# -S.McD
	rotl	$16,r0,r0
	rotl	$16,r1,r1
#
#  If  r1  is negative, add  1  to  r0 .  This
#  adjustment is made so that the two's
#  complement multiplications done later
#  will produce unsigned results.
#
	bgeq	posmid
	incl	r0
posmid:
#						p.3
#
#  Set  r3  to the address of the first quadword
#  used to obtain the needed portion of  2/pi .
#  The address is longword aligned to ensure
#  efficient access.
#
	ashl	$-3,r2,r3
	bicb2	$3,r3
	subl3	r3,$bits2opi,r3
#
#  Set  r2  to the size of the shift needed to
#  obtain the correct portion of  2/pi .
#
	bicb2	$0xe0,r2
#						p.4
#
#  Move the needed  128  bits of  2/pi  into
#  r11 - r8 .  Adjust the numbers to allow
#  for unsigned multiplication.
#
	ashq	r2,(r3),r10

	subl2	$4,r3
	ashq	r2,(r3),r9
	bgeq	signoff1
	incl	r11
signoff1:
	subl2	$4,r3
	ashq	r2,(r3),r8
	bgeq	signoff2
	incl	r10
signoff2:
	subl2	$4,r3
	ashq	r2,(r3),r7
	bgeq	signoff3
	incl	r9
signoff3:
#						p.5
#
#  Multiply the contents of  r0/r1  by the
#  slice of  2/pi  in  r11 - r8 .
#
	emul	r0,r8,$0,r4
	emul	r0,r9,r5,r5
	emul	r0,r10,r6,r6

	emul	r1,r8,$0,r7
	emul	r1,r9,r8,r8
	emul	r1,r10,r9,r9
	emul	r1,r11,r10,r10

	addl2	r4,r8
	adwc	r5,r9
	adwc	r6,r10
#						p.6
#
#  If there are more than five leading zeros
#  after the first two quotient bits or if there
#  are more than five leading ones after the first
#  two quotient bits, generate more fraction bits.
#  Otherwise, branch to code to produce the result.
#
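#  (Equivalently, in a hedged C sketch with r10 held as a uint32_t:
#  bits 31-30 are the two quotient bits and bits 29-25 the next five
#  fraction bits, so more product bits are needed exactly when
#
#	uint32_t f = r10 & 0x3e000000;
#	int need_more = (f == 0 || f == 0x3e000000);
#
#  holds, i.e. when those five bits are all zeros or all ones.)
#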
	bicl3	$0xc1ffffff,r10,r4
	beql	more1
	cmpl	$0x3e000000,r4
	bneq	result
more1:
#						p.7
#
#  generate another  32  result bits.
#
	subl2	$4,r3
	ashq	r2,(r3),r5
	bgeq	signoff4

	emul	r1,r6,$0,r4
	addl2	r1,r5
	emul	r0,r6,r5,r5
	addl2	r0,r6
	brb	addbits1

signoff4:
	emul	r1,r6,$0,r4
	emul	r0,r6,r5,r5

addbits1:
	addl2	r5,r7
	adwc	r6,r8
	adwc	$0,r9
	adwc	$0,r10
#						p.8
#
#  Check for massive cancellation.
#
	bicl3	$0xc0000000,r10,r6
#	bneq	more2			-S.McD  Test was backwards
	beql	more2
	cmpl	$0x3fffffff,r6
	bneq	result
more2:
#						p.9
#
#  If massive cancellation has occurred,
#  generate another  24  result bits.
#  Testing has shown there will always be
#  enough bits after this point.
#
	subl2	$4,r3
	ashq	r2,(r3),r5
	bgeq	signoff5

	emul	r0,r6,r4,r5
	addl2	r0,r6
	brb	addbits2

signoff5:
	emul	r0,r6,r4,r5

addbits2:
	addl2	r6,r7
	adwc	$0,r8
	adwc	$0,r9
	adwc	$0,r10
#						p.10
#
#  The following code produces the reduced
#  argument from the product bits contained
#  in  r10 - r7 .
#
result:
#
#  Extract the octant number from  r10 .
#
#	extzv	$29,$3,r10,r0	...used for  pi/4  reduction -S.McD
	extzv	$30,$2,r10,r0
#
#  Clear the octant bits in  r10 .
#
#	bicl2	$0xe0000000,r10	...used for  pi/4  reduction -S.McD
	bicl2	$0xc0000000,r10
#
#  Zero the sign flag.
#
	clrl	r5
#						p.11
#
#  Check to see if the fraction is greater than
#  or equal to one-half.  If it is, add one
#  to the octant number, set the sign flag
#  on, and replace the fraction with  1 minus
#  the fraction.
#
#	bitl	$0x10000000,r10		...used for  pi/4  reduction -S.McD
	bitl	$0x20000000,r10
	beql	small
	incl	r0
	incl	r5
#	subl3	r10,$0x1fffffff,r10	...used for  pi/4  reduction -S.McD
	subl3	r10,$0x3fffffff,r10
	mcoml	r9,r9
	mcoml	r8,r8
	mcoml	r7,r7
small:
#						p.12
#
##  Test whether the first  29  bits of the ...used for  pi/4  reduction -S.McD
#  Test whether the first  30  bits of the
#  fraction are zero.
#
	tstl	r10
	beql	tiny
#
#  Find the position of the first one bit in  r10 .
#
	cvtld	r10,r1
	extzv	$7,$7,r1,r1
#
#  Compute the size of the shift needed.
#
	subl3	r1,$32,r6
#
#  Shift up the high order  64  bits of the
#  product.
#
	ashq	r6,r9,r10
	ashq	r6,r8,r9
	brb	mult
#						p.13
#
#  Test to see if the sign bit of  r9  is on.
#
tiny:
	tstl	r9
	bgeq	tinier
#
#  If it is, shift the product bits up  32  bits.
#
	movl	$32,r6
	movq	r8,r10
	tstl	r10
	brb	mult
#						p.14
#
#  Test whether  r9  is zero.  It is probably
#  impossible for both  r10  and  r9  to be
#  zero, but until proven to be so, the test
#  must be made.
#
tinier:
	beql	zero
#
#  Find the position of the first one bit in  r9 .
#
	cvtld	r9,r1
	extzv	$7,$7,r1,r1
#
#  Compute the size of the shift needed.
#
	subl3	r1,$32,r1
	addl3	$32,r1,r6
#
#  Shift up the high order  64  bits of the
#  product.
#
	ashq	r1,r8,r10
	ashq	r1,r7,r9
	brb	mult
#						p.15
#
#  The following code sets the reduced
#  argument to zero.
#
zero:
	clrl	r1
	clrl	r2
	clrl	r3
	brw	return
#						p.16
#
#  At this point,  r0  contains the octant number,
#  r6  indicates the number of bits the fraction
#  has been shifted,  r5  indicates the sign of
#  the fraction,  r11/r10  contain the high order
#  64  bits of the fraction, and the condition
#  codes indicate whether the sign bit of  r10
#  is on.  The following code multiplies the
#  fraction by  pi/2 .
#
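#  The multiplications below use  emul , a signed 32x32->64 multiply,
#  on operands that are really unsigned; the "Compensate for ... being
#  unsigned" steps repair the products.  A hedged C sketch of the
#  correction idea (an operand whose sign bit is set has been read as
#  u - 2^32 , so the true product is recovered by adding the other
#  operand back into the upper 32 bits):
#
#	#include <stdint.h>
#	uint64_t umul64_from_signed(uint32_t u, uint32_t v)
#	{
#	    int64_t s = (int64_t)(int32_t)u * (int32_t)v;  /* what emul gives  */
#	    uint64_t r = (uint64_t)s;
#	    if ((int32_t)u < 0) r += (uint64_t)v << 32;    /* undo -2^32 bias  */
#	    if ((int32_t)v < 0) r += (uint64_t)u << 32;
#	    return r;                                      /* == u*v mod 2^64  */
#	}
#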
mult:
#
#  Save  r11/r10  in  r4/r1 .		-S.McD
	movl	r11,r4
	movl	r10,r1
#
#  If the sign bit of  r10  is on, add  1  to  r11 .
#
	bgeq	signoff6
	incl	r11
signoff6:
#						p.17
#
#  Move  pi/2  into  r3/r2 .
#
	movq	$0xc90fdaa22168c235,r2
#
#  Multiply the fraction by the portion of  pi/2
#  in  r2 .
#
	emul	r2,r10,$0,r7
	emul	r2,r11,r8,r7
#
#  Multiply the fraction by the portion of  pi/2
#  in  r3 .
	emul	r3,r10,$0,r9
	emul	r3,r11,r10,r10
#
#  Add the product bits together.
#
	addl2	r7,r9
	adwc	r8,r10
	adwc	$0,r11
#
#  Compensate for not sign extending  r8  above.-S.McD
#
	tstl	r8
	bgeq	signoff6a
	decl	r11
signoff6a:
#
#  Compensate for  r11/r10  being unsigned.	-S.McD
#
	addl2	r2,r10
	adwc	r3,r11
#
#  Compensate for  r3/r2  being unsigned.	-S.McD
#
	addl2	r1,r10
	adwc	r4,r11
#						p.18
#
#  If the sign bit of  r11  is zero, shift the
#  product bits up one bit and increment  r6 .
#
	blss	signon
	incl	r6
	ashq	$1,r10,r10
	tstl	r9
	bgeq	signoff7
	incl	r10
signoff7:
signon:
#						p.19
#
#  Shift the  56  most significant product
#  bits into  r9/r8 .  The sign extension
#  will be handled later.
#
	ashq	$-8,r10,r8
#
#  Convert the low order  8  bits of  r10
#  into an F-format number.
#
	cvtbf	r10,r3
#
#  If the result of the conversion was
#  negative, add  1  to  r9/r8 .
#
	bgeq	chop
	incl	r8
	adwc	$0,r9
#
#  If  r9  is now zero, branch to special
#  code to handle that possibility.
#
	beql	carryout
chop:
#						p.20
#
#  Convert the number in  r9/r8  into
#  a D-format number in  r2/r1 .
#
	rotl	$16,r8,r2
	rotl	$16,r9,r1
#
#  Set the exponent field to the appropriate
#  value.  Note that the extra bits created by
#  sign extension are now eliminated.
#
	subw3	r6,$131,r6
	insv	r6,$7,$9,r1
#
#  Set the exponent field of the F-format
#  number in  r3  to the appropriate value.
#
	tstf	r3
	beql	return
#	extzv	$7,$8,r3,r4	-S.McD
	extzv	$7,$7,r3,r4
	addw2	r4,r6
#	subw2	$217,r6		-S.McD
	subw2	$64,r6
	insv	r6,$7,$8,r3
	brb	return
#						p.21
#
#  The following code generates the appropriate
#  result for the unlikely possibility that
#  rounding the number in  r9/r8  resulted in
#  a carry out.
#
carryout:
	clrl	r1
	clrl	r2
	subw3	r6,$132,r6
	insv	r6,$7,$9,r1
	tstf	r3
	beql	return
	extzv	$7,$8,r3,r4
	addw2	r4,r6
	subw2	$218,r6
	insv	r6,$7,$8,r3
#						p.22
#
#  The following code makes any needed
#  adjustments to the signs of the
#  results or to the octant number, and
#  then returns.
#
return:
#
#  Test if the fraction was greater than or
#  equal to  1/2 .  If so, negate the reduced
#  argument.
#
	blbc	r5,signoff8
	mnegf	r1,r1
	mnegf	r3,r3
signoff8:
#						p.23
#
#  If the original argument was negative,
#  negate the reduced argument and
#  adjust the octant number.
#
	tstw	(sp)+
	bgeq	signoff9
	mnegf	r1,r1
	mnegf	r3,r3
#	subb3	r0,$8,r0	...used for  pi/4  reduction -S.McD
	subb3	r0,$4,r0
signoff9:
#
#  Clear all unneeded octant bits.
#
#	bicb2	$0xf8,r0	...used for  pi/4  reduction -S.McD
	bicb2	$0xfc,r0
#
#  Return.
#
	rsb