xref: /netbsd-src/sys/arch/powerpc/fpu/fpu_subr.c (revision 3b01aba77a7a698587faaae455bbfe740923c1f5)
1 /*	$NetBSD: fpu_subr.c,v 1.1 2001/06/13 06:01:47 simonb Exp $ */
2 
3 /*
4  * Copyright (c) 1992, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This software was developed by the Computer Systems Engineering group
8  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9  * contributed to Berkeley.
10  *
11  * All advertising materials mentioning features or use of this software
12  * must display the following acknowledgement:
13  *	This product includes software developed by the University of
14  *	California, Lawrence Berkeley Laboratory.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *	This product includes software developed by the University of
27  *	California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  *
44  *	@(#)fpu_subr.c	8.1 (Berkeley) 6/11/93
45  */
46 
47 /*
48  * FPU subroutines.
49  */
50 
51 #include <sys/types.h>
52 #if defined(DIAGNOSTIC)||defined(DEBUG)
53 #include <sys/systm.h>
54 #endif
55 
56 #include <machine/reg.h>
57 #include <powerpc/instr.h>
58 #include <machine/fpu.h>
59 
60 #include <powerpc/fpu/fpu_arith.h>
61 #include <powerpc/fpu/fpu_emu.h>
62 #include <powerpc/fpu/fpu_extern.h>
63 
64 /*
65  * Shift the given number right rsh bits.  Any bits that `fall off' will get
66  * shoved into the sticky field; we return the resulting sticky.  Note that
67  * shifting NaNs is legal (this will never shift all bits out); a NaN's
68  * sticky field is ignored anyway.
69  */
int
fpu_shr(struct fpn *fp, int rsh)
{
	u_int m0, m1, m2, m3, s;
	int lsh;

#ifdef DIAGNOSTIC
	/*
	 * Sanity: the shift count must be positive, and only genuine
	 * numbers and NaNs carry a meaningful mantissa to shift.
	 */
	if (rsh <= 0 || (fp->fp_class != FPC_NUM && !ISNAN(fp)))
		panic("fpu_rightshift 1");
#endif

	/* Load the four 32-bit mantissa words into locals. */
	m0 = fp->fp_mant[0];
	m1 = fp->fp_mant[1];
	m2 = fp->fp_mant[2];
	m3 = fp->fp_mant[3];

	/* If shifting all the bits out, take a shortcut. */
	if (rsh >= FP_NMANT) {
#ifdef DIAGNOSTIC
		/* A zero mantissa here means the caller handed us FPC_ZERO. */
		if ((m0 | m1 | m2 | m3) == 0)
			panic("fpu_rightshift 2");
#endif
		fp->fp_mant[0] = 0;
		fp->fp_mant[1] = 0;
		fp->fp_mant[2] = 0;
		fp->fp_mant[3] = 0;
#ifdef notdef
		if ((m0 | m1 | m2 | m3) == 0)
			fp->fp_class = FPC_ZERO;
		else
#endif
			/* Some nonzero bit fell off the end: record sticky. */
			fp->fp_sticky = 1;
		return (1);
	}

	/*
	 * Squish out full words.  Whole 32-bit words shifted past the
	 * bottom are OR'ed into the sticky accumulator; the remaining
	 * words slide down by 1, 2, or 3 positions.
	 */
	s = fp->fp_sticky;
	if (rsh >= 32 * 3) {
		s |= m3 | m2 | m1;
		m3 = m0, m2 = 0, m1 = 0, m0 = 0;
	} else if (rsh >= 32 * 2) {
		s |= m3 | m2;
		m3 = m1, m2 = m0, m1 = 0, m0 = 0;
	} else if (rsh >= 32) {
		s |= m3;
		m3 = m2, m2 = m1, m1 = m0, m0 = 0;
	}

	/*
	 * Handle any remaining partial word: propagate the low bits of
	 * each word into the top of the next one down, and fold the bits
	 * shifted out of the lowest word into sticky.  (rsh is now in
	 * 1..31, so the complementary lsh = 32 - rsh is also in 1..31
	 * and neither shift count can hit the undefined >= 32 case.)
	 */
	if ((rsh &= 31) != 0) {
		lsh = 32 - rsh;
		s |= m3 << lsh;
		m3 = (m3 >> rsh) | (m2 << lsh);
		m2 = (m2 >> rsh) | (m1 << lsh);
		m1 = (m1 >> rsh) | (m0 << lsh);
		m0 >>= rsh;
	}
	/* Store the shifted mantissa and the accumulated sticky bits. */
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	fp->fp_sticky = s;
	return (s);
}
134 
135 /*
136  * Force a number to be normal, i.e., make its fraction have all zero
137  * bits before FP_1, then FP_1, then all 1 bits.  This is used for denorms
138  * and (sometimes) for intermediate results.
139  *
140  * Internally, this may use a `supernormal' -- a number whose fp_mant
141  * is greater than or equal to 2.0 -- so as a side effect you can hand it
142  * a supernormal and it will fix it (provided fp->fp_mant[3] == 0).
143  */
void
fpu_norm(struct fpn *fp)
{
	u_int m0, m1, m2, m3, top, sup, nrm;
	int lsh, rsh, exp;

	/* Load exponent and the four mantissa words into locals. */
	exp = fp->fp_exp;
	m0 = fp->fp_mant[0];
	m1 = fp->fp_mant[1];
	m2 = fp->fp_mant[2];
	m3 = fp->fp_mant[3];

	/*
	 * Handle severe subnormals with 32-bit moves: slide whole words
	 * up until the top word is nonzero, adjusting the exponent by
	 * 32 per word moved.  If every word is zero the value is zero.
	 */
	if (m0 == 0) {
		if (m1)
			m0 = m1, m1 = m2, m2 = m3, m3 = 0, exp -= 32;
		else if (m2)
			m0 = m2, m1 = m3, m2 = 0, m3 = 0, exp -= 2 * 32;
		else if (m3)
			m0 = m3, m1 = 0, m2 = 0, m3 = 0, exp -= 3 * 32;
		else {
			fp->fp_class = FPC_ZERO;
			return;
		}
	}

	/*
	 * Now fix any supernormal or remaining subnormal: after this,
	 * the top word must satisfy FP_1 <= m0 < 2*FP_1.
	 */
	nrm = FP_1;
	sup = nrm << 1;
	if (m0 >= sup) {
		/*
		 * We have a supernormal number.  We need to shift it right.
		 * We may assume m3==0.
		 */
		/* Count how far right the top word must go. */
		for (rsh = 1, top = m0 >> 1; top >= sup; rsh++)	/* XXX slow */
			top >>= 1;
		exp += rsh;
		/* Shift the whole 128-bit mantissa right by rsh bits. */
		lsh = 32 - rsh;
		m3 = m2 << lsh;
		m2 = (m2 >> rsh) | (m1 << lsh);
		m1 = (m1 >> rsh) | (m0 << lsh);
		m0 = top;
	} else if (m0 < nrm) {
		/*
		 * We have a regular denorm (a subnormal number), and need
		 * to shift it left.
		 */
		/* Count how far left the top word must go (m0 != 0 here). */
		for (lsh = 1, top = m0 << 1; top < nrm; lsh++)	/* XXX slow */
			top <<= 1;
		exp -= lsh;
		/* Shift the whole 128-bit mantissa left by lsh bits. */
		rsh = 32 - lsh;
		m0 = top | (m1 >> rsh);
		m1 = (m1 << lsh) | (m2 >> rsh);
		m2 = (m2 << lsh) | (m3 >> rsh);
		m3 <<= lsh;
	}

	/* Store back the normalized exponent and mantissa. */
	fp->fp_exp = exp;
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
}
207 
208 /*
209  * Concoct a `fresh' Quiet NaN per Appendix N.
210  * As a side effect, we set NV (invalid) for the current exceptions.
211  */
212 struct fpn *
213 fpu_newnan(struct fpemu *fe)
214 {
215 	struct fpn *fp;
216 
217 	fe->fe_cx |= FPSCR_VXSNAN;
218 	fp = &fe->fe_f3;
219 	fp->fp_class = FPC_QNAN;
220 	fp->fp_sign = 0;
221 	fp->fp_mant[0] = FP_1 - 1;
222 	fp->fp_mant[1] = fp->fp_mant[2] = fp->fp_mant[3] = ~0;
223 	DUMPFPN(FPE_REG, fp);
224 	return (fp);
225 }
226