/*	$NetBSD: fpu_implode.c,v 1.8.6.3 2002/02/28 04:12:03 nathanw Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_implode.c	8.1 (Berkeley) 6/11/93
 */

/*
 * FPU subroutines: `implode' internal format numbers into the machine's
 * `packed binary' format.
 */

#if defined(_KERNEL_OPT)
#include "opt_sparc_arch.h"
#endif

#include <sys/types.h>
#include <sys/systm.h>

#include <machine/ieee.h>
#include <machine/instr.h>
#include <machine/reg.h>

#include <sparc/fpu/fpu_arith.h>
#include <sparc/fpu/fpu_emu.h>
#include <sparc/fpu/fpu_extern.h>

static int round __P((register struct fpemu *, register struct fpn *));
static int toinf __P((struct fpemu *, int));

/*
 * Round a number (algorithm from Motorola MC68882 manual, modified for
 * our internal format).  Set inexact exception if rounding is required.
 * Return true iff we rounded up.
 *
 * After rounding, we discard the guard and round bits by shifting right
 * 2 bits (a la fpu_shr(), but we do not bother with fp->fp_sticky).
 * This saves effort later.
 *
 * Note that we may leave the value 2.0 in fp->fp_mant; it is the caller's
 * responsibility to fix this if necessary.
 */
static int
round(register struct fpemu *fe, register struct fpn *fp)
{
	register u_int m0, m1, m2, m3;
	register int gr, s;

	m0 = fp->fp_mant[0];
	m1 = fp->fp_mant[1];
	m2 = fp->fp_mant[2];
	m3 = fp->fp_mant[3];
	gr = m3 & 3;
	s = fp->fp_sticky;

	/* mant >>= FP_NG */
	m3 = (m3 >> FP_NG) | (m2 << (32 - FP_NG));
	m2 = (m2 >> FP_NG) | (m1 << (32 - FP_NG));
	m1 = (m1 >> FP_NG) | (m0 << (32 - FP_NG));
	m0 >>= FP_NG;

	if ((gr | s) == 0)	/* result is exact: no rounding needed */
		goto rounddown;

	fe->fe_cx |= FSR_NX;	/* inexact */

	/* Go to rounddown to round down; break to round up. */
	switch ((fe->fe_fsr >> FSR_RD_SHIFT) & FSR_RD_MASK) {

	case FSR_RD_RN:
	default:
		/*
		 * Round only if guard is set (gr & 2).  If guard is set,
		 * but round & sticky both clear, then we want to round
		 * but have a tie, so round to even, i.e., add 1 iff odd.
		 */
		if ((gr & 2) == 0)
			goto rounddown;
		if ((gr & 1) || fp->fp_sticky || (m3 & 1))
			break;
		goto rounddown;

	case FSR_RD_RZ:
		/* Round towards zero, i.e., down. */
		goto rounddown;

	case FSR_RD_RM:
		/* Round towards -Inf: up if negative, down if positive. */
		if (fp->fp_sign)
			break;
		goto rounddown;

	case FSR_RD_RP:
		/* Round towards +Inf: up if positive, down otherwise. */
		if (!fp->fp_sign)
			break;
		goto rounddown;
	}

	/* Bump low bit of mantissa, with carry. */
	FPU_ADDS(m3, m3, 1);
	FPU_ADDCS(m2, m2, 0);
	FPU_ADDCS(m1, m1, 0);
	FPU_ADDC(m0, m0, 0);
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	return (1);

rounddown:
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	return (0);
}
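
/*
 * Worked example of the round-to-nearest-even case above (illustrative
 * comment only; assumes FP_NG == 2, i.e. two guard/round bits below the
 * low mantissa bit, as defined in fpu_emu.h).  Writing L for the
 * mantissa's low bit after the FP_NG shift, g and r for the two bits
 * shifted out, and s for the sticky OR of everything below them:
 *
 *	L g r s		action
 *	x 0 x x		round down (below halfway, or exact)
 *	x 1 1 x		round up   (above halfway)
 *	x 1 0 1		round up   (above halfway)
 *	0 1 0 0		round down (exact tie, low bit already even)
 *	1 1 0 0		round up   (exact tie, round to even low bit)
 *
 * In code terms, the test in the FSR_RD_RN arm reduces to:
 *
 *	if ((gr & 2) && ((gr & 1) | s | (m3 & 1)))
 *		... round up ...
 */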

/*
 * For overflow: return true if overflow is to go to +/-Inf, according
 * to the sign of the overflowing result.  If false, overflow is to go
 * to the largest magnitude value instead.
 */
static int
toinf(struct fpemu *fe, int sign)
{
	int inf;

	/* look at rounding direction */
	switch ((fe->fe_fsr >> FSR_RD_SHIFT) & FSR_RD_MASK) {

	default:
	case FSR_RD_RN:		/* the nearest value is always Inf */
		inf = 1;
		break;

	case FSR_RD_RZ:		/* toward 0 => never towards Inf */
		inf = 0;
		break;

	case FSR_RD_RP:		/* toward +Inf iff positive */
		inf = sign == 0;
		break;

	case FSR_RD_RM:		/* toward -Inf iff negative */
		inf = sign;
		break;
	}
	return (inf);
}
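
/*
 * Example (illustrative comment): under FSR_RD_RM (round towards -Inf),
 * a positive overflow stays at the largest finite magnitude (toinf()
 * returns 0) while a negative overflow goes to -Inf (toinf() returns
 * nonzero).  Note that the callers below pass the sign already shifted
 * to bit 31, so any nonzero `sign' argument means "negative" here.
 */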

/*
 * fpn -> int (int value returned as return value).
 *
 * N.B.: this conversion always rounds towards zero (this is a peculiarity
 * of the SPARC instruction set).
 */
u_int
fpu_ftoi(fe, fp)
	struct fpemu *fe;
	register struct fpn *fp;
{
	register u_int i;
	register int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {

	case FPC_ZERO:
		return (0);

	case FPC_NUM:
		/*
		 * If the value is >= 2^32 (i.e., exp >= 32), overflow.
		 * Otherwise shift value right into last mantissa word
		 * (this will not exceed 0xffffffff), shifting any guard
		 * and round bits out into the sticky bit.  Then ``round''
		 * towards zero, i.e., just set an inexact exception if
		 * sticky is set (see round()).
		 * If the result is > 0x80000000, or is positive and equals
		 * 0x80000000, overflow; otherwise the last fraction word
		 * is the result.
		 */
		if ((exp = fp->fp_exp) >= 32)
			break;
		/* NB: the following includes exp < 0 cases */
		if (fpu_shr(fp, FP_NMANT - 1 - exp) != 0)
			fe->fe_cx |= FSR_NX;
		i = fp->fp_mant[3];
		if (i >= ((u_int)0x80000000 + sign))
			break;
		return (sign ? -i : i);

	default:	/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_cx = (fe->fe_cx & ~FSR_NX) | FSR_NV;
	return (0x7fffffff + sign);
}
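
/*
 * Worked example (illustrative comment): converting -1.5.  Here
 * fp_exp == 0 and the mantissa is 1.1 (binary), so fpu_shr() shifts
 * FP_NMANT - 1 bits out, leaving fp_mant[3] == 1 with the ".1" swept
 * into the sticky bit.  Sticky is nonzero, so FSR_NX is raised, and
 * the truncated result -1 is returned; the FSR rounding mode is never
 * consulted, per the SPARC conversion instructions' round-toward-zero
 * rule.  Note also the asymmetric bound check above: adding `sign' to
 * 0x80000000 admits the value 0x80000000 only when negative, since
 * -2^31 is representable as an int but +2^31 is not.
 */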

#ifdef SUN4U
/*
 * fpn -> extended int (high bits of int value returned as return value).
 *
 * N.B.: this conversion always rounds towards zero (this is a peculiarity
 * of the SPARC instruction set).
 */
u_int
fpu_ftox(fe, fp, res)
	struct fpemu *fe;
	register struct fpn *fp;
	u_int *res;
{
	register u_int64_t i;
	register int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {

	case FPC_ZERO:
		res[1] = 0;
		return (0);

	case FPC_NUM:
		/*
		 * If the value is >= 2^64 (i.e., exp >= 64), overflow.
		 * Otherwise shift value right into the last two mantissa
		 * words (this will not exceed 0xffffffffffffffff),
		 * shifting any guard and round bits out into the sticky
		 * bit.  Then ``round'' towards zero, i.e., just set an
		 * inexact exception if sticky is set (see round()).
		 * If the result is > 0x8000000000000000, or is positive
		 * and equals 0x8000000000000000, overflow; otherwise the
		 * last two fraction words are the result.
		 */
		if ((exp = fp->fp_exp) >= 64)
			break;
		/* NB: the following includes exp < 0 cases */
		if (fpu_shr(fp, FP_NMANT - 1 - exp) != 0)
			fe->fe_cx |= FSR_NX;
		i = ((u_int64_t)fp->fp_mant[2] << 32) | fp->fp_mant[3];
		if (i >= ((u_int64_t)0x8000000000000000LL + sign))
			break;
		if (sign)
			i = -i;
		res[1] = (int)i;
		return (i >> 32);

	default:	/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_cx = (fe->fe_cx & ~FSR_NX) | FSR_NV;
	/*
	 * Saturate to 0x7fffffffffffffff (positive) or
	 * 0x8000000000000000 (negative), split across the high-word
	 * return value and the low word in res[1].
	 */
	res[1] = sign ? 0 : ~0;
	return (0x7fffffff + sign);
}
#endif /* SUN4U */
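
/*
 * Example (illustrative comment): converting 2^40 with fpu_ftox().
 * fp_exp == 40 < 64, so fpu_shr() leaves the 64-bit integer value
 * 0x0000010000000000 in fp_mant[2]/fp_mant[3]; the low word 0 is
 * stored in res[1] and the high word 0x100 is the return value,
 * matching the two-register result convention used by fpu_implode()
 * below.
 */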

/*
 * fpn -> single (32 bit single returned as return value).
 * We assume <= 29 bits in a single-precision fraction (1.f part).
 */
u_int
fpu_ftos(fe, fp)
	struct fpemu *fe;
	register struct fpn *fp;
{
	register u_int sign = fp->fp_sign << 31;
	register int exp;

#define	SNG_EXP(e)	((e) << SNG_FRACBITS)	/* makes e an exponent */
#define	SNG_MASK	(SNG_EXP(1) - 1)	/* mask for fraction */

	/* Take care of non-numbers first. */
	if (ISNAN(fp)) {
		/*
		 * Preserve upper bits of NaN, per SPARC V8 appendix N.
		 * Note that fp->fp_mant[0] has the quiet bit set,
		 * even if it is classified as a signalling NaN.
		 */
		(void) fpu_shr(fp, FP_NMANT - 1 - SNG_FRACBITS);
		exp = SNG_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp))
		return (sign | SNG_EXP(SNG_EXP_INFNAN));
	if (ISZERO(fp))
		return (sign);

	/*
	 * Normals (including subnormals).  Drop all the fraction bits
	 * (including the explicit ``implied'' 1 bit) down into the
	 * single-precision range.  If the number is subnormal, move
	 * the ``implied'' 1 into the explicit range as well, and shift
	 * right to introduce leading zeroes.  Rounding then acts
	 * differently for normals and subnormals: the largest subnormal
	 * may round to the smallest normal (1.0 x 2^minexp), or may
	 * remain subnormal.  In the latter case, signal an underflow
	 * if the result was inexact or if underflow traps are enabled.
	 *
	 * Rounding a normal, on the other hand, always produces another
	 * normal (although either way the result might be too big for
	 * single precision, and cause an overflow).  If rounding a
	 * normal produces 2.0 in the fraction, we need not adjust that
	 * fraction at all, since both 1.0 and 2.0 are zero under the
	 * fraction mask.
	 *
	 * Note that the guard and round bits vanish from the number after
	 * rounding.
	 */
	if ((exp = fp->fp_exp + SNG_EXP_BIAS) <= 0) {	/* subnormal */
		/* -NG for g,r; -SNG_FRACBITS-exp for fraction */
		(void) fpu_shr(fp, FP_NMANT - FP_NG - SNG_FRACBITS - exp);
		if (round(fe, fp) && fp->fp_mant[3] == SNG_EXP(1))
			return (sign | SNG_EXP(1) | 0);
		if ((fe->fe_cx & FSR_NX) ||
		    (fe->fe_fsr & (FSR_UF << FSR_TEM_SHIFT)))
			fe->fe_cx |= FSR_UF;
		return (sign | SNG_EXP(0) | fp->fp_mant[3]);
	}
	/* -FP_NG for g,r; -1 for implied 1; -SNG_FRACBITS for fraction */
	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - SNG_FRACBITS);
#ifdef DIAGNOSTIC
	if ((fp->fp_mant[3] & SNG_EXP(1 << FP_NG)) == 0)
		panic("fpu_ftos");
#endif
	if (round(fe, fp) && fp->fp_mant[3] == SNG_EXP(2))
		exp++;
	if (exp >= SNG_EXP_INFNAN) {
		/* overflow to inf or to max single */
		fe->fe_cx |= FSR_OF | FSR_NX;
		if (toinf(fe, sign))
			return (sign | SNG_EXP(SNG_EXP_INFNAN));
		return (sign | SNG_EXP(SNG_EXP_INFNAN - 1) | SNG_MASK);
	}
done:
	/* phew, made it */
	return (sign | SNG_EXP(exp) | (fp->fp_mant[3] & SNG_MASK));
}
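
/*
 * Packing example (illustrative comment): for the value 1.0,
 * fp_exp == 0, so exp == SNG_EXP_BIAS == 127 and the fraction bits
 * below the implied 1 are all zero.  The returned word is
 *
 *	0 | SNG_EXP(127) | 0  ==  127 << 23  ==  0x3f800000
 *
 * the usual IEEE single-precision encoding of 1.0.
 */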

/*
 * fpn -> double (32 bit high-order result returned; 32-bit low order result
 * left in res[1]).  Assumes <= 61 bits in double precision fraction.
 *
 * This code mimics fpu_ftos; see it for comments.
 */
u_int
fpu_ftod(fe, fp, res)
	struct fpemu *fe;
	register struct fpn *fp;
	u_int *res;
{
	register u_int sign = fp->fp_sign << 31;
	register int exp;

#define	DBL_EXP(e)	((e) << (DBL_FRACBITS & 31))
#define	DBL_MASK	(DBL_EXP(1) - 1)

	if (ISNAN(fp)) {
		(void) fpu_shr(fp, FP_NMANT - 1 - DBL_FRACBITS);
		exp = DBL_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp)) {
		sign |= DBL_EXP(DBL_EXP_INFNAN);
		goto zero;
	}
	if (ISZERO(fp)) {
zero:		res[1] = 0;
		return (sign);
	}

	if ((exp = fp->fp_exp + DBL_EXP_BIAS) <= 0) {
		(void) fpu_shr(fp, FP_NMANT - FP_NG - DBL_FRACBITS - exp);
		if (round(fe, fp) && fp->fp_mant[2] == DBL_EXP(1)) {
			res[1] = 0;
			return (sign | DBL_EXP(1) | 0);
		}
		if ((fe->fe_cx & FSR_NX) ||
		    (fe->fe_fsr & (FSR_UF << FSR_TEM_SHIFT)))
			fe->fe_cx |= FSR_UF;
		exp = 0;
		goto done;
	}
	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - DBL_FRACBITS);
	if (round(fe, fp) && fp->fp_mant[2] == DBL_EXP(2))
		exp++;
	if (exp >= DBL_EXP_INFNAN) {
		fe->fe_cx |= FSR_OF | FSR_NX;
		if (toinf(fe, sign)) {
			res[1] = 0;
			return (sign | DBL_EXP(DBL_EXP_INFNAN) | 0);
		}
		res[1] = ~0;
		return (sign | DBL_EXP(DBL_EXP_INFNAN) | DBL_MASK);
	}
done:
	res[1] = fp->fp_mant[3];
	return (sign | DBL_EXP(exp) | (fp->fp_mant[2] & DBL_MASK));
}
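
/*
 * Packing example (illustrative comment): for 1.0, exp == DBL_EXP_BIAS
 * == 1023, so the returned high word is DBL_EXP(1023) == 1023 << 20 ==
 * 0x3ff00000 (sign bit, 11 exponent bits, top 20 fraction bits), and
 * res[1] == 0 carries the remaining 32 fraction bits.
 */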

/*
 * fpn -> extended (32 bit high-order result returned; low-order fraction
 * words left in res[1]..res[3]).  Like ftod, which is like ftos ... but
 * our internal format *is* extended precision, plus 2 bits for guard/round,
 * so we can avoid a small bit of work.
 */
u_int
fpu_ftoq(fe, fp, res)
	struct fpemu *fe;
	register struct fpn *fp;
	u_int *res;
{
	register u_int sign = fp->fp_sign << 31;
	register int exp;

#define	EXT_EXP(e)	((e) << (EXT_FRACBITS & 31))
#define	EXT_MASK	(EXT_EXP(1) - 1)

	if (ISNAN(fp)) {
		(void) fpu_shr(fp, 2);	/* since we are not rounding */
		exp = EXT_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp)) {
		sign |= EXT_EXP(EXT_EXP_INFNAN);
		goto zero;
	}
	if (ISZERO(fp)) {
zero:		res[1] = res[2] = res[3] = 0;
		return (sign);
	}

	if ((exp = fp->fp_exp + EXT_EXP_BIAS) <= 0) {
		(void) fpu_shr(fp, FP_NMANT - FP_NG - EXT_FRACBITS - exp);
		if (round(fe, fp) && fp->fp_mant[0] == EXT_EXP(1)) {
			res[1] = res[2] = res[3] = 0;
			return (sign | EXT_EXP(1) | 0);
		}
		if ((fe->fe_cx & FSR_NX) ||
		    (fe->fe_fsr & (FSR_UF << FSR_TEM_SHIFT)))
			fe->fe_cx |= FSR_UF;
		exp = 0;
		goto done;
	}
	/* Since internal == extended, no need to shift here. */
	if (round(fe, fp) && fp->fp_mant[0] == EXT_EXP(2))
		exp++;
	if (exp >= EXT_EXP_INFNAN) {
		fe->fe_cx |= FSR_OF | FSR_NX;
		if (toinf(fe, sign)) {
			res[1] = res[2] = res[3] = 0;
			return (sign | EXT_EXP(EXT_EXP_INFNAN) | 0);
		}
		res[1] = res[2] = res[3] = ~0;
		return (sign | EXT_EXP(EXT_EXP_INFNAN) | EXT_MASK);
	}
done:
	res[1] = fp->fp_mant[1];
	res[2] = fp->fp_mant[2];
	res[3] = fp->fp_mant[3];
	return (sign | EXT_EXP(exp) | (fp->fp_mant[0] & EXT_MASK));
}
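
/*
 * Packing example (illustrative comment): for 1.0, exp == EXT_EXP_BIAS
 * == 16383, so the returned high word is EXT_EXP(16383) ==
 * 16383 << 16 == 0x3fff0000, and res[1]..res[3] hold the remaining
 * 96 fraction bits, all zero here.
 */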

/*
 * Implode an fpn, writing the result into the given space.
 */
void
fpu_implode(fe, fp, type, space)
	struct fpemu *fe;
	register struct fpn *fp;
	int type;
	register u_int *space;
{

	DPRINTF(FPE_REG, ("\n imploding: "));
	DUMPFPN(FPE_REG, fp);
	DPRINTF(FPE_REG, ("\n"));

	switch (type) {

#ifdef SUN4U
	case FTYPE_LNG:
		space[0] = fpu_ftox(fe, fp, space);
		break;
#endif /* SUN4U */

	case FTYPE_INT:
		space[0] = fpu_ftoi(fe, fp);
		break;

	case FTYPE_SNG:
		space[0] = fpu_ftos(fe, fp);
		break;

	case FTYPE_DBL:
		space[0] = fpu_ftod(fe, fp, space);
		break;

	case FTYPE_EXT:
		/* funky rounding precision options ?? */
		space[0] = fpu_ftoq(fe, fp, space);
		break;

	default:
		panic("fpu_implode");
	}
#ifdef SUN4U
	DPRINTF(FPE_REG, ("fpu_implode: %x %x %x %x\n",
		space[0], space[1], space[2], space[3]));
#else
	DPRINTF(FPE_REG, ("fpu_implode: %x %x\n",
		space[0], space[1]));
#endif
}
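
/*
 * Usage sketch (illustrative comment; the local names below are
 * hypothetical, not part of this file's API).  A caller in the
 * emulator typically finishes an operation with something like:
 *
 *	u_int space[4];
 *	... decode the instruction and compute fp ...
 *	fpu_implode(fe, fp, FTYPE_DBL, space);
 *	... space[0] and space[1] now hold the packed double ...
 *
 * See the emulator's instruction-dispatch code for the real call sites.
 */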