/*	$NetBSD: fpu_implode.c,v 1.8.6.2 2002/01/08 00:27:36 nathanw Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_implode.c	8.1 (Berkeley) 6/11/93
 */

/*
 * FPU subroutines: `implode' internal format numbers into the machine's
 * `packed binary' format.
 */

#if defined(_KERNEL_OPT)
#include "opt_sparc_arch.h"
#endif

#include <sys/types.h>
#include <sys/systm.h>

#include <machine/ieee.h>
#include <machine/instr.h>
#include <machine/reg.h>

#include <sparc/fpu/fpu_arith.h>
#include <sparc/fpu/fpu_emu.h>
#include <sparc/fpu/fpu_extern.h>

static int round __P((register struct fpemu *, register struct fpn *));
static int toinf __P((struct fpemu *, int));

/*
 * Round a number (algorithm from Motorola MC68882 manual, modified for
 * our internal format).  Set inexact exception if rounding is required.
 * Return true iff we rounded up.
 *
 * After rounding, we discard the guard and round bits by shifting right
 * 2 bits (a la fpu_shr(), but we do not bother with fp->fp_sticky).
 * This saves effort later.
 *
 * Note that we may leave the value 2.0 in fp->fp_mant; it is the caller's
 * responsibility to fix this if necessary.
 */
static int
round(register struct fpemu *fe, register struct fpn *fp)
{
	register u_int m0, m1, m2, m3;
	register int gr, s;

	m0 = fp->fp_mant[0];
	m1 = fp->fp_mant[1];
	m2 = fp->fp_mant[2];
	m3 = fp->fp_mant[3];
	gr = m3 & 3;
	s = fp->fp_sticky;

	/* mant >>= FP_NG */
	m3 = (m3 >> FP_NG) | (m2 << (32 - FP_NG));
	m2 = (m2 >> FP_NG) | (m1 << (32 - FP_NG));
	m1 = (m1 >> FP_NG) | (m0 << (32 - FP_NG));
	m0 >>= FP_NG;

	if ((gr | s) == 0)	/* result is exact: no rounding needed */
		goto rounddown;

	fe->fe_cx |= FSR_NX;	/* inexact */

	/* Go to rounddown to round down; break to round up. */
	switch ((fe->fe_fsr >> FSR_RD_SHIFT) & FSR_RD_MASK) {

	case FSR_RD_RN:
	default:
		/*
		 * Round only if guard is set (gr & 2).  If guard is set,
		 * but round & sticky both clear, then we want to round
		 * but have a tie, so round to even, i.e., add 1 iff odd.
		 */
		if ((gr & 2) == 0)
			goto rounddown;
		if ((gr & 1) || fp->fp_sticky || (m3 & 1))
			break;
		goto rounddown;

	case FSR_RD_RZ:
		/* Round towards zero, i.e., down. */
		goto rounddown;

	case FSR_RD_RM:
		/* Round towards -Inf: up if negative, down if positive. */
		if (fp->fp_sign)
			break;
		goto rounddown;

	case FSR_RD_RP:
		/* Round towards +Inf: up if positive, down otherwise. */
		if (!fp->fp_sign)
			break;
		goto rounddown;
	}

	/* Bump low bit of mantissa, with carry. */
	FPU_ADDS(m3, m3, 1);
	FPU_ADDCS(m2, m2, 0);
	FPU_ADDCS(m1, m1, 0);
	FPU_ADDC(m0, m0, 0);
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	return (1);

rounddown:
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	return (0);
}
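
/*
 * Illustrative sketch (not part of the build): the FSR_RD_RN case above,
 * acted out on a 2-bit guard/round tail like the one round() consumes.
 * The helper name rn_demo and the test values are invented for this
 * example only.
 */
#if 0
#include <stdio.h>

/* Round mant, whose low 2 bits are guard and round, to nearest even. */
static unsigned int
rn_demo(unsigned int mant, int sticky)
{
	unsigned int gr = mant & 3;
	unsigned int m = mant >> 2;	/* discard guard/round, as above */

	if ((gr & 2) == 0)		/* guard clear: below half, down */
		return (m);
	if ((gr & 1) || sticky || (m & 1))
		return (m + 1);		/* above half, or tie with odd lsb */
	return (m);			/* exact tie, even lsb: stay */
}

int
main(void)
{
	printf("%u\n", rn_demo(0x5, 0));	/* 1 + 1/4: down to 1 */
	printf("%u\n", rn_demo(0x6, 0));	/* 1 + 1/2: tie, odd, up to 2 */
	printf("%u\n", rn_demo(0x2, 0));	/* 0 + 1/2: tie, even, stays 0 */
	printf("%u\n", rn_demo(0x6, 1));	/* sticky breaks the tie: 2 */
	return (0);
}
#endif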

/*
 * For overflow: return true if overflow is to go to +/-Inf, according
 * to the sign of the overflowing result.  If false, overflow is to go
 * to the largest magnitude value instead.
 */
static int
toinf(struct fpemu *fe, int sign)
{
	int inf;

	/* look at rounding direction */
	switch ((fe->fe_fsr >> FSR_RD_SHIFT) & FSR_RD_MASK) {

	default:
	case FSR_RD_RN:		/* the nearest value is always Inf */
		inf = 1;
		break;

	case FSR_RD_RZ:		/* toward 0 => never towards Inf */
		inf = 0;
		break;

	case FSR_RD_RP:		/* toward +Inf iff positive */
		inf = sign == 0;
		break;

	case FSR_RD_RM:		/* toward -Inf iff negative */
		inf = sign;
		break;
	}
	return (inf);
}
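
/*
 * Illustrative sketch (not part of the build): how the rounding direction
 * selects between Inf and the largest finite value on overflow.  The enum
 * and helper below are stand-ins local to this example.
 */
#if 0
#include <stdio.h>

enum rd { RD_RN, RD_RZ, RD_RP, RD_RM };	/* stand-ins for FSR_RD_* */

static int
toinf_demo(enum rd rd, int sign)
{
	switch (rd) {
	default:
	case RD_RN:	return (1);		/* nearest value is Inf */
	case RD_RZ:	return (0);		/* never towards Inf */
	case RD_RP:	return (sign == 0);	/* +Inf iff positive */
	case RD_RM:	return (sign != 0);	/* -Inf iff negative */
	}
}

int
main(void)
{
	/* Positive overflow: Inf under RN, max finite under RZ. */
	printf("RN: %s\n", toinf_demo(RD_RN, 0) ? "Inf" : "max finite");
	printf("RZ: %s\n", toinf_demo(RD_RZ, 0) ? "Inf" : "max finite");
	return (0);
}
#endif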

/*
 * fpn -> int (int value returned as return value).
 *
 * N.B.: this conversion always rounds towards zero (this is a peculiarity
 * of the SPARC instruction set).
 */
u_int
fpu_ftoi(fe, fp)
	struct fpemu *fe;
	register struct fpn *fp;
{
	register u_int i;
	register int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {

	case FPC_ZERO:
		return (0);

	case FPC_NUM:
		/*
		 * If the value is >= 2^32 (exp >= 32), overflow.  Otherwise
		 * shift value right into last mantissa word (this will not
		 * exceed 0xffffffff), shifting any guard and round bits out
		 * into the sticky bit.  Then ``round'' towards zero, i.e.,
		 * just set an inexact exception if sticky is set (see
		 * round()).  If the result is > 0x80000000, or is positive
		 * and equals 0x80000000, overflow; otherwise the last
		 * fraction word is the result.
		 */
		if ((exp = fp->fp_exp) >= 32)
			break;
		/* NB: the following includes exp < 0 cases */
		if (fpu_shr(fp, FP_NMANT - 1 - exp) != 0)
			fe->fe_cx |= FSR_NX;
		i = fp->fp_mant[3];
		if (i >= ((u_int)0x80000000 + sign))
			break;
		return (sign ? -i : i);

	default:		/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_cx = (fe->fe_cx & ~FSR_NX) | FSR_NV;
	return (0x7fffffff + sign);
}
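
/*
 * Illustrative sketch (not part of the build): the overflow return above
 * relies on sign being 0 or 1, so 0x7fffffff + sign yields INT_MAX for
 * positive overflow and wraps to 0x80000000 (INT_MIN's bit pattern) for
 * negative overflow.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned int sign;

	for (sign = 0; sign <= 1; sign++)
		printf("sign=%u -> %#x\n", sign, 0x7fffffffU + sign);
	/* prints 0x7fffffff, then 0x80000000 */
	return (0);
}
#endif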

#ifdef SUN4U
/*
 * fpn -> extended int (high bits of int value returned as return value;
 * low bits left in res[1]).
 *
 * N.B.: this conversion always rounds towards zero (this is a peculiarity
 * of the SPARC instruction set).
 */
u_int
fpu_ftox(fe, fp, res)
	struct fpemu *fe;
	register struct fpn *fp;
	u_int *res;
{
	register u_int64_t i;
	register int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {

	case FPC_ZERO:
		res[1] = 0;
		return (0);

	case FPC_NUM:
		/*
		 * If the value is >= 2^64 (exp >= 64), overflow.  Otherwise
		 * shift value right into the last two mantissa words (this
		 * will not exceed 0xffffffffffffffff), shifting any guard
		 * and round bits out into the sticky bit.  Then ``round''
		 * towards zero, i.e., just set an inexact exception if
		 * sticky is set (see round()).  If the result is
		 * > 0x8000000000000000, or is positive and equals
		 * 0x8000000000000000, overflow; otherwise the last two
		 * fraction words are the result.
		 */
		if ((exp = fp->fp_exp) >= 64)
			break;
		/* NB: the following includes exp < 0 cases */
		if (fpu_shr(fp, FP_NMANT - 1 - exp) != 0)
			fe->fe_cx |= FSR_NX;
		i = ((u_int64_t)fp->fp_mant[2] << 32) | fp->fp_mant[3];
		if (i >= ((u_int64_t)0x8000000000000000LL + sign))
			break;
		if (sign)
			i = -i;
		res[1] = (u_int)i;
		return ((u_int)(i >> 32));

	default:		/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_cx = (fe->fe_cx & ~FSR_NX) | FSR_NV;
	res[1] = 0xffffffff + sign;
	return (0x7fffffff + sign);
}
#endif /* SUN4U */
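
/*
 * Illustrative sketch (not part of the build): the 64-bit analog of the
 * clamp in fpu_ftoi.  With sign 0 or 1, high word 0x7fffffff + sign and
 * low word 0xffffffff + sign together form the bit patterns of INT64_MAX
 * and INT64_MIN respectively.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned int sign;

	for (sign = 0; sign <= 1; sign++)
		printf("sign=%u -> %#x:%#x\n", sign,
		    0x7fffffffU + sign, 0xffffffffU + sign);
	/* prints 0x7fffffff:0xffffffff, then 0x80000000:0 */
	return (0);
}
#endif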

/*
 * fpn -> single (32 bit single returned as return value).
 * We assume <= 29 bits in a single-precision fraction (1.f part).
 */
u_int
fpu_ftos(fe, fp)
	struct fpemu *fe;
	register struct fpn *fp;
{
	register u_int sign = fp->fp_sign << 31;
	register int exp;

#define	SNG_EXP(e)	((e) << SNG_FRACBITS)	/* makes e an exponent */
#define	SNG_MASK	(SNG_EXP(1) - 1)	/* mask for fraction */

	/* Take care of non-numbers first. */
	if (ISNAN(fp)) {
		/*
		 * Preserve upper bits of NaN, per SPARC V8 appendix N.
		 * Note that fp->fp_mant[0] has the quiet bit set,
		 * even if it is classified as a signalling NaN.
		 */
		(void) fpu_shr(fp, FP_NMANT - 1 - SNG_FRACBITS);
		exp = SNG_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp))
		return (sign | SNG_EXP(SNG_EXP_INFNAN));
	if (ISZERO(fp))
		return (sign);

	/*
	 * Normals (including subnormals).  Drop all the fraction bits
	 * (including the explicit ``implied'' 1 bit) down into the
	 * single-precision range.  If the number is subnormal, move
	 * the ``implied'' 1 into the explicit range as well, and shift
	 * right to introduce leading zeroes.  Rounding then acts
	 * differently for normals and subnormals: the largest subnormal
	 * may round to the smallest normal (1.0 x 2^minexp), or may
	 * remain subnormal.  In the latter case, signal an underflow
	 * if the result was inexact or if underflow traps are enabled.
	 *
	 * Rounding a normal, on the other hand, always produces another
	 * normal (although either way the result might be too big for
	 * single precision, and cause an overflow).  If rounding a
	 * normal produces 2.0 in the fraction, we need not adjust that
	 * fraction at all, since both 1.0 and 2.0 are zero under the
	 * fraction mask.
	 *
	 * Note that the guard and round bits vanish from the number after
	 * rounding.
	 */
	if ((exp = fp->fp_exp + SNG_EXP_BIAS) <= 0) {	/* subnormal */
		/* -NG for g,r; -SNG_FRACBITS-exp for fraction */
		(void) fpu_shr(fp, FP_NMANT - FP_NG - SNG_FRACBITS - exp);
		if (round(fe, fp) && fp->fp_mant[3] == SNG_EXP(1))
			return (sign | SNG_EXP(1) | 0);
		if ((fe->fe_cx & FSR_NX) ||
		    (fe->fe_fsr & (FSR_UF << FSR_TEM_SHIFT)))
			fe->fe_cx |= FSR_UF;
		return (sign | SNG_EXP(0) | fp->fp_mant[3]);
	}
	/* -FP_NG for g,r; -1 for implied 1; -SNG_FRACBITS for fraction */
	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - SNG_FRACBITS);
#ifdef DIAGNOSTIC
	if ((fp->fp_mant[3] & SNG_EXP(1 << FP_NG)) == 0)
		panic("fpu_ftos");
#endif
	if (round(fe, fp) && fp->fp_mant[3] == SNG_EXP(2))
		exp++;
	if (exp >= SNG_EXP_INFNAN) {
		/* overflow to inf or to max single */
		fe->fe_cx |= FSR_OF | FSR_NX;
		if (toinf(fe, sign))
			return (sign | SNG_EXP(SNG_EXP_INFNAN));
		return (sign | SNG_EXP(SNG_EXP_INFNAN - 1) | SNG_MASK);
	}
done:
	/* phew, made it */
	return (sign | SNG_EXP(exp) | (fp->fp_mant[3] & SNG_MASK));
}
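
/*
 * Illustrative sketch (not part of the build): the word fpu_ftos returns
 * is an ordinary IEEE single, sign | exponent | fraction.  This host-side
 * check packs 1.5 by hand the same way; it assumes an IEEE-754 host and
 * uses memcpy for the reinterpretation.
 */
#if 0
#include <stdio.h>
#include <string.h>

int
main(void)
{
	unsigned int bits;
	float f;

	/* 1.5 = 1.1b x 2^0: sign 0, biased exp 127, fraction 0x400000. */
	bits = (0U << 31) | (127U << 23) | 0x400000U;
	memcpy(&f, &bits, sizeof f);
	printf("%f\n", f);	/* prints 1.500000 */
	return (0);
}
#endif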

/*
 * fpn -> double (32 bit high-order result returned; 32-bit low order result
 * left in res[1]).  Assumes <= 61 bits in double precision fraction.
 *
 * This code mimics fpu_ftos; see it for comments.
 */
u_int
fpu_ftod(fe, fp, res)
	struct fpemu *fe;
	register struct fpn *fp;
	u_int *res;
{
	register u_int sign = fp->fp_sign << 31;
	register int exp;

#define	DBL_EXP(e)	((e) << (DBL_FRACBITS & 31))
#define	DBL_MASK	(DBL_EXP(1) - 1)

	if (ISNAN(fp)) {
		(void) fpu_shr(fp, FP_NMANT - 1 - DBL_FRACBITS);
		exp = DBL_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp)) {
		sign |= DBL_EXP(DBL_EXP_INFNAN);
		goto zero;
	}
	if (ISZERO(fp)) {
zero:		res[1] = 0;
		return (sign);
	}

	if ((exp = fp->fp_exp + DBL_EXP_BIAS) <= 0) {
		(void) fpu_shr(fp, FP_NMANT - FP_NG - DBL_FRACBITS - exp);
		if (round(fe, fp) && fp->fp_mant[2] == DBL_EXP(1)) {
			res[1] = 0;
			return (sign | DBL_EXP(1) | 0);
		}
		if ((fe->fe_cx & FSR_NX) ||
		    (fe->fe_fsr & (FSR_UF << FSR_TEM_SHIFT)))
			fe->fe_cx |= FSR_UF;
		exp = 0;
		goto done;
	}
	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - DBL_FRACBITS);
	if (round(fe, fp) && fp->fp_mant[2] == DBL_EXP(2))
		exp++;
	if (exp >= DBL_EXP_INFNAN) {
		fe->fe_cx |= FSR_OF | FSR_NX;
		if (toinf(fe, sign)) {
			res[1] = 0;
			return (sign | DBL_EXP(DBL_EXP_INFNAN) | 0);
		}
		res[1] = ~0;
		return (sign | DBL_EXP(DBL_EXP_INFNAN) | DBL_MASK);
	}
done:
	res[1] = fp->fp_mant[3];
	return (sign | DBL_EXP(exp) | (fp->fp_mant[2] & DBL_MASK));
}
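
/*
 * Illustrative sketch (not part of the build): a double comes back as two
 * 32-bit words; the high word holds sign, 11 exponent bits and the top 20
 * fraction bits (hence the DBL_FRACBITS & 31 == 20 shift in DBL_EXP),
 * while res[1] carries the low 32 fraction bits.  Packing 1.5 by hand on
 * an IEEE-754 host:
 */
#if 0
#include <stdio.h>
#include <string.h>

int
main(void)
{
	unsigned int hi = (0U << 31) | (1023U << 20) | 0x80000U;
	unsigned int lo = 0;
	unsigned long long bits = ((unsigned long long)hi << 32) | lo;
	double d;

	memcpy(&d, &bits, sizeof d);
	printf("%f\n", d);	/* prints 1.500000 */
	return (0);
}
#endif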

/*
 * fpn -> extended (32 bit high-order result returned; low-order fraction
 * words left in res[1]..res[3]).  Like ftod, which is like ftos ... but
 * our internal format *is* extended precision, plus 2 bits for guard/round,
 * so we can avoid a small bit of work.
 */
u_int
fpu_ftoq(fe, fp, res)
	struct fpemu *fe;
	register struct fpn *fp;
	u_int *res;
{
	register u_int sign = fp->fp_sign << 31;
	register int exp;

#define	EXT_EXP(e)	((e) << (EXT_FRACBITS & 31))
#define	EXT_MASK	(EXT_EXP(1) - 1)

	if (ISNAN(fp)) {
		(void) fpu_shr(fp, 2);	/* since we are not rounding */
		exp = EXT_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp)) {
		sign |= EXT_EXP(EXT_EXP_INFNAN);
		goto zero;
	}
	if (ISZERO(fp)) {
zero:		res[1] = res[2] = res[3] = 0;
		return (sign);
	}

	if ((exp = fp->fp_exp + EXT_EXP_BIAS) <= 0) {
		(void) fpu_shr(fp, FP_NMANT - FP_NG - EXT_FRACBITS - exp);
		if (round(fe, fp) && fp->fp_mant[0] == EXT_EXP(1)) {
			res[1] = res[2] = res[3] = 0;
			return (sign | EXT_EXP(1) | 0);
		}
		if ((fe->fe_cx & FSR_NX) ||
		    (fe->fe_fsr & (FSR_UF << FSR_TEM_SHIFT)))
			fe->fe_cx |= FSR_UF;
		exp = 0;
		goto done;
	}
	/* Since internal == extended, no need to shift here. */
	if (round(fe, fp) && fp->fp_mant[0] == EXT_EXP(2))
		exp++;
	if (exp >= EXT_EXP_INFNAN) {
		fe->fe_cx |= FSR_OF | FSR_NX;
		if (toinf(fe, sign)) {
			res[1] = res[2] = res[3] = 0;
			return (sign | EXT_EXP(EXT_EXP_INFNAN) | 0);
		}
		res[1] = res[2] = res[3] = ~0;
		return (sign | EXT_EXP(EXT_EXP_INFNAN) | EXT_MASK);
	}
done:
	res[1] = fp->fp_mant[1];
	res[2] = fp->fp_mant[2];
	res[3] = fp->fp_mant[3];
	return (sign | EXT_EXP(exp) | (fp->fp_mant[0] & EXT_MASK));
}

/*
 * Implode an fpn, writing the result into the given space.
 */
void
fpu_implode(fe, fp, type, space)
	struct fpemu *fe;
	register struct fpn *fp;
	int type;
	register u_int *space;
{

	switch (type) {

#ifdef SUN4U
	case FTYPE_LNG:
		space[0] = fpu_ftox(fe, fp, space);
		break;
#endif /* SUN4U */

	case FTYPE_INT:
		space[0] = fpu_ftoi(fe, fp);
		break;

	case FTYPE_SNG:
		space[0] = fpu_ftos(fe, fp);
		break;

	case FTYPE_DBL:
		space[0] = fpu_ftod(fe, fp, space);
		break;

	case FTYPE_EXT:
		/* funky rounding precision options ?? */
		space[0] = fpu_ftoq(fe, fp, space);
		break;

	default:
		panic("fpu_implode");
	}
#ifdef SUN4U
	DPRINTF(FPE_REG, ("fpu_implode: %x %x %x %x\n",
	    space[0], space[1], space[2], space[3]));
#else
	DPRINTF(FPE_REG, ("fpu_implode: %x %x\n",
	    space[0], space[1]));
#endif
}