/*	$NetBSD: fpu_explode.c,v 1.11 2003/08/07 16:29:37 agc Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_explode.c	8.1 (Berkeley) 6/11/93
 */

/*
 * FPU subroutines: `explode' the machine's `packed binary' format numbers
 * into our internal format.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fpu_explode.c,v 1.11 2003/08/07 16:29:37 agc Exp $");

#if defined(_KERNEL_OPT)
#include "opt_sparc_arch.h"
#endif

#include <sys/types.h>
#include <sys/systm.h>

#include <machine/ieee.h>
#include <machine/instr.h>
#include <machine/reg.h>

#include <sparc/fpu/fpu_arith.h>
#include <sparc/fpu/fpu_emu.h>
#include <sparc/fpu/fpu_extern.h>

/*
 * N.B.: in all of the following, we assume the FP format is
 *
 *	---------------------------
 *	| s | exponent | fraction |
 *	---------------------------
 *
 * (which represents (-1)**s * 1.fraction * 2**exponent), so that the
 * sign bit is way at the top (bit 31), the exponent is next, and
 * then the remaining bits mark the fraction.  A zero exponent means
 * zero or denormalized (0.fraction rather than 1.fraction), and the
 * maximum possible exponent, 2*bias + 1, signals inf (fraction==0) or NaN.
 *
 * Since the sign bit is always the topmost bit---this holds even for
 * integers---we set that outside all the *tof functions.  Each function
 * returns the class code for the new number (but note that we use
 * FPC_QNAN for all NaNs; fpu_explode will fix this if appropriate).
 */
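
/*
 * For illustration, two packed single precision values laid out as above:
 * 1.0f is 0x3f800000 (sign 0, exponent field 0x7f == the bias, fraction 0),
 * while 0xff800000 has sign 1, the maximum exponent field 0xff and a zero
 * fraction, i.e. -infinity.
 */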

/*
 * int -> fpn.
 */
int
fpu_itof(fp, i)
	register struct fpn *fp;
	register u_int i;
{

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * The value FP_1 represents 2^FP_LG, so set the exponent
	 * there and let normalization fix it up.  Convert negative
	 * numbers to sign-and-magnitude.  Note that this relies on
	 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
	 */
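	/*
	 * For instance, i == 5 leaves fp_mant[0] == 5 with fp_exp == FP_LG;
	 * fpu_norm() then shifts the mantissa up until its leading 1 sits at
	 * the FP_1 position, lowering fp_exp to 2 (5 == 1.01 * 2**2 in binary).
	 */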
	fp->fp_exp = FP_LG;
	fp->fp_mant[0] = (int)i < 0 ? -i : i;
	fp->fp_mant[1] = 0;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	fpu_norm(fp);
	return (FPC_NUM);
}

#ifdef SUN4U
/*
 * 64-bit int -> fpn.
 */
int
fpu_xtof(fp, i)
	register struct fpn *fp;
	register u_int64_t i;
{

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * The value FP_1 represents 2^FP_LG, so set the exponent
	 * there and let normalization fix it up.  Convert negative
	 * numbers to sign-and-magnitude.  Note that this relies on
	 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
	 */
	fp->fp_exp = FP_LG2;
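	/*
	 * The 64-bit store below fills fp_mant[0] and fp_mant[1] in one go;
	 * this relies on SPARC's big-endian word order, which puts the high
	 * 32 bits of the magnitude in fp_mant[0].
	 */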
	*((int64_t*)fp->fp_mant) = (int64_t)i < 0 ? -i : i;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	fpu_norm(fp);
	return (FPC_NUM);
}
#endif /* SUN4U */

#define	mask(nbits) ((1L << (nbits)) - 1)

/*
 * All external floating formats convert to internal in the same manner,
 * as defined here.  Note that only normals get an implied 1.0 inserted.
 */
#define	FP_TOF(exp, expbias, allfrac, f0, f1, f2, f3) \
	if (exp == 0) { \
		if (allfrac == 0) \
			return (FPC_ZERO); \
		fp->fp_exp = 1 - expbias; \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		fpu_norm(fp); \
		return (FPC_NUM); \
	} \
	if (exp == (2 * expbias + 1)) { \
		if (allfrac == 0) \
			return (FPC_INF); \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		return (FPC_QNAN); \
	} \
	fp->fp_exp = exp - expbias; \
	fp->fp_mant[0] = FP_1 | f0; \
	fp->fp_mant[1] = f1; \
	fp->fp_mant[2] = f2; \
	fp->fp_mant[3] = f3; \
	return (FPC_NUM)
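
/*
 * A concrete case of the denormal branch above: the smallest single
 * precision denormal, 0x00000001, has a zero exponent field and a one-bit
 * fraction, so FP_TOF sets fp_exp to 1 - SNG_EXP_BIAS (i.e. -126) and lets
 * fpu_norm() shift the lone fraction bit up to the FP_1 position.
 */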

/*
 * 32-bit single precision -> fpn.
 * We assume a single occupies at most (64-FP_LG) bits in the internal
 * format: i.e., needs at most fp_mant[0] and fp_mant[1].
 */
int
fpu_stof(fp, i)
	register struct fpn *fp;
	register u_int i;
{
	register int exp;
	register u_int frac, f0, f1;
#define	SNG_SHIFT	(SNG_FRACBITS - FP_LG)
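	/*
	 * SNG_SHIFT is how far the packed fraction must move right so that
	 * its top bits land just below the FP_1 position in fp_mant[0];
	 * the bits shifted out reappear at the top of fp_mant[1].
	 */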

	exp = (i >> (32 - 1 - SNG_EXPBITS)) & mask(SNG_EXPBITS);
	frac = i & mask(SNG_FRACBITS);
	f0 = frac >> SNG_SHIFT;
	f1 = frac << (32 - SNG_SHIFT);
	FP_TOF(exp, SNG_EXP_BIAS, frac, f0, f1, 0, 0);
}

/*
 * 64-bit double -> fpn.
 * We assume this uses at most (96-FP_LG) bits.
 */
int
fpu_dtof(fp, i, j)
	register struct fpn *fp;
	register u_int i, j;
{
	register int exp;
	register u_int frac, f0, f1, f2;
#define	DBL_SHIFT	(DBL_FRACBITS - 32 - FP_LG)

	exp = (i >> (32 - 1 - DBL_EXPBITS)) & mask(DBL_EXPBITS);
	frac = i & mask(DBL_FRACBITS - 32);
	f0 = frac >> DBL_SHIFT;
	f1 = (frac << (32 - DBL_SHIFT)) | (j >> DBL_SHIFT);
	f2 = j << (32 - DBL_SHIFT);
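	/*
	 * f0..f2 already hold the properly aligned mantissa; folding the low
	 * word into frac below only serves the allfrac == 0 tests (zero vs.
	 * denormal, infinity vs. NaN) inside FP_TOF.
	 */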
	frac |= j;
	FP_TOF(exp, DBL_EXP_BIAS, frac, f0, f1, f2, 0);
}

/*
 * 128-bit extended -> fpn.
 */
int
fpu_qtof(fp, i, j, k, l)
	register struct fpn *fp;
	register u_int i, j, k, l;
{
	register int exp;
	register u_int frac, f0, f1, f2, f3;
#define	EXT_SHIFT	(-(EXT_FRACBITS - 3 * 32 - FP_LG))	/* left shift! */

	/*
	 * The quad fraction does not quite line up with the internal format:
	 * each word is shifted left by EXT_SHIFT so that the top fraction
	 * bits land just below FP_1, pulling the displaced bits in from the
	 * next lower word.
	 */
	exp = (i >> (32 - 1 - EXT_EXPBITS)) & mask(EXT_EXPBITS);
	frac = i & mask(EXT_FRACBITS - 3 * 32);
	f0 = (frac << EXT_SHIFT) | (j >> (32 - EXT_SHIFT));
	f1 = (j << EXT_SHIFT) | (k >> (32 - EXT_SHIFT));
	f2 = (k << EXT_SHIFT) | (l >> (32 - EXT_SHIFT));
	f3 = l << EXT_SHIFT;
	frac |= j | k | l;
	FP_TOF(exp, EXT_EXP_BIAS, frac, f0, f1, f2, f3);
}

/*
 * Explode the contents of a register / regpair / regquad.
 * If the input is a signalling NaN, an NV (invalid) exception
 * will be set.  (Note that nothing but NV can occur until ALU
 * operations are performed.)
 */
void
fpu_explode(fe, fp, type, reg)
	register struct fpemu *fe;
	register struct fpn *fp;
	int type, reg;
{
	register u_int s, *space;
#ifdef SUN4U
	u_int64_t l, *xspace;

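	/*
	 * Round reg down so the 64-bit view starts on the even register
	 * of the regpair.
	 */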
	xspace = (u_int64_t *)&fe->fe_fpstate->fs_regs[reg & ~1];
	l = xspace[0];
#endif /* SUN4U */
	space = &fe->fe_fpstate->fs_regs[reg];
	s = space[0];
	fp->fp_sign = s >> 31;
	fp->fp_sticky = 0;
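	/*
	 * s enters the switch holding the operand's high word and leaves it
	 * holding the FPC_* class code returned by the conversion routine.
	 */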
	switch (type) {
#ifdef SUN4U
	case FTYPE_LNG:
		s = fpu_xtof(fp, l);
		break;
#endif /* SUN4U */

	case FTYPE_INT:
		s = fpu_itof(fp, s);
		break;

	case FTYPE_SNG:
		s = fpu_stof(fp, s);
		break;

	case FTYPE_DBL:
		s = fpu_dtof(fp, s, space[1]);
		break;

	case FTYPE_EXT:
		s = fpu_qtof(fp, s, space[1], space[2], space[3]);
		break;

	default:
		panic("fpu_explode");
	}

	if (s == FPC_QNAN && (fp->fp_mant[0] & FP_QUIETBIT) == 0) {
		/*
		 * Input is a signalling NaN.  All operations that return
		 * an input NaN operand put it through a ``NaN conversion'',
		 * which basically just means ``turn on the quiet bit''.
		 * We do this here so that all NaNs internally look quiet
		 * (we can tell signalling ones by their class).
		 */
		fp->fp_mant[0] |= FP_QUIETBIT;
		fe->fe_cx = FSR_NV;	/* assert invalid operand */
		s = FPC_SNAN;
	}
	fp->fp_class = s;
	DPRINTF(FPE_REG, ("fpu_explode: %%%c%d => ", (type == FTYPE_LNG) ? 'x' :
		((type == FTYPE_INT) ? 'i' :
			((type == FTYPE_SNG) ? 's' :
				((type == FTYPE_DBL) ? 'd' :
					((type == FTYPE_EXT) ? 'q' : '?')))),
		reg));
#ifdef DEBUG
	if (fpe_debug & FPE_REG) {
		/* s now holds the class code; print the original operand */
		if (type == FTYPE_INT) printf("%d ", (int)space[0]);
#ifdef SUN4U
#ifdef _LP64
		if (type == FTYPE_LNG) printf("%ld ", l);
#else
		if (type == FTYPE_LNG) printf("%lld ", l);
#endif
#endif /* SUN4U */
	}
#endif /* DEBUG */
	DUMPFPN(FPE_REG, fp);
	DPRINTF(FPE_REG, ("\n"));
}