/*	$NetBSD: fpu_explode.c,v 1.2 2003/07/15 02:54:43 lukem Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_explode.c	8.1 (Berkeley) 6/11/93
 */

/*
 * FPU subroutines: `explode' the machine's `packed binary' format numbers
 * into our internal format.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fpu_explode.c,v 1.2 2003/07/15 02:54:43 lukem Exp $");

#include <sys/types.h>
#include <sys/systm.h>

#include <machine/ieee.h>
#include <powerpc/instr.h>
#include <machine/reg.h>
#include <machine/fpu.h>

#include <powerpc/fpu/fpu_arith.h>
#include <powerpc/fpu/fpu_emu.h>
#include <powerpc/fpu/fpu_extern.h>
/*
 * N.B.: in all of the following, we assume the FP format is
 *
 *	---------------------------
 *	| s | exponent | fraction |
 *	---------------------------
 *
 * (which represents -1**s * 1.fraction * 2**exponent), so that the
 * sign bit is way at the top (bit 31), the exponent is next, and
 * then the remaining bits mark the fraction.  A zero exponent means
 * zero or denormalized (0.fraction rather than 1.fraction), and the
 * maximum possible exponent, 2bias+1, signals inf (fraction==0) or NaN.
 *
 * Since the sign bit is always the topmost bit---this holds even for
 * integers---we set that outside all the *tof functions.  Each function
 * returns the class code for the new number (but note that we use
 * FPC_QNAN for all NaNs; fpu_explode will fix this if appropriate).
 */
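
/*
 * Worked example (illustrative, standard IEEE single precision, bias 127):
 * the bit pattern 0x40490fdb has s = 0, exponent field 0x80 = 128 and
 * fraction 0x490fdb, i.e. +1.5707964... * 2^(128-127) = 3.1415927...,
 * and is classed FPC_NUM.  Likewise 0x00000000 is FPC_ZERO, 0x7f800000
 * (all-ones exponent, zero fraction) is FPC_INF, and 0x7fc00000 is a
 * (quiet) NaN.
 */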

/*
 * int -> fpn.
 */
int
fpu_itof(struct fpn *fp, u_int i)
{

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * The value FP_1 represents 2^FP_LG, so set the exponent
	 * there and let normalization fix it up.  Convert negative
	 * numbers to sign-and-magnitude.  Note that this relies on
	 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
	 */
	fp->fp_exp = FP_LG;
	fp->fp_mant[0] = (int)i < 0 ? -i : i;
	fp->fp_mant[1] = 0;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	fpu_norm(fp);
	return (FPC_NUM);
}
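
/*
 * Informal example of the conversion above: for i = 6 we start with
 * fp_mant[0] = 6 and fp_exp = FP_LG, which under the internal convention
 * (value = fp_mant[0]/FP_1 * 2^fp_exp, high word only) is already 6;
 * fpu_norm() then shifts the mantissa until FP_1 <= fp_mant[0] < 2*FP_1,
 * leaving the equivalent of binary 1.1 * 2^2.
 */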

/*
 * 64-bit int -> fpn.
 */
int
fpu_xtof(struct fpn *fp, u_int64_t i)
{

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * The value FP_1 represents 2^FP_LG, so set the exponent
	 * there and let normalization fix it up.  Convert negative
	 * numbers to sign-and-magnitude.  Note that this relies on
	 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
	 * The 64-bit store below fills fp_mant[0] (high word) and
	 * fp_mant[1] (low word) in one go; this only works because
	 * the PowerPC here is big-endian.
	 */
	fp->fp_exp = FP_LG2;
	*((int64_t*)fp->fp_mant) = (int64_t)i < 0 ? -i : i;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	fpu_norm(fp);
	return (FPC_NUM);
}

#define	mask(nbits) ((1L << (nbits)) - 1)

/*
 * All external floating formats convert to internal in the same manner,
 * as defined here.  Note that only normals get an implied 1.0 inserted.
 */
#define	FP_TOF(exp, expbias, allfrac, f0, f1, f2, f3) \
	if (exp == 0) { \
		if (allfrac == 0) \
			return (FPC_ZERO); \
		fp->fp_exp = 1 - expbias; \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		fpu_norm(fp); \
		return (FPC_NUM); \
	} \
	if (exp == (2 * expbias + 1)) { \
		if (allfrac == 0) \
			return (FPC_INF); \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		return (FPC_QNAN); \
	} \
	fp->fp_exp = exp - expbias; \
	fp->fp_mant[0] = FP_1 | f0; \
	fp->fp_mant[1] = f1; \
	fp->fp_mant[2] = f2; \
	fp->fp_mant[3] = f3; \
	return (FPC_NUM)

/*
 * 32-bit single precision -> fpn.
 * We assume a single occupies at most (64-FP_LG) bits in the internal
 * format: i.e., needs at most fp_mant[0] and fp_mant[1].
 */
int
fpu_stof(struct fpn *fp, u_int i)
{
	int exp;
	u_int frac, f0, f1;
#define	SNG_SHIFT	(SNG_FRACBITS - FP_LG)

	exp = (i >> (32 - 1 - SNG_EXPBITS)) & mask(SNG_EXPBITS);
	frac = i & mask(SNG_FRACBITS);
	f0 = frac >> SNG_SHIFT;
	f1 = frac << (32 - SNG_SHIFT);
	FP_TOF(exp, SNG_EXP_BIAS, frac, f0, f1, 0, 0);
}
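
/*
 * Informal check of the shifts above, assuming the usual constants
 * (SNG_FRACBITS = 23 from <machine/ieee.h>, FP_LG = 19 from fpu_emu.h):
 * SNG_SHIFT = 4, so the top 19 fraction bits land in fp_mant[0] just
 * below the implied-1 position (FP_1), and the remaining 4 bits become
 * the top of fp_mant[1].
 */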

/*
 * 64-bit double -> fpn.
 * We assume this uses at most (96-FP_LG) bits.
 */
int
fpu_dtof(struct fpn *fp, u_int i, u_int j)
{
	int exp;
	u_int frac, f0, f1, f2;
#define	DBL_SHIFT	(DBL_FRACBITS - 32 - FP_LG)

	exp = (i >> (32 - 1 - DBL_EXPBITS)) & mask(DBL_EXPBITS);
	frac = i & mask(DBL_FRACBITS - 32);
	f0 = frac >> DBL_SHIFT;
	f1 = (frac << (32 - DBL_SHIFT)) | (j >> DBL_SHIFT);
	f2 = j << (32 - DBL_SHIFT);
	frac |= j;
	FP_TOF(exp, DBL_EXP_BIAS, frac, f0, f1, f2, 0);
}
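
/*
 * Same sanity check for doubles, under the same assumptions
 * (DBL_FRACBITS = 52, FP_LG = 19): the high word of the fraction holds
 * 20 bits and DBL_SHIFT = 1, so everything slides right by one bit and
 * the low bit of `j' ends up at the top of fp_mant[2].
 */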

/*
 * 128-bit extended -> fpn.
 */
int
fpu_qtof(struct fpn *fp, u_int i, u_int j, u_int k, u_int l)
{
	int exp;
	u_int frac, f0, f1, f2, f3;
#define	EXT_SHIFT	(-(EXT_FRACBITS - 3 * 32 - FP_LG))	/* left shift! */

	/*
	 * Note that ext and fpn do not quite `line up' here: shift the
	 * fraction left by EXT_SHIFT so that its top bit sits just below
	 * the implied-1 (FP_1) position.
	 */
	exp = (i >> (32 - 1 - EXT_EXPBITS)) & mask(EXT_EXPBITS);
	frac = i & mask(EXT_FRACBITS - 3 * 32);
	f0 = (frac << EXT_SHIFT) | (j >> (32 - EXT_SHIFT));
	f1 = (j << EXT_SHIFT) | (k >> (32 - EXT_SHIFT));
	f2 = (k << EXT_SHIFT) | (l >> (32 - EXT_SHIFT));
	f3 = l << EXT_SHIFT;
	frac |= j | k | l;
	FP_TOF(exp, EXT_EXP_BIAS, frac, f0, f1, f2, f3);
}
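
/*
 * With the IEEE quad layout assumed here (EXT_FRACBITS = 112 in
 * <machine/ieee.h>) and FP_LG = 19, the high fraction word holds 16 bits
 * and EXT_SHIFT = 3, so each word is shifted up by three bits and topped
 * up from the word below it.
 */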

/*
 * Explode the contents of a register / regpair / regquad.
 * If the input is a signalling NaN, an NV (invalid) exception
 * will be set.  (Note that nothing but NV can occur until ALU
 * operations are performed.)
 */
void
fpu_explode(struct fpemu *fe, struct fpn *fp, int type, int reg)
{
	u_int s, *space;
	u_int64_t l, *xspace;

	xspace = (u_int64_t *)&fe->fe_fpstate->fpreg[reg];
	l = xspace[0];
	space = (u_int *)&fe->fe_fpstate->fpreg[reg];
	s = space[0];
	fp->fp_sign = s >> 31;
	fp->fp_sticky = 0;
	switch (type) {

	case FTYPE_LNG:
		s = fpu_xtof(fp, l);
		break;

	case FTYPE_INT:
		s = fpu_itof(fp, space[1]);
		break;

	case FTYPE_SNG:
		s = fpu_stof(fp, s);
		break;

	case FTYPE_DBL:
		s = fpu_dtof(fp, s, space[1]);
		break;

	case FTYPE_EXT:
		s = fpu_qtof(fp, s, space[1], space[2], space[3]);
		break;

	default:
		panic("fpu_explode");
	}

	if (s == FPC_QNAN && (fp->fp_mant[0] & FP_QUIETBIT) == 0) {
		/*
		 * Input is a signalling NaN.  All operations that return
		 * an input NaN operand put it through a ``NaN conversion'',
		 * which basically just means ``turn on the quiet bit''.
		 * We do this here so that all NaNs internally look quiet
		 * (we can tell signalling ones by their class).
		 */
		fp->fp_mant[0] |= FP_QUIETBIT;
		fe->fe_cx = FPSCR_VXSNAN;	/* assert invalid operand */
		s = FPC_SNAN;
	}
	fp->fp_class = s;
	DPRINTF(FPE_REG, ("fpu_explode: %%%c%d => ", (type == FTYPE_LNG) ? 'x' :
		((type == FTYPE_INT) ? 'i' :
		((type == FTYPE_SNG) ? 's' :
		((type == FTYPE_DBL) ? 'd' :
		((type == FTYPE_EXT) ? 'q' : '?')))),
		reg));
	DUMPFPN(FPE_REG, fp);
	DPRINTF(FPE_REG, ("\n"));
}
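
/*
 * Usage sketch (informal; the real call sequence lives in the instruction
 * dispatch code and may differ): a caller typically explodes the source
 * operands into fe->fe_f1 and fe->fe_f2, runs an ALU routine such as
 * fpu_add(), and packs the result back with fpu_implode(), roughly:
 *
 *	fpu_explode(fe, &fe->fe_f1, FTYPE_DBL, ra);
 *	fpu_explode(fe, &fe->fe_f2, FTYPE_DBL, rb);
 *	fp = fpu_add(fe);
 *	fpu_implode(fe, fp, FTYPE_DBL, ...);	/* pack into target reg */
 *
 * where ra and rb stand for the decoded register fields; fe_f1/fe_f2,
 * fpu_add() and fpu_implode() are declared in fpu_emu.h / fpu_extern.h,
 * but the argument lists above are illustrative only.
 */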