/*	$NetBSD: fpu_mul.c,v 1.2 2003/07/15 02:54:43 lukem Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_mul.c	8.1 (Berkeley) 6/11/93
 */

/*
 * Perform an FPU multiply (return x * y).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fpu_mul.c,v 1.2 2003/07/15 02:54:43 lukem Exp $");

#include <sys/types.h>
#if defined(DIAGNOSTIC)||defined(DEBUG)
#include <sys/systm.h>
#endif

#include <machine/reg.h>
#include <machine/fpu.h>

#include <powerpc/fpu/fpu_arith.h>
#include <powerpc/fpu/fpu_emu.h>

/*
 * The multiplication algorithm for normal numbers is as follows:
 *
 * The fraction of the product is built in the usual stepwise fashion.
 * Each step consists of shifting the accumulator right one bit
 * (maintaining any guard bits) and, if the next bit in y is set,
 * adding the multiplicand (x) to the accumulator.  Then, in any case,
 * we advance one bit leftward in y.  Algorithmically:
 *
 *	A = 0;
 *	for (bit = 0; bit < FP_NMANT; bit++) {
 *		sticky |= A & 1, A >>= 1;
 *		if (Y & (1 << bit))
 *			A += X;
 *	}
 *
 * (X and Y here represent the mantissas of x and y respectively.)
 * The resultant accumulator (A) is the product's mantissa.  It may
 * be as large as 11.11111... in binary and hence may need to be
 * shifted right, but at most one bit.
 *
 * Since we do not have efficient multiword arithmetic, we code the
 * accumulator as four separate words, just like any other mantissa.
 * We use local variables in the hope that this is faster than memory.
 * We keep x->fp_mant in locals for the same reason.
 *
 * In the algorithm above, the bits in y are inspected one at a time.
 * We will pick them up 32 at a time and then deal with those 32, one
 * at a time.  Note, however, that we know several things about y:
 *
 *    - the guard and round bits at the bottom are sure to be zero;
 *
 *    - often many low bits are zero (y is often from a single or double
 *	precision source);
 *
 *    - bit FP_NMANT-1 is set, and FP_1*2 fits in a word.
 *
 * We can also test for 32-zero-bits swiftly.  In this case, the center
 * part of the loop---setting sticky, shifting A, and not adding---will
 * run 32 times without adding X to A.  We can do a 32-bit shift faster
 * by simply moving words.  Since zeros are common, we optimize this case.
 * Furthermore, since A is initially zero, we can omit the shift as well
 * until we reach a nonzero word.
 */
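
/*
 * For illustration only: a minimal, self-contained user-space model of
 * the shift-and-add loop described above, using one 32-bit mantissa
 * word per operand and a 64-bit accumulator instead of the four-word
 * arrays manipulated below.  The function and variable names here are
 * made up for the example and are not part of the emulator; only the
 * technique matches the code that follows.
 */
#if 0	/* illustrative sketch, not compiled */
#include <stdint.h>

static uint64_t
mul_sketch(uint32_t x, uint32_t y, int *stickyp)
{
	uint64_t a = 0;		/* the accumulator A */
	int sticky = 0;
	uint32_t bit;

	/* x and y are 1.xxx fractions with the binary point after bit 31 */
	for (bit = 1; bit != 0; bit <<= 1) {
		sticky |= a & 1;	/* remember any bit shifted out */
		a >>= 1;
		if (y & bit)
			a += x;		/* A += X */
	}
	/*
	 * a now holds roughly (x * y) >> 31, a 1.xxx or 1x.xxx value;
	 * the caller would shift right once more (folding the lost bit
	 * into sticky) if it is >= 2.0, bumping the exponent.
	 */
	*stickyp = sticky;
	return (a);
}
#endif
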
struct fpn *
fpu_mul(struct fpemu *fe)
{
	struct fpn *x = &fe->fe_f1, *y = &fe->fe_f2;
	u_int a3, a2, a1, a0, x3, x2, x1, x0, bit, m;
	int sticky;
	FPU_DECL_CARRY;

	/*
	 * Put the `heavier' operand on the right (see fpu_emu.h).
	 * Then we will have one of the following cases, taken in the
	 * following order:
	 *
	 *  - y = NaN.  Implied: if only one is a signalling NaN, y is.
	 *	The result is y.
	 *  - y = Inf.  Implied: x != NaN (is 0, number, or Inf: the NaN
	 *    case was taken care of earlier).
	 *	If x = 0, the result is NaN.  Otherwise the result
	 *	is y, with its sign reversed if x is negative.
	 *  - x = 0.  Implied: y is 0 or number.
	 *	The result is 0 (with XORed sign as usual).
	 *  - other.  Implied: both x and y are numbers.
	 *	The result is x * y (XOR sign, multiply bits, add exponents).
	 */
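	/*
	 * Concretely (illustrative values only): Inf * -2.0 yields -Inf;
	 * Inf * 0 has no meaningful value, so it sets VXIMZ and yields a
	 * NaN; 0 * -3.0 yields -0.
	 */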
	DPRINTF(FPE_REG, ("fpu_mul:\n"));
	DUMPFPN(FPE_REG, x);
	DUMPFPN(FPE_REG, y);
	DPRINTF(FPE_REG, ("=>\n"));

	ORDER(x, y);
	if (ISNAN(y)) {
		y->fp_sign ^= x->fp_sign;
		fe->fe_cx |= FPSCR_VXSNAN;
		DUMPFPN(FPE_REG, y);
		return (y);
	}
	if (ISINF(y)) {
		if (ISZERO(x)) {
			fe->fe_cx |= FPSCR_VXIMZ;
			return (fpu_newnan(fe));
		}
		y->fp_sign ^= x->fp_sign;
		DUMPFPN(FPE_REG, y);
		return (y);
	}
	if (ISZERO(x)) {
		x->fp_sign ^= y->fp_sign;
		DUMPFPN(FPE_REG, x);
		return (x);
	}

	/*
	 * Setup.  In the code below, the mask `m' will hold the current
	 * mantissa word from y.  The variable `bit' denotes the bit
	 * within m.  We also define some macros to deal with everything.
	 */
	x3 = x->fp_mant[3];
	x2 = x->fp_mant[2];
	x1 = x->fp_mant[1];
	x0 = x->fp_mant[0];
	sticky = a3 = a2 = a1 = a0 = 0;

#define	ADD	/* A += X */ \
	FPU_ADDS(a3, a3, x3); \
	FPU_ADDCS(a2, a2, x2); \
	FPU_ADDCS(a1, a1, x1); \
	FPU_ADDC(a0, a0, x0)

#define	SHR1	/* A >>= 1, with sticky */ \
	sticky |= a3 & 1, a3 = (a3 >> 1) | (a2 << 31), \
	a2 = (a2 >> 1) | (a1 << 31), a1 = (a1 >> 1) | (a0 << 31), a0 >>= 1

#define	SHR32	/* A >>= 32, with sticky */ \
	sticky |= a3, a3 = a2, a2 = a1, a1 = a0, a0 = 0

#define	STEP	/* each 1-bit step of the multiplication */ \
	SHR1; if (bit & m) { ADD; }; bit <<= 1

	/*
	 * We are ready to begin.  The multiply loop runs once for each
	 * of the four 32-bit words.  Some words, however, are special.
	 * As noted above, the low order bits of Y are often zero.  Even
	 * if not, the first loop can certainly skip the guard bits.
	 * The last word of y has its highest 1-bit in position FP_NMANT-1,
	 * so we stop the loop when we move past that bit.
	 */
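	/*
	 * For example, a y that came from a single or double precision
	 * operand has far fewer significant bits than FP_NMANT, so its
	 * low mantissa word(s) are zero: those passes below reduce to a
	 * plain SHR32, or to nothing at all while A is still zero.
	 */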
	if ((m = y->fp_mant[3]) == 0) {
		/* SHR32; */			/* unneeded since A==0 */
	} else {
		bit = 1 << FP_NG;
		do {
			STEP;
		} while (bit != 0);
	}
	if ((m = y->fp_mant[2]) == 0) {
		SHR32;
	} else {
		bit = 1;
		do {
			STEP;
		} while (bit != 0);
	}
	if ((m = y->fp_mant[1]) == 0) {
		SHR32;
	} else {
		bit = 1;
		do {
			STEP;
		} while (bit != 0);
	}
	m = y->fp_mant[0];		/* definitely != 0 */
	bit = 1;
	do {
		STEP;
	} while (bit <= m);

	/*
	 * Done with mantissa calculation.  Get exponent and handle
	 * 11.111...1 case, then put result in place.  We reuse x since
	 * it already has the right class (FP_NUM).
	 */
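	/*
	 * (For instance, 1.1 * 1.1 in binary is 10.01: the accumulator
	 * ends up >= FP_2 and must be shifted right one bit, with the
	 * exponent bumped to compensate.)
	 */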
	m = x->fp_exp + y->fp_exp;
	if (a0 >= FP_2) {
		SHR1;
		m++;
	}
	x->fp_sign ^= y->fp_sign;
	x->fp_exp = m;
	x->fp_sticky = sticky;
	x->fp_mant[3] = a3;
	x->fp_mant[2] = a2;
	x->fp_mant[1] = a1;
	x->fp_mant[0] = a0;

	DUMPFPN(FPE_REG, x);
	return (x);
}