/*	$NetBSD: fpu_mul.c,v 1.9 2016/12/06 06:41:14 isaki Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_mul.c	8.1 (Berkeley) 6/11/93
 */

/*
 * Perform an FPU multiply (return x * y).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fpu_mul.c,v 1.9 2016/12/06 06:41:14 isaki Exp $");

#include <sys/types.h>

#include <machine/reg.h>

#include "fpu_arith.h"
#include "fpu_emulate.h"

/*
 * The multiplication algorithm for normal numbers is as follows:
 *
 * The fraction of the product is built in the usual stepwise fashion.
 * Each step consists of shifting the accumulator right one bit
 * (maintaining any guard bits) and, if the next bit in y is set,
 * adding the multiplicand (x) to the accumulator.  Then, in any case,
 * we advance one bit leftward in y.  Algorithmically:
 *
 *	A = 0;
 *	for (bit = 0; bit < FP_NMANT; bit++) {
 *		sticky |= A & 1, A >>= 1;
 *		if (Y & (1 << bit))
 *			A += X;
 *	}
 *
 * (X and Y here represent the mantissas of x and y respectively.)
 * The resultant accumulator (A) is the product's mantissa.  It may
 * be as large as 11.11111... in binary and hence may need to be
 * shifted right, but at most one bit.
 *
 * Since we do not have efficient multiword arithmetic, we code the
 * accumulator as three separate words, just like any other mantissa.
 *
 * In the algorithm above, the bits in y are inspected one at a time.
 * We will pick them up 32 at a time and then deal with those 32, one
 * at a time.  Note, however, that we know several things about y:
 *
 *	- the guard and round bits at the bottom are sure to be zero;
 *
 *	- often many low bits are zero (y is often from a single or double
 *	  precision source);
 *
 *	- bit FP_NMANT-1 is set, and FP_1*2 fits in a word.
 *
 * We can also test for 32-zero-bits swiftly.  In this case, the center
 * part of the loop---setting sticky, shifting A, and not adding---will
 * run 32 times without adding X to A.  We can do a 32-bit shift faster
 * by simply moving words.  Since zeros are common, we optimize this case.
 * Furthermore, since A is initially zero, we can omit the shift as well
 * until we reach a nonzero word.
 */
struct fpn *
fpu_mul(struct fpemu *fe)
{
	struct fpn *x = &fe->fe_f1, *y = &fe->fe_f2;
	uint32_t a2, a1, a0, x2, x1, x0, bit, m;
	int sticky;
	FPU_DECL_CARRY

	/*
	 * Put the `heavier' operand on the right (see fpu_emu.h).
	 * Then we will have one of the following cases, taken in the
	 * following order:
	 *
	 * - y = NaN.  Implied: if only one is a signalling NaN, y is.
	 *	The result is y.
	 * - y = Inf.  Implied: x != NaN (is 0, number, or Inf: the NaN
	 *	case was taken care of earlier).
	 *	If x = 0, the result is NaN.  Otherwise the result
	 *	is y, with its sign reversed if x is negative.
	 * - x = 0.  Implied: y is 0 or number.
	 *	The result is 0 (with XORed sign as usual).
	 * - other.  Implied: both x and y are numbers.
	 *	The result is x * y (XOR sign, multiply bits, add exponents).
	 */
	ORDER(x, y);
	if (ISNAN(y)) {
		return (y);
	}
	if (ISINF(y)) {
		if (ISZERO(x))
			return (fpu_newnan(fe));
		y->fp_sign ^= x->fp_sign;
		return (y);
	}
	if (ISZERO(x)) {
		x->fp_sign ^= y->fp_sign;
		return (x);
	}

	/*
	 * Setup.  In the code below, the mask `m' will hold the current
	 * mantissa word from y.  The variable `bit' denotes the bit
	 * within m.  We also define some macros to deal with everything.
	 */
	x2 = x->fp_mant[2];
	x1 = x->fp_mant[1];
	x0 = x->fp_mant[0];
	sticky = a2 = a1 = a0 = 0;

#define	ADD	/* A += X */ \
	FPU_ADDS(a2, a2, x2); \
	FPU_ADDCS(a1, a1, x1); \
	FPU_ADDC(a0, a0, x0)

#define	SHR1	/* A >>= 1, with sticky */ \
	sticky |= a2 & 1, \
	a2 = (a2 >> 1) | (a1 << 31), a1 = (a1 >> 1) | (a0 << 31), a0 >>= 1

#define	SHR32	/* A >>= 32, with sticky */ \
	sticky |= a2, a2 = a1, a1 = a0, a0 = 0

#define	STEP	/* each 1-bit step of the multiplication */ \
	SHR1; if (bit & m) { ADD; }; bit <<= 1

	/*
	 * We are ready to begin.  The multiply loop runs once for each
	 * of the three 32-bit words.  Some words, however, are special.
	 * As noted above, the low order bits of Y are often zero.  Even
	 * if not, the first loop can certainly skip the guard bits.
	 * The last word of y has its highest 1-bit in position FP_NMANT-1,
	 * so we stop the loop when we move past that bit.
	 */
	if ((m = y->fp_mant[2]) == 0) {
		/* SHR32; */			/* unneeded since A==0 */
	} else {
		bit = 1 << FP_NG;
		do {
			STEP;
		} while (bit != 0);
	}
	if ((m = y->fp_mant[1]) == 0) {
		SHR32;
	} else {
		bit = 1;
		do {
			STEP;
		} while (bit != 0);
	}
	m = y->fp_mant[0];		/* definitely != 0 */
	bit = 1;
	do {
		STEP;
	} while (bit <= m);

	/*
	 * Done with mantissa calculation.  Get exponent and handle
	 * 11.111...1 case, then put result in place.  We reuse x since
	 * it already has the right class (FP_NUM).
	 */
	m = x->fp_exp + y->fp_exp;
	if (a0 >= FP_2) {
		SHR1;
		m++;
	}
	x->fp_sign ^= y->fp_sign;
	x->fp_exp = m;
	x->fp_sticky = sticky;
	x->fp_mant[2] = a2;
	x->fp_mant[1] = a1;
	x->fp_mant[0] = a0;
	return (x);
}
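
/*
 * Illustrative sketch only, not part of the emulator: a host-buildable,
 * single-word version of the shift-and-add loop described in the comment
 * at the top of this file, using an 8-bit example mantissa so the result
 * fits in one uint32_t instead of the three-word accumulator above.  The
 * guard macro FPU_MUL_EXAMPLE and the names EX_NMANT and example_mul()
 * are hypothetical, used only for this demonstration, and are never
 * defined or built by default.
 */
#ifdef FPU_MUL_EXAMPLE
#include <stdio.h>
#include <stdint.h>

#define	EX_NMANT	8	/* example mantissa width; bit 7 is the 1. bit */

static uint32_t
example_mul(uint32_t x, uint32_t y, uint32_t *stickyp)
{
	uint32_t a = 0, sticky = 0;
	int bit;

	/* Same scheme as the comment above: shift A right, add X on 1-bits. */
	for (bit = 0; bit < EX_NMANT; bit++) {
		sticky |= a & 1;	/* remember any bit shifted out */
		a >>= 1;
		if (y & (1U << bit))
			a += x;		/* add the multiplicand */
	}
	*stickyp = sticky;
	return a;	/* == (x * y) >> (EX_NMANT - 1); may still need one SHR1 */
}

int
main(void)
{
	uint32_t sticky;
	uint32_t a = example_mul(0xc0, 0xa0, &sticky);	/* 1.5 * 1.25 */

	/* Prints a = 0xf0 (i.e. 1.875), sticky = 0. */
	printf("a = 0x%x, sticky = %u\n", (unsigned)a, (unsigned)sticky);
	return 0;
}
#endif /* FPU_MUL_EXAMPLE */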