Home | History | Annotate | Line # | Download | only in generic
      1      1.1  mrg /* mpn_addmul_1 -- multiply the N long limb vector pointed to by UP by VL,
      2      1.1  mrg    add the N least significant limbs of the product to the limb vector
      3      1.1  mrg    pointed to by RP.  Return the most significant limb of the product,
      4      1.1  mrg    adjusted for carry-out from the addition.
      5      1.1  mrg 
      6  1.1.1.3  mrg Copyright 1992-1994, 1996, 2000, 2002, 2004, 2016 Free Software Foundation,
      7  1.1.1.3  mrg Inc.
      8      1.1  mrg 
      9      1.1  mrg This file is part of the GNU MP Library.
     10      1.1  mrg 
     11      1.1  mrg The GNU MP Library is free software; you can redistribute it and/or modify
     12  1.1.1.2  mrg it under the terms of either:
     13  1.1.1.2  mrg 
     14  1.1.1.2  mrg   * the GNU Lesser General Public License as published by the Free
     15  1.1.1.2  mrg     Software Foundation; either version 3 of the License, or (at your
     16  1.1.1.2  mrg     option) any later version.
     17  1.1.1.2  mrg 
     18  1.1.1.2  mrg or
     19  1.1.1.2  mrg 
     20  1.1.1.2  mrg   * the GNU General Public License as published by the Free Software
     21  1.1.1.2  mrg     Foundation; either version 2 of the License, or (at your option) any
     22  1.1.1.2  mrg     later version.
     23  1.1.1.2  mrg 
     24  1.1.1.2  mrg or both in parallel, as here.
     25      1.1  mrg 
     26      1.1  mrg The GNU MP Library is distributed in the hope that it will be useful, but
     27      1.1  mrg WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
     28  1.1.1.2  mrg or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
     29  1.1.1.2  mrg for more details.
     30      1.1  mrg 
     31  1.1.1.2  mrg You should have received copies of the GNU General Public License and the
     32  1.1.1.2  mrg GNU Lesser General Public License along with the GNU MP Library.  If not,
     33  1.1.1.2  mrg see https://www.gnu.org/licenses/.  */
     34      1.1  mrg 
     35      1.1  mrg #include "gmp-impl.h"
     36      1.1  mrg #include "longlong.h"
     37      1.1  mrg 
     38      1.1  mrg 
     39      1.1  mrg #if GMP_NAIL_BITS == 0
     40      1.1  mrg 
     41      1.1  mrg mp_limb_t
     42  1.1.1.3  mrg mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t v0)
     43      1.1  mrg {
     44  1.1.1.3  mrg   mp_limb_t u0, crec, c, p1, p0, r0;
     45      1.1  mrg 
     46      1.1  mrg   ASSERT (n >= 1);
     47      1.1  mrg   ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
     48      1.1  mrg 
     49  1.1.1.3  mrg   crec = 0;
     50      1.1  mrg   do
     51      1.1  mrg     {
     52  1.1.1.3  mrg       u0 = *up++;
     53  1.1.1.3  mrg       umul_ppmm (p1, p0, u0, v0);
     54      1.1  mrg 
     55  1.1.1.3  mrg       r0 = *rp;
     56      1.1  mrg 
     57  1.1.1.3  mrg       p0 = r0 + p0;
     58  1.1.1.3  mrg       c = r0 > p0;
     59  1.1.1.3  mrg 
     60  1.1.1.3  mrg       p1 = p1 + c;
     61  1.1.1.3  mrg 
     62  1.1.1.3  mrg       r0 = p0 + crec;		/* cycle 0, 3, ... */
     63  1.1.1.3  mrg       c = p0 > r0;		/* cycle 1, 4, ... */
     64  1.1.1.3  mrg 
     65  1.1.1.3  mrg       crec = p1 + c;		/* cycle 2, 5, ... */
     66  1.1.1.3  mrg 
     67  1.1.1.3  mrg       *rp++ = r0;
     68      1.1  mrg     }
     69      1.1  mrg   while (--n != 0);
     70      1.1  mrg 
     71  1.1.1.3  mrg   return crec;
     72      1.1  mrg }
     73      1.1  mrg 
     74      1.1  mrg #endif
     75      1.1  mrg 
     76      1.1  mrg #if GMP_NAIL_BITS == 1
     77      1.1  mrg 
mp_limb_t
mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t v0)
{
  /* GMP_NAIL_BITS == 1 variant (see the #if guard above): limb values use
     only the low GMP_NUMB_BITS bits; the top (nail) bit is kept zero.  */
  mp_limb_t shifted_v0, u0, r0, p0, p1, prev_p1, crec, xl, c1, c2, c3;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
  ASSERT_MPN (rp, n);
  ASSERT_MPN (up, n);
  ASSERT_LIMB (v0);

  /* Pre-shift the multiplier so the nail bits of the double-limb product
     land at the bottom of p0, where a single right shift removes them.  */
  shifted_v0 = v0 << GMP_NAIL_BITS;
  crec = 0;	/* carry from the previous limb position */
  prev_p1 = 0;	/* high product word of the previous limb, added here
		   one position late */
  do
    {
      u0 = *up++;
      r0 = *rp;
      umul_ppmm (p1, p0, u0, shifted_v0);
      p0 >>= GMP_NAIL_BITS;	/* discard nail bits introduced by the shift */
      /* Accumulate prev_p1 + p0 + r0 + crec with each carry-out captured
	 separately; ADDC_LIMB yields a numb-sized sum and a 0/1 carry.  */
      ADDC_LIMB (c1, xl, prev_p1, p0);
      ADDC_LIMB (c2, xl, xl, r0);
      ADDC_LIMB (c3, xl, xl, crec);
      /* Sum of the three carries feeds the next limb position.  NOTE(review):
	 this presumes the total always fits a limb when GMP_NAIL_BITS == 1 —
	 consistent with the #if guard, but verify against gmp-impl.h.  */
      crec = c1 + c2 + c3;
      *rp++ = xl;
      prev_p1 = p1;
    }
  while (--n != 0);

  /* Pending high word plus final carry: most significant limb returned.  */
  return prev_p1 + crec;
}
    109      1.1  mrg 
    110      1.1  mrg #endif
    111      1.1  mrg 
    112      1.1  mrg #if GMP_NAIL_BITS >= 2
    113      1.1  mrg 
    114      1.1  mrg mp_limb_t
    115  1.1.1.3  mrg mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t v0)
    116      1.1  mrg {
    117  1.1.1.3  mrg   mp_limb_t shifted_v0, u0, r0, p0, p1, prev_p1, xw, crec, xl;
    118      1.1  mrg 
    119      1.1  mrg   ASSERT (n >= 1);
    120      1.1  mrg   ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
    121      1.1  mrg   ASSERT_MPN (rp, n);
    122      1.1  mrg   ASSERT_MPN (up, n);
    123  1.1.1.3  mrg   ASSERT_LIMB (v0);
    124      1.1  mrg 
    125  1.1.1.3  mrg   shifted_v0 = v0 << GMP_NAIL_BITS;
    126  1.1.1.3  mrg   crec = 0;
    127  1.1.1.3  mrg   prev_p1 = 0;
    128      1.1  mrg   do
    129      1.1  mrg     {
    130  1.1.1.3  mrg       u0 = *up++;
    131  1.1.1.3  mrg       r0 = *rp;
    132  1.1.1.3  mrg       umul_ppmm (p1, p0, u0, shifted_v0);
    133  1.1.1.3  mrg       p0 >>= GMP_NAIL_BITS;
    134  1.1.1.3  mrg       xw = prev_p1 + p0 + r0 + crec;
    135  1.1.1.3  mrg       crec = xw >> GMP_NUMB_BITS;
    136      1.1  mrg       xl = xw & GMP_NUMB_MASK;
    137      1.1  mrg       *rp++ = xl;
    138  1.1.1.3  mrg       prev_p1 = p1;
    139      1.1  mrg     }
    140      1.1  mrg   while (--n != 0);
    141      1.1  mrg 
    142  1.1.1.3  mrg   return prev_p1 + crec;
    143      1.1  mrg }
    144      1.1  mrg 
    145      1.1  mrg #endif
    146