Home | History | Annotate | Line # | Download | only in generic
      1  1.1  mrg /* mpn_sec_invert
      2  1.1  mrg 
      3  1.1  mrg    Contributed to the GNU project by Niels Möller
      4  1.1  mrg 
      5  1.1  mrg Copyright 2013 Free Software Foundation, Inc.
      6  1.1  mrg 
      7  1.1  mrg This file is part of the GNU MP Library.
      8  1.1  mrg 
      9  1.1  mrg The GNU MP Library is free software; you can redistribute it and/or modify
     10  1.1  mrg it under the terms of either:
     11  1.1  mrg 
     12  1.1  mrg   * the GNU Lesser General Public License as published by the Free
     13  1.1  mrg     Software Foundation; either version 3 of the License, or (at your
     14  1.1  mrg     option) any later version.
     15  1.1  mrg 
     16  1.1  mrg or
     17  1.1  mrg 
     18  1.1  mrg   * the GNU General Public License as published by the Free Software
     19  1.1  mrg     Foundation; either version 2 of the License, or (at your option) any
     20  1.1  mrg     later version.
     21  1.1  mrg 
     22  1.1  mrg or both in parallel, as here.
     23  1.1  mrg 
     24  1.1  mrg The GNU MP Library is distributed in the hope that it will be useful, but
     25  1.1  mrg WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
     26  1.1  mrg or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
     27  1.1  mrg for more details.
     28  1.1  mrg 
     29  1.1  mrg You should have received copies of the GNU General Public License and the
     30  1.1  mrg GNU Lesser General Public License along with the GNU MP Library.  If not,
     31  1.1  mrg see https://www.gnu.org/licenses/.  */
     32  1.1  mrg 
     33  1.1  mrg #include "gmp-impl.h"
     34  1.1  mrg 
#if 0
/* Currently unused. Should be resurrected once mpn_cnd_neg is
   advertised. */
/* Scratch limbs required by mpn_cnd_neg for size-n operands: one
   n-limb area, used to hold the left-shifted value 2*a. */
static mp_size_t
mpn_cnd_neg_itch (mp_size_t n)
{
  return n;
}
#endif
     44  1.1  mrg 
/* FIXME: Ought to return carry */
/* If cnd is non-zero, store -{ap,n} (mod B^n) at rp; otherwise copy
   {ap,n} to rp.  Both paths execute the same two mpn calls, so the
   timing is independent of cnd.  Uses the identity -a = a - 2a:
   scratch receives the low n limbs of 2*a, and the conditional
   subtraction then yields either a - 2a = -a (cnd set) or a
   unchanged.  Needs n limbs of scratch.  The limb shifted out by
   mpn_lshift and the borrow from mpn_cnd_sub_n are discarded (see
   the FIXME above). */
static void
mpn_cnd_neg (int cnd, mp_limb_t *rp, const mp_limb_t *ap, mp_size_t n,
	     mp_ptr scratch)
{
  mpn_lshift (scratch, ap, n, 1);
  mpn_cnd_sub_n (cnd, rp, ap, scratch, n);
}
     53  1.1  mrg 
     54  1.1  mrg static int
     55  1.1  mrg mpn_sec_eq_ui (mp_srcptr ap, mp_size_t n, mp_limb_t b)
     56  1.1  mrg {
     57  1.1  mrg   mp_limb_t d;
     58  1.1  mrg   ASSERT (n > 0);
     59  1.1  mrg 
     60  1.1  mrg   d = ap[0] ^ b;
     61  1.1  mrg 
     62  1.1  mrg   while (--n > 0)
     63  1.1  mrg     d |= ap[n];
     64  1.1  mrg 
     65  1.1  mrg   return d == 0;
     66  1.1  mrg }
     67  1.1  mrg 
     68  1.1  mrg mp_size_t
     69  1.1  mrg mpn_sec_invert_itch (mp_size_t n)
     70  1.1  mrg {
     71  1.1  mrg   return 4*n;
     72  1.1  mrg }
     73  1.1  mrg 
/* Compute V <-- A^{-1} (mod M), in data-independent time. M must be
   odd. Returns 1 on success, and 0 on failure (i.e., if gcd (A, m) !=
   1). Inputs and outputs of size n, and no overlap allowed. The {ap,
   n} area is destroyed. For arbitrary inputs, bit_size should be
   2*n*GMP_NUMB_BITS, but if A or M are known to be smaller, e.g., if
   M = 2^521 - 1 and A < M, bit_size can be any bound on the sum of
   the bit sizes of A and M. */
int
mpn_sec_invert (mp_ptr vp, mp_ptr ap, mp_srcptr mp,
		mp_size_t n, mp_bitcnt_t bit_size,
		mp_ptr scratch)
{
  ASSERT (n > 0);
  ASSERT (bit_size > 0);
  ASSERT (mp[0] & 1);
  ASSERT (! MPN_OVERLAP_P (ap, n, vp, n));
  /* Layout of the 4n-limb scratch area: the first n limbs are
     general-purpose scratch (passed to mpn_cnd_neg and
     mpn_sec_add_1 below), followed by b, u, and (m+1)/2. */
#define bp (scratch + n)
#define up (scratch + 2*n)
#define m1hp (scratch + 3*n)

  /* Maintain

       a = u * orig_a (mod m)
       b = v * orig_a (mod m)

     and b odd at all times. Initially,

       a = a_orig, u = 1
       b = m,      v = 0
     */


  up[0] = 1;
  mpn_zero (up+1, n - 1);
  mpn_copyi (bp, mp, n);
  mpn_zero (vp, n);

  /* Precompute m1hp = (m+1)/2 = m/2 + 1, used below when halving u
     mod m.  Since m is odd, the right shift must shift out a one
     bit, and the increment cannot carry out. */
  ASSERT_CARRY (mpn_rshift (m1hp, mp, n, 1));
  ASSERT_NOCARRY (mpn_sec_add_1 (m1hp, m1hp, n, 1, scratch));

  while (bit_size-- > 0)
    {
      mp_limb_t odd, swap, cy;

      /* Always maintain b odd. The logic of the iteration is as
	 follows. For a, b:

	   odd = a & 1
	   a -= odd * b
	   if (underflow from a-b)
	     {
	       b += a, assigns old a
	       a = B^n-a
	     }

	   a /= 2

	 For u, v:

	   if (underflow from a - b)
	     swap u, v
	   u -= odd * v
	   if (underflow from u - v)
	     u += m

	   u /= 2
	   if (a one bit was shifted out)
	     u += (m+1)/2

	 As long as a > 0, the quantity

	   (bitsize of a) + (bitsize of b)

	 is reduced by at least one bit per iteration, hence after (bit_size of
	 orig_a) + (bit_size of m) - 1 iterations we surely have a = 0. Then b
	 = gcd(orig_a, m) and if b = 1 then also v = orig_a^{-1} (mod m).
      */

      ASSERT (bp[0] & 1);
      odd = ap[0] & 1;

      /* a -= odd * b; a borrow (swap) means old a < b.  Every step
	 below runs unconditionally via the cnd_* primitives, so the
	 instruction and memory-access sequence does not depend on
	 the data. */
      swap = mpn_cnd_sub_n (odd, ap, ap, bp, n);
      /* On underflow: b += (a - b mod B^n), i.e. b is assigned old a. */
      mpn_cnd_add_n (swap, bp, bp, ap, n);
      /* On underflow: a = B^n - a, giving the positive difference. */
      mpn_cnd_neg (swap, ap, ap, n, scratch);

      mpn_cnd_swap (swap, up, vp, n);
      cy = mpn_cnd_sub_n (odd, up, up, vp, n);
      /* If u - v underflowed, add back m; the carry then cancels
	 the borrow exactly. */
      cy -= mpn_cnd_add_n (cy, up, up, mp, n);
      ASSERT (cy == 0);

      /* a /= 2; a is even at this point, so no one bit is lost. */
      cy = mpn_rshift (ap, ap, n, 1);
      ASSERT (cy == 0);
      /* u /= 2 (mod m): shift right, and if a one bit was shifted
	 out, add (m+1)/2. */
      cy = mpn_rshift (up, up, n, 1);
      cy = mpn_cnd_add_n (cy, up, up, m1hp, n);
      ASSERT (cy == 0);
    }
  /* Should be all zeros, but check only extreme limbs */
  ASSERT ( (ap[0] | ap[n-1]) == 0);
  /* Check if indeed gcd == 1. */
  return mpn_sec_eq_ui (bp, n, 1);
#undef bp
#undef up
#undef m1hp
}
    178