/* mpn_invertappr and helper functions.  Compute I such that
   floor((B^{2n}-1)/U) - 1 <= I + B^n <= floor((B^{2n}-1)/U).

   Contributed to the GNU project by Marco Bodrato.

   The algorithm used here was inspired by ApproximateReciprocal from "Modern
   Computer Arithmetic", by Richard P. Brent and Paul Zimmermann.  Special
   thanks to Paul Zimmermann for his very valuable suggestions on all the
   theoretical aspects during the work on this code.

   THE FUNCTIONS IN THIS FILE ARE INTERNAL WITH MUTABLE INTERFACES.  IT IS
   ONLY SAFE TO REACH THEM THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS
   ALMOST GUARANTEED THAT THEY WILL CHANGE OR DISAPPEAR IN A FUTURE GMP
   RELEASE.

Copyright (C) 2007, 2009, 2010, 2012 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

/* FIXME: Remove NULL and TMP_*, as soon as all the callers properly
   allocate and pass the scratch to the function. */
#include <stdlib.h>		/* for NULL */

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"

/* FIXME: The iterative version splits the operand in two slightly unbalanced
   parts, the use of log_2 (or counting the bits) underestimate the maximum
   number of iterations. */

/* NPOWS bounds the number of Newton iteration sizes stored in the sizes[]
   array of mpn_ni_invertappr: each iteration roughly halves the size, so
   the limb-count of mp_size_t bounds the iteration count; subtracting
   LOG2C (INV_NEWTON_THRESHOLD) accounts for iterations absorbed by the
   basecase.  During tuning the threshold is variable, so no subtraction
   is possible and MAYBE_dcpi1_divappr must stay enabled.  */
#if TUNE_PROGRAM_BUILD
#define NPOWS \
 ((sizeof(mp_size_t) > 6 ? 48 : 8*sizeof(mp_size_t)))
#define MAYBE_dcpi1_divappr   1
#else
#define NPOWS \
 ((sizeof(mp_size_t) > 6 ? 48 : 8*sizeof(mp_size_t)) - LOG2C (INV_NEWTON_THRESHOLD))
/* Nonzero iff mpn_bc_invertappr may be called with a size large enough to
   reach the divide-and-conquer division range.  */
#define MAYBE_dcpi1_divappr \
  (INV_NEWTON_THRESHOLD < DC_DIVAPPR_Q_THRESHOLD)
#if (INV_NEWTON_THRESHOLD > INV_MULMOD_BNM1_THRESHOLD) && \
    (INV_APPR_THRESHOLD > INV_MULMOD_BNM1_THRESHOLD)
/* Newton iterations always start below INV_NEWTON_THRESHOLD, hence above
   this threshold the wraparound product is always profitable.  */
#undef  INV_MULMOD_BNM1_THRESHOLD
#define INV_MULMOD_BNM1_THRESHOLD 0 /* always when Newton */
#endif
#endif

/* All the three functions mpn{,_bc,_ni}_invertappr (ip, dp, n, scratch), take
   the strictly normalised value {dp,n} (i.e., most significant bit must be
   set) as an input, and compute {ip,n}: the approximate reciprocal of {dp,n}.

   Let e = mpn*_invertappr (ip, dp, n, scratch) be the returned value; the
   following conditions are satisfied by the output:
     0 <= e <= 1;
     {dp,n}*(B^n+{ip,n}) < B^{2n} <= {dp,n}*(B^n+{ip,n}+1+e) .
   I.e. e=0 means that the result {ip,n} equals the one given by mpn_invert.
	e=1 means that the result _may_ be one less than expected.

   The _bc version returns e=1 most of the time.
   The _ni version should return e=0 most of the time; only about 1% of
   possible random input should give e=1.

   When the strict result is needed, i.e., e=0 in the relation above:
     {dp,n}*(B^n+{ip,n}) < B^{2n} <= {dp,n}*(B^n+{ip,n}+1) ;
   the function mpn_invert (ip, dp, n, scratch) should be used instead.  */

/* Basecase approximate reciprocal: {ip,n} <- approx((B^{2n}-1)/{dp,n}) - B^n,
   computed by a plain division of B^{2n} - {dp,n}*B^n - 1 by {dp,n}.
   Returns e as described in the contract above (0 = exact mpn_invert
   result, 1 = possibly one less).
   Maximum scratch needed by this branch (at tp): 3*n + 2 */
static mp_limb_t
mpn_bc_invertappr (mp_ptr ip, mp_srcptr dp, mp_size_t n, mp_ptr tp)
{
  mp_ptr xp;

  ASSERT (n > 0);
  ASSERT (dp[n-1] & GMP_NUMB_HIGHBIT);	/* divisor strictly normalised */
  ASSERT (! MPN_OVERLAP_P (ip, n, dp, n));
  ASSERT (! MPN_OVERLAP_P (ip, n, tp, mpn_invertappr_itch(n)));
  ASSERT (! MPN_OVERLAP_P (dp, n, tp, mpn_invertappr_itch(n)));

  /* Compute a base value of r limbs. */
  if (n == 1)
    invert_limb (*ip, *dp);	/* single-limb reciprocal; exact, so e=0 */
  else {
    mp_size_t i;
    xp = tp + n + 2;				/* 2 * n limbs */

    /* Build the dividend: low n limbs all ones, high n limbs the
       one's complement of the divisor. */
    for (i = n - 1; i >= 0; i--)
      xp[i] = GMP_NUMB_MAX;
    mpn_com (xp + n, dp, n);

    /* Now xp contains B^2n - {dp,n}*B^n - 1 */

    /* FIXME: if mpn_*pi1_divappr_q handles n==2, use it! */
    if (n == 2) {
      mpn_divrem_2 (ip, 0, xp, 4, dp);	/* exact division, so e=0 */
    } else {
      gmp_pi1_t inv;
      invert_pi1 (inv, dp[n-1], dp[n-2]);
      if (! MAYBE_dcpi1_divappr
	  || BELOW_THRESHOLD (n, DC_DIVAPPR_Q_THRESHOLD))
	mpn_sbpi1_divappr_q (ip, xp, 2 * n, dp, n, inv.inv32);
      else
	mpn_dcpi1_divappr_q (ip, xp, 2 * n, dp, n, &inv);
      /* divappr may return a quotient up to one too large; compensate
	 by decrementing, and report the remaining uncertainty as e=1. */
      MPN_DECR_U(ip, n, 1);
      return 1;
    }
  }
  return 0;
}

/* mpn_ni_invertappr: computes the approximate reciprocal using Newton's
   iterations (at least one).

   Inspired by Algorithm "ApproximateReciprocal", published in "Modern
   Computer Arithmetic" by Richard P. Brent and Paul Zimmermann, algorithm
   3.5, page 121 in version 0.4 of the book.

   Some adaptations were introduced, to allow product mod B^m-1 and return
   the value e.

   USE_MUL_N = 1 (default) introduces a correction in such a way that "the
   value of B^{n+h}-T computed at step 8 cannot exceed B^n-1" (the book reads
   "2B^n-1").  This correction should not require to modify the proof.

   We use a wrapped product modulo B^m-1.  NOTE: is there any normalisation
   problem for the [0] class?  It shouldn't: we compute 2*|A*X_h - B^{n+h}| <
   B^m-1.  We may get [0] if and only if we get A*X_h = B^{n+h}.  This can
   happen only if A=B^{n}/2, but this implies X_h = B^{h}*2-1 i.e., A*X_h =
   B^{n+h} - A, then we get into the "negative" branch, where X_h is not
   incremented (because A < B^n).

   FIXME: the scratch for mulmod_bnm1 does not currently fit in the scratch,
   it is allocated apart.  */

#define USE_MUL_N 1

/* Newton-iteration approximate reciprocal; see the contract comment above
   for the meaning of the return value e and the normalisation requirement
   on {dp,n}.  Requires n > 2 (smaller sizes go through mpn_bc_invertappr).
   scratch must provide mpn_invertappr_itch(n) limbs; the mulmod_bnm1
   scratch is TMP-allocated separately (see FIXME above).  */
mp_limb_t
mpn_ni_invertappr (mp_ptr ip, mp_srcptr dp, mp_size_t n, mp_ptr scratch)
{
  mp_limb_t cy;
  mp_ptr xp;
  mp_size_t rn, mn;
  mp_size_t sizes[NPOWS], *sizp;
  mp_ptr tp;
  TMP_DECL;
#define rp scratch

  ASSERT (n > 2);
  ASSERT (dp[n-1] & GMP_NUMB_HIGHBIT);
  ASSERT (! MPN_OVERLAP_P (ip, n, dp, n));
  ASSERT (! MPN_OVERLAP_P (ip, n, scratch, mpn_invertappr_itch(n)));
  ASSERT (! MPN_OVERLAP_P (dp, n, scratch, mpn_invertappr_itch(n)));

  /* Compute the computation precisions from highest to lowest, leaving the
     base case size in 'rn'.  Each step maps rn -> floor(rn/2)+1, so the
     sizes[] stack holds the precision used at each Newton iteration. */
  sizp = sizes;
  rn = n;
  do {
    *sizp = rn;
    rn = ((rn) >> 1) + 1;
    sizp ++;
  } while (ABOVE_THRESHOLD (rn, INV_NEWTON_THRESHOLD));

  /* We search the inverse of 0.{dp,n}, we compute it as 1.{ip,n}.
     Advance both pointers past their operands so that the low rn limbs of
     the current precision are always addressed as {p - rn, rn}. */
  dp += n;
  ip += n;

  /* Compute a base value of rn limbs. */
  mpn_bc_invertappr (ip - rn, dp - rn, rn, scratch);

  TMP_MARK;

  /* tp is only needed (and only read) when the wraparound branch below can
     be taken, which is guarded by the same threshold test. */
  if (ABOVE_THRESHOLD (n, INV_MULMOD_BNM1_THRESHOLD))
    {
      mn = mpn_mulmod_bnm1_next_size (n + 1);
      tp = TMP_ALLOC_LIMBS (mpn_mulmod_bnm1_itch (mn, n, (n >> 1) + 1));
    }
  /* Use Newton's iterations to get the desired precision.*/

  /* define rp scratch; 2rn + 1 limbs <= 2(n>>1 + 1) + 1 <= n + 3 limbs */
  /* Maximum scratch needed by this branch <= 3*n + 2 */
  xp = scratch + n + 3;				/* n + rn limbs */
  while (1) {
    mp_limb_t method;

    n = *--sizp;	/* precision of this iteration; rn was the previous */
    /*
       v    n  v
       +----+--+
        ^ rn ^
    */

    /* Compute i_jd . */
    if (BELOW_THRESHOLD (n, INV_MULMOD_BNM1_THRESHOLD)
	|| ((mn = mpn_mulmod_bnm1_next_size (n + 1)) > (n + rn))) {
      /* FIXME: We do only need {xp,n+1}*/
      mpn_mul (xp, dp - n, n, ip - rn, rn);
      mpn_add_n (xp + rn, xp + rn, dp - n, n - rn + 1);
      method = 1; /* Remember we used (truncated) product */
      /* We computed cy.{xp,rn+n} <- 1.{ip,rn} * 0.{dp,n} */
    } else { /* Use B^n-1 wraparound */
      mpn_mulmod_bnm1 (xp, mn, dp - n, n, ip - rn, rn, tp);
      /* We computed {xp,mn} <- {ip,rn} * {dp,n} mod (B^mn-1) */
      /* We know that 2*|ip*dp + dp*B^rn - B^{rn+n}| < B^mn-1 */
      /* Add dp*B^rn mod (B^mn-1) */
      ASSERT (n >= mn - rn);
      xp[mn] = 1 + mpn_add_n (xp + rn, xp + rn, dp - n, mn - rn);
      cy = mpn_add_n (xp, xp, dp - (n - (mn - rn)), n - (mn - rn));
      MPN_INCR_U (xp + n - (mn - rn), mn + 1 - n + (mn - rn), cy);
      ASSERT (n + rn >= mn);
      /* Subtract B^{rn+n} */
      MPN_DECR_U (xp + rn + n - mn, 2*mn + 1 - rn - n, 1);
      /* Fold xp[mn] back in: renormalise the mod B^mn-1 representative. */
      if (xp[mn])
	MPN_INCR_U (xp, mn, xp[mn] - 1);
      else
	MPN_DECR_U (xp, mn, 1);
      method = 0; /* Remember we are working Mod B^m-1 */
    }

    if (xp[n] < 2) { /* "positive" residue class */
      cy = 1;
      /* Reduce the residue below {dp,n}, counting how many times the
	 current inverse was too large. */
      while (xp[n] || mpn_cmp (xp, dp - n, n)>0) {
	xp[n] -= mpn_sub_n (xp, xp, dp - n, n);
	cy ++;
      }
      MPN_DECR_U(ip - rn, rn, cy);
      ASSERT (cy <= 4); /* at most 3 cycles for the while above */
      ASSERT_NOCARRY (mpn_sub_n (xp, dp - n, xp, n));
      ASSERT (xp[n] == 0);
    } else { /* "negative" residue class */
      mpn_com (xp, xp, n + 1);
      /* method==1: truncated product, the complement is off by one. */
      MPN_INCR_U(xp, n + 1, method);
      ASSERT (xp[n] <= 1);
#if USE_MUL_N
      if (xp[n]) {
	MPN_INCR_U(ip - rn, rn, 1);
	ASSERT_CARRY (mpn_sub_n (xp, xp, dp - n, n));
      }
#endif
    }

    /* Compute x_ju_j. FIXME:We need {rp+rn,rn}, mulhi? */
#if USE_MUL_N
    mpn_mul_n (rp, xp + n - rn, ip - rn, rn);
#else
    rp[2*rn] = 0;
    mpn_mul (rp, xp + n - rn, rn + xp[n], ip - rn, rn);
#endif
    /* We need _only_ the carry from the next addition */
    /* Anyway 2rn-n <= 2... we don't need to optimise. */
    cy = mpn_add_n (rp + rn, rp + rn, xp + n - rn, 2*rn - n);
    cy = mpn_add_nc (ip - n, rp + 3*rn - n, xp + rn, n - rn, cy);
    MPN_INCR_U (ip - rn, rn, cy + (1-USE_MUL_N)*(rp[2*rn] + xp[n]));
    if (sizp == sizes) { /* Get out of the cycle */
      /* Check for possible carry propagation from below. */
      cy = rp[3*rn - n - 1] > GMP_NUMB_MAX - 7; /* Be conservative. */
      /* cy = mpn_add_1 (rp + rn, rp + rn, 2*rn - n, 4); */
      break;
    }
    rn = n;
  }
  TMP_FREE;

  return cy;	/* the value e of the contract: 0 exact, 1 maybe one less */
#undef rp
}

279 mp_limb_t
280 mpn_invertappr (mp_ptr ip, mp_srcptr dp, mp_size_t n, mp_ptr scratch)
281 {
282 mp_limb_t res;
283 TMP_DECL;
284
285 TMP_MARK;
286
287 if (scratch == NULL)
288 scratch = TMP_ALLOC_LIMBS (mpn_invertappr_itch (n));
289
290 ASSERT (n > 0);
291 ASSERT (dp[n-1] & GMP_NUMB_HIGHBIT);
292 ASSERT (! MPN_OVERLAP_P (ip, n, dp, n));
293 ASSERT (! MPN_OVERLAP_P (ip, n, scratch, mpn_invertappr_itch(n)));
294 ASSERT (! MPN_OVERLAP_P (dp, n, scratch, mpn_invertappr_itch(n)));
295
296 if (BELOW_THRESHOLD (n, INV_NEWTON_THRESHOLD))
297 res = mpn_bc_invertappr (ip, dp, n, scratch);
298 else
299 res = mpn_ni_invertappr (ip, dp, n, scratch);
300
301 TMP_FREE;
302 return res;
303 }
304