wide-int.cc revision 1.1.1.1.2.1

1 1.1 mrg /* Operations with very long integers.
2 1.1.1.1.2.1 pgoyette Copyright (C) 2012-2016 Free Software Foundation, Inc.
3 1.1 mrg Contributed by Kenneth Zadeck <zadeck (at) naturalbridge.com>
4 1.1 mrg
5 1.1 mrg This file is part of GCC.
6 1.1 mrg
7 1.1 mrg GCC is free software; you can redistribute it and/or modify it
8 1.1 mrg under the terms of the GNU General Public License as published by the
9 1.1 mrg Free Software Foundation; either version 3, or (at your option) any
10 1.1 mrg later version.
11 1.1 mrg
12 1.1 mrg GCC is distributed in the hope that it will be useful, but WITHOUT
13 1.1 mrg ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 1.1 mrg FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 1.1 mrg for more details.
16 1.1 mrg
17 1.1 mrg You should have received a copy of the GNU General Public License
18 1.1 mrg along with GCC; see the file COPYING3. If not see
19 1.1 mrg <http://www.gnu.org/licenses/>. */
20 1.1 mrg
21 1.1 mrg #include "config.h"
22 1.1 mrg #include "system.h"
23 1.1 mrg #include "coretypes.h"
24 1.1 mrg #include "tm.h"
25 1.1 mrg #include "tree.h"
26 1.1 mrg
27 1.1 mrg
28 1.1 mrg #define HOST_BITS_PER_HALF_WIDE_INT 32
29 1.1 mrg #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
30 1.1 mrg # define HOST_HALF_WIDE_INT long
31 1.1 mrg #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
32 1.1 mrg # define HOST_HALF_WIDE_INT int
33 1.1 mrg #else
34 1.1 mrg #error Please add support for HOST_HALF_WIDE_INT
35 1.1 mrg #endif
36 1.1 mrg
37 1.1 mrg #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
38 1.1 mrg /* Do not include longlong.h when compiler is clang-based. See PR61146. */
39 1.1 mrg #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
40 1.1 mrg typedef unsigned HOST_HALF_WIDE_INT UHWtype;
41 1.1 mrg typedef unsigned HOST_WIDE_INT UWtype;
42 1.1 mrg typedef unsigned int UQItype __attribute__ ((mode (QI)));
43 1.1 mrg typedef unsigned int USItype __attribute__ ((mode (SI)));
44 1.1 mrg typedef unsigned int UDItype __attribute__ ((mode (DI)));
45 1.1 mrg #if W_TYPE_SIZE == 32
46 1.1 mrg typedef unsigned int UDWtype __attribute__ ((mode (DI)));
47 1.1 mrg #else
48 1.1 mrg typedef unsigned int UDWtype __attribute__ ((mode (TI)));
49 1.1 mrg #endif
50 1.1 mrg #include "longlong.h"
51 1.1 mrg #endif
52 1.1 mrg
53 1.1 mrg static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
54 1.1 mrg
55 1.1 mrg /*
56 1.1 mrg * Internal utilities.
57 1.1 mrg */
58 1.1 mrg
59 1.1 mrg /* Quantities to deal with values that hold half of a wide int. Used
60 1.1 mrg in multiply and divide. */
61 1.1 mrg #define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
62 1.1 mrg
63 1.1 mrg #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
64 1.1 mrg #define BLOCKS_NEEDED(PREC) \
65 1.1 mrg (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
66 1.1 mrg #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
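/* For example, on a host where HOST_WIDE_INT has 64 bits,
   BLOCKS_NEEDED (1) == 1, BLOCKS_NEEDED (64) == 1, BLOCKS_NEEDED (65) == 2
   and BLOCKS_NEEDED (0) == 1, while SIGN_MASK (X) is -1 for any negative
   HOST_WIDE_INT X and 0 otherwise.  */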
67 1.1 mrg
68 1.1 mrg /* Return the value of VAL[I] if I < LEN, otherwise, return 0 or -1
69 1.1 mrg based on the top existing bit of VAL. */
70 1.1 mrg
71 1.1 mrg static unsigned HOST_WIDE_INT
72 1.1 mrg safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
73 1.1 mrg {
74 1.1 mrg return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
75 1.1 mrg }
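/* For example, safe_uhwi on { -1 } with LEN == 1 returns -1 for any
   I >= LEN (the sign is smeared upwards), while safe_uhwi on { 1 }
   returns 0 for I >= LEN.  */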
76 1.1 mrg
77 1.1 mrg /* Convert the integer in VAL to canonical form, returning its new length.
78 1.1 mrg LEN is the number of blocks currently in VAL and PRECISION is the number
79 1.1 mrg of bits in the integer it represents.
80 1.1 mrg
81 1.1 mrg This function only changes the representation, not the value. */
82 1.1 mrg static unsigned int
83 1.1 mrg canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
84 1.1 mrg {
85 1.1 mrg unsigned int blocks_needed = BLOCKS_NEEDED (precision);
86 1.1 mrg HOST_WIDE_INT top;
87 1.1 mrg int i;
88 1.1 mrg
89 1.1 mrg if (len > blocks_needed)
90 1.1 mrg len = blocks_needed;
91 1.1 mrg
92 1.1 mrg if (len == 1)
93 1.1 mrg return len;
94 1.1 mrg
95 1.1 mrg top = val[len - 1];
96 1.1 mrg if (len * HOST_BITS_PER_WIDE_INT > precision)
97 1.1 mrg val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
98 1.1 mrg if (top != 0 && top != (HOST_WIDE_INT)-1)
99 1.1 mrg return len;
100 1.1 mrg
101 1.1 mrg /* At this point we know that the top is either 0 or -1. Find the
102 1.1 mrg first block that is not a copy of this. */
103 1.1 mrg for (i = len - 2; i >= 0; i--)
104 1.1 mrg {
105 1.1 mrg HOST_WIDE_INT x = val[i];
106 1.1 mrg if (x != top)
107 1.1 mrg {
108 1.1 mrg if (SIGN_MASK (x) == top)
109 1.1 mrg return i + 1;
110 1.1 mrg
111 1.1 mrg /* We need an extra block because the top bit of block I does
112 1.1 mrg not match the extension. */
113 1.1 mrg return i + 2;
114 1.1 mrg }
115 1.1 mrg }
116 1.1 mrg
117 1.1 mrg /* The number is 0 or -1. */
118 1.1 mrg return 1;
119 1.1 mrg }
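/* As an illustration, assuming 64-bit HOST_WIDE_INT blocks and a
   precision of 128: the value 5 stored as { 5, 0 } canonizes to length 1,
   because the upper block is just the sign-extension of block 0; the
   unsigned value with all 64 low bits set, stored as { -1, 0 }, keeps
   length 2, because dropping the zero block would change the sign of the
   top bit.  */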
120 1.1 mrg
121 1.1.1.1.2.1 pgoyette /* VAL[0] is the unsigned result of an operation. Canonize it by adding
122 1.1.1.1.2.1 pgoyette another 0 block if needed, and return number of blocks needed. */
123 1.1.1.1.2.1 pgoyette
124 1.1.1.1.2.1 pgoyette static inline unsigned int
125 1.1.1.1.2.1 pgoyette canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
126 1.1.1.1.2.1 pgoyette {
127 1.1.1.1.2.1 pgoyette if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
128 1.1.1.1.2.1 pgoyette {
129 1.1.1.1.2.1 pgoyette val[1] = 0;
130 1.1.1.1.2.1 pgoyette return 2;
131 1.1.1.1.2.1 pgoyette }
132 1.1.1.1.2.1 pgoyette return 1;
133 1.1.1.1.2.1 pgoyette }
134 1.1.1.1.2.1 pgoyette
135 1.1 mrg /*
136 1.1 mrg * Conversion routines in and out of wide_int.
137 1.1 mrg */
138 1.1 mrg
139 1.1 mrg /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
140 1.1 mrg result for an integer with precision PRECISION. Return the length
141 1.1 mrg of VAL (after any canonization). */
142 1.1 mrg unsigned int
143 1.1 mrg wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
144 1.1 mrg unsigned int xlen, unsigned int precision, bool need_canon)
145 1.1 mrg {
146 1.1 mrg for (unsigned i = 0; i < xlen; i++)
147 1.1 mrg val[i] = xval[i];
148 1.1 mrg return need_canon ? canonize (val, xlen, precision) : xlen;
149 1.1 mrg }
150 1.1 mrg
151 1.1 mrg /* Construct a wide int from a buffer of length LEN. BUFFER will be
152 1.1 mrg read according to byte endianness and word endianness of the target.
153 1.1 mrg Only the lower BUFFER_LEN bytes of the result are set; the remaining
154 1.1 mrg high bytes are cleared. */
155 1.1 mrg wide_int
156 1.1 mrg wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
157 1.1 mrg {
158 1.1 mrg unsigned int precision = buffer_len * BITS_PER_UNIT;
159 1.1 mrg wide_int result = wide_int::create (precision);
160 1.1 mrg unsigned int words = buffer_len / UNITS_PER_WORD;
161 1.1 mrg
162 1.1 mrg /* We have to clear all the bits ourselves, as we merely OR in values
163 1.1 mrg below. */
164 1.1 mrg unsigned int len = BLOCKS_NEEDED (precision);
165 1.1 mrg HOST_WIDE_INT *val = result.write_val ();
166 1.1 mrg for (unsigned int i = 0; i < len; ++i)
167 1.1 mrg val[i] = 0;
168 1.1 mrg
169 1.1 mrg for (unsigned int byte = 0; byte < buffer_len; byte++)
170 1.1 mrg {
171 1.1 mrg unsigned int offset;
172 1.1 mrg unsigned int index;
173 1.1 mrg unsigned int bitpos = byte * BITS_PER_UNIT;
174 1.1 mrg unsigned HOST_WIDE_INT value;
175 1.1 mrg
176 1.1 mrg if (buffer_len > UNITS_PER_WORD)
177 1.1 mrg {
178 1.1 mrg unsigned int word = byte / UNITS_PER_WORD;
179 1.1 mrg
180 1.1 mrg if (WORDS_BIG_ENDIAN)
181 1.1 mrg word = (words - 1) - word;
182 1.1 mrg
183 1.1 mrg offset = word * UNITS_PER_WORD;
184 1.1 mrg
185 1.1 mrg if (BYTES_BIG_ENDIAN)
186 1.1 mrg offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
187 1.1 mrg else
188 1.1 mrg offset += byte % UNITS_PER_WORD;
189 1.1 mrg }
190 1.1 mrg else
191 1.1 mrg offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
192 1.1 mrg
193 1.1 mrg value = (unsigned HOST_WIDE_INT) buffer[offset];
194 1.1 mrg
195 1.1 mrg index = bitpos / HOST_BITS_PER_WIDE_INT;
196 1.1 mrg val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
197 1.1 mrg }
198 1.1 mrg
199 1.1 mrg result.set_len (canonize (val, len, precision));
200 1.1 mrg
201 1.1 mrg return result;
202 1.1 mrg }
203 1.1 mrg
204 1.1 mrg /* Sets RESULT from X; the sign is taken according to SGN. */
205 1.1 mrg void
206 1.1 mrg wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
207 1.1 mrg {
208 1.1 mrg int len = x.get_len ();
209 1.1 mrg const HOST_WIDE_INT *v = x.get_val ();
210 1.1 mrg int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
211 1.1 mrg
212 1.1 mrg if (wi::neg_p (x, sgn))
213 1.1 mrg {
214 1.1 mrg /* We use ones' complement to avoid the 0x80..0 edge case that unary
215 1.1 mrg minus won't work on; mpz_com below restores X, since -(~X) - 1 == X. */
216 1.1 mrg HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
217 1.1 mrg for (int i = 0; i < len; i++)
218 1.1 mrg t[i] = ~v[i];
219 1.1 mrg if (excess > 0)
220 1.1 mrg t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
221 1.1 mrg mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
222 1.1 mrg mpz_com (result, result);
223 1.1 mrg }
224 1.1 mrg else if (excess > 0)
225 1.1 mrg {
226 1.1 mrg HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
227 1.1 mrg for (int i = 0; i < len - 1; i++)
228 1.1 mrg t[i] = v[i];
229 1.1 mrg t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
230 1.1 mrg mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
231 1.1 mrg }
232 1.1 mrg else
233 1.1 mrg mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
234 1.1 mrg }
235 1.1 mrg
236 1.1 mrg /* Returns X converted to TYPE. If WRAP is true, then out-of-range
237 1.1 mrg values of X will be wrapped; otherwise, they will be set to the
238 1.1 mrg appropriate minimum or maximum TYPE bound. */
239 1.1 mrg wide_int
240 1.1 mrg wi::from_mpz (const_tree type, mpz_t x, bool wrap)
241 1.1 mrg {
242 1.1 mrg size_t count, numb;
243 1.1 mrg unsigned int prec = TYPE_PRECISION (type);
244 1.1 mrg wide_int res = wide_int::create (prec);
245 1.1 mrg
246 1.1 mrg if (!wrap)
247 1.1 mrg {
248 1.1 mrg mpz_t min, max;
249 1.1 mrg
250 1.1 mrg mpz_init (min);
251 1.1 mrg mpz_init (max);
252 1.1 mrg get_type_static_bounds (type, min, max);
253 1.1 mrg
254 1.1 mrg if (mpz_cmp (x, min) < 0)
255 1.1 mrg mpz_set (x, min);
256 1.1 mrg else if (mpz_cmp (x, max) > 0)
257 1.1 mrg mpz_set (x, max);
258 1.1 mrg
259 1.1 mrg mpz_clear (min);
260 1.1 mrg mpz_clear (max);
261 1.1 mrg }
262 1.1 mrg
263 1.1 mrg /* Determine the number of unsigned HOST_WIDE_INTs that are required
264 1.1 mrg for representing the absolute value. The code to calculate count is
265 1.1 mrg extracted from the GMP manual, section "Integer Import and Export":
266 1.1 mrg http://gmplib.org/manual/Integer-Import-and-Export.html */
267 1.1 mrg numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
268 1.1 mrg count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
269 1.1 mrg HOST_WIDE_INT *val = res.write_val ();
270 1.1 mrg /* Read the absolute value.
271 1.1 mrg
272 1.1 mrg Write directly to the wide_int storage if possible, otherwise leave
273 1.1 mrg GMP to allocate the memory for us. It might be slightly more efficient
274 1.1 mrg to use mpz_tdiv_r_2exp for the latter case, but the situation is
275 1.1 mrg pathological and it seems safer to operate on the original mpz value
276 1.1 mrg in all cases. */
277 1.1 mrg void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
278 1.1 mrg &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
279 1.1 mrg if (count < 1)
280 1.1 mrg {
281 1.1 mrg val[0] = 0;
282 1.1 mrg count = 1;
283 1.1 mrg }
284 1.1 mrg count = MIN (count, BLOCKS_NEEDED (prec));
285 1.1 mrg if (valres != val)
286 1.1 mrg {
287 1.1 mrg memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
288 1.1 mrg free (valres);
289 1.1 mrg }
290 1.1 mrg /* Zero-extend the absolute value to PREC bits. */
291 1.1 mrg if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
292 1.1 mrg val[count++] = 0;
293 1.1 mrg else
294 1.1 mrg count = canonize (val, count, prec);
295 1.1 mrg res.set_len (count);
296 1.1 mrg
297 1.1 mrg if (mpz_sgn (x) < 0)
298 1.1 mrg res = -res;
299 1.1 mrg
300 1.1 mrg return res;
301 1.1 mrg }
302 1.1 mrg
303 1.1 mrg /*
304 1.1 mrg * Largest and smallest values in a mode.
305 1.1 mrg */
306 1.1 mrg
307 1.1 mrg /* Return the largest SGNed number that is representable in PRECISION bits.
308 1.1 mrg
309 1.1 mrg TODO: There is still code from the double_int era that tries to
310 1.1 mrg make up for the fact that double_ints could not represent the
311 1.1 mrg min and max values of all types. This code should be removed
312 1.1 mrg because the min and max values can always be represented in
313 1.1 mrg wide_ints and int-csts. */
314 1.1 mrg wide_int
315 1.1 mrg wi::max_value (unsigned int precision, signop sgn)
316 1.1 mrg {
317 1.1 mrg gcc_checking_assert (precision != 0);
318 1.1 mrg if (sgn == UNSIGNED)
319 1.1 mrg /* The unsigned max is just all ones. */
320 1.1 mrg return shwi (-1, precision);
321 1.1 mrg else
322 1.1 mrg /* The signed max is all ones except the top bit. This must be
323 1.1 mrg explicitly represented. */
324 1.1 mrg return mask (precision - 1, false, precision);
325 1.1 mrg }
326 1.1 mrg
327 1.1 mrg /* Return the smallest SGNed number that is representable in PRECISION bits. */
328 1.1 mrg wide_int
329 1.1 mrg wi::min_value (unsigned int precision, signop sgn)
330 1.1 mrg {
331 1.1 mrg gcc_checking_assert (precision != 0);
332 1.1 mrg if (sgn == UNSIGNED)
333 1.1 mrg return uhwi (0, precision);
334 1.1 mrg else
335 1.1 mrg /* The signed min is all zeros except the top bit. This must be
336 1.1 mrg explicitly represented. */
337 1.1 mrg return wi::set_bit_in_zero (precision - 1, precision);
338 1.1 mrg }
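/* For example, with an 8-bit precision the unsigned maximum is
   shwi (-1, 8), i.e. 255, the signed maximum is mask (7, false, 8),
   i.e. 127, the unsigned minimum is 0 and the signed minimum is
   set_bit_in_zero (7, 8), i.e. -128.  */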
339 1.1 mrg
340 1.1 mrg /*
341 1.1 mrg * Public utilities.
342 1.1 mrg */
343 1.1 mrg
344 1.1 mrg /* Convert the number represented by XVAL, XLEN and XPRECISION, which has
345 1.1 mrg signedness SGN, to an integer that has PRECISION bits. Store the blocks
346 1.1 mrg in VAL and return the number of blocks used.
347 1.1 mrg
348 1.1 mrg This function can handle both extension (PRECISION > XPRECISION)
349 1.1 mrg and truncation (PRECISION < XPRECISION). */
350 1.1 mrg unsigned int
351 1.1 mrg wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
352 1.1 mrg unsigned int xlen, unsigned int xprecision,
353 1.1 mrg unsigned int precision, signop sgn)
354 1.1 mrg {
355 1.1 mrg unsigned int blocks_needed = BLOCKS_NEEDED (precision);
356 1.1 mrg unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
357 1.1 mrg for (unsigned i = 0; i < len; i++)
358 1.1 mrg val[i] = xval[i];
359 1.1 mrg
360 1.1 mrg if (precision > xprecision)
361 1.1 mrg {
362 1.1 mrg unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
363 1.1 mrg
364 1.1 mrg /* Expanding. */
365 1.1 mrg if (sgn == UNSIGNED)
366 1.1 mrg {
367 1.1 mrg if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
368 1.1 mrg val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
369 1.1 mrg else if (val[len - 1] < 0)
370 1.1 mrg {
371 1.1 mrg while (len < BLOCKS_NEEDED (xprecision))
372 1.1 mrg val[len++] = -1;
373 1.1 mrg if (small_xprecision)
374 1.1 mrg val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
375 1.1 mrg else
376 1.1 mrg val[len++] = 0;
377 1.1 mrg }
378 1.1 mrg }
379 1.1 mrg else
380 1.1 mrg {
381 1.1 mrg if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
382 1.1 mrg val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
383 1.1 mrg }
384 1.1 mrg }
385 1.1 mrg len = canonize (val, len, precision);
386 1.1 mrg
387 1.1 mrg return len;
388 1.1 mrg }
389 1.1 mrg
390 1.1 mrg /* This function hides the fact that we cannot rely on the bits beyond
391 1.1 mrg the precision. This issue comes up in the relational comparisons
392 1.1 mrg where we do allow comparisons of values of different precisions. */
393 1.1 mrg static inline HOST_WIDE_INT
394 1.1 mrg selt (const HOST_WIDE_INT *a, unsigned int len,
395 1.1 mrg unsigned int blocks_needed, unsigned int small_prec,
396 1.1 mrg unsigned int index, signop sgn)
397 1.1 mrg {
398 1.1 mrg HOST_WIDE_INT val;
399 1.1 mrg if (index < len)
400 1.1 mrg val = a[index];
401 1.1 mrg else if (index < blocks_needed || sgn == SIGNED)
402 1.1 mrg /* Signed or within the precision. */
403 1.1 mrg val = SIGN_MASK (a[len - 1]);
404 1.1 mrg else
405 1.1 mrg /* Unsigned extension beyond the precision. */
406 1.1 mrg val = 0;
407 1.1 mrg
408 1.1 mrg if (small_prec && index == blocks_needed - 1)
409 1.1 mrg return (sgn == SIGNED
410 1.1 mrg ? sext_hwi (val, small_prec)
411 1.1 mrg : zext_hwi (val, small_prec));
412 1.1 mrg else
413 1.1 mrg return val;
414 1.1 mrg }
415 1.1 mrg
416 1.1 mrg /* Find the highest bit represented in a wide int. This will in
417 1.1 mrg general have the same value as the sign bit. */
418 1.1 mrg static inline HOST_WIDE_INT
419 1.1 mrg top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
420 1.1 mrg {
421 1.1 mrg int excess = len * HOST_BITS_PER_WIDE_INT - prec;
422 1.1 mrg unsigned HOST_WIDE_INT val = a[len - 1];
423 1.1 mrg if (excess > 0)
424 1.1 mrg val <<= excess;
425 1.1 mrg return val >> (HOST_BITS_PER_WIDE_INT - 1);
426 1.1 mrg }
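/* For example, with 64-bit HOST_WIDE_INTs, a precision of 8 and
   A == { 0x80 }, the excess is 56, so the shift moves bit 7 into the
   most significant position and top_bit_of returns 1; for A == { 0x40 }
   it returns 0.  */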
427 1.1 mrg
428 1.1 mrg /*
429 1.1 mrg * Comparisons, note that only equality is an operator. The other
430 1.1 mrg * comparisons cannot be operators since they are inherently signed or
431 1.1 mrg * unsigned and C++ has no such operators.
432 1.1 mrg */
433 1.1 mrg
434 1.1 mrg /* Return true if OP0 == OP1. */
435 1.1 mrg bool
436 1.1 mrg wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
437 1.1 mrg const HOST_WIDE_INT *op1, unsigned int op1len,
438 1.1 mrg unsigned int prec)
439 1.1 mrg {
440 1.1 mrg int l0 = op0len - 1;
441 1.1 mrg unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
442 1.1 mrg
443 1.1 mrg if (op0len != op1len)
444 1.1 mrg return false;
445 1.1 mrg
446 1.1 mrg if (op0len == BLOCKS_NEEDED (prec) && small_prec)
447 1.1 mrg {
448 1.1 mrg /* It does not matter if we zext or sext here, we just have to
449 1.1 mrg do both the same way. */
450 1.1 mrg if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
451 1.1 mrg return false;
452 1.1 mrg l0--;
453 1.1 mrg }
454 1.1 mrg
455 1.1 mrg while (l0 >= 0)
456 1.1 mrg if (op0[l0] != op1[l0])
457 1.1 mrg return false;
458 1.1 mrg else
459 1.1 mrg l0--;
460 1.1 mrg
461 1.1 mrg return true;
462 1.1 mrg }
463 1.1 mrg
464 1.1 mrg /* Return true if OP0 < OP1 using signed comparisons. */
465 1.1 mrg bool
466 1.1 mrg wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
467 1.1 mrg unsigned int precision,
468 1.1 mrg const HOST_WIDE_INT *op1, unsigned int op1len)
469 1.1 mrg {
470 1.1 mrg HOST_WIDE_INT s0, s1;
471 1.1 mrg unsigned HOST_WIDE_INT u0, u1;
472 1.1 mrg unsigned int blocks_needed = BLOCKS_NEEDED (precision);
473 1.1 mrg unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
474 1.1 mrg int l = MAX (op0len - 1, op1len - 1);
475 1.1 mrg
476 1.1 mrg /* Only the top block is compared as signed. The rest are unsigned
477 1.1 mrg comparisons. */
478 1.1 mrg s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
479 1.1 mrg s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
480 1.1 mrg if (s0 < s1)
481 1.1 mrg return true;
482 1.1 mrg if (s0 > s1)
483 1.1 mrg return false;
484 1.1 mrg
485 1.1 mrg l--;
486 1.1 mrg while (l >= 0)
487 1.1 mrg {
488 1.1 mrg u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
489 1.1 mrg u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
490 1.1 mrg
491 1.1 mrg if (u0 < u1)
492 1.1 mrg return true;
493 1.1 mrg if (u0 > u1)
494 1.1 mrg return false;
495 1.1 mrg l--;
496 1.1 mrg }
497 1.1 mrg
498 1.1 mrg return false;
499 1.1 mrg }
500 1.1 mrg
501 1.1 mrg /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
502 1.1 mrg signed compares. */
503 1.1 mrg int
504 1.1 mrg wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
505 1.1 mrg unsigned int precision,
506 1.1 mrg const HOST_WIDE_INT *op1, unsigned int op1len)
507 1.1 mrg {
508 1.1 mrg HOST_WIDE_INT s0, s1;
509 1.1 mrg unsigned HOST_WIDE_INT u0, u1;
510 1.1 mrg unsigned int blocks_needed = BLOCKS_NEEDED (precision);
511 1.1 mrg unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
512 1.1 mrg int l = MAX (op0len - 1, op1len - 1);
513 1.1 mrg
514 1.1 mrg /* Only the top block is compared as signed. The rest are unsigned
515 1.1 mrg comparisons. */
516 1.1 mrg s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
517 1.1 mrg s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
518 1.1 mrg if (s0 < s1)
519 1.1 mrg return -1;
520 1.1 mrg if (s0 > s1)
521 1.1 mrg return 1;
522 1.1 mrg
523 1.1 mrg l--;
524 1.1 mrg while (l >= 0)
525 1.1 mrg {
526 1.1 mrg u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
527 1.1 mrg u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
528 1.1 mrg
529 1.1 mrg if (u0 < u1)
530 1.1 mrg return -1;
531 1.1 mrg if (u0 > u1)
532 1.1 mrg return 1;
533 1.1 mrg l--;
534 1.1 mrg }
535 1.1 mrg
536 1.1 mrg return 0;
537 1.1 mrg }
538 1.1 mrg
539 1.1 mrg /* Return true if OP0 < OP1 using unsigned comparisons. */
540 1.1 mrg bool
541 1.1 mrg wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
542 1.1 mrg unsigned int precision,
543 1.1 mrg const HOST_WIDE_INT *op1, unsigned int op1len)
544 1.1 mrg {
545 1.1 mrg unsigned HOST_WIDE_INT x0;
546 1.1 mrg unsigned HOST_WIDE_INT x1;
547 1.1 mrg unsigned int blocks_needed = BLOCKS_NEEDED (precision);
548 1.1 mrg unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
549 1.1 mrg int l = MAX (op0len - 1, op1len - 1);
550 1.1 mrg
551 1.1 mrg while (l >= 0)
552 1.1 mrg {
553 1.1 mrg x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
554 1.1 mrg x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
555 1.1 mrg if (x0 < x1)
556 1.1 mrg return true;
557 1.1 mrg if (x0 > x1)
558 1.1 mrg return false;
559 1.1 mrg l--;
560 1.1 mrg }
561 1.1 mrg
562 1.1 mrg return false;
563 1.1 mrg }
564 1.1 mrg
565 1.1 mrg /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
566 1.1 mrg unsigned compares. */
567 1.1 mrg int
568 1.1 mrg wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
569 1.1 mrg unsigned int precision,
570 1.1 mrg const HOST_WIDE_INT *op1, unsigned int op1len)
571 1.1 mrg {
572 1.1 mrg unsigned HOST_WIDE_INT x0;
573 1.1 mrg unsigned HOST_WIDE_INT x1;
574 1.1 mrg unsigned int blocks_needed = BLOCKS_NEEDED (precision);
575 1.1 mrg unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
576 1.1 mrg int l = MAX (op0len - 1, op1len - 1);
577 1.1 mrg
578 1.1 mrg while (l >= 0)
579 1.1 mrg {
580 1.1 mrg x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
581 1.1 mrg x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
582 1.1 mrg if (x0 < x1)
583 1.1 mrg return -1;
584 1.1 mrg if (x0 > x1)
585 1.1 mrg return 1;
586 1.1 mrg l--;
587 1.1 mrg }
588 1.1 mrg
589 1.1 mrg return 0;
590 1.1 mrg }
591 1.1 mrg
592 1.1 mrg /*
593 1.1 mrg * Extension.
594 1.1 mrg */
595 1.1 mrg
596 1.1 mrg /* Sign-extend the number represented by XVAL and XLEN into VAL,
597 1.1 mrg starting at OFFSET. Return the number of blocks in VAL. Both XVAL
598 1.1 mrg and VAL have PRECISION bits. */
599 1.1 mrg unsigned int
600 1.1 mrg wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
601 1.1 mrg unsigned int xlen, unsigned int precision, unsigned int offset)
602 1.1 mrg {
603 1.1 mrg unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
604 1.1 mrg /* Extending beyond the precision is a no-op. If we have only stored
605 1.1 mrg OFFSET bits or fewer, the rest are already signs. */
606 1.1 mrg if (offset >= precision || len >= xlen)
607 1.1 mrg {
608 1.1 mrg for (unsigned i = 0; i < xlen; ++i)
609 1.1 mrg val[i] = xval[i];
610 1.1 mrg return xlen;
611 1.1 mrg }
612 1.1 mrg unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
613 1.1 mrg for (unsigned int i = 0; i < len; i++)
614 1.1 mrg val[i] = xval[i];
615 1.1 mrg if (suboffset > 0)
616 1.1 mrg {
617 1.1 mrg val[len] = sext_hwi (xval[len], suboffset);
618 1.1 mrg len += 1;
619 1.1 mrg }
620 1.1 mrg return canonize (val, len, precision);
621 1.1 mrg }
622 1.1 mrg
623 1.1 mrg /* Zero-extend the number represented by XVAL and XLEN into VAL,
624 1.1 mrg starting at OFFSET. Return the number of blocks in VAL. Both XVAL
625 1.1 mrg and VAL have PRECISION bits. */
626 1.1 mrg unsigned int
627 1.1 mrg wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
628 1.1 mrg unsigned int xlen, unsigned int precision, unsigned int offset)
629 1.1 mrg {
630 1.1 mrg unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
631 1.1 mrg /* Extending beyond the precision is a no-op. If we have only stored
632 1.1 mrg OFFSET bits or fewer, and the upper stored bit is zero, then there
633 1.1 mrg is nothing to do. */
634 1.1 mrg if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
635 1.1 mrg {
636 1.1 mrg for (unsigned i = 0; i < xlen; ++i)
637 1.1 mrg val[i] = xval[i];
638 1.1 mrg return xlen;
639 1.1 mrg }
640 1.1 mrg unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
641 1.1 mrg for (unsigned int i = 0; i < len; i++)
642 1.1 mrg val[i] = i < xlen ? xval[i] : -1;
643 1.1 mrg if (suboffset > 0)
644 1.1 mrg val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
645 1.1 mrg else
646 1.1 mrg val[len] = 0;
647 1.1 mrg return canonize (val, len + 1, precision);
648 1.1 mrg }
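/* For example, sign-extending { 0x80 } from bit 8 with a precision of 32
   (sext_large with OFFSET == 8) yields a single block holding -128, i.e.
   the 32-bit value 0xffffff80, whereas zero-extending the same value with
   zext_large leaves it as { 0x80 }.  */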
649 1.1 mrg
650 1.1 mrg /*
651 1.1 mrg * Masking, inserting, shifting, rotating.
652 1.1 mrg */
653 1.1 mrg
654 1.1 mrg /* Insert WIDTH bits from Y into X starting at START. */
655 1.1 mrg wide_int
656 1.1 mrg wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
657 1.1 mrg unsigned int width)
658 1.1 mrg {
659 1.1 mrg wide_int result;
660 1.1 mrg wide_int mask;
661 1.1 mrg wide_int tmp;
662 1.1 mrg
663 1.1 mrg unsigned int precision = x.get_precision ();
664 1.1 mrg if (start >= precision)
665 1.1 mrg return x;
666 1.1 mrg
667 1.1 mrg gcc_checking_assert (precision >= width);
668 1.1 mrg
669 1.1 mrg if (start + width >= precision)
670 1.1 mrg width = precision - start;
671 1.1 mrg
672 1.1 mrg mask = wi::shifted_mask (start, width, false, precision);
673 1.1 mrg tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
674 1.1 mrg result = tmp & mask;
675 1.1 mrg
676 1.1 mrg tmp = wi::bit_and_not (x, mask);
677 1.1 mrg result = result | tmp;
678 1.1 mrg
679 1.1 mrg return result;
680 1.1 mrg }
681 1.1 mrg
682 1.1 mrg /* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
683 1.1 mrg Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
684 1.1 mrg bits. */
685 1.1 mrg unsigned int
686 1.1 mrg wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
687 1.1 mrg unsigned int xlen, unsigned int precision, unsigned int bit)
688 1.1 mrg {
689 1.1 mrg unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
690 1.1 mrg unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
691 1.1 mrg
692 1.1 mrg if (block + 1 >= xlen)
693 1.1 mrg {
694 1.1 mrg /* The operation either affects the last current block or needs
695 1.1 mrg a new block. */
696 1.1 mrg unsigned int len = block + 1;
697 1.1 mrg for (unsigned int i = 0; i < len; i++)
698 1.1 mrg val[i] = safe_uhwi (xval, xlen, i);
699 1.1 mrg val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
700 1.1 mrg
701 1.1 mrg /* If the bit we just set is at the msb of the block, make sure
702 1.1 mrg that any higher bits are zeros. */
703 1.1 mrg if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
704 1.1 mrg val[len++] = 0;
705 1.1 mrg return len;
706 1.1 mrg }
707 1.1 mrg else
708 1.1 mrg {
709 1.1 mrg for (unsigned int i = 0; i < xlen; i++)
710 1.1 mrg val[i] = xval[i];
711 1.1 mrg val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
712 1.1 mrg return canonize (val, xlen, precision);
713 1.1 mrg }
714 1.1 mrg }
715 1.1 mrg
716 1.1 mrg /* bswap THIS. */
717 1.1 mrg wide_int
718 1.1 mrg wide_int_storage::bswap () const
719 1.1 mrg {
720 1.1 mrg wide_int result = wide_int::create (precision);
721 1.1 mrg unsigned int i, s;
722 1.1 mrg unsigned int len = BLOCKS_NEEDED (precision);
723 1.1 mrg unsigned int xlen = get_len ();
724 1.1 mrg const HOST_WIDE_INT *xval = get_val ();
725 1.1 mrg HOST_WIDE_INT *val = result.write_val ();
726 1.1 mrg
727 1.1 mrg /* This is not a well-defined operation if the precision is not a
728 1.1 mrg multiple of 8. */
729 1.1 mrg gcc_assert ((precision & 0x7) == 0);
730 1.1 mrg
731 1.1 mrg for (i = 0; i < len; i++)
732 1.1 mrg val[i] = 0;
733 1.1 mrg
734 1.1 mrg /* Only swap the bytes that are not the padding. */
735 1.1 mrg for (s = 0; s < precision; s += 8)
736 1.1 mrg {
737 1.1 mrg unsigned int d = precision - s - 8;
738 1.1 mrg unsigned HOST_WIDE_INT byte;
739 1.1 mrg
740 1.1 mrg unsigned int block = s / HOST_BITS_PER_WIDE_INT;
741 1.1 mrg unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
742 1.1 mrg
743 1.1 mrg byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
744 1.1 mrg
745 1.1 mrg block = d / HOST_BITS_PER_WIDE_INT;
746 1.1 mrg offset = d & (HOST_BITS_PER_WIDE_INT - 1);
747 1.1 mrg
748 1.1 mrg val[block] |= byte << offset;
749 1.1 mrg }
750 1.1 mrg
751 1.1 mrg result.set_len (canonize (val, len, precision));
752 1.1 mrg return result;
753 1.1 mrg }
754 1.1 mrg
755 1.1 mrg /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
756 1.1 mrg above that up to PREC are zeros. The result is inverted if NEGATE
757 1.1 mrg is true. Return the number of blocks in VAL. */
758 1.1 mrg unsigned int
759 1.1 mrg wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
760 1.1 mrg unsigned int prec)
761 1.1 mrg {
762 1.1 mrg if (width >= prec)
763 1.1 mrg {
764 1.1 mrg val[0] = negate ? 0 : -1;
765 1.1 mrg return 1;
766 1.1 mrg }
767 1.1 mrg else if (width == 0)
768 1.1 mrg {
769 1.1 mrg val[0] = negate ? -1 : 0;
770 1.1 mrg return 1;
771 1.1 mrg }
772 1.1 mrg
773 1.1 mrg unsigned int i = 0;
774 1.1 mrg while (i < width / HOST_BITS_PER_WIDE_INT)
775 1.1 mrg val[i++] = negate ? 0 : -1;
776 1.1 mrg
777 1.1 mrg unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
778 1.1 mrg if (shift != 0)
779 1.1 mrg {
780 1.1 mrg HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
781 1.1 mrg val[i++] = negate ? ~last : last;
782 1.1 mrg }
783 1.1 mrg else
784 1.1 mrg val[i++] = negate ? -1 : 0;
785 1.1 mrg
786 1.1 mrg return i;
787 1.1 mrg }
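/* For example, with 64-bit blocks, mask (val, 70, false, 128) produces
   val = { -1, 0x3f } and returns 2: one full block of ones followed by
   the low six bits of the second block.  */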
788 1.1 mrg
789 1.1 mrg /* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
790 1.1 mrg bits are ones, and the bits above that up to PREC are zeros. The result
791 1.1 mrg is inverted if NEGATE is true. Return the number of blocks in VAL. */
792 1.1 mrg unsigned int
793 1.1 mrg wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
794 1.1 mrg bool negate, unsigned int prec)
795 1.1 mrg {
796 1.1 mrg if (start >= prec || width == 0)
797 1.1 mrg {
798 1.1 mrg val[0] = negate ? -1 : 0;
799 1.1 mrg return 1;
800 1.1 mrg }
801 1.1 mrg
802 1.1 mrg if (width > prec - start)
803 1.1 mrg width = prec - start;
804 1.1 mrg unsigned int end = start + width;
805 1.1 mrg
806 1.1 mrg unsigned int i = 0;
807 1.1 mrg while (i < start / HOST_BITS_PER_WIDE_INT)
808 1.1 mrg val[i++] = negate ? -1 : 0;
809 1.1 mrg
810 1.1 mrg unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
811 1.1 mrg if (shift)
812 1.1 mrg {
813 1.1 mrg HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
814 1.1 mrg shift += width;
815 1.1 mrg if (shift < HOST_BITS_PER_WIDE_INT)
816 1.1 mrg {
817 1.1 mrg /* case 000111000 */
818 1.1 mrg block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
819 1.1 mrg val[i++] = negate ? ~block : block;
820 1.1 mrg return i;
821 1.1 mrg }
822 1.1 mrg else
823 1.1 mrg /* ...111000 */
824 1.1 mrg val[i++] = negate ? block : ~block;
825 1.1 mrg }
826 1.1 mrg
827 1.1 mrg while (i < end / HOST_BITS_PER_WIDE_INT)
828 1.1 mrg /* 1111111 */
829 1.1 mrg val[i++] = negate ? 0 : -1;
830 1.1 mrg
831 1.1 mrg shift = end & (HOST_BITS_PER_WIDE_INT - 1);
832 1.1 mrg if (shift != 0)
833 1.1 mrg {
834 1.1 mrg /* 000011111 */
835 1.1 mrg HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
836 1.1 mrg val[i++] = negate ? ~block : block;
837 1.1 mrg }
838 1.1 mrg else if (end < prec)
839 1.1 mrg val[i++] = negate ? -1 : 0;
840 1.1 mrg
841 1.1 mrg return i;
842 1.1 mrg }
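/* For example, shifted_mask (val, 4, 8, false, 32) produces
   val = { 0xff0 } and returns 1: bits 4 to 11 are set and everything
   else is clear.  */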
843 1.1 mrg
844 1.1 mrg /*
845 1.1 mrg * logical operations.
846 1.1 mrg */
847 1.1 mrg
848 1.1 mrg /* Set VAL to OP0 & OP1. Return the number of blocks used. */
849 1.1 mrg unsigned int
850 1.1 mrg wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
851 1.1 mrg unsigned int op0len, const HOST_WIDE_INT *op1,
852 1.1 mrg unsigned int op1len, unsigned int prec)
853 1.1 mrg {
854 1.1 mrg int l0 = op0len - 1;
855 1.1 mrg int l1 = op1len - 1;
856 1.1 mrg bool need_canon = true;
857 1.1 mrg
858 1.1 mrg unsigned int len = MAX (op0len, op1len);
859 1.1 mrg if (l0 > l1)
860 1.1 mrg {
861 1.1 mrg HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
862 1.1 mrg if (op1mask == 0)
863 1.1 mrg {
864 1.1 mrg l0 = l1;
865 1.1 mrg len = l1 + 1;
866 1.1 mrg }
867 1.1 mrg else
868 1.1 mrg {
869 1.1 mrg need_canon = false;
870 1.1 mrg while (l0 > l1)
871 1.1 mrg {
872 1.1 mrg val[l0] = op0[l0];
873 1.1 mrg l0--;
874 1.1 mrg }
875 1.1 mrg }
876 1.1 mrg }
877 1.1 mrg else if (l1 > l0)
878 1.1 mrg {
879 1.1 mrg HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
880 1.1 mrg if (op0mask == 0)
881 1.1 mrg len = l0 + 1;
882 1.1 mrg else
883 1.1 mrg {
884 1.1 mrg need_canon = false;
885 1.1 mrg while (l1 > l0)
886 1.1 mrg {
887 1.1 mrg val[l1] = op1[l1];
888 1.1 mrg l1--;
889 1.1 mrg }
890 1.1 mrg }
891 1.1 mrg }
892 1.1 mrg
893 1.1 mrg while (l0 >= 0)
894 1.1 mrg {
895 1.1 mrg val[l0] = op0[l0] & op1[l0];
896 1.1 mrg l0--;
897 1.1 mrg }
898 1.1 mrg
899 1.1 mrg if (need_canon)
900 1.1 mrg len = canonize (val, len, prec);
901 1.1 mrg
902 1.1 mrg return len;
903 1.1 mrg }
904 1.1 mrg
905 1.1 mrg /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
906 1.1 mrg unsigned int
907 1.1 mrg wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
908 1.1 mrg unsigned int op0len, const HOST_WIDE_INT *op1,
909 1.1 mrg unsigned int op1len, unsigned int prec)
910 1.1 mrg {
911 1.1 mrg wide_int result;
912 1.1 mrg int l0 = op0len - 1;
913 1.1 mrg int l1 = op1len - 1;
914 1.1 mrg bool need_canon = true;
915 1.1 mrg
916 1.1 mrg unsigned int len = MAX (op0len, op1len);
917 1.1 mrg if (l0 > l1)
918 1.1 mrg {
919 1.1 mrg HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
920 1.1 mrg if (op1mask != 0)
921 1.1 mrg {
922 1.1 mrg l0 = l1;
923 1.1 mrg len = l1 + 1;
924 1.1 mrg }
925 1.1 mrg else
926 1.1 mrg {
927 1.1 mrg need_canon = false;
928 1.1 mrg while (l0 > l1)
929 1.1 mrg {
930 1.1 mrg val[l0] = op0[l0];
931 1.1 mrg l0--;
932 1.1 mrg }
933 1.1 mrg }
934 1.1 mrg }
935 1.1 mrg else if (l1 > l0)
936 1.1 mrg {
937 1.1 mrg HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
938 1.1 mrg if (op0mask == 0)
939 1.1 mrg len = l0 + 1;
940 1.1 mrg else
941 1.1 mrg {
942 1.1 mrg need_canon = false;
943 1.1 mrg while (l1 > l0)
944 1.1 mrg {
945 1.1 mrg val[l1] = ~op1[l1];
946 1.1 mrg l1--;
947 1.1 mrg }
948 1.1 mrg }
949 1.1 mrg }
950 1.1 mrg
951 1.1 mrg while (l0 >= 0)
952 1.1 mrg {
953 1.1 mrg val[l0] = op0[l0] & ~op1[l0];
954 1.1 mrg l0--;
955 1.1 mrg }
956 1.1 mrg
957 1.1 mrg if (need_canon)
958 1.1 mrg len = canonize (val, len, prec);
959 1.1 mrg
960 1.1 mrg return len;
961 1.1 mrg }
962 1.1 mrg
963 1.1 mrg /* Set VAL to OP0 | OP1. Return the number of blocks used. */
964 1.1 mrg unsigned int
965 1.1 mrg wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
966 1.1 mrg unsigned int op0len, const HOST_WIDE_INT *op1,
967 1.1 mrg unsigned int op1len, unsigned int prec)
968 1.1 mrg {
969 1.1 mrg wide_int result;
970 1.1 mrg int l0 = op0len - 1;
971 1.1 mrg int l1 = op1len - 1;
972 1.1 mrg bool need_canon = true;
973 1.1 mrg
974 1.1 mrg unsigned int len = MAX (op0len, op1len);
975 1.1 mrg if (l0 > l1)
976 1.1 mrg {
977 1.1 mrg HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
978 1.1 mrg if (op1mask != 0)
979 1.1 mrg {
980 1.1 mrg l0 = l1;
981 1.1 mrg len = l1 + 1;
982 1.1 mrg }
983 1.1 mrg else
984 1.1 mrg {
985 1.1 mrg need_canon = false;
986 1.1 mrg while (l0 > l1)
987 1.1 mrg {
988 1.1 mrg val[l0] = op0[l0];
989 1.1 mrg l0--;
990 1.1 mrg }
991 1.1 mrg }
992 1.1 mrg }
993 1.1 mrg else if (l1 > l0)
994 1.1 mrg {
995 1.1 mrg HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
996 1.1 mrg if (op0mask != 0)
997 1.1 mrg len = l0 + 1;
998 1.1 mrg else
999 1.1 mrg {
1000 1.1 mrg need_canon = false;
1001 1.1 mrg while (l1 > l0)
1002 1.1 mrg {
1003 1.1 mrg val[l1] = op1[l1];
1004 1.1 mrg l1--;
1005 1.1 mrg }
1006 1.1 mrg }
1007 1.1 mrg }
1008 1.1 mrg
1009 1.1 mrg while (l0 >= 0)
1010 1.1 mrg {
1011 1.1 mrg val[l0] = op0[l0] | op1[l0];
1012 1.1 mrg l0--;
1013 1.1 mrg }
1014 1.1 mrg
1015 1.1 mrg if (need_canon)
1016 1.1 mrg len = canonize (val, len, prec);
1017 1.1 mrg
1018 1.1 mrg return len;
1019 1.1 mrg }
1020 1.1 mrg
1021 1.1 mrg /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1022 1.1 mrg unsigned int
1023 1.1 mrg wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1024 1.1 mrg unsigned int op0len, const HOST_WIDE_INT *op1,
1025 1.1 mrg unsigned int op1len, unsigned int prec)
1026 1.1 mrg {
1027 1.1 mrg wide_int result;
1028 1.1 mrg int l0 = op0len - 1;
1029 1.1 mrg int l1 = op1len - 1;
1030 1.1 mrg bool need_canon = true;
1031 1.1 mrg
1032 1.1 mrg unsigned int len = MAX (op0len, op1len);
1033 1.1 mrg if (l0 > l1)
1034 1.1 mrg {
1035 1.1 mrg HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1036 1.1 mrg if (op1mask == 0)
1037 1.1 mrg {
1038 1.1 mrg l0 = l1;
1039 1.1 mrg len = l1 + 1;
1040 1.1 mrg }
1041 1.1 mrg else
1042 1.1 mrg {
1043 1.1 mrg need_canon = false;
1044 1.1 mrg while (l0 > l1)
1045 1.1 mrg {
1046 1.1 mrg val[l0] = op0[l0];
1047 1.1 mrg l0--;
1048 1.1 mrg }
1049 1.1 mrg }
1050 1.1 mrg }
1051 1.1 mrg else if (l1 > l0)
1052 1.1 mrg {
1053 1.1 mrg HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1054 1.1 mrg if (op0mask != 0)
1055 1.1 mrg len = l0 + 1;
1056 1.1 mrg else
1057 1.1 mrg {
1058 1.1 mrg need_canon = false;
1059 1.1 mrg while (l1 > l0)
1060 1.1 mrg {
1061 1.1 mrg val[l1] = ~op1[l1];
1062 1.1 mrg l1--;
1063 1.1 mrg }
1064 1.1 mrg }
1065 1.1 mrg }
1066 1.1 mrg
1067 1.1 mrg while (l0 >= 0)
1068 1.1 mrg {
1069 1.1 mrg val[l0] = op0[l0] | ~op1[l0];
1070 1.1 mrg l0--;
1071 1.1 mrg }
1072 1.1 mrg
1073 1.1 mrg if (need_canon)
1074 1.1 mrg len = canonize (val, len, prec);
1075 1.1 mrg
1076 1.1 mrg return len;
1077 1.1 mrg }
1078 1.1 mrg
1079 1.1 mrg /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1080 1.1 mrg unsigned int
1081 1.1 mrg wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1082 1.1 mrg unsigned int op0len, const HOST_WIDE_INT *op1,
1083 1.1 mrg unsigned int op1len, unsigned int prec)
1084 1.1 mrg {
1085 1.1 mrg wide_int result;
1086 1.1 mrg int l0 = op0len - 1;
1087 1.1 mrg int l1 = op1len - 1;
1088 1.1 mrg
1089 1.1 mrg unsigned int len = MAX (op0len, op1len);
1090 1.1 mrg if (l0 > l1)
1091 1.1 mrg {
1092 1.1 mrg HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1093 1.1 mrg while (l0 > l1)
1094 1.1 mrg {
1095 1.1 mrg val[l0] = op0[l0] ^ op1mask;
1096 1.1 mrg l0--;
1097 1.1 mrg }
1098 1.1 mrg }
1099 1.1 mrg
1100 1.1 mrg if (l1 > l0)
1101 1.1 mrg {
1102 1.1 mrg HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1103 1.1 mrg while (l1 > l0)
1104 1.1 mrg {
1105 1.1 mrg val[l1] = op0mask ^ op1[l1];
1106 1.1 mrg l1--;
1107 1.1 mrg }
1108 1.1 mrg }
1109 1.1 mrg
1110 1.1 mrg while (l0 >= 0)
1111 1.1 mrg {
1112 1.1 mrg val[l0] = op0[l0] ^ op1[l0];
1113 1.1 mrg l0--;
1114 1.1 mrg }
1115 1.1 mrg
1116 1.1 mrg return canonize (val, len, prec);
1117 1.1 mrg }
1118 1.1 mrg
1119 1.1 mrg /*
1120 1.1 mrg * math
1121 1.1 mrg */
1122 1.1 mrg
1123 1.1 mrg /* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1124 1.1 mrg whether the result overflows when OP0 and OP1 are treated as having
1125 1.1 mrg signedness SGN. Return the number of blocks in VAL. */
1126 1.1 mrg unsigned int
1127 1.1 mrg wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1128 1.1 mrg unsigned int op0len, const HOST_WIDE_INT *op1,
1129 1.1 mrg unsigned int op1len, unsigned int prec,
1130 1.1 mrg signop sgn, bool *overflow)
1131 1.1 mrg {
1132 1.1 mrg unsigned HOST_WIDE_INT o0 = 0;
1133 1.1 mrg unsigned HOST_WIDE_INT o1 = 0;
1134 1.1 mrg unsigned HOST_WIDE_INT x = 0;
1135 1.1 mrg unsigned HOST_WIDE_INT carry = 0;
1136 1.1 mrg unsigned HOST_WIDE_INT old_carry = 0;
1137 1.1 mrg unsigned HOST_WIDE_INT mask0, mask1;
1138 1.1 mrg unsigned int i;
1139 1.1 mrg
1140 1.1 mrg unsigned int len = MAX (op0len, op1len);
1141 1.1 mrg mask0 = -top_bit_of (op0, op0len, prec);
1142 1.1 mrg mask1 = -top_bit_of (op1, op1len, prec);
1143 1.1 mrg /* Add all of the explicitly defined elements. */
1144 1.1 mrg
1145 1.1 mrg for (i = 0; i < len; i++)
1146 1.1 mrg {
1147 1.1 mrg o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1148 1.1 mrg o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1149 1.1 mrg x = o0 + o1 + carry;
1150 1.1 mrg val[i] = x;
1151 1.1 mrg old_carry = carry;
1152 1.1 mrg carry = carry == 0 ? x < o0 : x <= o0;
1153 1.1 mrg }
1154 1.1 mrg
1155 1.1 mrg if (len * HOST_BITS_PER_WIDE_INT < prec)
1156 1.1 mrg {
1157 1.1 mrg val[len] = mask0 + mask1 + carry;
1158 1.1 mrg len++;
1159 1.1 mrg if (overflow)
1160 1.1 mrg *overflow = false;
1161 1.1 mrg }
1162 1.1 mrg else if (overflow)
1163 1.1 mrg {
1164 1.1 mrg unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1165 1.1 mrg if (sgn == SIGNED)
1166 1.1 mrg {
1167 1.1 mrg unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1168 1.1 mrg *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1169 1.1 mrg }
1170 1.1 mrg else
1171 1.1 mrg {
1172 1.1 mrg /* Put the MSBs of X and O0 in the top of the HWI. */
1173 1.1 mrg x <<= shift;
1174 1.1 mrg o0 <<= shift;
1175 1.1 mrg if (old_carry)
1176 1.1 mrg *overflow = (x <= o0);
1177 1.1 mrg else
1178 1.1 mrg *overflow = (x < o0);
1179 1.1 mrg }
1180 1.1 mrg }
1181 1.1 mrg
1182 1.1 mrg return canonize (val, len, prec);
1183 1.1 mrg }
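/* A worked signed-overflow example for add_large, assuming an 8-bit
   precision on a 64-bit host: adding 100 and 100 gives val[0] == 200
   (0xc8).  Both (val[0] ^ o0) and (val[0] ^ o1) have bit 7 set, so after
   shifting bit 7 of the 8-bit result into the sign bit of the
   HOST_WIDE_INT the overflow test sees a negative value and reports
   overflow, as expected for 100 + 100 in a signed char.  */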
1184 1.1 mrg
1185 1.1 mrg /* Subroutines of the multiplication and division operations. Unpack
1186 1.1 mrg the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1187 1.1 mrg HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1188 1.1 mrg uncompressing the top bit of INPUT[IN_LEN - 1]. */
1189 1.1 mrg static void
1190 1.1 mrg wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1191 1.1 mrg unsigned int in_len, unsigned int out_len,
1192 1.1 mrg unsigned int prec, signop sgn)
1193 1.1 mrg {
1194 1.1 mrg unsigned int i;
1195 1.1 mrg unsigned int j = 0;
1196 1.1 mrg unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1197 1.1 mrg unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1198 1.1 mrg HOST_WIDE_INT mask;
1199 1.1 mrg
1200 1.1 mrg if (sgn == SIGNED)
1201 1.1 mrg {
1202 1.1 mrg mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1203 1.1 mrg mask &= HALF_INT_MASK;
1204 1.1 mrg }
1205 1.1 mrg else
1206 1.1 mrg mask = 0;
1207 1.1 mrg
1208 1.1 mrg for (i = 0; i < blocks_needed - 1; i++)
1209 1.1 mrg {
1210 1.1 mrg HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1211 1.1 mrg result[j++] = x;
1212 1.1 mrg result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1213 1.1 mrg }
1214 1.1 mrg
1215 1.1 mrg HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1216 1.1 mrg if (small_prec)
1217 1.1 mrg {
1218 1.1 mrg if (sgn == SIGNED)
1219 1.1 mrg x = sext_hwi (x, small_prec);
1220 1.1 mrg else
1221 1.1 mrg x = zext_hwi (x, small_prec);
1222 1.1 mrg }
1223 1.1 mrg result[j++] = x;
1224 1.1 mrg result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1225 1.1 mrg
1226 1.1 mrg /* Smear the sign bit. */
1227 1.1 mrg while (j < out_len)
1228 1.1 mrg result[j++] = mask;
1229 1.1 mrg }
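/* For example, with a 64-bit HOST_WIDE_INT and 32-bit halves, unpacking
   the single block 0x0000000100000002 produces the half-words
   { 0x2, 0x1 }, least significant half first; wi_pack below performs the
   inverse operation.  */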
1230 1.1 mrg
1231 1.1 mrg /* The inverse of wi_unpack. IN_LEN is the number of input
1232 1.1 mrg blocks and PRECISION is the precision of the result. Return the
1233 1.1 mrg number of blocks in the canonicalized result. */
1234 1.1 mrg static unsigned int
1235 1.1 mrg wi_pack (HOST_WIDE_INT *result,
1236 1.1 mrg const unsigned HOST_HALF_WIDE_INT *input,
1237 1.1 mrg unsigned int in_len, unsigned int precision)
1238 1.1 mrg {
1239 1.1 mrg unsigned int i = 0;
1240 1.1 mrg unsigned int j = 0;
1241 1.1 mrg unsigned int blocks_needed = BLOCKS_NEEDED (precision);
1242 1.1 mrg
1243 1.1 mrg while (i + 1 < in_len)
1244 1.1 mrg {
1245 1.1 mrg result[j++] = ((unsigned HOST_WIDE_INT) input[i]
1246 1.1 mrg | ((unsigned HOST_WIDE_INT) input[i + 1]
1247 1.1 mrg << HOST_BITS_PER_HALF_WIDE_INT));
1248 1.1 mrg i += 2;
1249 1.1 mrg }
1250 1.1 mrg
1251 1.1 mrg /* Handle the case where in_len is odd. For this we zero extend. */
1252 1.1 mrg if (in_len & 1)
1253 1.1 mrg result[j++] = (unsigned HOST_WIDE_INT) input[i];
1254 1.1 mrg else if (j < blocks_needed)
1255 1.1 mrg result[j++] = 0;
1256 1.1 mrg return canonize (result, j, precision);
1257 1.1 mrg }
1258 1.1 mrg
1259 1.1 mrg /* Multiply Op1 by Op2. If HIGH is set, only the upper half of the
1260 1.1 mrg result is returned.
1261 1.1 mrg
1262 1.1 mrg If HIGH is not set, throw away the upper half after the check is
1263 1.1 mrg made to see if it overflows. Unfortunately there is no better way
1264 1.1 mrg to check for overflow than to do this. If OVERFLOW is nonnull,
1265 1.1 mrg record in *OVERFLOW whether the result overflowed. SGN controls
1266 1.1 mrg the signedness and is used to check overflow or if HIGH is set. */
1267 1.1 mrg unsigned int
1268 1.1 mrg wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1269 1.1 mrg unsigned int op1len, const HOST_WIDE_INT *op2val,
1270 1.1 mrg unsigned int op2len, unsigned int prec, signop sgn,
1271 1.1 mrg bool *overflow, bool high)
1272 1.1 mrg {
1273 1.1 mrg unsigned HOST_WIDE_INT o0, o1, k, t;
1274 1.1 mrg unsigned int i;
1275 1.1 mrg unsigned int j;
1276 1.1 mrg unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1277 1.1 mrg unsigned int half_blocks_needed = blocks_needed * 2;
1278 1.1 mrg /* The sizes here are scaled to support a 2x largest mode by 2x
1279 1.1 mrg largest mode yielding a 4x largest mode result. This is what is
1280 1.1 mrg needed by vpn. */
1281 1.1 mrg
1282 1.1 mrg unsigned HOST_HALF_WIDE_INT
1283 1.1 mrg u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1284 1.1 mrg unsigned HOST_HALF_WIDE_INT
1285 1.1 mrg v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1286 1.1 mrg /* The '2' in 'R' is because we are internally doing a full
1287 1.1 mrg multiply. */
1288 1.1 mrg unsigned HOST_HALF_WIDE_INT
1289 1.1 mrg r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1290 1.1 mrg HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1291 1.1 mrg
1292 1.1 mrg /* If the top level routine did not really pass in an overflow, then
1293 1.1 mrg just make sure that we never attempt to set it. */
1294 1.1 mrg bool needs_overflow = (overflow != 0);
1295 1.1 mrg if (needs_overflow)
1296 1.1 mrg *overflow = false;
1297 1.1 mrg
1298 1.1 mrg wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1299 1.1 mrg wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1300 1.1 mrg
1301 1.1 mrg /* This is a surprisingly common case, so do it first. */
1302 1.1 mrg if (op1 == 0 || op2 == 0)
1303 1.1 mrg {
1304 1.1 mrg val[0] = 0;
1305 1.1 mrg return 1;
1306 1.1 mrg }
1307 1.1 mrg
1308 1.1 mrg #ifdef umul_ppmm
1309 1.1 mrg if (sgn == UNSIGNED)
1310 1.1 mrg {
1311 1.1 mrg /* If the inputs are single HWIs and the output has room for at
1312 1.1 mrg least two HWIs, we can use umul_ppmm directly. */
1313 1.1 mrg if (prec >= HOST_BITS_PER_WIDE_INT * 2
1314 1.1 mrg && wi::fits_uhwi_p (op1)
1315 1.1 mrg && wi::fits_uhwi_p (op2))
1316 1.1 mrg {
1317 1.1 mrg /* This case never overflows. */
1318 1.1 mrg if (high)
1319 1.1 mrg {
1320 1.1 mrg val[0] = 0;
1321 1.1 mrg return 1;
1322 1.1 mrg }
1323 1.1 mrg umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1324 1.1 mrg if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1325 1.1 mrg {
1326 1.1 mrg val[2] = 0;
1327 1.1 mrg return 3;
1328 1.1 mrg }
1329 1.1 mrg return 1 + (val[1] != 0 || val[0] < 0);
1330 1.1 mrg }
1331 1.1 mrg /* Likewise if the output is a full single HWI, except that the
1332 1.1 mrg upper HWI of the result is only used for determining overflow.
1333 1.1 mrg (We handle this case inline when overflow isn't needed.) */
1334 1.1 mrg else if (prec == HOST_BITS_PER_WIDE_INT)
1335 1.1 mrg {
1336 1.1 mrg unsigned HOST_WIDE_INT upper;
1337 1.1 mrg umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1338 1.1 mrg if (needs_overflow)
1339 1.1 mrg *overflow = (upper != 0);
1340 1.1 mrg if (high)
1341 1.1 mrg val[0] = upper;
1342 1.1 mrg return 1;
1343 1.1 mrg }
1344 1.1 mrg }
1345 1.1 mrg #endif
1346 1.1 mrg
1347 1.1 mrg /* Handle multiplications by 1. */
1348 1.1 mrg if (op1 == 1)
1349 1.1 mrg {
1350 1.1 mrg if (high)
1351 1.1 mrg {
1352 1.1 mrg val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1353 1.1 mrg return 1;
1354 1.1 mrg }
1355 1.1 mrg for (i = 0; i < op2len; i++)
1356 1.1 mrg val[i] = op2val[i];
1357 1.1 mrg return op2len;
1358 1.1 mrg }
1359 1.1 mrg if (op2 == 1)
1360 1.1 mrg {
1361 1.1 mrg if (high)
1362 1.1 mrg {
1363 1.1 mrg val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1364 1.1 mrg return 1;
1365 1.1 mrg }
1366 1.1 mrg for (i = 0; i < op1len; i++)
1367 1.1 mrg val[i] = op1val[i];
1368 1.1 mrg return op1len;
1369 1.1 mrg }
1370 1.1 mrg
1371 1.1 mrg /* If we need to check for overflow, we can only do half wide
1372 1.1 mrg multiplies quickly because we need to look at the top bits to
1373 1.1 mrg check for the overflow. */
1374 1.1 mrg if ((high || needs_overflow)
1375 1.1 mrg && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1376 1.1 mrg {
1377 1.1 mrg unsigned HOST_WIDE_INT r;
1378 1.1 mrg
1379 1.1 mrg if (sgn == SIGNED)
1380 1.1 mrg {
1381 1.1 mrg o0 = op1.to_shwi ();
1382 1.1 mrg o1 = op2.to_shwi ();
1383 1.1 mrg }
1384 1.1 mrg else
1385 1.1 mrg {
1386 1.1 mrg o0 = op1.to_uhwi ();
1387 1.1 mrg o1 = op2.to_uhwi ();
1388 1.1 mrg }
1389 1.1 mrg
1390 1.1 mrg r = o0 * o1;
1391 1.1 mrg if (needs_overflow)
1392 1.1 mrg {
1393 1.1 mrg if (sgn == SIGNED)
1394 1.1 mrg {
1395 1.1 mrg if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1396 1.1 mrg *overflow = true;
1397 1.1 mrg }
1398 1.1 mrg else
1399 1.1 mrg {
1400 1.1 mrg if ((r >> prec) != 0)
1401 1.1 mrg *overflow = true;
1402 1.1 mrg }
1403 1.1 mrg }
1404 1.1 mrg val[0] = high ? r >> prec : r;
1405 1.1 mrg return 1;
1406 1.1 mrg }
1407 1.1 mrg
1408 1.1 mrg /* We do unsigned mul and then correct it. */
1409 1.1 mrg wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
1410 1.1 mrg wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);
1411 1.1 mrg
1412 1.1 mrg /* The 2 is for a full mult. */
1413 1.1 mrg memset (r, 0, half_blocks_needed * 2
1414 1.1 mrg * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1415 1.1 mrg
1416 1.1 mrg for (j = 0; j < half_blocks_needed; j++)
1417 1.1 mrg {
1418 1.1 mrg k = 0;
1419 1.1 mrg for (i = 0; i < half_blocks_needed; i++)
1420 1.1 mrg {
1421 1.1 mrg t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1422 1.1 mrg + r[i + j] + k);
1423 1.1 mrg r[i + j] = t & HALF_INT_MASK;
1424 1.1 mrg k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1425 1.1 mrg }
1426 1.1 mrg r[j + half_blocks_needed] = k;
1427 1.1 mrg }
1428 1.1 mrg
1429 1.1 mrg /* We did unsigned math above. For signed we must adjust the
1430 1.1 mrg product (assuming we need to see that). */
1431 1.1 mrg if (sgn == SIGNED && (high || needs_overflow))
1432 1.1 mrg {
1433 1.1 mrg unsigned HOST_WIDE_INT b;
1434 1.1 mrg if (wi::neg_p (op1))
1435 1.1 mrg {
1436 1.1 mrg b = 0;
1437 1.1 mrg for (i = 0; i < half_blocks_needed; i++)
1438 1.1 mrg {
1439 1.1 mrg t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1440 1.1 mrg - (unsigned HOST_WIDE_INT)v[i] - b;
1441 1.1 mrg r[i + half_blocks_needed] = t & HALF_INT_MASK;
1442 1.1 mrg b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1443 1.1 mrg }
1444 1.1 mrg }
1445 1.1 mrg if (wi::neg_p (op2))
1446 1.1 mrg {
1447 1.1 mrg b = 0;
1448 1.1 mrg for (i = 0; i < half_blocks_needed; i++)
1449 1.1 mrg {
1450 1.1 mrg t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1451 1.1 mrg - (unsigned HOST_WIDE_INT)u[i] - b;
1452 1.1 mrg r[i + half_blocks_needed] = t & HALF_INT_MASK;
1453 1.1 mrg b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1454 1.1 mrg }
1455 1.1 mrg }
1456 1.1 mrg }
1457 1.1 mrg
1458 1.1 mrg if (needs_overflow)
1459 1.1 mrg {
1460 1.1 mrg HOST_WIDE_INT top;
1461 1.1 mrg
1462 1.1 mrg /* For unsigned, overflow is true if any of the top bits are set.
1463 1.1 mrg For signed, overflow is true if any of the top bits are not equal
1464 1.1 mrg to the sign bit. */
1465 1.1 mrg if (sgn == UNSIGNED)
1466 1.1 mrg top = 0;
1467 1.1 mrg else
1468 1.1 mrg {
1469 1.1 mrg top = r[(half_blocks_needed) - 1];
1470 1.1 mrg top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
1471 1.1 mrg top &= mask;
1472 1.1 mrg }
1473 1.1 mrg
1474 1.1 mrg for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1475 1.1 mrg if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1476 1.1 mrg *overflow = true;
1477 1.1 mrg }
1478 1.1 mrg
1479 1.1 mrg int r_offset = high ? half_blocks_needed : 0;
1480 1.1 mrg return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
1481 1.1 mrg }
1482 1.1 mrg
1483 1.1 mrg /* Compute the population count of X. */
1484 1.1 mrg int
1485 1.1 mrg wi::popcount (const wide_int_ref &x)
1486 1.1 mrg {
1487 1.1 mrg unsigned int i;
1488 1.1 mrg int count;
1489 1.1 mrg
1490 1.1 mrg /* The high order block is special if it is the last block and the
1491 1.1 mrg precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1492 1.1 mrg have to clear out any ones above the precision before doing
1493 1.1 mrg popcount on this block. */
1494 1.1 mrg count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1495 1.1 mrg unsigned int stop = x.len;
1496 1.1 mrg if (count < 0)
1497 1.1 mrg {
1498 1.1 mrg count = popcount_hwi (x.uhigh () << -count);
1499 1.1 mrg stop -= 1;
1500 1.1 mrg }
1501 1.1 mrg else
1502 1.1 mrg {
1503 1.1 mrg if (x.sign_mask () >= 0)
1504 1.1 mrg count = 0;
1505 1.1 mrg }
1506 1.1 mrg
1507 1.1 mrg for (i = 0; i < stop; ++i)
1508 1.1 mrg count += popcount_hwi (x.val[i]);
1509 1.1 mrg
1510 1.1 mrg return count;
1511 1.1 mrg }
1512 1.1 mrg
1513 1.1 mrg /* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1514 1.1 mrg whether the result overflows when OP0 and OP1 are treated as having
1515 1.1 mrg signedness SGN. Return the number of blocks in VAL. */
1516 1.1 mrg unsigned int
1517 1.1 mrg wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1518 1.1 mrg unsigned int op0len, const HOST_WIDE_INT *op1,
1519 1.1 mrg unsigned int op1len, unsigned int prec,
1520 1.1 mrg signop sgn, bool *overflow)
1521 1.1 mrg {
1522 1.1 mrg unsigned HOST_WIDE_INT o0 = 0;
1523 1.1 mrg unsigned HOST_WIDE_INT o1 = 0;
1524 1.1 mrg unsigned HOST_WIDE_INT x = 0;
1525 1.1 mrg /* We implement subtraction as an in place negate and add. Negation
1526 1.1 mrg is just inversion and add 1, so we can do the add of 1 by just
1527 1.1 mrg starting the borrow in of the first element at 1. */
1528 1.1 mrg unsigned HOST_WIDE_INT borrow = 0;
1529 1.1 mrg unsigned HOST_WIDE_INT old_borrow = 0;
1530 1.1 mrg
1531 1.1 mrg unsigned HOST_WIDE_INT mask0, mask1;
1532 1.1 mrg unsigned int i;
1533 1.1 mrg
1534 1.1 mrg unsigned int len = MAX (op0len, op1len);
1535 1.1 mrg mask0 = -top_bit_of (op0, op0len, prec);
1536 1.1 mrg mask1 = -top_bit_of (op1, op1len, prec);
1537 1.1 mrg
1538 1.1 mrg /* Subtract all of the explicitly defined elements. */
1539 1.1 mrg for (i = 0; i < len; i++)
1540 1.1 mrg {
1541 1.1 mrg o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1542 1.1 mrg o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1543 1.1 mrg x = o0 - o1 - borrow;
1544 1.1 mrg val[i] = x;
1545 1.1 mrg old_borrow = borrow;
1546 1.1 mrg borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1547 1.1 mrg }
1548 1.1 mrg
1549 1.1 mrg if (len * HOST_BITS_PER_WIDE_INT < prec)
1550 1.1 mrg {
1551 1.1 mrg val[len] = mask0 - mask1 - borrow;
1552 1.1 mrg len++;
1553 1.1 mrg if (overflow)
1554 1.1 mrg *overflow = false;
1555 1.1 mrg }
1556 1.1 mrg else if (overflow)
1557 1.1 mrg {
1558 1.1 mrg unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1559 1.1 mrg if (sgn == SIGNED)
1560 1.1 mrg {
1561 1.1 mrg unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1562 1.1 mrg *overflow = (HOST_WIDE_INT) (x << shift) < 0;
1563 1.1 mrg }
1564 1.1 mrg else
1565 1.1 mrg {
1566 1.1 mrg /* Put the MSBs of X and O0 in the top of the HWI. */
1567 1.1 mrg x <<= shift;
1568 1.1 mrg o0 <<= shift;
1569 1.1 mrg if (old_borrow)
1570 1.1 mrg *overflow = (x >= o0);
1571 1.1 mrg else
1572 1.1 mrg *overflow = (x > o0);
1573 1.1 mrg }
1574 1.1 mrg }
1575 1.1 mrg
1576 1.1 mrg return canonize (val, len, prec);
1577 1.1 mrg }
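
/* Illustrative worked example: subtracting 7 from 5 with PREC == 64 and
   SGN == UNSIGNED.  The single loop iteration computes X = 5 - 7 - 0,
   which wraps to 0xfffffffffffffffe, and sets BORROW to 1.  The result
   fills the whole precision, so the overflow check runs with
   OLD_BORROW == 0 and tests X > O0; that holds, so *OVERFLOW is set,
   as expected for an unsigned 5 - 7.  */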
1578 1.1 mrg
1579 1.1 mrg
1580 1.1 mrg /*
1581 1.1 mrg * Division and Mod
1582 1.1 mrg */
1583 1.1 mrg
1584 1.1 mrg /* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The
1585 1.1 mrg algorithm is a small modification of the algorithm in Hacker's
1586 1.1 mrg Delight by Warren, which itself is a small modification of Knuth's
1587 1.1 mrg algorithm. M is the number of significant elements of B_DIVIDEND,
1588 1.1 mrg but at least one extra element of B_DIVIDEND must be allocated;
1589 1.1 mrg N is the number of elements of B_DIVISOR. */
1590 1.1 mrg static void
1591 1.1 mrg divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1592 1.1 mrg unsigned HOST_HALF_WIDE_INT *b_remainder,
1593 1.1 mrg unsigned HOST_HALF_WIDE_INT *b_dividend,
1594 1.1 mrg unsigned HOST_HALF_WIDE_INT *b_divisor,
1595 1.1 mrg int m, int n)
1596 1.1 mrg {
1597 1.1 mrg /* The "digits" are a HOST_HALF_WIDE_INT, which is half the size of a
1598 1.1 mrg HOST_WIDE_INT, stored in the lower bits of each word. This
1599 1.1 mrg algorithm should work properly on both 32 and 64 bit
1600 1.1 mrg machines. */
1601 1.1 mrg unsigned HOST_WIDE_INT b
1602 1.1 mrg = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
1603 1.1 mrg unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1604 1.1 mrg unsigned HOST_WIDE_INT rhat; /* A remainder. */
1605 1.1 mrg unsigned HOST_WIDE_INT p; /* Product of two digits. */
1606 1.1 mrg HOST_WIDE_INT t, k;
1607 1.1 mrg int i, j, s;
1608 1.1 mrg
1609 1.1 mrg /* Single digit divisor. */
1610 1.1 mrg if (n == 1)
1611 1.1 mrg {
1612 1.1 mrg k = 0;
1613 1.1 mrg for (j = m - 1; j >= 0; j--)
1614 1.1 mrg {
1615 1.1 mrg b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1616 1.1 mrg k = ((k * b + b_dividend[j])
1617 1.1 mrg - ((unsigned HOST_WIDE_INT)b_quotient[j]
1618 1.1 mrg * (unsigned HOST_WIDE_INT)b_divisor[0]));
1619 1.1 mrg }
1620 1.1 mrg b_remainder[0] = k;
1621 1.1 mrg return;
1622 1.1 mrg }
1623 1.1 mrg
1624 1.1 mrg s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1625 1.1 mrg
1626 1.1 mrg if (s)
1627 1.1 mrg {
1628 1.1 mrg /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1629 1.1 mrg algorithm, we can overwrite b_dividend and b_divisor, so we do
1630 1.1 mrg that. */
1631 1.1 mrg for (i = n - 1; i > 0; i--)
1632 1.1 mrg b_divisor[i] = (b_divisor[i] << s)
1633 1.1 mrg | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1634 1.1 mrg b_divisor[0] = b_divisor[0] << s;
1635 1.1 mrg
1636 1.1 mrg b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1637 1.1 mrg for (i = m - 1; i > 0; i--)
1638 1.1 mrg b_dividend[i] = (b_dividend[i] << s)
1639 1.1 mrg | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1640 1.1 mrg b_dividend[0] = b_dividend[0] << s;
1641 1.1 mrg }
1642 1.1 mrg
1643 1.1 mrg /* Main loop. */
1644 1.1 mrg for (j = m - n; j >= 0; j--)
1645 1.1 mrg {
1646 1.1 mrg qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1647 1.1 mrg rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1648 1.1 mrg again:
1649 1.1 mrg if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1650 1.1 mrg {
1651 1.1 mrg qhat -= 1;
1652 1.1 mrg rhat += b_divisor[n-1];
1653 1.1 mrg if (rhat < b)
1654 1.1 mrg goto again;
1655 1.1 mrg }
1656 1.1 mrg
1657 1.1 mrg /* Multiply and subtract. */
1658 1.1 mrg k = 0;
1659 1.1 mrg for (i = 0; i < n; i++)
1660 1.1 mrg {
1661 1.1 mrg p = qhat * b_divisor[i];
1662 1.1 mrg t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1663 1.1 mrg b_dividend[i + j] = t;
1664 1.1 mrg k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1665 1.1 mrg - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1666 1.1 mrg }
1667 1.1 mrg t = b_dividend[j+n] - k;
1668 1.1 mrg b_dividend[j+n] = t;
1669 1.1 mrg
1670 1.1 mrg b_quotient[j] = qhat;
1671 1.1 mrg if (t < 0)
1672 1.1 mrg {
1673 1.1 mrg b_quotient[j] -= 1;
1674 1.1 mrg k = 0;
1675 1.1 mrg for (i = 0; i < n; i++)
1676 1.1 mrg {
1677 1.1 mrg t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1678 1.1 mrg b_dividend[i+j] = t;
1679 1.1 mrg k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1680 1.1 mrg }
1681 1.1 mrg b_dividend[j+n] += k;
1682 1.1 mrg }
1683 1.1 mrg }
1684 1.1 mrg if (s)
1685 1.1 mrg for (i = 0; i < n; i++)
1686 1.1 mrg b_remainder[i] = (b_dividend[i] >> s)
1687 1.1 mrg | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1688 1.1 mrg else
1689 1.1 mrg for (i = 0; i < n; i++)
1690 1.1 mrg b_remainder[i] = b_dividend[i];
1691 1.1 mrg }
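
/* Decimal analogy (illustrative only): dividing 3142 by 53 in base 10,
   the quotient digit for the tens position is estimated from the two
   leading dividend digits, 31 / 5 = 6; the "again" loop above then
   lowers the estimate to the correct digit 5.  After normalization the
   estimate is never more than two too large, exactly as in Knuth's
   Algorithm D.  Here the digits are base 2^HOST_BITS_PER_HALF_WIDE_INT
   rather than base 10.  */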
1692 1.1 mrg
1693 1.1 mrg
1694 1.1 mrg /* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1695 1.1 mrg the result. If QUOTIENT is nonnull, store the value of the quotient
1696 1.1 mrg there and return the number of blocks in it. The return value is
1697 1.1 mrg not defined otherwise. If REMAINDER is nonnull, store the value
1698 1.1 mrg of the remainder there and store the number of blocks in
1699 1.1 mrg *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1700 1.1 mrg the division overflowed. */
1701 1.1 mrg unsigned int
1702 1.1 mrg wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1703 1.1 mrg HOST_WIDE_INT *remainder,
1704 1.1 mrg const HOST_WIDE_INT *dividend_val,
1705 1.1 mrg unsigned int dividend_len, unsigned int dividend_prec,
1706 1.1 mrg const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1707 1.1 mrg unsigned int divisor_prec, signop sgn,
1708 1.1 mrg bool *oflow)
1709 1.1 mrg {
1710 1.1 mrg unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1711 1.1 mrg unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1712 1.1 mrg unsigned HOST_HALF_WIDE_INT
1713 1.1 mrg b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1714 1.1 mrg unsigned HOST_HALF_WIDE_INT
1715 1.1 mrg b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1716 1.1 mrg unsigned HOST_HALF_WIDE_INT
1717 1.1 mrg b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1718 1.1 mrg unsigned HOST_HALF_WIDE_INT
1719 1.1 mrg b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
1720 1.1 mrg unsigned int m, n;
1721 1.1 mrg bool dividend_neg = false;
1722 1.1 mrg bool divisor_neg = false;
1723 1.1 mrg bool overflow = false;
1724 1.1 mrg wide_int neg_dividend, neg_divisor;
1725 1.1 mrg
1726 1.1 mrg wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1727 1.1 mrg dividend_prec);
1728 1.1 mrg wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1729 1.1 mrg divisor_prec);
1730 1.1 mrg if (divisor == 0)
1731 1.1 mrg overflow = true;
1732 1.1 mrg
1733 1.1 mrg /* The smallest signed number / -1 causes overflow. The dividend_len
1734 1.1 mrg check is for speed rather than correctness. */
1735 1.1 mrg if (sgn == SIGNED
1736 1.1 mrg && dividend_len == BLOCKS_NEEDED (dividend_prec)
1737 1.1 mrg && divisor == -1
1738 1.1 mrg && wi::only_sign_bit_p (dividend))
1739 1.1 mrg overflow = true;
1740 1.1 mrg
1741 1.1 mrg /* Handle the overflow cases. Viewed as unsigned value, the quotient of
1742 1.1 mrg (signed min / -1) has the same representation as the original dividend.
1743 1.1 mrg We have traditionally made division by zero act as division by one,
1744 1.1 mrg so there too we use the original dividend. */
1745 1.1 mrg if (overflow)
1746 1.1 mrg {
1747 1.1 mrg if (remainder)
1748 1.1 mrg {
1749 1.1 mrg *remainder_len = 1;
1750 1.1 mrg remainder[0] = 0;
1751 1.1 mrg }
1752 1.1 mrg if (oflow != 0)
1753 1.1 mrg *oflow = true;
1754 1.1 mrg if (quotient)
1755 1.1 mrg for (unsigned int i = 0; i < dividend_len; ++i)
1756 1.1 mrg quotient[i] = dividend_val[i];
1757 1.1 mrg return dividend_len;
1758 1.1 mrg }
1759 1.1 mrg
1760 1.1 mrg if (oflow)
1761 1.1 mrg *oflow = false;
1762 1.1 mrg
1763 1.1 mrg /* Do it on the host if you can. */
1764 1.1 mrg if (sgn == SIGNED
1765 1.1 mrg && wi::fits_shwi_p (dividend)
1766 1.1 mrg && wi::fits_shwi_p (divisor))
1767 1.1 mrg {
1768 1.1 mrg HOST_WIDE_INT o0 = dividend.to_shwi ();
1769 1.1 mrg HOST_WIDE_INT o1 = divisor.to_shwi ();
1770 1.1 mrg
1771 1.1 mrg if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1772 1.1 mrg {
1773 1.1 mrg gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1774 1.1 mrg if (quotient)
1775 1.1 mrg {
1776 1.1 mrg quotient[0] = HOST_WIDE_INT_MIN;
1777 1.1 mrg quotient[1] = 0;
1778 1.1 mrg }
1779 1.1 mrg if (remainder)
1780 1.1 mrg {
1781 1.1 mrg remainder[0] = 0;
1782 1.1 mrg *remainder_len = 1;
1783 1.1 mrg }
1784 1.1 mrg return 2;
1785 1.1 mrg }
1786 1.1 mrg else
1787 1.1 mrg {
1788 1.1 mrg if (quotient)
1789 1.1 mrg quotient[0] = o0 / o1;
1790 1.1 mrg if (remainder)
1791 1.1 mrg {
1792 1.1 mrg remainder[0] = o0 % o1;
1793 1.1 mrg *remainder_len = 1;
1794 1.1 mrg }
1795 1.1 mrg return 1;
1796 1.1 mrg }
1797 1.1 mrg }
1798 1.1 mrg
1799 1.1 mrg if (sgn == UNSIGNED
1800 1.1 mrg && wi::fits_uhwi_p (dividend)
1801 1.1 mrg && wi::fits_uhwi_p (divisor))
1802 1.1 mrg {
1803 1.1 mrg unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1804 1.1 mrg unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1805 1.1 mrg unsigned int quotient_len = 1;
1806 1.1 mrg
1807 1.1 mrg if (quotient)
1808 1.1 mrg {
1809 1.1 mrg quotient[0] = o0 / o1;
1810 1.1.1.1.2.1 pgoyette quotient_len = canonize_uhwi (quotient, dividend_prec);
1811 1.1 mrg }
1812 1.1 mrg if (remainder)
1813 1.1 mrg {
1814 1.1 mrg remainder[0] = o0 % o1;
1815 1.1.1.1.2.1 pgoyette *remainder_len = canonize_uhwi (remainder, dividend_prec);
1816 1.1 mrg }
1817 1.1 mrg return quotient_len;
1818 1.1 mrg }
1819 1.1 mrg
1820 1.1 mrg /* Make the divisor and dividend positive and remember what we
1821 1.1 mrg did. */
1822 1.1 mrg if (sgn == SIGNED)
1823 1.1 mrg {
1824 1.1 mrg if (wi::neg_p (dividend))
1825 1.1 mrg {
1826 1.1 mrg neg_dividend = -dividend;
1827 1.1 mrg dividend = neg_dividend;
1828 1.1 mrg dividend_neg = true;
1829 1.1 mrg }
1830 1.1 mrg if (wi::neg_p (divisor))
1831 1.1 mrg {
1832 1.1 mrg neg_divisor = -divisor;
1833 1.1 mrg divisor = neg_divisor;
1834 1.1 mrg divisor_neg = true;
1835 1.1 mrg }
1836 1.1 mrg }
1837 1.1 mrg
1838 1.1 mrg wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
1839 1.1 mrg dividend_blocks_needed, dividend_prec, sgn);
1840 1.1 mrg wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
1841 1.1 mrg divisor_blocks_needed, divisor_prec, sgn);
1842 1.1 mrg
1843 1.1 mrg m = dividend_blocks_needed;
1844 1.1 mrg b_dividend[m] = 0;
1845 1.1 mrg while (m > 1 && b_dividend[m - 1] == 0)
1846 1.1 mrg m--;
1847 1.1 mrg
1848 1.1 mrg n = divisor_blocks_needed;
1849 1.1 mrg while (n > 1 && b_divisor[n - 1] == 0)
1850 1.1 mrg n--;
1851 1.1 mrg
1852 1.1 mrg memset (b_quotient, 0, sizeof (b_quotient));
1853 1.1 mrg
1854 1.1 mrg divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
1855 1.1 mrg
1856 1.1 mrg unsigned int quotient_len = 0;
1857 1.1 mrg if (quotient)
1858 1.1 mrg {
1859 1.1 mrg quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
1860 1.1 mrg /* The quotient is negative if exactly one of the divisor or dividend
1861 1.1 mrg is negative. */
1862 1.1 mrg if (dividend_neg != divisor_neg)
1863 1.1 mrg quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
1864 1.1 mrg quotient_len, dividend_prec,
1865 1.1 mrg UNSIGNED, 0);
1866 1.1 mrg }
1867 1.1 mrg
1868 1.1 mrg if (remainder)
1869 1.1 mrg {
1870 1.1 mrg *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
1871 1.1 mrg /* The remainder is always the same sign as the dividend. */
1872 1.1 mrg if (dividend_neg)
1873 1.1 mrg *remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
1874 1.1 mrg *remainder_len, dividend_prec,
1875 1.1 mrg UNSIGNED, 0);
1876 1.1 mrg }
1877 1.1 mrg
1878 1.1 mrg return quotient_len;
1879 1.1 mrg }
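
/* A minimal usage sketch (illustrative only; the arrays and variable
   names below are hypothetical):

     HOST_WIDE_INT q[WIDE_INT_MAX_ELTS], r[WIDE_INT_MAX_ELTS];
     unsigned int rlen;
     HOST_WIDE_INT num[1] = { -7 }, den[1] = { 2 };
     unsigned int qlen = wi::divmod_internal (q, &rlen, r, num, 1, 32,
                                              den, 1, 32, SIGNED, NULL);

   Division truncates towards zero, so this takes the single-HWI path
   and yields qlen == 1, q[0] == -3 and r[0] == -1; the remainder always
   has the same sign as the dividend.  */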
1880 1.1 mrg
1881 1.1 mrg /*
1882 1.1 mrg * Shifting, rotating and extraction.
1883 1.1 mrg */
1884 1.1 mrg
1885 1.1 mrg /* Left shift XVAL by SHIFT and store the result in VAL. Return the
1886 1.1 mrg number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
1887 1.1 mrg unsigned int
1888 1.1 mrg wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1889 1.1 mrg unsigned int xlen, unsigned int precision,
1890 1.1 mrg unsigned int shift)
1891 1.1 mrg {
1892 1.1 mrg /* Split the shift into a whole-block shift and a subblock shift. */
1893 1.1 mrg unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1894 1.1 mrg unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1895 1.1 mrg
1896 1.1 mrg /* The whole-block shift fills with zeros. */
1897 1.1 mrg unsigned int len = BLOCKS_NEEDED (precision);
1898 1.1 mrg for (unsigned int i = 0; i < skip; ++i)
1899 1.1 mrg val[i] = 0;
1900 1.1 mrg
1901 1.1 mrg /* It's easier to handle the simple block case specially. */
1902 1.1 mrg if (small_shift == 0)
1903 1.1 mrg for (unsigned int i = skip; i < len; ++i)
1904 1.1 mrg val[i] = safe_uhwi (xval, xlen, i - skip);
1905 1.1 mrg else
1906 1.1 mrg {
1907 1.1 mrg /* The first unfilled output block is a left shift of the first
1908 1.1 mrg block in XVAL. The other output blocks contain bits from two
1909 1.1 mrg consecutive input blocks. */
1910 1.1 mrg unsigned HOST_WIDE_INT carry = 0;
1911 1.1 mrg for (unsigned int i = skip; i < len; ++i)
1912 1.1 mrg {
1913 1.1 mrg unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
1914 1.1 mrg val[i] = (x << small_shift) | carry;
1915 1.1 mrg carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
1916 1.1 mrg }
1917 1.1 mrg }
1918 1.1 mrg return canonize (val, len, precision);
1919 1.1 mrg }
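
/* Illustrative example: with 64-bit blocks and SHIFT == 70, SKIP is 1
   and SMALL_SHIFT is 6, so block 0 of the result is zero, block 1 is
   xval[0] << 6, and each block above that combines bits of two adjacent
   input blocks: val[i] = (xval[i - 1] << 6) | (xval[i - 2] >> 58).  */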
1920 1.1 mrg
1921 1.1 mrg /* Right shift XVAL by SHIFT and store the result in VAL. Return the
1922 1.1 mrg number of blocks in VAL. The input has XPRECISION bits and the
1923 1.1 mrg output has XPRECISION - SHIFT bits. */
1924 1.1 mrg static unsigned int
1925 1.1 mrg rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1926 1.1 mrg unsigned int xlen, unsigned int xprecision,
1927 1.1 mrg unsigned int shift)
1928 1.1 mrg {
1929 1.1 mrg /* Split the shift into a whole-block shift and a subblock shift. */
1930 1.1 mrg unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
1931 1.1 mrg unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
1932 1.1 mrg
1933 1.1 mrg /* Work out how many blocks are needed to store the significant bits
1934 1.1 mrg (excluding the upper zeros or signs). */
1935 1.1 mrg unsigned int len = BLOCKS_NEEDED (xprecision - shift);
1936 1.1 mrg
1937 1.1 mrg /* It's easier to handle the simple block case specially. */
1938 1.1 mrg if (small_shift == 0)
1939 1.1 mrg for (unsigned int i = 0; i < len; ++i)
1940 1.1 mrg val[i] = safe_uhwi (xval, xlen, i + skip);
1941 1.1 mrg else
1942 1.1 mrg {
1943 1.1 mrg /* Each output block but the last is a combination of two input blocks.
1944 1.1 mrg The last block is a right shift of the last block in XVAL. */
1945 1.1 mrg unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
1946 1.1 mrg for (unsigned int i = 0; i < len; ++i)
1947 1.1 mrg {
1948 1.1 mrg val[i] = curr >> small_shift;
1949 1.1 mrg curr = safe_uhwi (xval, xlen, i + skip + 1);
1950 1.1 mrg val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
1951 1.1 mrg }
1952 1.1 mrg }
1953 1.1 mrg return len;
1954 1.1 mrg }
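
/* Illustrative example: with 64-bit blocks and SHIFT == 70, SKIP is 1
   and SMALL_SHIFT is 6, so each output block combines bits of two
   adjacent input blocks:
   val[i] = (xval[i + 1] >> 6) | (xval[i + 2] << 58),
   where blocks beyond XLEN are supplied by safe_uhwi as sign copies.  */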
1955 1.1 mrg
1956 1.1 mrg /* Logically right shift XVAL by SHIFT and store the result in VAL.
1957 1.1 mrg Return the number of blocks in VAL. XVAL has XPRECISION bits and
1958 1.1 mrg VAL has PRECISION bits. */
1959 1.1 mrg unsigned int
1960 1.1 mrg wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1961 1.1 mrg unsigned int xlen, unsigned int xprecision,
1962 1.1 mrg unsigned int precision, unsigned int shift)
1963 1.1 mrg {
1964 1.1 mrg unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1965 1.1 mrg
1966 1.1 mrg /* The value we just created has precision XPRECISION - SHIFT.
1967 1.1 mrg Zero-extend it to wider precisions. */
1968 1.1 mrg if (precision > xprecision - shift)
1969 1.1 mrg {
1970 1.1 mrg unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1971 1.1 mrg if (small_prec)
1972 1.1 mrg val[len - 1] = zext_hwi (val[len - 1], small_prec);
1973 1.1 mrg else if (val[len - 1] < 0)
1974 1.1 mrg {
1975 1.1 mrg /* Add a new block with a zero. */
1976 1.1 mrg val[len++] = 0;
1977 1.1 mrg return len;
1978 1.1 mrg }
1979 1.1 mrg }
1980 1.1 mrg return canonize (val, len, precision);
1981 1.1 mrg }
1982 1.1 mrg
1983 1.1 mrg /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
1984 1.1 mrg Return the number of blocks in VAL. XVAL has XPRECISION bits and
1985 1.1 mrg VAL has PRECISION bits. */
1986 1.1 mrg unsigned int
1987 1.1 mrg wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
1988 1.1 mrg unsigned int xlen, unsigned int xprecision,
1989 1.1 mrg unsigned int precision, unsigned int shift)
1990 1.1 mrg {
1991 1.1 mrg unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
1992 1.1 mrg
1993 1.1 mrg /* The value we just created has precision XPRECISION - SHIFT.
1994 1.1 mrg Sign-extend it to wider types. */
1995 1.1 mrg if (precision > xprecision - shift)
1996 1.1 mrg {
1997 1.1 mrg unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
1998 1.1 mrg if (small_prec)
1999 1.1 mrg val[len - 1] = sext_hwi (val[len - 1], small_prec);
2000 1.1 mrg }
2001 1.1 mrg return canonize (val, len, precision);
2002 1.1 mrg }
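
/* Illustrative example of the difference from lrshift_large: shifting
   the 64-bit value -1 right by 4 with PRECISION == 64 gives
   0x0fffffffffffffff from lrshift_large (zeros fill the top four bits)
   but -1 from arshift_large (the sign is propagated into them).  */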
2003 1.1 mrg
2004 1.1 mrg /* Return the number of leading (upper) zeros in X. */
2005 1.1 mrg int
2006 1.1 mrg wi::clz (const wide_int_ref &x)
2007 1.1 mrg {
2008 1.1 mrg /* Calculate how many bits there are above the highest represented block. */
2009 1.1 mrg int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2010 1.1 mrg
2011 1.1 mrg unsigned HOST_WIDE_INT high = x.uhigh ();
2012 1.1 mrg if (count < 0)
2013 1.1 mrg /* The upper -COUNT bits of HIGH are not part of the value.
2014 1.1 mrg Clear them out. */
2015 1.1 mrg high = (high << -count) >> -count;
2016 1.1 mrg else if (x.sign_mask () < 0)
2017 1.1 mrg /* The upper bit is set, so there are no leading zeros. */
2018 1.1 mrg return 0;
2019 1.1 mrg
2020 1.1 mrg /* We don't need to look below HIGH. Either HIGH is nonzero,
2021 1.1 mrg or the top bit of the block below is nonzero; clz_hwi is
2022 1.1 mrg HOST_BITS_PER_WIDE_INT in the latter case. */
2023 1.1 mrg return count + clz_hwi (high);
2024 1.1 mrg }
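
/* Illustrative worked example: for a 70-bit value represented by the
   single block { 1 }, COUNT is 70 - 64 = 6 (the implicit zero bits
   above the block) and clz_hwi (1) is 63, so the result is 69 leading
   zeros.  */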
2025 1.1 mrg
2026 1.1 mrg /* Return the number of redundant sign bits in X. (That is, the number
2027 1.1 mrg of bits immediately below the sign bit that have the same value as
2028 1.1 mrg the sign bit.) */
2029 1.1 mrg int
2030 1.1 mrg wi::clrsb (const wide_int_ref &x)
2031 1.1 mrg {
2032 1.1 mrg /* Calculate how many bits there are above the highest represented block. */
2033 1.1 mrg int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
2034 1.1 mrg
2035 1.1 mrg unsigned HOST_WIDE_INT high = x.uhigh ();
2036 1.1 mrg unsigned HOST_WIDE_INT mask = -1;
2037 1.1 mrg if (count < 0)
2038 1.1 mrg {
2039 1.1 mrg /* The upper -COUNT bits of HIGH are not part of the value.
2040 1.1 mrg Clear them from both MASK and HIGH. */
2041 1.1 mrg mask >>= -count;
2042 1.1 mrg high &= mask;
2043 1.1 mrg }
2044 1.1 mrg
2045 1.1 mrg /* If the top bit is 1, count the number of leading 1s. If the top
2046 1.1 mrg bit is zero, count the number of leading zeros. */
2047 1.1 mrg if (high > mask / 2)
2048 1.1 mrg high ^= mask;
2049 1.1 mrg
2050 1.1 mrg /* There are no sign bits below the top block, so we don't need to look
2051 1.1 mrg beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
2052 1.1 mrg HIGH is 0. */
2053 1.1 mrg return count + clz_hwi (high) - 1;
2054 1.1 mrg }
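
/* Illustrative worked example: for the 64-bit value -4
   (0xfffffffffffffffc), HIGH is above MASK / 2, so it is flipped to 3;
   clz_hwi (3) is 62 and the result is 62 - 1 = 61, i.e. bits 62 down
   to 2 merely repeat the sign bit.  */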
2055 1.1 mrg
2056 1.1 mrg /* Return the number of trailing (lower) zeros in X. */
2057 1.1 mrg int
2058 1.1 mrg wi::ctz (const wide_int_ref &x)
2059 1.1 mrg {
2060 1.1 mrg if (x.len == 1 && x.ulow () == 0)
2061 1.1 mrg return x.precision;
2062 1.1 mrg
2063 1.1 mrg /* Having dealt with the zero case, there must be a block with a
2064 1.1 mrg nonzero bit. We don't care about the bits above the first 1. */
2065 1.1 mrg unsigned int i = 0;
2066 1.1 mrg while (x.val[i] == 0)
2067 1.1 mrg ++i;
2068 1.1 mrg return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2069 1.1 mrg }
2070 1.1 mrg
2071 1.1 mrg /* If X is an exact power of 2, return the base-2 logarithm, otherwise
2072 1.1 mrg return -1. */
2073 1.1 mrg int
2074 1.1 mrg wi::exact_log2 (const wide_int_ref &x)
2075 1.1 mrg {
2076 1.1 mrg /* Reject cases where there are implicit -1 blocks above HIGH. */
2077 1.1 mrg if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
2078 1.1 mrg return -1;
2079 1.1 mrg
2080 1.1 mrg /* Set CRUX to the index of the entry that should be nonzero.
2081 1.1 mrg If the top block is zero then the next lowest block (if any)
2082 1.1 mrg must have the high bit set. */
2083 1.1 mrg unsigned int crux = x.len - 1;
2084 1.1 mrg if (crux > 0 && x.val[crux] == 0)
2085 1.1 mrg crux -= 1;
2086 1.1 mrg
2087 1.1 mrg /* Check that all lower blocks are zero. */
2088 1.1 mrg for (unsigned int i = 0; i < crux; ++i)
2089 1.1 mrg if (x.val[i] != 0)
2090 1.1 mrg return -1;
2091 1.1 mrg
2092 1.1 mrg /* Get a zero-extended form of block CRUX. */
2093 1.1 mrg unsigned HOST_WIDE_INT hwi = x.val[crux];
2094 1.1 mrg if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
2095 1.1 mrg hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
2096 1.1 mrg
2097 1.1 mrg /* Now it's down to whether HWI is a power of 2. */
2098 1.1 mrg int res = ::exact_log2 (hwi);
2099 1.1 mrg if (res >= 0)
2100 1.1 mrg res += crux * HOST_BITS_PER_WIDE_INT;
2101 1.1 mrg return res;
2102 1.1 mrg }
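
/* Illustrative worked example: for a 128-bit value with blocks
   { 0, 1 }, i.e. 2^64, CRUX is 1, the lower block is zero, and
   ::exact_log2 (1) is 0, so the result is 0 + 64 = 64.  A value such
   as 2^64 + 1 fails the lower-block check and returns -1.  */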
2103 1.1 mrg
2104 1.1 mrg /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2105 1.1 mrg int
2106 1.1 mrg wi::floor_log2 (const wide_int_ref &x)
2107 1.1 mrg {
2108 1.1 mrg return x.precision - 1 - clz (x);
2109 1.1 mrg }
2110 1.1 mrg
2111 1.1 mrg /* Return the index of the first (lowest) set bit in X, counting from 1.
2112 1.1 mrg Return 0 if X is 0. */
2113 1.1 mrg int
2114 1.1 mrg wi::ffs (const wide_int_ref &x)
2115 1.1 mrg {
2116 1.1 mrg return eq_p (x, 0) ? 0 : ctz (x) + 1;
2117 1.1 mrg }
2118 1.1 mrg
2119 1.1 mrg /* Return true if sign-extending X to have precision PRECISION would give
2120 1.1 mrg the minimum signed value at that precision. */
2121 1.1 mrg bool
2122 1.1 mrg wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2123 1.1 mrg {
2124 1.1 mrg return ctz (x) + 1 == int (precision);
2125 1.1 mrg }
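
/* Illustrative example: a value whose lowest set bit is bit
   PRECISION - 1, such as 0x80 for PRECISION == 8, satisfies the test
   (ctz is 7, and 7 + 1 == 8); zero fails because ctz returns the full
   precision of X for it.  */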
2126 1.1 mrg
2127 1.1 mrg /* Return true if X represents the minimum signed value. */
2128 1.1 mrg bool
2129 1.1 mrg wi::only_sign_bit_p (const wide_int_ref &x)
2130 1.1 mrg {
2131 1.1 mrg return only_sign_bit_p (x, x.precision);
2132 1.1 mrg }
2133 1.1 mrg
2134 1.1 mrg /*
2135 1.1 mrg * Private utilities.
2136 1.1 mrg */
2137 1.1 mrg
2138 1.1 mrg void gt_ggc_mx (widest_int *) { }
2139 1.1 mrg void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
2140 1.1 mrg void gt_pch_nx (widest_int *) { }
2141 1.1 mrg
2142 1.1 mrg template void wide_int::dump () const;
2143 1.1 mrg template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2144 1.1 mrg template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2145 1.1 mrg template void offset_int::dump () const;
2146 1.1 mrg template void widest_int::dump () const;
2147