Home | History | Annotate | Line # | Download | only in gcc
wide-int.cc revision 1.1.1.8
      1      1.1  mrg /* Operations with very long integers.
      2  1.1.1.8  mrg    Copyright (C) 2012-2022 Free Software Foundation, Inc.
      3      1.1  mrg    Contributed by Kenneth Zadeck <zadeck (at) naturalbridge.com>
      4      1.1  mrg 
      5      1.1  mrg This file is part of GCC.
      6      1.1  mrg 
      7      1.1  mrg GCC is free software; you can redistribute it and/or modify it
      8      1.1  mrg under the terms of the GNU General Public License as published by the
      9      1.1  mrg Free Software Foundation; either version 3, or (at your option) any
     10      1.1  mrg later version.
     11      1.1  mrg 
     12      1.1  mrg GCC is distributed in the hope that it will be useful, but WITHOUT
     13      1.1  mrg ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
     14      1.1  mrg FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
     15      1.1  mrg for more details.
     16      1.1  mrg 
     17      1.1  mrg You should have received a copy of the GNU General Public License
     18      1.1  mrg along with GCC; see the file COPYING3.  If not see
     19      1.1  mrg <http://www.gnu.org/licenses/>.  */
     20      1.1  mrg 
     21      1.1  mrg #include "config.h"
     22      1.1  mrg #include "system.h"
     23      1.1  mrg #include "coretypes.h"
     24      1.1  mrg #include "tm.h"
     25      1.1  mrg #include "tree.h"
     26  1.1.1.3  mrg #include "selftest.h"
     27      1.1  mrg 
     28      1.1  mrg 
     29      1.1  mrg #define HOST_BITS_PER_HALF_WIDE_INT 32
     30      1.1  mrg #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
     31      1.1  mrg # define HOST_HALF_WIDE_INT long
     32      1.1  mrg #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
     33      1.1  mrg # define HOST_HALF_WIDE_INT int
     34      1.1  mrg #else
     35      1.1  mrg #error Please add support for HOST_HALF_WIDE_INT
     36      1.1  mrg #endif
     37      1.1  mrg 
     38      1.1  mrg #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
     39      1.1  mrg /* Do not include longlong.h when compiler is clang-based. See PR61146.  */
     40      1.1  mrg #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
     41      1.1  mrg typedef unsigned HOST_HALF_WIDE_INT UHWtype;
     42      1.1  mrg typedef unsigned HOST_WIDE_INT UWtype;
     43      1.1  mrg typedef unsigned int UQItype __attribute__ ((mode (QI)));
     44      1.1  mrg typedef unsigned int USItype __attribute__ ((mode (SI)));
     45      1.1  mrg typedef unsigned int UDItype __attribute__ ((mode (DI)));
     46      1.1  mrg #if W_TYPE_SIZE == 32
     47      1.1  mrg typedef unsigned int UDWtype __attribute__ ((mode (DI)));
     48      1.1  mrg #else
     49      1.1  mrg typedef unsigned int UDWtype __attribute__ ((mode (TI)));
     50      1.1  mrg #endif
     51      1.1  mrg #include "longlong.h"
     52      1.1  mrg #endif
     53      1.1  mrg 
     54      1.1  mrg static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
     55      1.1  mrg 
     56      1.1  mrg /*
     57      1.1  mrg  * Internal utilities.
     58      1.1  mrg  */
     59      1.1  mrg 
     60      1.1  mrg /* Quantities to deal with values that hold half of a wide int.  Used
     61      1.1  mrg    in multiply and divide.  */
     62  1.1.1.3  mrg #define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
     63      1.1  mrg 
     64      1.1  mrg #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
     65      1.1  mrg #define BLOCKS_NEEDED(PREC) \
     66      1.1  mrg   (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
     67      1.1  mrg #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
     68      1.1  mrg 
     69      1.1  mrg /* Return the value a VAL[I] if I < LEN, otherwise, return 0 or -1
     70      1.1  mrg    based on the top existing bit of VAL. */
     71      1.1  mrg 
     72      1.1  mrg static unsigned HOST_WIDE_INT
     73      1.1  mrg safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
     74      1.1  mrg {
     75  1.1.1.3  mrg   return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0;
     76      1.1  mrg }
     77      1.1  mrg 
/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   Canonical form stores the minimum number of blocks such that the top
   stored block carries the sign: trailing blocks that merely repeat the
   sign extension of the block below them are dropped.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  /* Blocks beyond the precision carry no information.  */
  if (len > blocks_needed)
    len = blocks_needed;

  if (len == 1)
    return len;

  top = val[len - 1];
  /* If only part of the top block is inside the precision, sign-extend
     the stray high bits so the stored form is well defined.  */
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  /* A top block that is neither all zeros nor all ones cannot be
     redundant.  */
  if (top != 0 && top != (HOST_WIDE_INT)-1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
	{
	  if (SIGN_MASK (x) == top)
	    return i + 1;

	  /* We need an extra block because the top bit of block i does
	     not match the extension.  */
	  return i + 2;
	}
    }

  /* The number is 0 or -1.  */
  return 1;
}
    121      1.1  mrg 
    122  1.1.1.2  mrg /* VAL[0] is the unsigned result of an operation.  Canonize it by adding
    123  1.1.1.2  mrg    another 0 block if needed, and return number of blocks needed.  */
    124  1.1.1.2  mrg 
    125  1.1.1.2  mrg static inline unsigned int
    126  1.1.1.2  mrg canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
    127  1.1.1.2  mrg {
    128  1.1.1.2  mrg   if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
    129  1.1.1.2  mrg     {
    130  1.1.1.2  mrg       val[1] = 0;
    131  1.1.1.2  mrg       return 2;
    132  1.1.1.2  mrg     }
    133  1.1.1.2  mrg   return 1;
    134  1.1.1.2  mrg }
    135  1.1.1.2  mrg 
    136      1.1  mrg /*
    137      1.1  mrg  * Conversion routines in and out of wide_int.
    138      1.1  mrg  */
    139      1.1  mrg 
    140      1.1  mrg /* Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
    141      1.1  mrg    result for an integer with precision PRECISION.  Return the length
    142      1.1  mrg    of VAL (after any canonization.  */
    143      1.1  mrg unsigned int
    144      1.1  mrg wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
    145      1.1  mrg 		unsigned int xlen, unsigned int precision, bool need_canon)
    146      1.1  mrg {
    147      1.1  mrg   for (unsigned i = 0; i < xlen; i++)
    148      1.1  mrg     val[i] = xval[i];
    149      1.1  mrg   return need_canon ? canonize (val, xlen, precision) : xlen;
    150      1.1  mrg }
    151      1.1  mrg 
/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourself, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val ();
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  /* For each little-endian bit position BITPOS of the result, locate
     the buffer byte that holds it under the target's word and byte
     order, then OR it into place.  */
  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
	{
	  unsigned int word = byte / UNITS_PER_WORD;

	  /* Account for target word order when picking the source word.  */
	  if (WORDS_BIG_ENDIAN)
	    word = (words - 1) - word;

	  offset = word * UNITS_PER_WORD;

	  /* And for target byte order within that word.  */
	  if (BYTES_BIG_ENDIAN)
	    offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
	  else
	    offset += byte % UNITS_PER_WORD;
	}
      else
	/* Sub-word buffer: only byte order matters.  */
	offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
    204      1.1  mrg 
/* Sets RESULT from X, the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  /* Number of stored bits above the precision; negative when fewer
     blocks are stored than the precision would need.  */
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones complement to avoid -x80..0 edge case that -
	 won't work on.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
	t[i] = ~v[i];
      /* Clear the complemented stray bits above the precision.  */
      if (excess > 0)
	t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      /* Nonnegative, but the top block has bits above the precision:
	 copy and zero those bits before importing.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
	t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else if (excess < 0 && wi::neg_p (x))
    {
      /* The compressed form implies sign-extension blocks up to the
	 precision; since the value reads as negative when sign-extended
	 but is not negative under SGN, materialize those -1 blocks so
	 the magnitude imports correctly.  */
      int extra
	= (-excess + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT;
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len + extra);
      for (int i = 0; i < len; i++)
	t[i] = v[i];
      for (int i = 0; i < extra; i++)
	t[len + i] = -1;
      /* Keep only the bits of the topmost block that lie inside the
	 precision.  */
      excess = (-excess) % HOST_BITS_PER_WIDE_INT;
      if (excess)
	t[len + extra - 1] = (HOST_WIDE_INT_1U << excess) - 1;
      mpz_import (result, len + extra, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}
    250      1.1  mrg 
/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  unsigned int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      /* Saturate X to the type's representable range.  Note this
	 modifies the caller's mpz in place.  */
      if (mpz_cmp (x, min) < 0)
	mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
	mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the absolute value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html  */
  numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
  count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
  HOST_WIDE_INT *val = res.write_val ();
  /* Read the absolute value.

     Write directly to the wide_int storage if possible, otherwise leave
     GMP to allocate the memory for us.  It might be slightly more efficient
     to use mpz_tdiv_r_2exp for the latter case, but the situation is
     pathological and it seems safer to operate on the original mpz value
     in all cases.  */
  void *valres = mpz_export (count <= WIDE_INT_MAX_ELTS ? val : 0,
			     &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  /* mpz_export writes no blocks for a zero value; represent zero
     explicitly with one block.  */
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  count = MIN (count, BLOCKS_NEEDED (prec));
  if (valres != val)
    {
      /* GMP allocated the buffer; copy it in and release it.  */
      memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
      free (valres);
    }
  /* Zero-extend the absolute value to PREC bits.  */
  if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
    val[count++] = 0;
  else
    count = canonize (val, count, prec);
  res.set_len (count);

  /* mpz_export gave us the magnitude only; restore the sign.  */
  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}
    317      1.1  mrg 
    318      1.1  mrg /*
    319      1.1  mrg  * Largest and smallest values in a mode.
    320      1.1  mrg  */
    321      1.1  mrg 
    322      1.1  mrg /* Return the largest SGNed number that is representable in PRECISION bits.
    323      1.1  mrg 
    324      1.1  mrg    TODO: There is still code from the double_int era that trys to
    325      1.1  mrg    make up for the fact that double int's could not represent the
    326      1.1  mrg    min and max values of all types.  This code should be removed
    327      1.1  mrg    because the min and max values can always be represented in
    328      1.1  mrg    wide_ints and int-csts.  */
    329      1.1  mrg wide_int
    330      1.1  mrg wi::max_value (unsigned int precision, signop sgn)
    331      1.1  mrg {
    332      1.1  mrg   gcc_checking_assert (precision != 0);
    333      1.1  mrg   if (sgn == UNSIGNED)
    334      1.1  mrg     /* The unsigned max is just all ones.  */
    335      1.1  mrg     return shwi (-1, precision);
    336      1.1  mrg   else
    337      1.1  mrg     /* The signed max is all ones except the top bit.  This must be
    338      1.1  mrg        explicitly represented.  */
    339      1.1  mrg     return mask (precision - 1, false, precision);
    340      1.1  mrg }
    341      1.1  mrg 
    342      1.1  mrg /* Return the largest SGNed number that is representable in PRECISION bits.  */
    343      1.1  mrg wide_int
    344      1.1  mrg wi::min_value (unsigned int precision, signop sgn)
    345      1.1  mrg {
    346      1.1  mrg   gcc_checking_assert (precision != 0);
    347      1.1  mrg   if (sgn == UNSIGNED)
    348      1.1  mrg     return uhwi (0, precision);
    349      1.1  mrg   else
    350      1.1  mrg     /* The signed min is all zeros except the top bit.  This must be
    351      1.1  mrg        explicitly represented.  */
    352      1.1  mrg     return wi::set_bit_in_zero (precision - 1, precision);
    353      1.1  mrg }
    354      1.1  mrg 
    355      1.1  mrg /*
    356      1.1  mrg  * Public utilities.
    357      1.1  mrg  */
    358      1.1  mrg 
/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  /* Truncation simply drops the excess blocks; canonize below fixes
     up any stray bits in the new top block.  */
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      /* Bits of XPRECISION occupying only part of its top block.  */
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
	{
	  /* Zero-extend: clear the bits above XPRECISION in the top
	     stored block...  */
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	  else if (val[len - 1] < 0)
	    {
	      /* ...or, if the value was stored in compressed
		 (sign-extended) form and reads as negative, first
		 materialize the implied -1 blocks up to XPRECISION,
		 then force the result nonnegative.  */
	      while (len < BLOCKS_NEEDED (xprecision))
		val[len++] = -1;
	      if (small_xprecision)
		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	      else
		val[len++] = 0;
	    }
	}
      else
	{
	  /* Sign-extend within the top block; any blocks above the
	     stored length already read as copies of the sign.  */
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
	}
    }
  len = canonize (val, len, precision);

  return len;
}
    404      1.1  mrg 
    405      1.1  mrg /* This function hides the fact that we cannot rely on the bits beyond
    406      1.1  mrg    the precision.  This issue comes up in the relational comparisions
    407      1.1  mrg    where we do allow comparisons of values of different precisions.  */
    408      1.1  mrg static inline HOST_WIDE_INT
    409      1.1  mrg selt (const HOST_WIDE_INT *a, unsigned int len,
    410      1.1  mrg       unsigned int blocks_needed, unsigned int small_prec,
    411      1.1  mrg       unsigned int index, signop sgn)
    412      1.1  mrg {
    413      1.1  mrg   HOST_WIDE_INT val;
    414      1.1  mrg   if (index < len)
    415      1.1  mrg     val = a[index];
    416      1.1  mrg   else if (index < blocks_needed || sgn == SIGNED)
    417      1.1  mrg     /* Signed or within the precision.  */
    418      1.1  mrg     val = SIGN_MASK (a[len - 1]);
    419      1.1  mrg   else
    420      1.1  mrg     /* Unsigned extension beyond the precision. */
    421      1.1  mrg     val = 0;
    422      1.1  mrg 
    423      1.1  mrg   if (small_prec && index == blocks_needed - 1)
    424      1.1  mrg     return (sgn == SIGNED
    425      1.1  mrg 	    ? sext_hwi (val, small_prec)
    426      1.1  mrg 	    : zext_hwi (val, small_prec));
    427      1.1  mrg   else
    428      1.1  mrg     return val;
    429      1.1  mrg }
    430      1.1  mrg 
    431      1.1  mrg /* Find the highest bit represented in a wide int.  This will in
    432      1.1  mrg    general have the same value as the sign bit.  */
    433      1.1  mrg static inline HOST_WIDE_INT
    434      1.1  mrg top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
    435      1.1  mrg {
    436      1.1  mrg   int excess = len * HOST_BITS_PER_WIDE_INT - prec;
    437      1.1  mrg   unsigned HOST_WIDE_INT val = a[len - 1];
    438      1.1  mrg   if (excess > 0)
    439      1.1  mrg     val <<= excess;
    440      1.1  mrg   return val >> (HOST_BITS_PER_WIDE_INT - 1);
    441      1.1  mrg }
    442      1.1  mrg 
    443      1.1  mrg /*
    444      1.1  mrg  * Comparisons, note that only equality is an operator.  The other
    445      1.1  mrg  * comparisons cannot be operators since they are inherently signed or
    446      1.1  mrg  * unsigned and C++ has no such operators.
    447      1.1  mrg  */
    448      1.1  mrg 
    449      1.1  mrg /* Return true if OP0 == OP1.  */
    450      1.1  mrg bool
    451      1.1  mrg wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
    452      1.1  mrg 		const HOST_WIDE_INT *op1, unsigned int op1len,
    453      1.1  mrg 		unsigned int prec)
    454      1.1  mrg {
    455      1.1  mrg   int l0 = op0len - 1;
    456      1.1  mrg   unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
    457      1.1  mrg 
    458      1.1  mrg   if (op0len != op1len)
    459      1.1  mrg     return false;
    460      1.1  mrg 
    461      1.1  mrg   if (op0len == BLOCKS_NEEDED (prec) && small_prec)
    462      1.1  mrg     {
    463      1.1  mrg       /* It does not matter if we zext or sext here, we just have to
    464      1.1  mrg 	 do both the same way.  */
    465      1.1  mrg       if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
    466      1.1  mrg 	return false;
    467      1.1  mrg       l0--;
    468      1.1  mrg     }
    469      1.1  mrg 
    470      1.1  mrg   while (l0 >= 0)
    471      1.1  mrg     if (op0[l0] != op1[l0])
    472      1.1  mrg       return false;
    473      1.1  mrg     else
    474      1.1  mrg       l0--;
    475      1.1  mrg 
    476      1.1  mrg   return true;
    477      1.1  mrg }
    478      1.1  mrg 
    479      1.1  mrg /* Return true if OP0 < OP1 using signed comparisons.  */
    480      1.1  mrg bool
    481      1.1  mrg wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
    482      1.1  mrg 		 unsigned int precision,
    483      1.1  mrg 		 const HOST_WIDE_INT *op1, unsigned int op1len)
    484      1.1  mrg {
    485      1.1  mrg   HOST_WIDE_INT s0, s1;
    486      1.1  mrg   unsigned HOST_WIDE_INT u0, u1;
    487      1.1  mrg   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
    488      1.1  mrg   unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
    489      1.1  mrg   int l = MAX (op0len - 1, op1len - 1);
    490      1.1  mrg 
    491      1.1  mrg   /* Only the top block is compared as signed.  The rest are unsigned
    492      1.1  mrg      comparisons.  */
    493      1.1  mrg   s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
    494      1.1  mrg   s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
    495      1.1  mrg   if (s0 < s1)
    496      1.1  mrg     return true;
    497      1.1  mrg   if (s0 > s1)
    498      1.1  mrg     return false;
    499      1.1  mrg 
    500      1.1  mrg   l--;
    501      1.1  mrg   while (l >= 0)
    502      1.1  mrg     {
    503      1.1  mrg       u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
    504      1.1  mrg       u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
    505      1.1  mrg 
    506      1.1  mrg       if (u0 < u1)
    507      1.1  mrg 	return true;
    508      1.1  mrg       if (u0 > u1)
    509      1.1  mrg 	return false;
    510      1.1  mrg       l--;
    511      1.1  mrg     }
    512      1.1  mrg 
    513      1.1  mrg   return false;
    514      1.1  mrg }
    515      1.1  mrg 
    516      1.1  mrg /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
    517      1.1  mrg    signed compares.  */
    518      1.1  mrg int
    519      1.1  mrg wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
    520      1.1  mrg 		unsigned int precision,
    521      1.1  mrg 		const HOST_WIDE_INT *op1, unsigned int op1len)
    522      1.1  mrg {
    523      1.1  mrg   HOST_WIDE_INT s0, s1;
    524      1.1  mrg   unsigned HOST_WIDE_INT u0, u1;
    525      1.1  mrg   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
    526      1.1  mrg   unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
    527      1.1  mrg   int l = MAX (op0len - 1, op1len - 1);
    528      1.1  mrg 
    529      1.1  mrg   /* Only the top block is compared as signed.  The rest are unsigned
    530      1.1  mrg      comparisons.  */
    531      1.1  mrg   s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
    532      1.1  mrg   s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
    533      1.1  mrg   if (s0 < s1)
    534      1.1  mrg     return -1;
    535      1.1  mrg   if (s0 > s1)
    536      1.1  mrg     return 1;
    537      1.1  mrg 
    538      1.1  mrg   l--;
    539      1.1  mrg   while (l >= 0)
    540      1.1  mrg     {
    541      1.1  mrg       u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
    542      1.1  mrg       u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
    543      1.1  mrg 
    544      1.1  mrg       if (u0 < u1)
    545      1.1  mrg 	return -1;
    546      1.1  mrg       if (u0 > u1)
    547      1.1  mrg 	return 1;
    548      1.1  mrg       l--;
    549      1.1  mrg     }
    550      1.1  mrg 
    551      1.1  mrg   return 0;
    552      1.1  mrg }
    553      1.1  mrg 
    554      1.1  mrg /* Return true if OP0 < OP1 using unsigned comparisons.  */
    555      1.1  mrg bool
    556      1.1  mrg wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
    557      1.1  mrg 		 unsigned int precision,
    558      1.1  mrg 		 const HOST_WIDE_INT *op1, unsigned int op1len)
    559      1.1  mrg {
    560      1.1  mrg   unsigned HOST_WIDE_INT x0;
    561      1.1  mrg   unsigned HOST_WIDE_INT x1;
    562      1.1  mrg   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
    563      1.1  mrg   unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
    564      1.1  mrg   int l = MAX (op0len - 1, op1len - 1);
    565      1.1  mrg 
    566      1.1  mrg   while (l >= 0)
    567      1.1  mrg     {
    568      1.1  mrg       x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
    569      1.1  mrg       x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
    570      1.1  mrg       if (x0 < x1)
    571      1.1  mrg 	return true;
    572      1.1  mrg       if (x0 > x1)
    573      1.1  mrg 	return false;
    574      1.1  mrg       l--;
    575      1.1  mrg     }
    576      1.1  mrg 
    577      1.1  mrg   return false;
    578      1.1  mrg }
    579      1.1  mrg 
    580      1.1  mrg /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
    581      1.1  mrg    unsigned compares.  */
    582      1.1  mrg int
    583      1.1  mrg wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
    584      1.1  mrg 		unsigned int precision,
    585      1.1  mrg 		const HOST_WIDE_INT *op1, unsigned int op1len)
    586      1.1  mrg {
    587      1.1  mrg   unsigned HOST_WIDE_INT x0;
    588      1.1  mrg   unsigned HOST_WIDE_INT x1;
    589      1.1  mrg   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
    590      1.1  mrg   unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
    591      1.1  mrg   int l = MAX (op0len - 1, op1len - 1);
    592      1.1  mrg 
    593      1.1  mrg   while (l >= 0)
    594      1.1  mrg     {
    595      1.1  mrg       x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
    596      1.1  mrg       x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
    597      1.1  mrg       if (x0 < x1)
    598      1.1  mrg 	return -1;
    599      1.1  mrg       if (x0 > x1)
    600      1.1  mrg 	return 1;
    601      1.1  mrg       l--;
    602      1.1  mrg     }
    603      1.1  mrg 
    604      1.1  mrg   return 0;
    605      1.1  mrg }
    606      1.1  mrg 
    607      1.1  mrg /*
    608      1.1  mrg  * Extension.
    609      1.1  mrg  */
    610      1.1  mrg 
    611      1.1  mrg /* Sign-extend the number represented by XVAL and XLEN into VAL,
    612      1.1  mrg    starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
    613      1.1  mrg    and VAL have PRECISION bits.  */
    614      1.1  mrg unsigned int
    615      1.1  mrg wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
    616      1.1  mrg 		unsigned int xlen, unsigned int precision, unsigned int offset)
    617      1.1  mrg {
    618      1.1  mrg   unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
    619      1.1  mrg   /* Extending beyond the precision is a no-op.  If we have only stored
    620      1.1  mrg      OFFSET bits or fewer, the rest are already signs.  */
    621      1.1  mrg   if (offset >= precision || len >= xlen)
    622      1.1  mrg     {
    623      1.1  mrg       for (unsigned i = 0; i < xlen; ++i)
    624      1.1  mrg 	val[i] = xval[i];
    625      1.1  mrg       return xlen;
    626      1.1  mrg     }
    627      1.1  mrg   unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
    628      1.1  mrg   for (unsigned int i = 0; i < len; i++)
    629      1.1  mrg     val[i] = xval[i];
    630      1.1  mrg   if (suboffset > 0)
    631      1.1  mrg     {
    632      1.1  mrg       val[len] = sext_hwi (xval[len], suboffset);
    633      1.1  mrg       len += 1;
    634      1.1  mrg     }
    635      1.1  mrg   return canonize (val, len, precision);
    636      1.1  mrg }
    637      1.1  mrg 
    638      1.1  mrg /* Zero-extend the number represented by XVAL and XLEN into VAL,
    639      1.1  mrg    starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
    640      1.1  mrg    and VAL have PRECISION bits.  */
    641      1.1  mrg unsigned int
    642      1.1  mrg wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
    643      1.1  mrg 		unsigned int xlen, unsigned int precision, unsigned int offset)
    644      1.1  mrg {
    645      1.1  mrg   unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
    646      1.1  mrg   /* Extending beyond the precision is a no-op.  If we have only stored
    647      1.1  mrg      OFFSET bits or fewer, and the upper stored bit is zero, then there
    648      1.1  mrg      is nothing to do.  */
    649      1.1  mrg   if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    650      1.1  mrg     {
    651      1.1  mrg       for (unsigned i = 0; i < xlen; ++i)
    652      1.1  mrg 	val[i] = xval[i];
    653      1.1  mrg       return xlen;
    654      1.1  mrg     }
    655      1.1  mrg   unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
    656      1.1  mrg   for (unsigned int i = 0; i < len; i++)
    657      1.1  mrg     val[i] = i < xlen ? xval[i] : -1;
    658      1.1  mrg   if (suboffset > 0)
    659      1.1  mrg     val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
    660      1.1  mrg   else
    661      1.1  mrg     val[len] = 0;
    662      1.1  mrg   return canonize (val, len + 1, precision);
    663      1.1  mrg }
    664      1.1  mrg 
    665      1.1  mrg /*
    666      1.1  mrg  * Masking, inserting, shifting, rotating.
    667      1.1  mrg  */
    668      1.1  mrg 
    669      1.1  mrg /* Insert WIDTH bits from Y into X starting at START.  */
    670      1.1  mrg wide_int
    671      1.1  mrg wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
    672      1.1  mrg 	    unsigned int width)
    673      1.1  mrg {
    674      1.1  mrg   wide_int result;
    675      1.1  mrg   wide_int mask;
    676      1.1  mrg   wide_int tmp;
    677      1.1  mrg 
    678      1.1  mrg   unsigned int precision = x.get_precision ();
    679      1.1  mrg   if (start >= precision)
    680      1.1  mrg     return x;
    681      1.1  mrg 
    682      1.1  mrg   gcc_checking_assert (precision >= width);
    683      1.1  mrg 
    684      1.1  mrg   if (start + width >= precision)
    685      1.1  mrg     width = precision - start;
    686      1.1  mrg 
    687      1.1  mrg   mask = wi::shifted_mask (start, width, false, precision);
    688      1.1  mrg   tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
    689      1.1  mrg   result = tmp & mask;
    690      1.1  mrg 
    691      1.1  mrg   tmp = wi::bit_and_not (x, mask);
    692      1.1  mrg   result = result | tmp;
    693      1.1  mrg 
    694      1.1  mrg   return result;
    695      1.1  mrg }
    696      1.1  mrg 
/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  /* Locate the block that holds BIT and the bit's position within it.  */
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
	 a new block.  */
      unsigned int len = block + 1;
      /* Blocks at or past XLEN are read through safe_uhwi (presumably
	 the implicit sign blocks -- see its definition).  */
      for (unsigned int i = 0; i < len; i++)
	val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= HOST_WIDE_INT_1U << subbit;

      /* If the bit we just set is at the msb of the block, make sure
	 that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
	{
	  val[len++] = 0;
	  return len;
	}
      return canonize (val, len, precision);
    }
  else
    {
      /* BIT lies strictly inside the explicitly stored blocks; copy
	 them all and set it.  */
      for (unsigned int i = 0; i < xlen; i++)
	val[i] = xval[i];
      val[block] |= HOST_WIDE_INT_1U << subbit;
      return canonize (val, xlen, precision);
    }
}
    733      1.1  mrg 
/* bswap THIS.  Return a copy of the value with the order of its bytes
   reversed within the PRECISION-bit payload.  */
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  /* Start from an all-zero buffer so each byte can simply be OR'd in.  */
  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      /* D is the destination bit position that mirrors source bit
	 position S across the value.  */
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      /* Extract the byte at bit S of the input.  */
      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      /* Deposit it at the mirrored position D of the result.  */
      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}
    772      1.1  mrg 
    773      1.1  mrg /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
    774      1.1  mrg    above that up to PREC are zeros.  The result is inverted if NEGATE
    775      1.1  mrg    is true.  Return the number of blocks in VAL.  */
    776      1.1  mrg unsigned int
    777      1.1  mrg wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
    778      1.1  mrg 	  unsigned int prec)
    779      1.1  mrg {
    780      1.1  mrg   if (width >= prec)
    781      1.1  mrg     {
    782      1.1  mrg       val[0] = negate ? 0 : -1;
    783      1.1  mrg       return 1;
    784      1.1  mrg     }
    785      1.1  mrg   else if (width == 0)
    786      1.1  mrg     {
    787      1.1  mrg       val[0] = negate ? -1 : 0;
    788      1.1  mrg       return 1;
    789      1.1  mrg     }
    790      1.1  mrg 
    791      1.1  mrg   unsigned int i = 0;
    792      1.1  mrg   while (i < width / HOST_BITS_PER_WIDE_INT)
    793      1.1  mrg     val[i++] = negate ? 0 : -1;
    794      1.1  mrg 
    795      1.1  mrg   unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
    796      1.1  mrg   if (shift != 0)
    797      1.1  mrg     {
    798  1.1.1.3  mrg       HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1;
    799      1.1  mrg       val[i++] = negate ? ~last : last;
    800      1.1  mrg     }
    801      1.1  mrg   else
    802      1.1  mrg     val[i++] = negate ? -1 : 0;
    803      1.1  mrg 
    804      1.1  mrg   return i;
    805      1.1  mrg }
    806      1.1  mrg 
/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
		  bool negate, unsigned int prec)
{
  /* A degenerate mask is all zeros (all ones when negated).  */
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  /* Clip the mask so that it ends at the precision.  */
  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  /* Blocks wholly below START hold no mask bits.  */
  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  /* Handle the block in which the mask begins part-way through.  */
  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      /* BLOCK initially masks the zero bits below the mask.  */
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
	{
	  /* case 000111000 */
	  block = (HOST_WIDE_INT_1U << shift) - block - 1;
	  val[i++] = negate ? ~block : block;
	  return i;
	}
      else
	/* ...111000 */
	val[i++] = negate ? block : ~block;
    }

  /* If the mask reaches the precision there is nothing above it; a
     mask that started block-aligned still needs its all-ones block.  */
  if (end >= prec)
    {
      if (!shift)
	val[i++] = negate ? 0 : -1;
      return i;
    }

  /* Blocks lying entirely inside the mask.  */
  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  /* Handle the block in which the mask ends part-way through.  */
  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111 */
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else
    /* The mask ended on a block boundary; emit the all-zero
       (all-ones if negated) block above it.  */
    val[i++] = negate ? -1 : 0;

  return i;
}
    868      1.1  mrg 
    869      1.1  mrg /*
    870      1.1  mrg  * logical operations.
    871      1.1  mrg  */
    872      1.1  mrg 
/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      /* Blocks of OP1 above OP1LEN are implicitly its smeared sign
	 bit.  A zero mask ANDs OP0's high blocks away, so the result
	 truncates to OP1's length; a -1 mask passes them through
	 unchanged, in which case no re-canonization is needed.  */
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      /* Likewise with the roles of OP0 and OP1 swapped.  */
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  /* AND the blocks that both operands store explicitly.  */
  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
    929      1.1  mrg 
    930      1.1  mrg /* Set VAL to OP0 & ~OP1.  Return the number of blocks used.  */
    931      1.1  mrg unsigned int
    932      1.1  mrg wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
    933      1.1  mrg 		   unsigned int op0len, const HOST_WIDE_INT *op1,
    934      1.1  mrg 		   unsigned int op1len, unsigned int prec)
    935      1.1  mrg {
    936      1.1  mrg   wide_int result;
    937      1.1  mrg   int l0 = op0len - 1;
    938      1.1  mrg   int l1 = op1len - 1;
    939      1.1  mrg   bool need_canon = true;
    940      1.1  mrg 
    941      1.1  mrg   unsigned int len = MAX (op0len, op1len);
    942      1.1  mrg   if (l0 > l1)
    943      1.1  mrg     {
    944      1.1  mrg       HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
    945      1.1  mrg       if (op1mask != 0)
    946      1.1  mrg 	{
    947      1.1  mrg 	  l0 = l1;
    948      1.1  mrg 	  len = l1 + 1;
    949      1.1  mrg 	}
    950      1.1  mrg       else
    951      1.1  mrg 	{
    952      1.1  mrg 	  need_canon = false;
    953      1.1  mrg 	  while (l0 > l1)
    954      1.1  mrg 	    {
    955      1.1  mrg 	      val[l0] = op0[l0];
    956      1.1  mrg 	      l0--;
    957      1.1  mrg 	    }
    958      1.1  mrg 	}
    959      1.1  mrg     }
    960      1.1  mrg   else if (l1 > l0)
    961      1.1  mrg     {
    962      1.1  mrg       HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
    963      1.1  mrg       if (op0mask == 0)
    964      1.1  mrg 	len = l0 + 1;
    965      1.1  mrg       else
    966      1.1  mrg 	{
    967      1.1  mrg 	  need_canon = false;
    968      1.1  mrg 	  while (l1 > l0)
    969      1.1  mrg 	    {
    970      1.1  mrg 	      val[l1] = ~op1[l1];
    971      1.1  mrg 	      l1--;
    972      1.1  mrg 	    }
    973      1.1  mrg 	}
    974      1.1  mrg     }
    975      1.1  mrg 
    976      1.1  mrg   while (l0 >= 0)
    977      1.1  mrg     {
    978      1.1  mrg       val[l0] = op0[l0] & ~op1[l0];
    979      1.1  mrg       l0--;
    980      1.1  mrg     }
    981      1.1  mrg 
    982      1.1  mrg   if (need_canon)
    983      1.1  mrg     len = canonize (val, len, prec);
    984      1.1  mrg 
    985      1.1  mrg   return len;
    986      1.1  mrg }
    987      1.1  mrg 
    988      1.1  mrg /* Set VAL to OP0 | OP1.  Return the number of blocks used.  */
    989      1.1  mrg unsigned int
    990      1.1  mrg wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
    991      1.1  mrg 	      unsigned int op0len, const HOST_WIDE_INT *op1,
    992      1.1  mrg 	      unsigned int op1len, unsigned int prec)
    993      1.1  mrg {
    994      1.1  mrg   wide_int result;
    995      1.1  mrg   int l0 = op0len - 1;
    996      1.1  mrg   int l1 = op1len - 1;
    997      1.1  mrg   bool need_canon = true;
    998      1.1  mrg 
    999      1.1  mrg   unsigned int len = MAX (op0len, op1len);
   1000      1.1  mrg   if (l0 > l1)
   1001      1.1  mrg     {
   1002      1.1  mrg       HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
   1003      1.1  mrg       if (op1mask != 0)
   1004      1.1  mrg 	{
   1005      1.1  mrg 	  l0 = l1;
   1006      1.1  mrg 	  len = l1 + 1;
   1007      1.1  mrg 	}
   1008      1.1  mrg       else
   1009      1.1  mrg 	{
   1010      1.1  mrg 	  need_canon = false;
   1011      1.1  mrg 	  while (l0 > l1)
   1012      1.1  mrg 	    {
   1013      1.1  mrg 	      val[l0] = op0[l0];
   1014      1.1  mrg 	      l0--;
   1015      1.1  mrg 	    }
   1016      1.1  mrg 	}
   1017      1.1  mrg     }
   1018      1.1  mrg   else if (l1 > l0)
   1019      1.1  mrg     {
   1020      1.1  mrg       HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
   1021      1.1  mrg       if (op0mask != 0)
   1022      1.1  mrg 	len = l0 + 1;
   1023      1.1  mrg       else
   1024      1.1  mrg 	{
   1025      1.1  mrg 	  need_canon = false;
   1026      1.1  mrg 	  while (l1 > l0)
   1027      1.1  mrg 	    {
   1028      1.1  mrg 	      val[l1] = op1[l1];
   1029      1.1  mrg 	      l1--;
   1030      1.1  mrg 	    }
   1031      1.1  mrg 	}
   1032      1.1  mrg     }
   1033      1.1  mrg 
   1034      1.1  mrg   while (l0 >= 0)
   1035      1.1  mrg     {
   1036      1.1  mrg       val[l0] = op0[l0] | op1[l0];
   1037      1.1  mrg       l0--;
   1038      1.1  mrg     }
   1039      1.1  mrg 
   1040      1.1  mrg   if (need_canon)
   1041      1.1  mrg     len = canonize (val, len, prec);
   1042      1.1  mrg 
   1043      1.1  mrg   return len;
   1044      1.1  mrg }
   1045      1.1  mrg 
   1046      1.1  mrg /* Set VAL to OP0 | ~OP1.  Return the number of blocks used.  */
   1047      1.1  mrg unsigned int
   1048      1.1  mrg wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
   1049      1.1  mrg 		  unsigned int op0len, const HOST_WIDE_INT *op1,
   1050      1.1  mrg 		  unsigned int op1len, unsigned int prec)
   1051      1.1  mrg {
   1052      1.1  mrg   wide_int result;
   1053      1.1  mrg   int l0 = op0len - 1;
   1054      1.1  mrg   int l1 = op1len - 1;
   1055      1.1  mrg   bool need_canon = true;
   1056      1.1  mrg 
   1057      1.1  mrg   unsigned int len = MAX (op0len, op1len);
   1058      1.1  mrg   if (l0 > l1)
   1059      1.1  mrg     {
   1060      1.1  mrg       HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
   1061      1.1  mrg       if (op1mask == 0)
   1062      1.1  mrg 	{
   1063      1.1  mrg 	  l0 = l1;
   1064      1.1  mrg 	  len = l1 + 1;
   1065      1.1  mrg 	}
   1066      1.1  mrg       else
   1067      1.1  mrg 	{
   1068      1.1  mrg 	  need_canon = false;
   1069      1.1  mrg 	  while (l0 > l1)
   1070      1.1  mrg 	    {
   1071      1.1  mrg 	      val[l0] = op0[l0];
   1072      1.1  mrg 	      l0--;
   1073      1.1  mrg 	    }
   1074      1.1  mrg 	}
   1075      1.1  mrg     }
   1076      1.1  mrg   else if (l1 > l0)
   1077      1.1  mrg     {
   1078      1.1  mrg       HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
   1079      1.1  mrg       if (op0mask != 0)
   1080      1.1  mrg 	len = l0 + 1;
   1081      1.1  mrg       else
   1082      1.1  mrg 	{
   1083      1.1  mrg 	  need_canon = false;
   1084      1.1  mrg 	  while (l1 > l0)
   1085      1.1  mrg 	    {
   1086      1.1  mrg 	      val[l1] = ~op1[l1];
   1087      1.1  mrg 	      l1--;
   1088      1.1  mrg 	    }
   1089      1.1  mrg 	}
   1090      1.1  mrg     }
   1091      1.1  mrg 
   1092      1.1  mrg   while (l0 >= 0)
   1093      1.1  mrg     {
   1094      1.1  mrg       val[l0] = op0[l0] | ~op1[l0];
   1095      1.1  mrg       l0--;
   1096      1.1  mrg     }
   1097      1.1  mrg 
   1098      1.1  mrg   if (need_canon)
   1099      1.1  mrg     len = canonize (val, len, prec);
   1100      1.1  mrg 
   1101      1.1  mrg   return len;
   1102      1.1  mrg }
   1103      1.1  mrg 
   1104      1.1  mrg /* Set VAL to OP0 ^ OP1.  Return the number of blocks used.  */
   1105      1.1  mrg unsigned int
   1106      1.1  mrg wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
   1107      1.1  mrg 	       unsigned int op0len, const HOST_WIDE_INT *op1,
   1108      1.1  mrg 	       unsigned int op1len, unsigned int prec)
   1109      1.1  mrg {
   1110      1.1  mrg   wide_int result;
   1111      1.1  mrg   int l0 = op0len - 1;
   1112      1.1  mrg   int l1 = op1len - 1;
   1113      1.1  mrg 
   1114      1.1  mrg   unsigned int len = MAX (op0len, op1len);
   1115      1.1  mrg   if (l0 > l1)
   1116      1.1  mrg     {
   1117      1.1  mrg       HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
   1118      1.1  mrg       while (l0 > l1)
   1119      1.1  mrg 	{
   1120      1.1  mrg 	  val[l0] = op0[l0] ^ op1mask;
   1121      1.1  mrg 	  l0--;
   1122      1.1  mrg 	}
   1123      1.1  mrg     }
   1124      1.1  mrg 
   1125      1.1  mrg   if (l1 > l0)
   1126      1.1  mrg     {
   1127      1.1  mrg       HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
   1128      1.1  mrg       while (l1 > l0)
   1129      1.1  mrg 	{
   1130      1.1  mrg 	  val[l1] = op0mask ^ op1[l1];
   1131      1.1  mrg 	  l1--;
   1132      1.1  mrg 	}
   1133      1.1  mrg     }
   1134      1.1  mrg 
   1135      1.1  mrg   while (l0 >= 0)
   1136      1.1  mrg     {
   1137      1.1  mrg       val[l0] = op0[l0] ^ op1[l0];
   1138      1.1  mrg       l0--;
   1139      1.1  mrg     }
   1140      1.1  mrg 
   1141      1.1  mrg   return canonize (val, len, prec);
   1142      1.1  mrg }
   1143      1.1  mrg 
   1144      1.1  mrg /*
   1145      1.1  mrg  * math
   1146      1.1  mrg  */
   1147      1.1  mrg 
/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, wi::overflow_type *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* CARRY is the carry out of the block most recently added;
     OLD_CARRY is the carry into it.  */
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  /* Blocks beyond each operand's length are implicitly its smeared
     sign bit: all zeros or all ones.  */
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
  /* Add all of the explicitly defined elements.  */

  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      /* An unsigned sum wrapped iff it is smaller than an addend;
	 with a carry in, equality also implies a wrap.  */
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      /* There is room for one more block below the precision: form it
	 from the two sign masks plus the final carry.  */
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
	/* With a spare block available, a signed sum cannot overflow;
	   an unsigned one overflows iff a carry propagated out.  */
	*overflow
	  = (sgn == UNSIGNED && carry) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
    }
  else if (overflow)
    {
      /* SHIFT moves the bit at position PREC-1 up to the msb of a
	 HOST_WIDE_INT so it can be tested with a signed compare.  */
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  /* Signed overflow occurred iff the operands agree in sign
	     but the result's sign (at bit PREC-1) differs from both.  */
	  unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
	  if ((HOST_WIDE_INT) (x << shift) < 0)
	    {
	      /* Distinguish underflow from overflow by which side of
		 the first addend the top block landed on.  */
	      if (o0 > (unsigned HOST_WIDE_INT) val[len - 1])
		*overflow = wi::OVF_UNDERFLOW;
	      else if (o0 < (unsigned HOST_WIDE_INT) val[len - 1])
		*overflow = wi::OVF_OVERFLOW;
	      else
		*overflow = wi::OVF_NONE;
	    }
	  else
	    *overflow = wi::OVF_NONE;
	}
      else
	{
	  /* Put the MSB of X and O0 and in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  /* Unsigned overflow iff the sum, viewed at precision PREC,
	     wrapped below its first addend.  */
	  if (old_carry)
	    *overflow = (x <= o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
	  else
	    *overflow = (x < o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
	}
    }

  return canonize (val, len, prec);
}
   1220      1.1  mrg 
/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
	   unsigned int in_len, unsigned int out_len,
	   unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  /* Number of live bits in the most significant block, or 0 when
     PREC is block-aligned.  */
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  /* Half-block value used to fill RESULT beyond the unpacked input:
     the smeared sign for SIGNED, zero for UNSIGNED.  */
  HOST_WIDE_INT mask;

  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  /* Split every full block below the top into its low and high
     halves, low half first.  */
  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  /* The topmost block is first truncated to PREC bits and extended
     according to SGN before being split.  */
  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
	x = sext_hwi (x, small_prec);
      else
	x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}
   1266      1.1  mrg 
   1267      1.1  mrg /* The inverse of wi_unpack.  IN_LEN is the number of input
   1268      1.1  mrg    blocks and PRECISION is the precision of the result.  Return the
   1269      1.1  mrg    number of blocks in the canonicalized result.  */
   1270      1.1  mrg static unsigned int
   1271      1.1  mrg wi_pack (HOST_WIDE_INT *result,
   1272      1.1  mrg 	 const unsigned HOST_HALF_WIDE_INT *input,
   1273      1.1  mrg 	 unsigned int in_len, unsigned int precision)
   1274      1.1  mrg {
   1275      1.1  mrg   unsigned int i = 0;
   1276      1.1  mrg   unsigned int j = 0;
   1277      1.1  mrg   unsigned int blocks_needed = BLOCKS_NEEDED (precision);
   1278      1.1  mrg 
   1279      1.1  mrg   while (i + 1 < in_len)
   1280      1.1  mrg     {
   1281      1.1  mrg       result[j++] = ((unsigned HOST_WIDE_INT) input[i]
   1282      1.1  mrg 		     | ((unsigned HOST_WIDE_INT) input[i + 1]
   1283      1.1  mrg 			<< HOST_BITS_PER_HALF_WIDE_INT));
   1284      1.1  mrg       i += 2;
   1285      1.1  mrg     }
   1286      1.1  mrg 
   1287      1.1  mrg   /* Handle the case where in_len is odd.   For this we zero extend.  */
   1288      1.1  mrg   if (in_len & 1)
   1289      1.1  mrg     result[j++] = (unsigned HOST_WIDE_INT) input[i];
   1290      1.1  mrg   else if (j < blocks_needed)
   1291      1.1  mrg     result[j++] = 0;
   1292      1.1  mrg   return canonize (result, j, precision);
   1293      1.1  mrg }
   1294      1.1  mrg 
/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.

   NOTE: Overflow type for signed overflow is not yet implemented.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
		  unsigned int op1len, const HOST_WIDE_INT *op2val,
		  unsigned int op2len, unsigned int prec, signop sgn,
		  wi::overflow_type *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed by vpn.  */

  /* U and V hold the operands split into half-wide "digits".  */
  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* Mask selecting the low half-wide digit of a HOST_WIDE_INT.  */
  HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = wi::OVF_NONE;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
	 least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
	  && wi::fits_uhwi_p (op1)
	  && wi::fits_uhwi_p (op2))
	{
	  /* This case never overflows.  */
	  if (high)
	    {
	      val[0] = 0;
	      return 1;
	    }
	  umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
	  if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
	    {
	      /* The top block has its sign bit set; add an explicit
		 zero block so the value is not read as negative.  */
	      val[2] = 0;
	      return 3;
	    }
	  return 1 + (val[1] != 0 || val[0] < 0);
	}
      /* Likewise if the output is a full single HWI, except that the
	 upper HWI of the result is only used for determining overflow.
	 (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT upper;
	  umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
	  if (needs_overflow)
	    /* Unsigned overflow can only be +OVERFLOW.  */
	    *overflow = (upper != 0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
	  if (high)
	    val[0] = upper;
	  return 1;
	}
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      if (high)
	{
	  /* The high half of 1 * op2 is just the sign extension of
	     op2.  */
	  val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op2len; i++)
	val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op1len; i++)
	val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
	{
	  o0 = op1.to_shwi ();
	  o1 = op2.to_shwi ();
	}
      else
	{
	  o0 = op1.to_uhwi ();
	  o1 = op2.to_uhwi ();
	}

      /* The full product fits in one HWI, so overflow can be tested
	 by comparing against the value truncated to PREC bits.  */
      r = o0 * o1;
      if (needs_overflow)
	{
	  if (sgn == SIGNED)
	    {
	      if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
		/* FIXME: Signed overflow type is not implemented yet.  */
		*overflow = OVF_UNKNOWN;
	    }
	  else
	    {
	      if ((r >> prec) != 0)
		/* Unsigned overflow can only be +OVERFLOW.  */
		*overflow = OVF_OVERFLOW;
	    }
	}
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  /* Schoolbook multiplication on half-wide digits; K carries
     between digit positions within each row.  */
  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
	{
	  t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
	       + r[i + j] + k);
	  r[i + j] = t & HALF_INT_MASK;
	  k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	}
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      /* For each negative operand, subtract the other operand from
	 the upper half of the product (standard signed-correction of
	 an unsigned multiply); B is the running borrow.  */
      if (wi::neg_p (op1))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)v[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
      if (wi::neg_p (op2))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)u[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
	 For signed, overflow is true if any of the top bits are not equal
	 to the sign bit.  */
      if (sgn == UNSIGNED)
	top = 0;
      else
	{
	  /* TOP is the sign digit of the low half of the product,
	     replicated across a half-wide digit.  */
	  top = r[(half_blocks_needed) - 1];
	  top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
	  top &= mask;
	}

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
	if (((HOST_WIDE_INT)(r[i] & mask)) != top)
	  /* FIXME: Signed overflow type is not implemented yet.  */
	  *overflow = (sgn == UNSIGNED) ? wi::OVF_OVERFLOW : wi::OVF_UNKNOWN;
    }

  /* If only the high half was requested, return the top
     HALF_BLOCKS_NEEDED digits of the double-width product.  */
  int r_offset = high ? half_blocks_needed : 0;
  return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
}
   1524      1.1  mrg 
   1525      1.1  mrg /* Compute the population count of X.  */
   1526      1.1  mrg int
   1527      1.1  mrg wi::popcount (const wide_int_ref &x)
   1528      1.1  mrg {
   1529      1.1  mrg   unsigned int i;
   1530      1.1  mrg   int count;
   1531      1.1  mrg 
   1532      1.1  mrg   /* The high order block is special if it is the last block and the
   1533      1.1  mrg      precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
   1534      1.1  mrg      have to clear out any ones above the precision before doing
   1535      1.1  mrg      popcount on this block.  */
   1536      1.1  mrg   count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
   1537      1.1  mrg   unsigned int stop = x.len;
   1538      1.1  mrg   if (count < 0)
   1539      1.1  mrg     {
   1540      1.1  mrg       count = popcount_hwi (x.uhigh () << -count);
   1541      1.1  mrg       stop -= 1;
   1542      1.1  mrg     }
   1543      1.1  mrg   else
   1544      1.1  mrg     {
   1545      1.1  mrg       if (x.sign_mask () >= 0)
   1546      1.1  mrg 	count = 0;
   1547      1.1  mrg     }
   1548      1.1  mrg 
   1549      1.1  mrg   for (i = 0; i < stop; ++i)
   1550      1.1  mrg     count += popcount_hwi (x.val[i]);
   1551      1.1  mrg 
   1552      1.1  mrg   return count;
   1553      1.1  mrg }
   1554      1.1  mrg 
/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, wi::overflow_type *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  /* MASK0/MASK1 are the implicit sign-extension blocks of the two
     operands: all-ones when the top bit is set, zero otherwise.  */
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      /* With a borrow pending, equality also borrows.  */
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      /* There is room for another block below the precision, so
	 materialize the subtraction of the sign-extension blocks.  */
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
	*overflow = (sgn == UNSIGNED && borrow) ? OVF_UNDERFLOW : OVF_NONE;
    }
  else if (overflow)
    {
      /* SHIFT moves the bit at position PREC - 1 of the top block up
	 to the HWI's most significant bit.  */
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  /* Signed overflow happened iff the operands had different
	     signs and the result's sign differs from OP0's; X has
	     that condition folded into bit PREC - 1.  */
	  unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
	  if ((HOST_WIDE_INT) (x << shift) < 0)
	    {
	      if (o0 > o1)
		*overflow = OVF_UNDERFLOW;
	      else if (o0 < o1)
		*overflow = OVF_OVERFLOW;
	      else
		*overflow = OVF_NONE;
	    }
	  else
	    *overflow = OVF_NONE;
	}
      else
	{
	  /* Put the MSB of X and O0 and in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_borrow)
	    *overflow = (x >= o0) ? OVF_UNDERFLOW : OVF_NONE;
	  else
	    *overflow = (x > o0) ? OVF_UNDERFLOW : OVF_NONE;
	}
    }

  return canonize (val, len, prec);
}
   1630      1.1  mrg 
   1631      1.1  mrg 
   1632      1.1  mrg /*
   1633      1.1  mrg  * Division and Mod
   1634      1.1  mrg  */
   1635      1.1  mrg 
/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U however
   there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
		   unsigned HOST_HALF_WIDE_INT *b_remainder,
		   unsigned HOST_HALF_WIDE_INT *b_dividend,
		   unsigned HOST_HALF_WIDE_INT *b_divisor,
		   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      /* Short division: sweep from the most significant dividend
	 digit down, carrying the running remainder K.  */
      k = 0;
      for (j = m - 1; j >= 0; j--)
	{
	  b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
	  k = ((k * b + b_dividend[j])
	       - ((unsigned HOST_WIDE_INT)b_quotient[j]
		  * (unsigned HOST_WIDE_INT)b_divisor[0]));
	}
      b_remainder[0] = k;
      return;
    }

  /* S is the left shift needed to set the high bit of the divisor's
     top digit, as Knuth's algorithm requires.  */
  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
	 algorithm, we can overwrite b_dividend and b_divisor, so we do
	 that.  */
      for (i = n - 1; i > 0; i--)
	b_divisor[i] = (b_divisor[i] << s)
	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
	b_dividend[i] = (b_dividend[i] << s)
	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      /* Estimate the next quotient digit from the top two dividend
	 digits; the estimate may be too large and is corrected in
	 the loop below.  */
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
	{
	  qhat -= 1;
	  rhat += b_divisor[n-1];
	  if (rhat < b)
	    goto again;
	}

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
	{
	  p = qhat * b_divisor[i];
	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
	  b_dividend[i + j] = t;
	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
	}
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
	{
	  /* QHAT was still one too large: decrement it and add the
	     divisor back into the dividend (the "add back" step).  */
	  b_quotient[j] -= 1;
	  k = 0;
	  for (i = 0; i < n; i++)
	    {
	      t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
	      b_dividend[i+j] = t;
	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	    }
	  b_dividend[j+n] += k;
	}
    }
  /* Undo the normalization shift to recover the true remainder.  */
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}
   1744      1.1  mrg 
   1745      1.1  mrg 
/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
		     HOST_WIDE_INT *remainder,
		     const HOST_WIDE_INT *dividend_val,
		     unsigned int dividend_len, unsigned int dividend_prec,
		     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
		     unsigned int divisor_prec, signop sgn,
		     wi::overflow_type *oflow)
{
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  /* Half-wide digit buffers for divmod_internal_2, sized for the
     largest supported mode.  B_DIVIDEND has the extra element that
     the worker requires.  */
  unsigned HOST_HALF_WIDE_INT
    b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
					   dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
					  divisor_prec);
  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
	{
	  *remainder_len = 1;
	  remainder[0] = 0;
	}
      if (oflow)
	*oflow = OVF_OVERFLOW;
      if (quotient)
	for (unsigned int i = 0; i < dividend_len; ++i)
	  quotient[i] = dividend_val[i];
      return dividend_len;
    }

  if (oflow)
    *oflow = OVF_NONE;

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
	{
	  /* Host division can't represent this case: the quotient
	     -HOST_WIDE_INT_MIN needs two blocks.  */
	  gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
	  if (quotient)
	    {
	      quotient[0] = HOST_WIDE_INT_MIN;
	      quotient[1] = 0;
	    }
	  if (remainder)
	    {
	      remainder[0] = 0;
	      *remainder_len = 1;
	    }
	  return 2;
	}
      else
	{
	  if (quotient)
	    quotient[0] = o0 / o1;
	  if (remainder)
	    {
	      remainder[0] = o0 % o1;
	      *remainder_len = 1;
	    }
	  return 1;
	}
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
      unsigned int quotient_len = 1;

      if (quotient)
	{
	  quotient[0] = o0 / o1;
	  quotient_len = canonize_uhwi (quotient, dividend_prec);
	}
      if (remainder)
	{
	  remainder[0] = o0 % o1;
	  *remainder_len = canonize_uhwi (remainder, dividend_prec);
	}
      return quotient_len;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
	{
	  neg_dividend = -dividend;
	  dividend = neg_dividend;
	  dividend_neg = true;
	}
      if (wi::neg_p (divisor))
	{
	  neg_divisor = -divisor;
	  divisor = neg_divisor;
	  divisor_neg = true;
	}
    }

  /* Split both operands into half-wide digits for the worker.  */
  wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
	     dividend_blocks_needed, dividend_prec, UNSIGNED);
  wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
	     divisor_blocks_needed, divisor_prec, UNSIGNED);

  /* Trim leading zero digits; the worker takes counts of significant
     digits (keeping at least one).  */
  m = dividend_blocks_needed;
  b_dividend[m] = 0;
  while (m > 1 && b_dividend[m - 1] == 0)
    m--;

  n = divisor_blocks_needed;
  while (n > 1 && b_divisor[n - 1] == 0)
    n--;

  memset (b_quotient, 0, sizeof (b_quotient));

  divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
	 neg.  Negate via 0 - quotient.  */
      if (dividend_neg != divisor_neg)
	quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
				      quotient_len, dividend_prec,
				      UNSIGNED, 0);
    }

  if (remainder)
    {
      *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
	*remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
					*remainder_len, dividend_prec,
					UNSIGNED, 0);
    }

  return quotient_len;
}
   1932      1.1  mrg 
   1933      1.1  mrg /*
   1934      1.1  mrg  * Shifting, rotating and extraction.
   1935      1.1  mrg  */
   1936      1.1  mrg 
   1937      1.1  mrg /* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   1938      1.1  mrg    number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
   1939      1.1  mrg unsigned int
   1940      1.1  mrg wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
   1941      1.1  mrg 		  unsigned int xlen, unsigned int precision,
   1942      1.1  mrg 		  unsigned int shift)
   1943      1.1  mrg {
   1944      1.1  mrg   /* Split the shift into a whole-block shift and a subblock shift.  */
   1945      1.1  mrg   unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
   1946      1.1  mrg   unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
   1947      1.1  mrg 
   1948      1.1  mrg   /* The whole-block shift fills with zeros.  */
   1949      1.1  mrg   unsigned int len = BLOCKS_NEEDED (precision);
   1950      1.1  mrg   for (unsigned int i = 0; i < skip; ++i)
   1951      1.1  mrg     val[i] = 0;
   1952      1.1  mrg 
   1953      1.1  mrg   /* It's easier to handle the simple block case specially.  */
   1954      1.1  mrg   if (small_shift == 0)
   1955      1.1  mrg     for (unsigned int i = skip; i < len; ++i)
   1956      1.1  mrg       val[i] = safe_uhwi (xval, xlen, i - skip);
   1957      1.1  mrg   else
   1958      1.1  mrg     {
   1959      1.1  mrg       /* The first unfilled output block is a left shift of the first
   1960      1.1  mrg 	 block in XVAL.  The other output blocks contain bits from two
   1961      1.1  mrg 	 consecutive input blocks.  */
   1962      1.1  mrg       unsigned HOST_WIDE_INT carry = 0;
   1963      1.1  mrg       for (unsigned int i = skip; i < len; ++i)
   1964      1.1  mrg 	{
   1965      1.1  mrg 	  unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
   1966      1.1  mrg 	  val[i] = (x << small_shift) | carry;
   1967      1.1  mrg 	  carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
   1968      1.1  mrg 	}
   1969      1.1  mrg     }
   1970      1.1  mrg   return canonize (val, len, precision);
   1971      1.1  mrg }
   1972      1.1  mrg 
   1973      1.1  mrg /* Right shift XVAL by SHIFT and store the result in VAL.  Return the
   1974      1.1  mrg    number of blocks in VAL.  The input has XPRECISION bits and the
   1975      1.1  mrg    output has XPRECISION - SHIFT bits.  */
   1976      1.1  mrg static unsigned int
   1977      1.1  mrg rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
   1978      1.1  mrg 		     unsigned int xlen, unsigned int xprecision,
   1979      1.1  mrg 		     unsigned int shift)
   1980      1.1  mrg {
   1981      1.1  mrg   /* Split the shift into a whole-block shift and a subblock shift.  */
   1982      1.1  mrg   unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
   1983      1.1  mrg   unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;
   1984      1.1  mrg 
   1985      1.1  mrg   /* Work out how many blocks are needed to store the significant bits
   1986      1.1  mrg      (excluding the upper zeros or signs).  */
   1987      1.1  mrg   unsigned int len = BLOCKS_NEEDED (xprecision - shift);
   1988      1.1  mrg 
   1989      1.1  mrg   /* It's easier to handle the simple block case specially.  */
   1990      1.1  mrg   if (small_shift == 0)
   1991      1.1  mrg     for (unsigned int i = 0; i < len; ++i)
   1992      1.1  mrg       val[i] = safe_uhwi (xval, xlen, i + skip);
   1993      1.1  mrg   else
   1994      1.1  mrg     {
   1995      1.1  mrg       /* Each output block but the last is a combination of two input blocks.
   1996      1.1  mrg 	 The last block is a right shift of the last block in XVAL.  */
   1997      1.1  mrg       unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
   1998      1.1  mrg       for (unsigned int i = 0; i < len; ++i)
   1999      1.1  mrg 	{
   2000      1.1  mrg 	  val[i] = curr >> small_shift;
   2001      1.1  mrg 	  curr = safe_uhwi (xval, xlen, i + skip + 1);
   2002      1.1  mrg 	  val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
   2003      1.1  mrg 	}
   2004      1.1  mrg     }
   2005      1.1  mrg   return len;
   2006      1.1  mrg }
   2007      1.1  mrg 
   2008      1.1  mrg /* Logically right shift XVAL by SHIFT and store the result in VAL.
   2009      1.1  mrg    Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   2010      1.1  mrg    VAL has PRECISION bits.  */
   2011      1.1  mrg unsigned int
   2012      1.1  mrg wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
   2013      1.1  mrg 		   unsigned int xlen, unsigned int xprecision,
   2014      1.1  mrg 		   unsigned int precision, unsigned int shift)
   2015      1.1  mrg {
   2016      1.1  mrg   unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
   2017      1.1  mrg 
   2018      1.1  mrg   /* The value we just created has precision XPRECISION - SHIFT.
   2019      1.1  mrg      Zero-extend it to wider precisions.  */
   2020      1.1  mrg   if (precision > xprecision - shift)
   2021      1.1  mrg     {
   2022      1.1  mrg       unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
   2023      1.1  mrg       if (small_prec)
   2024      1.1  mrg 	val[len - 1] = zext_hwi (val[len - 1], small_prec);
   2025      1.1  mrg       else if (val[len - 1] < 0)
   2026      1.1  mrg 	{
   2027      1.1  mrg 	  /* Add a new block with a zero. */
   2028      1.1  mrg 	  val[len++] = 0;
   2029      1.1  mrg 	  return len;
   2030      1.1  mrg 	}
   2031      1.1  mrg     }
   2032      1.1  mrg   return canonize (val, len, precision);
   2033      1.1  mrg }
   2034      1.1  mrg 
   2035      1.1  mrg /* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   2036      1.1  mrg    Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   2037      1.1  mrg    VAL has PRECISION bits.  */
   2038      1.1  mrg unsigned int
   2039      1.1  mrg wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
   2040      1.1  mrg 		   unsigned int xlen, unsigned int xprecision,
   2041      1.1  mrg 		   unsigned int precision, unsigned int shift)
   2042      1.1  mrg {
   2043      1.1  mrg   unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);
   2044      1.1  mrg 
   2045      1.1  mrg   /* The value we just created has precision XPRECISION - SHIFT.
   2046      1.1  mrg      Sign-extend it to wider types.  */
   2047      1.1  mrg   if (precision > xprecision - shift)
   2048      1.1  mrg     {
   2049      1.1  mrg       unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
   2050      1.1  mrg       if (small_prec)
   2051      1.1  mrg 	val[len - 1] = sext_hwi (val[len - 1], small_prec);
   2052      1.1  mrg     }
   2053      1.1  mrg   return canonize (val, len, precision);
   2054      1.1  mrg }
   2055      1.1  mrg 
   2056      1.1  mrg /* Return the number of leading (upper) zeros in X.  */
   2057      1.1  mrg int
   2058      1.1  mrg wi::clz (const wide_int_ref &x)
   2059      1.1  mrg {
   2060  1.1.1.8  mrg   if (x.sign_mask () < 0)
   2061  1.1.1.8  mrg     /* The upper bit is set, so there are no leading zeros.  */
   2062  1.1.1.8  mrg     return 0;
   2063  1.1.1.8  mrg 
   2064      1.1  mrg   /* Calculate how many bits there above the highest represented block.  */
   2065      1.1  mrg   int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
   2066      1.1  mrg 
   2067      1.1  mrg   unsigned HOST_WIDE_INT high = x.uhigh ();
   2068      1.1  mrg   if (count < 0)
   2069      1.1  mrg     /* The upper -COUNT bits of HIGH are not part of the value.
   2070      1.1  mrg        Clear them out.  */
   2071      1.1  mrg     high = (high << -count) >> -count;
   2072      1.1  mrg 
   2073      1.1  mrg   /* We don't need to look below HIGH.  Either HIGH is nonzero,
   2074      1.1  mrg      or the top bit of the block below is nonzero; clz_hwi is
   2075      1.1  mrg      HOST_BITS_PER_WIDE_INT in the latter case.  */
   2076      1.1  mrg   return count + clz_hwi (high);
   2077      1.1  mrg }
   2078      1.1  mrg 
   2079      1.1  mrg /* Return the number of redundant sign bits in X.  (That is, the number
   2080      1.1  mrg    of bits immediately below the sign bit that have the same value as
   2081      1.1  mrg    the sign bit.)  */
   2082      1.1  mrg int
   2083      1.1  mrg wi::clrsb (const wide_int_ref &x)
   2084      1.1  mrg {
   2085      1.1  mrg   /* Calculate how many bits there above the highest represented block.  */
   2086      1.1  mrg   int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
   2087      1.1  mrg 
   2088      1.1  mrg   unsigned HOST_WIDE_INT high = x.uhigh ();
   2089      1.1  mrg   unsigned HOST_WIDE_INT mask = -1;
   2090      1.1  mrg   if (count < 0)
   2091      1.1  mrg     {
   2092      1.1  mrg       /* The upper -COUNT bits of HIGH are not part of the value.
   2093      1.1  mrg 	 Clear them from both MASK and HIGH.  */
   2094      1.1  mrg       mask >>= -count;
   2095      1.1  mrg       high &= mask;
   2096      1.1  mrg     }
   2097      1.1  mrg 
   2098      1.1  mrg   /* If the top bit is 1, count the number of leading 1s.  If the top
   2099      1.1  mrg      bit is zero, count the number of leading zeros.  */
   2100      1.1  mrg   if (high > mask / 2)
   2101      1.1  mrg     high ^= mask;
   2102      1.1  mrg 
   2103      1.1  mrg   /* There are no sign bits below the top block, so we don't need to look
   2104      1.1  mrg      beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
   2105      1.1  mrg      HIGH is 0.  */
   2106      1.1  mrg   return count + clz_hwi (high) - 1;
   2107      1.1  mrg }
   2108      1.1  mrg 
   2109      1.1  mrg /* Return the number of trailing (lower) zeros in X.  */
   2110      1.1  mrg int
   2111      1.1  mrg wi::ctz (const wide_int_ref &x)
   2112      1.1  mrg {
   2113      1.1  mrg   if (x.len == 1 && x.ulow () == 0)
   2114      1.1  mrg     return x.precision;
   2115      1.1  mrg 
   2116      1.1  mrg   /* Having dealt with the zero case, there must be a block with a
   2117      1.1  mrg      nonzero bit.  We don't care about the bits above the first 1.  */
   2118      1.1  mrg   unsigned int i = 0;
   2119      1.1  mrg   while (x.val[i] == 0)
   2120      1.1  mrg     ++i;
   2121      1.1  mrg   return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
   2122      1.1  mrg }
   2123      1.1  mrg 
   2124      1.1  mrg /* If X is an exact power of 2, return the base-2 logarithm, otherwise
   2125      1.1  mrg    return -1.  */
   2126      1.1  mrg int
   2127      1.1  mrg wi::exact_log2 (const wide_int_ref &x)
   2128      1.1  mrg {
   2129      1.1  mrg   /* Reject cases where there are implicit -1 blocks above HIGH.  */
   2130      1.1  mrg   if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
   2131      1.1  mrg     return -1;
   2132      1.1  mrg 
   2133      1.1  mrg   /* Set CRUX to the index of the entry that should be nonzero.
   2134      1.1  mrg      If the top block is zero then the next lowest block (if any)
   2135      1.1  mrg      must have the high bit set.  */
   2136      1.1  mrg   unsigned int crux = x.len - 1;
   2137      1.1  mrg   if (crux > 0 && x.val[crux] == 0)
   2138      1.1  mrg     crux -= 1;
   2139      1.1  mrg 
   2140      1.1  mrg   /* Check that all lower blocks are zero.  */
   2141      1.1  mrg   for (unsigned int i = 0; i < crux; ++i)
   2142      1.1  mrg     if (x.val[i] != 0)
   2143      1.1  mrg       return -1;
   2144      1.1  mrg 
   2145      1.1  mrg   /* Get a zero-extended form of block CRUX.  */
   2146      1.1  mrg   unsigned HOST_WIDE_INT hwi = x.val[crux];
   2147      1.1  mrg   if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
   2148      1.1  mrg     hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);
   2149      1.1  mrg 
   2150      1.1  mrg   /* Now it's down to whether HWI is a power of 2.  */
   2151      1.1  mrg   int res = ::exact_log2 (hwi);
   2152      1.1  mrg   if (res >= 0)
   2153      1.1  mrg     res += crux * HOST_BITS_PER_WIDE_INT;
   2154      1.1  mrg   return res;
   2155      1.1  mrg }
   2156      1.1  mrg 
   2157      1.1  mrg /* Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.  */
   2158      1.1  mrg int
   2159      1.1  mrg wi::floor_log2 (const wide_int_ref &x)
   2160      1.1  mrg {
   2161      1.1  mrg   return x.precision - 1 - clz (x);
   2162      1.1  mrg }
   2163      1.1  mrg 
   2164      1.1  mrg /* Return the index of the first (lowest) set bit in X, counting from 1.
   2165      1.1  mrg    Return 0 if X is 0.  */
   2166      1.1  mrg int
   2167      1.1  mrg wi::ffs (const wide_int_ref &x)
   2168      1.1  mrg {
   2169      1.1  mrg   return eq_p (x, 0) ? 0 : ctz (x) + 1;
   2170      1.1  mrg }
   2171      1.1  mrg 
   2172      1.1  mrg /* Return true if sign-extending X to have precision PRECISION would give
   2173      1.1  mrg    the minimum signed value at that precision.  */
   2174      1.1  mrg bool
   2175      1.1  mrg wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
   2176      1.1  mrg {
   2177      1.1  mrg   return ctz (x) + 1 == int (precision);
   2178      1.1  mrg }
   2179      1.1  mrg 
   2180      1.1  mrg /* Return true if X represents the minimum signed value.  */
   2181      1.1  mrg bool
   2182      1.1  mrg wi::only_sign_bit_p (const wide_int_ref &x)
   2183      1.1  mrg {
   2184      1.1  mrg   return only_sign_bit_p (x, x.precision);
   2185      1.1  mrg }
   2186      1.1  mrg 
   2187  1.1.1.4  mrg /* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
   2188  1.1.1.4  mrg    down to the previous value that has no bits set outside MASK.
   2189  1.1.1.4  mrg    This rounding wraps for signed values if VAL is negative and
   2190  1.1.1.4  mrg    the top bit of MASK is clear.
   2191  1.1.1.4  mrg 
   2192  1.1.1.4  mrg    For example, round_down_for_mask (6, 0xf1) would give 1 and
   2193  1.1.1.4  mrg    round_down_for_mask (24, 0xf1) would give 17.  */
   2194  1.1.1.4  mrg 
   2195  1.1.1.4  mrg wide_int
   2196  1.1.1.4  mrg wi::round_down_for_mask (const wide_int &val, const wide_int &mask)
   2197  1.1.1.4  mrg {
   2198  1.1.1.4  mrg   /* Get the bits in VAL that are outside the mask.  */
   2199  1.1.1.4  mrg   wide_int extra_bits = wi::bit_and_not (val, mask);
   2200  1.1.1.4  mrg   if (extra_bits == 0)
   2201  1.1.1.4  mrg     return val;
   2202  1.1.1.4  mrg 
   2203  1.1.1.4  mrg   /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s
   2204  1.1.1.4  mrg      below that bit.  */
   2205  1.1.1.4  mrg   unsigned int precision = val.get_precision ();
   2206  1.1.1.4  mrg   wide_int lower_mask = wi::mask (precision - wi::clz (extra_bits),
   2207  1.1.1.4  mrg 				  false, precision);
   2208  1.1.1.4  mrg 
   2209  1.1.1.4  mrg   /* Clear the bits that aren't in MASK, but ensure that all bits
   2210  1.1.1.4  mrg      in MASK below the top cleared bit are set.  */
   2211  1.1.1.4  mrg   return (val & mask) | (mask & lower_mask);
   2212  1.1.1.4  mrg }
   2213  1.1.1.4  mrg 
   2214  1.1.1.4  mrg /* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
   2215  1.1.1.4  mrg    up to the next value that has no bits set outside MASK.  The rounding
   2216  1.1.1.4  mrg    wraps if there are no suitable values greater than VAL.
   2217  1.1.1.4  mrg 
   2218  1.1.1.4  mrg    For example, round_up_for_mask (6, 0xf1) would give 16 and
   2219  1.1.1.4  mrg    round_up_for_mask (24, 0xf1) would give 32.  */
   2220  1.1.1.4  mrg 
   2221  1.1.1.4  mrg wide_int
   2222  1.1.1.4  mrg wi::round_up_for_mask (const wide_int &val, const wide_int &mask)
   2223  1.1.1.4  mrg {
   2224  1.1.1.4  mrg   /* Get the bits in VAL that are outside the mask.  */
   2225  1.1.1.4  mrg   wide_int extra_bits = wi::bit_and_not (val, mask);
   2226  1.1.1.4  mrg   if (extra_bits == 0)
   2227  1.1.1.4  mrg     return val;
   2228  1.1.1.4  mrg 
   2229  1.1.1.4  mrg   /* Get a mask that is all 1s above the top bit in EXTRA_BITS.  */
   2230  1.1.1.4  mrg   unsigned int precision = val.get_precision ();
   2231  1.1.1.4  mrg   wide_int upper_mask = wi::mask (precision - wi::clz (extra_bits),
   2232  1.1.1.4  mrg 				  true, precision);
   2233  1.1.1.4  mrg 
   2234  1.1.1.4  mrg   /* Get the bits of the mask that are above the top bit in EXTRA_BITS.  */
   2235  1.1.1.4  mrg   upper_mask &= mask;
   2236  1.1.1.4  mrg 
   2237  1.1.1.4  mrg   /* Conceptually we need to:
   2238  1.1.1.4  mrg 
   2239  1.1.1.4  mrg      - clear bits of VAL outside UPPER_MASK
   2240  1.1.1.4  mrg      - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0)
   2241  1.1.1.4  mrg      - propagate the carry through the bits of VAL in UPPER_MASK
   2242  1.1.1.4  mrg 
   2243  1.1.1.4  mrg      If (~VAL & UPPER_MASK) is nonzero, the carry eventually
   2244  1.1.1.4  mrg      reaches that bit and the process leaves all lower bits clear.
   2245  1.1.1.4  mrg      If (~VAL & UPPER_MASK) is zero then the result is also zero.  */
   2246  1.1.1.4  mrg   wide_int tmp = wi::bit_and_not (upper_mask, val);
   2247  1.1.1.4  mrg 
   2248  1.1.1.4  mrg   return (val | tmp) & -tmp;
   2249  1.1.1.4  mrg }
   2250  1.1.1.4  mrg 
   2251  1.1.1.8  mrg /* Compute the modular multiplicative inverse of A modulo B
   2252  1.1.1.8  mrg    using extended Euclid's algorithm.  Assumes A and B are coprime,
   2253  1.1.1.8  mrg    and that A and B have the same precision.  */
   2254  1.1.1.8  mrg wide_int
   2255  1.1.1.8  mrg wi::mod_inv (const wide_int &a, const wide_int &b)
   2256  1.1.1.8  mrg {
   2257  1.1.1.8  mrg   /* Verify the assumption.  */
   2258  1.1.1.8  mrg   gcc_checking_assert (wi::eq_p (wi::gcd (a, b), 1));
   2259  1.1.1.8  mrg 
   2260  1.1.1.8  mrg   unsigned int p = a.get_precision () + 1;
   2261  1.1.1.8  mrg   gcc_checking_assert (b.get_precision () + 1 == p);
   2262  1.1.1.8  mrg   wide_int c = wide_int::from (a, p, UNSIGNED);
   2263  1.1.1.8  mrg   wide_int d = wide_int::from (b, p, UNSIGNED);
   2264  1.1.1.8  mrg   wide_int x0 = wide_int::from (0, p, UNSIGNED);
   2265  1.1.1.8  mrg   wide_int x1 = wide_int::from (1, p, UNSIGNED);
   2266  1.1.1.8  mrg 
   2267  1.1.1.8  mrg   if (wi::eq_p (b, 1))
   2268  1.1.1.8  mrg     return wide_int::from (1, p, UNSIGNED);
   2269  1.1.1.8  mrg 
   2270  1.1.1.8  mrg   while (wi::gt_p (c, 1, UNSIGNED))
   2271  1.1.1.8  mrg     {
   2272  1.1.1.8  mrg       wide_int t = d;
   2273  1.1.1.8  mrg       wide_int q = wi::divmod_trunc (c, d, UNSIGNED, &d);
   2274  1.1.1.8  mrg       c = t;
   2275  1.1.1.8  mrg       wide_int s = x0;
   2276  1.1.1.8  mrg       x0 = wi::sub (x1, wi::mul (q, x0));
   2277  1.1.1.8  mrg       x1 = s;
   2278  1.1.1.8  mrg     }
   2279  1.1.1.8  mrg   if (wi::lt_p (x1, 0, SIGNED))
   2280  1.1.1.8  mrg     x1 += d;
   2281  1.1.1.8  mrg   return x1;
   2282  1.1.1.8  mrg }
   2283  1.1.1.8  mrg 
   2284      1.1  mrg /*
   2285      1.1  mrg  * Private utilities.
   2286      1.1  mrg  */
   2287      1.1  mrg 
   2288      1.1  mrg void gt_ggc_mx (widest_int *) { }
   2289      1.1  mrg void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
   2290      1.1  mrg void gt_pch_nx (widest_int *) { }
   2291      1.1  mrg 
   2292      1.1  mrg template void wide_int::dump () const;
   2293      1.1  mrg template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
   2294      1.1  mrg template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
   2295      1.1  mrg template void offset_int::dump () const;
   2296      1.1  mrg template void widest_int::dump () const;
   2297  1.1.1.3  mrg 
   2298  1.1.1.4  mrg /* We could add all the above ::dump variants here, but wide_int and
   2299  1.1.1.4  mrg    widest_int should handle the common cases.  Besides, you can always
   2300  1.1.1.4  mrg    call the dump method directly.  */
   2301  1.1.1.4  mrg 
   2302  1.1.1.4  mrg DEBUG_FUNCTION void
   2303  1.1.1.4  mrg debug (const wide_int &ref)
   2304  1.1.1.4  mrg {
   2305  1.1.1.4  mrg   ref.dump ();
   2306  1.1.1.4  mrg }
   2307  1.1.1.4  mrg 
   2308  1.1.1.4  mrg DEBUG_FUNCTION void
   2309  1.1.1.4  mrg debug (const wide_int *ptr)
   2310  1.1.1.4  mrg {
   2311  1.1.1.4  mrg   if (ptr)
   2312  1.1.1.4  mrg     debug (*ptr);
   2313  1.1.1.4  mrg   else
   2314  1.1.1.4  mrg     fprintf (stderr, "<nil>\n");
   2315  1.1.1.4  mrg }
   2316  1.1.1.4  mrg 
   2317  1.1.1.4  mrg DEBUG_FUNCTION void
   2318  1.1.1.4  mrg debug (const widest_int &ref)
   2319  1.1.1.4  mrg {
   2320  1.1.1.4  mrg   ref.dump ();
   2321  1.1.1.4  mrg }
   2322  1.1.1.4  mrg 
   2323  1.1.1.4  mrg DEBUG_FUNCTION void
   2324  1.1.1.4  mrg debug (const widest_int *ptr)
   2325  1.1.1.4  mrg {
   2326  1.1.1.4  mrg   if (ptr)
   2327  1.1.1.4  mrg     debug (*ptr);
   2328  1.1.1.4  mrg   else
   2329  1.1.1.4  mrg     fprintf (stderr, "<nil>\n");
   2330  1.1.1.4  mrg }
   2331  1.1.1.3  mrg 
   2332  1.1.1.3  mrg #if CHECKING_P
   2333  1.1.1.3  mrg 
   2334  1.1.1.3  mrg namespace selftest {
   2335  1.1.1.3  mrg 
   2336  1.1.1.3  mrg /* Selftests for wide ints.  We run these multiple times, once per type.  */
   2337  1.1.1.3  mrg 
   2338  1.1.1.3  mrg /* Helper function for building a test value.  */
   2339  1.1.1.3  mrg 
   2340  1.1.1.3  mrg template <class VALUE_TYPE>
   2341  1.1.1.3  mrg static VALUE_TYPE
   2342  1.1.1.3  mrg from_int (int i);
   2343  1.1.1.3  mrg 
   2344  1.1.1.3  mrg /* Specializations of the fixture for each wide-int type.  */
   2345  1.1.1.3  mrg 
   2346  1.1.1.3  mrg /* Specialization for VALUE_TYPE == wide_int.  */
   2347  1.1.1.3  mrg 
   2348  1.1.1.3  mrg template <>
   2349  1.1.1.3  mrg wide_int
   2350  1.1.1.3  mrg from_int (int i)
   2351  1.1.1.3  mrg {
   2352  1.1.1.3  mrg   return wi::shwi (i, 32);
   2353  1.1.1.3  mrg }
   2354  1.1.1.3  mrg 
   2355  1.1.1.3  mrg /* Specialization for VALUE_TYPE == offset_int.  */
   2356  1.1.1.3  mrg 
   2357  1.1.1.3  mrg template <>
   2358  1.1.1.3  mrg offset_int
   2359  1.1.1.3  mrg from_int (int i)
   2360  1.1.1.3  mrg {
   2361  1.1.1.3  mrg   return offset_int (i);
   2362  1.1.1.3  mrg }
   2363  1.1.1.3  mrg 
   2364  1.1.1.3  mrg /* Specialization for VALUE_TYPE == widest_int.  */
   2365  1.1.1.3  mrg 
   2366  1.1.1.3  mrg template <>
   2367  1.1.1.3  mrg widest_int
   2368  1.1.1.3  mrg from_int (int i)
   2369  1.1.1.3  mrg {
   2370  1.1.1.3  mrg   return widest_int (i);
   2371  1.1.1.3  mrg }
   2372  1.1.1.3  mrg 
   2373  1.1.1.3  mrg /* Verify that print_dec (WI, ..., SGN) gives the expected string
   2374  1.1.1.3  mrg    representation (using base 10).  */
   2375  1.1.1.3  mrg 
   2376  1.1.1.3  mrg static void
   2377  1.1.1.3  mrg assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
   2378  1.1.1.3  mrg {
   2379  1.1.1.3  mrg   char buf[WIDE_INT_PRINT_BUFFER_SIZE];
   2380  1.1.1.3  mrg   print_dec (wi, buf, sgn);
   2381  1.1.1.3  mrg   ASSERT_STREQ (expected, buf);
   2382  1.1.1.3  mrg }
   2383  1.1.1.3  mrg 
   2384  1.1.1.3  mrg /* Likewise for base 16.  */
   2385  1.1.1.3  mrg 
   2386  1.1.1.3  mrg static void
   2387  1.1.1.3  mrg assert_hexeq (const char *expected, const wide_int_ref &wi)
   2388  1.1.1.3  mrg {
   2389  1.1.1.3  mrg   char buf[WIDE_INT_PRINT_BUFFER_SIZE];
   2390  1.1.1.3  mrg   print_hex (wi, buf);
   2391  1.1.1.3  mrg   ASSERT_STREQ (expected, buf);
   2392  1.1.1.3  mrg }
   2393  1.1.1.3  mrg 
   2394  1.1.1.3  mrg /* Test cases.  */
   2395  1.1.1.3  mrg 
   2396  1.1.1.3  mrg /* Verify that print_dec and print_hex work for VALUE_TYPE.  */
   2397  1.1.1.3  mrg 
   2398  1.1.1.3  mrg template <class VALUE_TYPE>
   2399  1.1.1.3  mrg static void
   2400  1.1.1.3  mrg test_printing ()
   2401  1.1.1.3  mrg {
   2402  1.1.1.3  mrg   VALUE_TYPE a = from_int<VALUE_TYPE> (42);
   2403  1.1.1.3  mrg   assert_deceq ("42", a, SIGNED);
   2404  1.1.1.3  mrg   assert_hexeq ("0x2a", a);
   2405  1.1.1.4  mrg   assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
   2406  1.1.1.4  mrg   assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
   2407  1.1.1.4  mrg   assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false));
   2408  1.1.1.4  mrg   if (WIDE_INT_MAX_PRECISION > 128)
   2409  1.1.1.4  mrg     {
   2410  1.1.1.4  mrg       assert_hexeq ("0x20000000000000000fffffffffffffffe",
   2411  1.1.1.4  mrg 		    wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
   2412  1.1.1.4  mrg       assert_hexeq ("0x200000000000004000123456789abcdef",
   2413  1.1.1.4  mrg 		    wi::lshift (1, 129) + wi::lshift (1, 74)
   2414  1.1.1.4  mrg 		    + wi::lshift (0x1234567, 32) + 0x89abcdef);
   2415  1.1.1.4  mrg     }
   2416  1.1.1.3  mrg }
   2417  1.1.1.3  mrg 
   2418  1.1.1.3  mrg /* Verify that various operations work correctly for VALUE_TYPE,
   2419  1.1.1.3  mrg    unary and binary, using both function syntax, and
   2420  1.1.1.3  mrg    overloaded-operators.  */
   2421  1.1.1.3  mrg 
   2422  1.1.1.3  mrg template <class VALUE_TYPE>
   2423  1.1.1.3  mrg static void
   2424  1.1.1.3  mrg test_ops ()
   2425  1.1.1.3  mrg {
   2426  1.1.1.3  mrg   VALUE_TYPE a = from_int<VALUE_TYPE> (7);
   2427  1.1.1.3  mrg   VALUE_TYPE b = from_int<VALUE_TYPE> (3);
   2428  1.1.1.3  mrg 
   2429  1.1.1.3  mrg   /* Using functions.  */
   2430  1.1.1.3  mrg   assert_deceq ("-7", wi::neg (a), SIGNED);
   2431  1.1.1.3  mrg   assert_deceq ("10", wi::add (a, b), SIGNED);
   2432  1.1.1.3  mrg   assert_deceq ("4", wi::sub (a, b), SIGNED);
   2433  1.1.1.3  mrg   assert_deceq ("-4", wi::sub (b, a), SIGNED);
   2434  1.1.1.3  mrg   assert_deceq ("21", wi::mul (a, b), SIGNED);
   2435  1.1.1.3  mrg 
   2436  1.1.1.3  mrg   /* Using operators.  */
   2437  1.1.1.3  mrg   assert_deceq ("-7", -a, SIGNED);
   2438  1.1.1.3  mrg   assert_deceq ("10", a + b, SIGNED);
   2439  1.1.1.3  mrg   assert_deceq ("4", a - b, SIGNED);
   2440  1.1.1.3  mrg   assert_deceq ("-4", b - a, SIGNED);
   2441  1.1.1.3  mrg   assert_deceq ("21", a * b, SIGNED);
   2442  1.1.1.3  mrg }
   2443  1.1.1.3  mrg 
   2444  1.1.1.3  mrg /* Verify that various comparisons work correctly for VALUE_TYPE.  */
   2445  1.1.1.3  mrg 
   2446  1.1.1.3  mrg template <class VALUE_TYPE>
   2447  1.1.1.3  mrg static void
   2448  1.1.1.3  mrg test_comparisons ()
   2449  1.1.1.3  mrg {
   2450  1.1.1.3  mrg   VALUE_TYPE a = from_int<VALUE_TYPE> (7);
   2451  1.1.1.3  mrg   VALUE_TYPE b = from_int<VALUE_TYPE> (3);
   2452  1.1.1.3  mrg 
   2453  1.1.1.3  mrg   /* == */
   2454  1.1.1.3  mrg   ASSERT_TRUE (wi::eq_p (a, a));
   2455  1.1.1.3  mrg   ASSERT_FALSE (wi::eq_p (a, b));
   2456  1.1.1.3  mrg 
   2457  1.1.1.3  mrg   /* != */
   2458  1.1.1.3  mrg   ASSERT_TRUE (wi::ne_p (a, b));
   2459  1.1.1.3  mrg   ASSERT_FALSE (wi::ne_p (a, a));
   2460  1.1.1.3  mrg 
   2461  1.1.1.3  mrg   /* < */
   2462  1.1.1.3  mrg   ASSERT_FALSE (wi::lts_p (a, a));
   2463  1.1.1.3  mrg   ASSERT_FALSE (wi::lts_p (a, b));
   2464  1.1.1.3  mrg   ASSERT_TRUE (wi::lts_p (b, a));
   2465  1.1.1.3  mrg 
   2466  1.1.1.3  mrg   /* <= */
   2467  1.1.1.3  mrg   ASSERT_TRUE (wi::les_p (a, a));
   2468  1.1.1.3  mrg   ASSERT_FALSE (wi::les_p (a, b));
   2469  1.1.1.3  mrg   ASSERT_TRUE (wi::les_p (b, a));
   2470  1.1.1.3  mrg 
   2471  1.1.1.3  mrg   /* > */
   2472  1.1.1.3  mrg   ASSERT_FALSE (wi::gts_p (a, a));
   2473  1.1.1.3  mrg   ASSERT_TRUE (wi::gts_p (a, b));
   2474  1.1.1.3  mrg   ASSERT_FALSE (wi::gts_p (b, a));
   2475  1.1.1.3  mrg 
   2476  1.1.1.3  mrg   /* >= */
   2477  1.1.1.3  mrg   ASSERT_TRUE (wi::ges_p (a, a));
   2478  1.1.1.3  mrg   ASSERT_TRUE (wi::ges_p (a, b));
   2479  1.1.1.3  mrg   ASSERT_FALSE (wi::ges_p (b, a));
   2480  1.1.1.3  mrg 
   2481  1.1.1.3  mrg   /* comparison */
   2482  1.1.1.3  mrg   ASSERT_EQ (-1, wi::cmps (b, a));
   2483  1.1.1.3  mrg   ASSERT_EQ (0, wi::cmps (a, a));
   2484  1.1.1.3  mrg   ASSERT_EQ (1, wi::cmps (a, b));
   2485  1.1.1.3  mrg }
   2486  1.1.1.3  mrg 
   2487  1.1.1.3  mrg /* Run all of the selftests, using the given VALUE_TYPE.  */
   2488  1.1.1.3  mrg 
   2489  1.1.1.3  mrg template <class VALUE_TYPE>
   2490  1.1.1.3  mrg static void run_all_wide_int_tests ()
   2491  1.1.1.3  mrg {
   2492  1.1.1.3  mrg   test_printing <VALUE_TYPE> ();
   2493  1.1.1.3  mrg   test_ops <VALUE_TYPE> ();
   2494  1.1.1.3  mrg   test_comparisons <VALUE_TYPE> ();
   2495  1.1.1.3  mrg }
   2496  1.1.1.3  mrg 
   2497  1.1.1.4  mrg /* Test overflow conditions.  */
   2498  1.1.1.4  mrg 
   2499  1.1.1.4  mrg static void
   2500  1.1.1.4  mrg test_overflow ()
   2501  1.1.1.4  mrg {
   2502  1.1.1.4  mrg   static int precs[] = { 31, 32, 33, 63, 64, 65, 127, 128 };
   2503  1.1.1.4  mrg   static int offsets[] = { 16, 1, 0 };
   2504  1.1.1.4  mrg   for (unsigned int i = 0; i < ARRAY_SIZE (precs); ++i)
   2505  1.1.1.4  mrg     for (unsigned int j = 0; j < ARRAY_SIZE (offsets); ++j)
   2506  1.1.1.4  mrg       {
   2507  1.1.1.4  mrg 	int prec = precs[i];
   2508  1.1.1.4  mrg 	int offset = offsets[j];
   2509  1.1.1.5  mrg 	wi::overflow_type overflow;
   2510  1.1.1.4  mrg 	wide_int sum, diff;
   2511  1.1.1.4  mrg 
   2512  1.1.1.4  mrg 	sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
   2513  1.1.1.4  mrg 		       UNSIGNED, &overflow);
   2514  1.1.1.4  mrg 	ASSERT_EQ (sum, -offset);
   2515  1.1.1.5  mrg 	ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);
   2516  1.1.1.4  mrg 
   2517  1.1.1.4  mrg 	sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
   2518  1.1.1.4  mrg 		       UNSIGNED, &overflow);
   2519  1.1.1.4  mrg 	ASSERT_EQ (sum, -offset);
   2520  1.1.1.5  mrg 	ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);
   2521  1.1.1.4  mrg 
   2522  1.1.1.4  mrg 	diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
   2523  1.1.1.4  mrg 			wi::max_value (prec, UNSIGNED),
   2524  1.1.1.4  mrg 			UNSIGNED, &overflow);
   2525  1.1.1.4  mrg 	ASSERT_EQ (diff, -offset);
   2526  1.1.1.5  mrg 	ASSERT_EQ (overflow != wi::OVF_NONE, offset != 0);
   2527  1.1.1.4  mrg 
   2528  1.1.1.4  mrg 	diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
   2529  1.1.1.4  mrg 			wi::max_value (prec, UNSIGNED) - 1,
   2530  1.1.1.4  mrg 			UNSIGNED, &overflow);
   2531  1.1.1.4  mrg 	ASSERT_EQ (diff, 1 - offset);
   2532  1.1.1.5  mrg 	ASSERT_EQ (overflow != wi::OVF_NONE, offset > 1);
   2533  1.1.1.4  mrg     }
   2534  1.1.1.4  mrg }
   2535  1.1.1.4  mrg 
   2536  1.1.1.4  mrg /* Test the round_{down,up}_for_mask functions.  */
   2537  1.1.1.4  mrg 
   2538  1.1.1.4  mrg static void
   2539  1.1.1.4  mrg test_round_for_mask ()
   2540  1.1.1.4  mrg {
   2541  1.1.1.4  mrg   unsigned int prec = 18;
   2542  1.1.1.4  mrg   ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec),
   2543  1.1.1.4  mrg 					  wi::shwi (0xf1, prec)));
   2544  1.1.1.4  mrg   ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec),
   2545  1.1.1.4  mrg 					wi::shwi (0xf1, prec)));
   2546  1.1.1.4  mrg 
   2547  1.1.1.4  mrg   ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec),
   2548  1.1.1.4  mrg 					 wi::shwi (0xf1, prec)));
   2549  1.1.1.4  mrg   ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec),
   2550  1.1.1.4  mrg 					wi::shwi (0xf1, prec)));
   2551  1.1.1.4  mrg 
   2552  1.1.1.4  mrg   ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec),
   2553  1.1.1.4  mrg 					  wi::shwi (0xf1, prec)));
   2554  1.1.1.4  mrg   ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec),
   2555  1.1.1.4  mrg 					wi::shwi (0xf1, prec)));
   2556  1.1.1.4  mrg 
   2557  1.1.1.4  mrg   ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec),
   2558  1.1.1.4  mrg 					     wi::shwi (0x111, prec)));
   2559  1.1.1.4  mrg   ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec),
   2560  1.1.1.4  mrg 					   wi::shwi (0x111, prec)));
   2561  1.1.1.4  mrg 
   2562  1.1.1.4  mrg   ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec),
   2563  1.1.1.4  mrg 					   wi::shwi (0xfc, prec)));
   2564  1.1.1.4  mrg   ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec),
   2565  1.1.1.4  mrg 					 wi::shwi (0xfc, prec)));
   2566  1.1.1.4  mrg 
   2567  1.1.1.4  mrg   ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec),
   2568  1.1.1.4  mrg 					     wi::shwi (0xabc, prec)));
   2569  1.1.1.4  mrg   ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec),
   2570  1.1.1.4  mrg 					   wi::shwi (0xabc, prec)));
   2571  1.1.1.4  mrg 
   2572  1.1.1.4  mrg   ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec),
   2573  1.1.1.4  mrg 					     wi::shwi (0xabc, prec)));
   2574  1.1.1.4  mrg   ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec),
   2575  1.1.1.4  mrg 				       wi::shwi (0xabc, prec)));
   2576  1.1.1.4  mrg 
   2577  1.1.1.4  mrg   ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec),
   2578  1.1.1.4  mrg 					     wi::shwi (0xabc, prec)));
   2579  1.1.1.4  mrg   ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec),
   2580  1.1.1.4  mrg 				       wi::shwi (0xabc, prec)));
   2581  1.1.1.4  mrg }
   2582  1.1.1.4  mrg 
   2583  1.1.1.3  mrg /* Run all of the selftests within this file, for all value types.  */
   2584  1.1.1.3  mrg 
   2585  1.1.1.3  mrg void
   2586  1.1.1.3  mrg wide_int_cc_tests ()
   2587  1.1.1.3  mrg {
   2588  1.1.1.4  mrg   run_all_wide_int_tests <wide_int> ();
   2589  1.1.1.4  mrg   run_all_wide_int_tests <offset_int> ();
   2590  1.1.1.4  mrg   run_all_wide_int_tests <widest_int> ();
   2591  1.1.1.4  mrg   test_overflow ();
   2592  1.1.1.4  mrg   test_round_for_mask ();
   2593  1.1.1.7  mrg   ASSERT_EQ (wi::mask (128, false, 128),
   2594  1.1.1.7  mrg 	     wi::shifted_mask (0, 128, false, 128));
   2595  1.1.1.7  mrg   ASSERT_EQ (wi::mask (128, true, 128),
   2596  1.1.1.7  mrg 	     wi::shifted_mask (0, 128, true, 128));
   2597  1.1.1.3  mrg }
   2598  1.1.1.3  mrg 
   2599  1.1.1.3  mrg } // namespace selftest
   2600  1.1.1.3  mrg #endif /* CHECKING_P */
   2601