Home | History | Annotate | Line # | Download | only in include
longlong.h revision 1.1
      1  1.1  mrg /* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
      2  1.1  mrg    Copyright (C) 1991-2014 Free Software Foundation, Inc.
      3  1.1  mrg 
      4  1.1  mrg    This file is part of the GNU C Library.
      5  1.1  mrg 
      6  1.1  mrg    The GNU C Library is free software; you can redistribute it and/or
      7  1.1  mrg    modify it under the terms of the GNU Lesser General Public
      8  1.1  mrg    License as published by the Free Software Foundation; either
      9  1.1  mrg    version 2.1 of the License, or (at your option) any later version.
     10  1.1  mrg 
     11  1.1  mrg    In addition to the permissions in the GNU Lesser General Public
     12  1.1  mrg    License, the Free Software Foundation gives you unlimited
     13  1.1  mrg    permission to link the compiled version of this file into
     14  1.1  mrg    combinations with other programs, and to distribute those
     15  1.1  mrg    combinations without any restriction coming from the use of this
     16  1.1  mrg    file.  (The Lesser General Public License restrictions do apply in
     17  1.1  mrg    other respects; for example, they cover modification of the file,
     18  1.1  mrg    and distribution when not linked into a combine executable.)
     19  1.1  mrg 
     20  1.1  mrg    The GNU C Library is distributed in the hope that it will be useful,
     21  1.1  mrg    but WITHOUT ANY WARRANTY; without even the implied warranty of
     22  1.1  mrg    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     23  1.1  mrg    Lesser General Public License for more details.
     24  1.1  mrg 
     25  1.1  mrg    You should have received a copy of the GNU Lesser General Public
     26  1.1  mrg    License along with the GNU C Library; if not, see
     27  1.1  mrg    <http://www.gnu.org/licenses/>.  */
     28  1.1  mrg 
     29  1.1  mrg /* You have to define the following before including this file:
     30  1.1  mrg 
     31  1.1  mrg    UWtype -- An unsigned type, default type for operations (typically a "word")
     32  1.1  mrg    UHWtype -- An unsigned type, at least half the size of UWtype.
     33  1.1  mrg    UDWtype -- An unsigned type, at least twice as large a UWtype
     34  1.1  mrg    W_TYPE_SIZE -- size in bits of UWtype
     35  1.1  mrg 
     36  1.1  mrg    UQItype -- Unsigned 8 bit type.
     37  1.1  mrg    SItype, USItype -- Signed and unsigned 32 bit types.
     38  1.1  mrg    DItype, UDItype -- Signed and unsigned 64 bit types.
     39  1.1  mrg 
     40  1.1  mrg    On a 32 bit machine UWtype should typically be USItype;
     41  1.1  mrg    on a 64 bit machine, UWtype should typically be UDItype.  */
     42  1.1  mrg 
     43  1.1  mrg #define __BITS4 (W_TYPE_SIZE / 4)
     44  1.1  mrg #define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
     45  1.1  mrg #define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
     46  1.1  mrg #define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
     47  1.1  mrg 
     48  1.1  mrg #ifndef W_TYPE_SIZE
     49  1.1  mrg #define W_TYPE_SIZE	32
     50  1.1  mrg #define UWtype		USItype
     51  1.1  mrg #define UHWtype		USItype
     52  1.1  mrg #define UDWtype		UDItype
     53  1.1  mrg #endif
     54  1.1  mrg 
     55  1.1  mrg /* Used in glibc only.  */
     56  1.1  mrg #ifndef attribute_hidden
     57  1.1  mrg #define attribute_hidden
     58  1.1  mrg #endif
     59  1.1  mrg 
     60  1.1  mrg extern const UQItype __clz_tab[256] attribute_hidden;
     61  1.1  mrg 
     62  1.1  mrg /* Define auxiliary asm macros.
     63  1.1  mrg 
     64  1.1  mrg    1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
     65  1.1  mrg    UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
     66  1.1  mrg    word product in HIGH_PROD and LOW_PROD.
     67  1.1  mrg 
     68  1.1  mrg    2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
     69  1.1  mrg    UDWtype product.  This is just a variant of umul_ppmm.
     70  1.1  mrg 
     71  1.1  mrg    3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
     72  1.1  mrg    denominator) divides a UDWtype, composed by the UWtype integers
     73  1.1  mrg    HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
     74  1.1  mrg    in QUOTIENT and the remainder in REMAINDER.  HIGH_NUMERATOR must be less
     75  1.1  mrg    than DENOMINATOR for correct operation.  If, in addition, the most
     76  1.1  mrg    significant bit of DENOMINATOR must be 1, then the pre-processor symbol
     77  1.1  mrg    UDIV_NEEDS_NORMALIZATION is defined to 1.
     78  1.1  mrg 
     79  1.1  mrg    4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
     80  1.1  mrg    denominator).  Like udiv_qrnnd but the numbers are signed.  The quotient
     81  1.1  mrg    is rounded towards 0.
     82  1.1  mrg 
     83  1.1  mrg    5) count_leading_zeros(count, x) counts the number of zero-bits from the
     84  1.1  mrg    msb to the first nonzero bit in the UWtype X.  This is the number of
     85  1.1  mrg    steps X needs to be shifted left to set the msb.  Undefined for X == 0,
     86  1.1  mrg    unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value.
     87  1.1  mrg 
     88  1.1  mrg    6) count_trailing_zeros(count, x) like count_leading_zeros, but counts
     89  1.1  mrg    from the least significant end.
     90  1.1  mrg 
     91  1.1  mrg    7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
     92  1.1  mrg    high_addend_2, low_addend_2) adds two UWtype integers, composed by
     93  1.1  mrg    HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
     94  1.1  mrg    respectively.  The result is placed in HIGH_SUM and LOW_SUM.  Overflow
     95  1.1  mrg    (i.e. carry out) is not stored anywhere, and is lost.
     96  1.1  mrg 
     97  1.1  mrg    8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
     98  1.1  mrg    high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
     99  1.1  mrg    composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and
    100  1.1  mrg    LOW_SUBTRAHEND_2 respectively.  The result is placed in HIGH_DIFFERENCE
    101  1.1  mrg    and LOW_DIFFERENCE.  Overflow (i.e. carry out) is not stored anywhere,
    102  1.1  mrg    and is lost.
    103  1.1  mrg 
    104  1.1  mrg    If any of these macros are left undefined for a particular CPU,
    105  1.1  mrg    C macros are used.  */
    106  1.1  mrg 
    107  1.1  mrg /* The CPUs come in alphabetical order below.
    108  1.1  mrg 
    109  1.1  mrg    Please add support for more CPUs here, or improve the current support
    110  1.1  mrg    for the CPUs below!
    111  1.1  mrg    (E.g. WE32100, IBM360.)  */
    112  1.1  mrg 
    113  1.1  mrg #if defined (__GNUC__) && !defined (NO_ASM)
    114  1.1  mrg 
    115  1.1  mrg /* We sometimes need to clobber "cc" with gcc2, but that would not be
    116  1.1  mrg    understood by gcc1.  Use cpp to avoid major code duplication.  */
    117  1.1  mrg #if __GNUC__ < 2
    118  1.1  mrg #define __CLOBBER_CC
    119  1.1  mrg #define __AND_CLOBBER_CC
    120  1.1  mrg #else /* __GNUC__ >= 2 */
    121  1.1  mrg #define __CLOBBER_CC : "cc"
    122  1.1  mrg #define __AND_CLOBBER_CC , "cc"
    123  1.1  mrg #endif /* __GNUC__ < 2 */
    124  1.1  mrg 
    125  1.1  mrg #if defined (__aarch64__)
    126  1.1  mrg 
    127  1.1  mrg #if W_TYPE_SIZE == 32
    128  1.1  mrg #define count_leading_zeros(COUNT, X)	((COUNT) = __builtin_clz (X))
    129  1.1  mrg #define count_trailing_zeros(COUNT, X)   ((COUNT) = __builtin_ctz (X))
    130  1.1  mrg #define COUNT_LEADING_ZEROS_0 32
    131  1.1  mrg #endif /* W_TYPE_SIZE == 32 */
    132  1.1  mrg 
    133  1.1  mrg #if W_TYPE_SIZE == 64
    134  1.1  mrg #define count_leading_zeros(COUNT, X)	((COUNT) = __builtin_clzll (X))
    135  1.1  mrg #define count_trailing_zeros(COUNT, X)   ((COUNT) = __builtin_ctzll (X))
    136  1.1  mrg #define COUNT_LEADING_ZEROS_0 64
    137  1.1  mrg #endif /* W_TYPE_SIZE == 64 */
    138  1.1  mrg 
    139  1.1  mrg #endif /* __aarch64__ */
    140  1.1  mrg 
    141  1.1  mrg #if defined (__alpha) && W_TYPE_SIZE == 64
    142  1.1  mrg /* There is a bug in g++ before version 5 that
    143  1.1  mrg    errors on __builtin_alpha_umulh.  */
    144  1.1  mrg #if !defined(__cplusplus) || __GNUC__ >= 5
    145  1.1  mrg #define umul_ppmm(ph, pl, m0, m1) \
    146  1.1  mrg   do {									\
    147  1.1  mrg     UDItype __m0 = (m0), __m1 = (m1);					\
    148  1.1  mrg     (ph) = __builtin_alpha_umulh (__m0, __m1);				\
    149  1.1  mrg     (pl) = __m0 * __m1;							\
    150  1.1  mrg   } while (0)
    151  1.1  mrg #define UMUL_TIME 46
    152  1.1  mrg #endif /* !c++ */
    153  1.1  mrg #ifndef LONGLONG_STANDALONE
    154  1.1  mrg #define udiv_qrnnd(q, r, n1, n0, d) \
    155  1.1  mrg   do { UDItype __r;							\
    156  1.1  mrg     (q) = __udiv_qrnnd (&__r, (n1), (n0), (d));				\
    157  1.1  mrg     (r) = __r;								\
    158  1.1  mrg   } while (0)
    159  1.1  mrg extern UDItype __udiv_qrnnd (UDItype *, UDItype, UDItype, UDItype);
    160  1.1  mrg #define UDIV_TIME 220
    161  1.1  mrg #endif /* LONGLONG_STANDALONE */
    162  1.1  mrg #ifdef __alpha_cix__
    163  1.1  mrg #define count_leading_zeros(COUNT,X)	((COUNT) = __builtin_clzl (X))
    164  1.1  mrg #define count_trailing_zeros(COUNT,X)	((COUNT) = __builtin_ctzl (X))
    165  1.1  mrg #define COUNT_LEADING_ZEROS_0 64
    166  1.1  mrg #else
    167  1.1  mrg #define count_leading_zeros(COUNT,X) \
    168  1.1  mrg   do {									\
    169  1.1  mrg     UDItype __xr = (X), __t, __a;					\
    170  1.1  mrg     __t = __builtin_alpha_cmpbge (0, __xr);				\
    171  1.1  mrg     __a = __clz_tab[__t ^ 0xff] - 1;					\
    172  1.1  mrg     __t = __builtin_alpha_extbl (__xr, __a);				\
    173  1.1  mrg     (COUNT) = 64 - (__clz_tab[__t] + __a*8);				\
    174  1.1  mrg   } while (0)
    175  1.1  mrg #define count_trailing_zeros(COUNT,X) \
    176  1.1  mrg   do {									\
    177  1.1  mrg     UDItype __xr = (X), __t, __a;					\
    178  1.1  mrg     __t = __builtin_alpha_cmpbge (0, __xr);				\
    179  1.1  mrg     __t = ~__t & -~__t;							\
    180  1.1  mrg     __a = ((__t & 0xCC) != 0) * 2;					\
    181  1.1  mrg     __a += ((__t & 0xF0) != 0) * 4;					\
    182  1.1  mrg     __a += ((__t & 0xAA) != 0);						\
    183  1.1  mrg     __t = __builtin_alpha_extbl (__xr, __a);				\
    184  1.1  mrg     __a <<= 3;								\
    185  1.1  mrg     __t &= -__t;							\
    186  1.1  mrg     __a += ((__t & 0xCC) != 0) * 2;					\
    187  1.1  mrg     __a += ((__t & 0xF0) != 0) * 4;					\
    188  1.1  mrg     __a += ((__t & 0xAA) != 0);						\
    189  1.1  mrg     (COUNT) = __a;							\
    190  1.1  mrg   } while (0)
    191  1.1  mrg #endif /* __alpha_cix__ */
    192  1.1  mrg #endif /* __alpha */
    193  1.1  mrg 
    194  1.1  mrg #if defined (__arc__) && W_TYPE_SIZE == 32
    195  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
    196  1.1  mrg   __asm__ ("add.f	%1, %4, %5\n\tadc	%0, %2, %3"		\
    197  1.1  mrg 	   : "=r" ((USItype) (sh)),					\
    198  1.1  mrg 	     "=&r" ((USItype) (sl))					\
    199  1.1  mrg 	   : "%r" ((USItype) (ah)),					\
    200  1.1  mrg 	     "rIJ" ((USItype) (bh)),					\
    201  1.1  mrg 	     "%r" ((USItype) (al)),					\
    202  1.1  mrg 	     "rIJ" ((USItype) (bl)))
    203  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    204  1.1  mrg   __asm__ ("sub.f	%1, %4, %5\n\tsbc	%0, %2, %3"		\
    205  1.1  mrg 	   : "=r" ((USItype) (sh)),					\
    206  1.1  mrg 	     "=&r" ((USItype) (sl))					\
    207  1.1  mrg 	   : "r" ((USItype) (ah)),					\
    208  1.1  mrg 	     "rIJ" ((USItype) (bh)),					\
    209  1.1  mrg 	     "r" ((USItype) (al)),					\
    210  1.1  mrg 	     "rIJ" ((USItype) (bl)))
    211  1.1  mrg 
    212  1.1  mrg #define __umulsidi3(u,v) ((UDItype)(USItype)u*(USItype)v)
    213  1.1  mrg #ifdef __ARC_NORM__
    214  1.1  mrg #define count_leading_zeros(count, x) \
    215  1.1  mrg   do									\
    216  1.1  mrg     {									\
    217  1.1  mrg       SItype c_;							\
    218  1.1  mrg 									\
    219  1.1  mrg       __asm__ ("norm.f\t%0,%1\n\tmov.mi\t%0,-1" : "=r" (c_) : "r" (x) : "cc");\
    220  1.1  mrg       (count) = c_ + 1;							\
    221  1.1  mrg     }									\
    222  1.1  mrg   while (0)
    223  1.1  mrg #define COUNT_LEADING_ZEROS_0 32
    224  1.1  mrg #endif
    225  1.1  mrg #endif
    226  1.1  mrg 
    227  1.1  mrg #if defined (__arm__) && (defined (__thumb2__) || !defined (__thumb__)) \
    228  1.1  mrg  && W_TYPE_SIZE == 32
    229  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
    230  1.1  mrg   __asm__ ("adds	%1, %4, %5\n\tadc	%0, %2, %3"		\
    231  1.1  mrg 	   : "=r" ((USItype) (sh)),					\
    232  1.1  mrg 	     "=&r" ((USItype) (sl))					\
    233  1.1  mrg 	   : "%r" ((USItype) (ah)),					\
    234  1.1  mrg 	     "rI" ((USItype) (bh)),					\
    235  1.1  mrg 	     "%r" ((USItype) (al)),					\
    236  1.1  mrg 	     "rI" ((USItype) (bl)) __CLOBBER_CC)
    237  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    238  1.1  mrg   __asm__ ("subs	%1, %4, %5\n\tsbc	%0, %2, %3"		\
    239  1.1  mrg 	   : "=r" ((USItype) (sh)),					\
    240  1.1  mrg 	     "=&r" ((USItype) (sl))					\
    241  1.1  mrg 	   : "r" ((USItype) (ah)),					\
    242  1.1  mrg 	     "rI" ((USItype) (bh)),					\
    243  1.1  mrg 	     "r" ((USItype) (al)),					\
    244  1.1  mrg 	     "rI" ((USItype) (bl)) __CLOBBER_CC)
    245  1.1  mrg # if defined(__ARM_ARCH_2__) || defined(__ARM_ARCH_2A__) \
    246  1.1  mrg      || defined(__ARM_ARCH_3__)
    247  1.1  mrg #  define umul_ppmm(xh, xl, a, b)					\
    248  1.1  mrg   do {									\
    249  1.1  mrg     register USItype __t0, __t1, __t2;					\
    250  1.1  mrg     __asm__ ("%@ Inlined umul_ppmm\n"					\
    251  1.1  mrg 	   "	mov	%2, %5, lsr #16\n"				\
    252  1.1  mrg 	   "	mov	%0, %6, lsr #16\n"				\
    253  1.1  mrg 	   "	bic	%3, %5, %2, lsl #16\n"				\
    254  1.1  mrg 	   "	bic	%4, %6, %0, lsl #16\n"				\
    255  1.1  mrg 	   "	mul	%1, %3, %4\n"					\
    256  1.1  mrg 	   "	mul	%4, %2, %4\n"					\
    257  1.1  mrg 	   "	mul	%3, %0, %3\n"					\
    258  1.1  mrg 	   "	mul	%0, %2, %0\n"					\
    259  1.1  mrg 	   "	adds	%3, %4, %3\n"					\
    260  1.1  mrg 	   "	addcs	%0, %0, #65536\n"				\
    261  1.1  mrg 	   "	adds	%1, %1, %3, lsl #16\n"				\
    262  1.1  mrg 	   "	adc	%0, %0, %3, lsr #16"				\
    263  1.1  mrg 	   : "=&r" ((USItype) (xh)),					\
    264  1.1  mrg 	     "=r" ((USItype) (xl)),					\
    265  1.1  mrg 	     "=&r" (__t0), "=&r" (__t1), "=r" (__t2)			\
    266  1.1  mrg 	   : "r" ((USItype) (a)),					\
    267  1.1  mrg 	     "r" ((USItype) (b)) __CLOBBER_CC );			\
    268  1.1  mrg   } while (0)
    269  1.1  mrg #  define UMUL_TIME 20
    270  1.1  mrg # else
    271  1.1  mrg #  define umul_ppmm(xh, xl, a, b)					\
    272  1.1  mrg   do {									\
    273  1.1  mrg     /* Generate umull, under compiler control.  */			\
    274  1.1  mrg     register UDItype __t0 = (UDItype)(USItype)(a) * (USItype)(b);	\
    275  1.1  mrg     (xl) = (USItype)__t0;						\
    276  1.1  mrg     (xh) = (USItype)(__t0 >> 32);					\
    277  1.1  mrg   } while (0)
    278  1.1  mrg #  define UMUL_TIME 3
    279  1.1  mrg # endif
    280  1.1  mrg # define UDIV_TIME 100
    281  1.1  mrg #endif /* __arm__ */
    282  1.1  mrg 
    283  1.1  mrg #if defined(__arm__)
    284  1.1  mrg /* Let gcc decide how best to implement count_leading_zeros.  */
    285  1.1  mrg #define count_leading_zeros(COUNT,X)	((COUNT) = __builtin_clz (X))
    286  1.1  mrg #define count_trailing_zeros(COUNT,X)   ((COUNT) = __builtin_ctz (X))
    287  1.1  mrg #define COUNT_LEADING_ZEROS_0 32
    288  1.1  mrg #endif
    289  1.1  mrg 
    290  1.1  mrg #if defined (__AVR__)
    291  1.1  mrg 
    292  1.1  mrg #if W_TYPE_SIZE == 16
    293  1.1  mrg #define count_leading_zeros(COUNT,X)  ((COUNT) = __builtin_clz (X))
    294  1.1  mrg #define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctz (X))
    295  1.1  mrg #define COUNT_LEADING_ZEROS_0 16
    296  1.1  mrg #endif /* W_TYPE_SIZE == 16 */
    297  1.1  mrg 
    298  1.1  mrg #if W_TYPE_SIZE == 32
    299  1.1  mrg #define count_leading_zeros(COUNT,X)  ((COUNT) = __builtin_clzl (X))
    300  1.1  mrg #define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctzl (X))
    301  1.1  mrg #define COUNT_LEADING_ZEROS_0 32
    302  1.1  mrg #endif /* W_TYPE_SIZE == 32 */
    303  1.1  mrg 
    304  1.1  mrg #if W_TYPE_SIZE == 64
    305  1.1  mrg #define count_leading_zeros(COUNT,X)  ((COUNT) = __builtin_clzll (X))
    306  1.1  mrg #define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctzll (X))
    307  1.1  mrg #define COUNT_LEADING_ZEROS_0 64
    308  1.1  mrg #endif /* W_TYPE_SIZE == 64 */
    309  1.1  mrg 
    310  1.1  mrg #endif /* defined (__AVR__) */
    311  1.1  mrg 
    312  1.1  mrg #if defined (__CRIS__)
    313  1.1  mrg 
    314  1.1  mrg #if __CRIS_arch_version >= 3
    315  1.1  mrg #define count_leading_zeros(COUNT, X) ((COUNT) = __builtin_clz (X))
    316  1.1  mrg #define COUNT_LEADING_ZEROS_0 32
    317  1.1  mrg #endif /* __CRIS_arch_version >= 3 */
    318  1.1  mrg 
    319  1.1  mrg #if __CRIS_arch_version >= 8
    320  1.1  mrg #define count_trailing_zeros(COUNT, X) ((COUNT) = __builtin_ctz (X))
    321  1.1  mrg #endif /* __CRIS_arch_version >= 8 */
    322  1.1  mrg 
    323  1.1  mrg #if __CRIS_arch_version >= 10
    324  1.1  mrg #define __umulsidi3(u,v) ((UDItype)(USItype) (u) * (UDItype)(USItype) (v))
    325  1.1  mrg #else
    326  1.1  mrg #define __umulsidi3 __umulsidi3
    327  1.1  mrg extern UDItype __umulsidi3 (USItype, USItype);
    328  1.1  mrg #endif /* __CRIS_arch_version >= 10 */
    329  1.1  mrg 
    330  1.1  mrg #define umul_ppmm(w1, w0, u, v)		\
    331  1.1  mrg   do {					\
    332  1.1  mrg     UDItype __x = __umulsidi3 (u, v);	\
    333  1.1  mrg     (w0) = (USItype) (__x);		\
    334  1.1  mrg     (w1) = (USItype) (__x >> 32);	\
    335  1.1  mrg   } while (0)
    336  1.1  mrg 
    337  1.1  mrg /* FIXME: defining add_ssaaaa and sub_ddmmss should be advantageous for
    338  1.1  mrg    DFmode ("double" intrinsics, avoiding two of the three insns handling
    339  1.1  mrg    carry), but defining them as open-code C composing and doing the
    340  1.1  mrg    operation in DImode (UDImode) shows that the DImode needs work:
    341  1.1  mrg    register pressure from requiring neighboring registers and the
    342  1.1  mrg    traffic to and from them come to dominate, in the 4.7 series.  */
    343  1.1  mrg 
    344  1.1  mrg #endif /* defined (__CRIS__) */
    345  1.1  mrg 
    346  1.1  mrg #if defined (__hppa) && W_TYPE_SIZE == 32
    347  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
    348  1.1  mrg   __asm__ ("add %4,%5,%1\n\taddc %2,%3,%0"				\
    349  1.1  mrg 	   : "=r" ((USItype) (sh)),					\
    350  1.1  mrg 	     "=&r" ((USItype) (sl))					\
    351  1.1  mrg 	   : "%rM" ((USItype) (ah)),					\
    352  1.1  mrg 	     "rM" ((USItype) (bh)),					\
    353  1.1  mrg 	     "%rM" ((USItype) (al)),					\
    354  1.1  mrg 	     "rM" ((USItype) (bl)))
    355  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    356  1.1  mrg   __asm__ ("sub %4,%5,%1\n\tsubb %2,%3,%0"				\
    357  1.1  mrg 	   : "=r" ((USItype) (sh)),					\
    358  1.1  mrg 	     "=&r" ((USItype) (sl))					\
    359  1.1  mrg 	   : "rM" ((USItype) (ah)),					\
    360  1.1  mrg 	     "rM" ((USItype) (bh)),					\
    361  1.1  mrg 	     "rM" ((USItype) (al)),					\
    362  1.1  mrg 	     "rM" ((USItype) (bl)))
    363  1.1  mrg #if defined (_PA_RISC1_1)
    364  1.1  mrg #define umul_ppmm(w1, w0, u, v) \
    365  1.1  mrg   do {									\
    366  1.1  mrg     union								\
    367  1.1  mrg       {									\
    368  1.1  mrg 	UDItype __f;							\
    369  1.1  mrg 	struct {USItype __w1, __w0;} __w1w0;				\
    370  1.1  mrg       } __t;								\
    371  1.1  mrg     __asm__ ("xmpyu %1,%2,%0"						\
    372  1.1  mrg 	     : "=x" (__t.__f)						\
    373  1.1  mrg 	     : "x" ((USItype) (u)),					\
    374  1.1  mrg 	       "x" ((USItype) (v)));					\
    375  1.1  mrg     (w1) = __t.__w1w0.__w1;						\
    376  1.1  mrg     (w0) = __t.__w1w0.__w0;						\
    377  1.1  mrg      } while (0)
    378  1.1  mrg #define UMUL_TIME 8
    379  1.1  mrg #else
    380  1.1  mrg #define UMUL_TIME 30
    381  1.1  mrg #endif
    382  1.1  mrg #define UDIV_TIME 40
    383  1.1  mrg #define count_leading_zeros(count, x) \
    384  1.1  mrg   do {									\
    385  1.1  mrg     USItype __tmp;							\
    386  1.1  mrg     __asm__ (								\
    387  1.1  mrg        "ldi		1,%0\n"						\
    388  1.1  mrg "	extru,=		%1,15,16,%%r0		; Bits 31..16 zero?\n"	\
    389  1.1  mrg "	extru,tr	%1,15,16,%1		; No.  Shift down, skip add.\n"\
    390  1.1  mrg "	ldo		16(%0),%0		; Yes.  Perform add.\n"	\
    391  1.1  mrg "	extru,=		%1,23,8,%%r0		; Bits 15..8 zero?\n"	\
    392  1.1  mrg "	extru,tr	%1,23,8,%1		; No.  Shift down, skip add.\n"\
    393  1.1  mrg "	ldo		8(%0),%0		; Yes.  Perform add.\n"	\
    394  1.1  mrg "	extru,=		%1,27,4,%%r0		; Bits 7..4 zero?\n"	\
    395  1.1  mrg "	extru,tr	%1,27,4,%1		; No.  Shift down, skip add.\n"\
    396  1.1  mrg "	ldo		4(%0),%0		; Yes.  Perform add.\n"	\
    397  1.1  mrg "	extru,=		%1,29,2,%%r0		; Bits 3..2 zero?\n"	\
    398  1.1  mrg "	extru,tr	%1,29,2,%1		; No.  Shift down, skip add.\n"\
    399  1.1  mrg "	ldo		2(%0),%0		; Yes.  Perform add.\n"	\
    400  1.1  mrg "	extru		%1,30,1,%1		; Extract bit 1.\n"	\
    401  1.1  mrg "	sub		%0,%1,%0		; Subtract it.\n"	\
    402  1.1  mrg 	: "=r" (count), "=r" (__tmp) : "1" (x));			\
    403  1.1  mrg   } while (0)
    404  1.1  mrg #endif
    405  1.1  mrg 
    406  1.1  mrg #if (defined (__i370__) || defined (__s390__) || defined (__mvs__)) && W_TYPE_SIZE == 32
    407  1.1  mrg #if !defined (__zarch__)
    408  1.1  mrg #define smul_ppmm(xh, xl, m0, m1) \
    409  1.1  mrg   do {									\
    410  1.1  mrg     union {DItype __ll;							\
    411  1.1  mrg 	   struct {USItype __h, __l;} __i;				\
    412  1.1  mrg 	  } __x;							\
    413  1.1  mrg     __asm__ ("lr %N0,%1\n\tmr %0,%2"					\
    414  1.1  mrg 	     : "=&r" (__x.__ll)						\
    415  1.1  mrg 	     : "r" (m0), "r" (m1));					\
    416  1.1  mrg     (xh) = __x.__i.__h; (xl) = __x.__i.__l;				\
    417  1.1  mrg   } while (0)
    418  1.1  mrg #define sdiv_qrnnd(q, r, n1, n0, d) \
    419  1.1  mrg   do {									\
    420  1.1  mrg     union {DItype __ll;							\
    421  1.1  mrg 	   struct {USItype __h, __l;} __i;				\
    422  1.1  mrg 	  } __x;							\
    423  1.1  mrg     __x.__i.__h = n1; __x.__i.__l = n0;					\
    424  1.1  mrg     __asm__ ("dr %0,%2"							\
    425  1.1  mrg 	     : "=r" (__x.__ll)						\
    426  1.1  mrg 	     : "0" (__x.__ll), "r" (d));				\
    427  1.1  mrg     (q) = __x.__i.__l; (r) = __x.__i.__h;				\
    428  1.1  mrg   } while (0)
    429  1.1  mrg #else
    430  1.1  mrg #define smul_ppmm(xh, xl, m0, m1) \
    431  1.1  mrg   do {                                                                  \
    432  1.1  mrg     register SItype __r0 __asm__ ("0");					\
    433  1.1  mrg     register SItype __r1 __asm__ ("1") = (m0);				\
    434  1.1  mrg 									\
    435  1.1  mrg     __asm__ ("mr\t%%r0,%3"                                              \
    436  1.1  mrg 	     : "=r" (__r0), "=r" (__r1)					\
    437  1.1  mrg 	     : "r"  (__r1),  "r" (m1));					\
    438  1.1  mrg     (xh) = __r0; (xl) = __r1;						\
    439  1.1  mrg   } while (0)
    440  1.1  mrg 
    441  1.1  mrg #define sdiv_qrnnd(q, r, n1, n0, d) \
    442  1.1  mrg   do {									\
    443  1.1  mrg     register SItype __r0 __asm__ ("0") = (n1);				\
    444  1.1  mrg     register SItype __r1 __asm__ ("1") = (n0);				\
    445  1.1  mrg 									\
    446  1.1  mrg     __asm__ ("dr\t%%r0,%4"                                              \
    447  1.1  mrg 	     : "=r" (__r0), "=r" (__r1)					\
    448  1.1  mrg 	     : "r" (__r0), "r" (__r1), "r" (d));			\
    449  1.1  mrg     (q) = __r1; (r) = __r0;						\
    450  1.1  mrg   } while (0)
    451  1.1  mrg #endif /* __zarch__ */
    452  1.1  mrg #endif
    453  1.1  mrg 
    454  1.1  mrg #if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
    455  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
    456  1.1  mrg   __asm__ ("add{l} {%5,%1|%1,%5}\n\tadc{l} {%3,%0|%0,%3}"		\
    457  1.1  mrg 	   : "=r" ((USItype) (sh)),					\
    458  1.1  mrg 	     "=&r" ((USItype) (sl))					\
    459  1.1  mrg 	   : "%0" ((USItype) (ah)),					\
    460  1.1  mrg 	     "g" ((USItype) (bh)),					\
    461  1.1  mrg 	     "%1" ((USItype) (al)),					\
    462  1.1  mrg 	     "g" ((USItype) (bl)))
    463  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    464  1.1  mrg   __asm__ ("sub{l} {%5,%1|%1,%5}\n\tsbb{l} {%3,%0|%0,%3}"		\
    465  1.1  mrg 	   : "=r" ((USItype) (sh)),					\
    466  1.1  mrg 	     "=&r" ((USItype) (sl))					\
    467  1.1  mrg 	   : "0" ((USItype) (ah)),					\
    468  1.1  mrg 	     "g" ((USItype) (bh)),					\
    469  1.1  mrg 	     "1" ((USItype) (al)),					\
    470  1.1  mrg 	     "g" ((USItype) (bl)))
    471  1.1  mrg #define umul_ppmm(w1, w0, u, v) \
    472  1.1  mrg   __asm__ ("mul{l} %3"							\
    473  1.1  mrg 	   : "=a" ((USItype) (w0)),					\
    474  1.1  mrg 	     "=d" ((USItype) (w1))					\
    475  1.1  mrg 	   : "%0" ((USItype) (u)),					\
    476  1.1  mrg 	     "rm" ((USItype) (v)))
    477  1.1  mrg #define udiv_qrnnd(q, r, n1, n0, dv) \
    478  1.1  mrg   __asm__ ("div{l} %4"							\
    479  1.1  mrg 	   : "=a" ((USItype) (q)),					\
    480  1.1  mrg 	     "=d" ((USItype) (r))					\
    481  1.1  mrg 	   : "0" ((USItype) (n0)),					\
    482  1.1  mrg 	     "1" ((USItype) (n1)),					\
    483  1.1  mrg 	     "rm" ((USItype) (dv)))
    484  1.1  mrg #define count_leading_zeros(count, x)	((count) = __builtin_clz (x))
    485  1.1  mrg #define count_trailing_zeros(count, x)	((count) = __builtin_ctz (x))
    486  1.1  mrg #define UMUL_TIME 40
    487  1.1  mrg #define UDIV_TIME 40
    488  1.1  mrg #endif /* 80x86 */
    489  1.1  mrg 
    490  1.1  mrg #if defined (__x86_64__) && W_TYPE_SIZE == 64
    491  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
    492  1.1  mrg   __asm__ ("add{q} {%5,%1|%1,%5}\n\tadc{q} {%3,%0|%0,%3}"		\
    493  1.1  mrg 	   : "=r" ((UDItype) (sh)),					\
    494  1.1  mrg 	     "=&r" ((UDItype) (sl))					\
    495  1.1  mrg 	   : "%0" ((UDItype) (ah)),					\
    496  1.1  mrg 	     "rme" ((UDItype) (bh)),					\
    497  1.1  mrg 	     "%1" ((UDItype) (al)),					\
    498  1.1  mrg 	     "rme" ((UDItype) (bl)))
    499  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    500  1.1  mrg   __asm__ ("sub{q} {%5,%1|%1,%5}\n\tsbb{q} {%3,%0|%0,%3}"		\
    501  1.1  mrg 	   : "=r" ((UDItype) (sh)),					\
    502  1.1  mrg 	     "=&r" ((UDItype) (sl))					\
    503  1.1  mrg 	   : "0" ((UDItype) (ah)),					\
    504  1.1  mrg 	     "rme" ((UDItype) (bh)),					\
    505  1.1  mrg 	     "1" ((UDItype) (al)),					\
    506  1.1  mrg 	     "rme" ((UDItype) (bl)))
    507  1.1  mrg #define umul_ppmm(w1, w0, u, v) \
    508  1.1  mrg   __asm__ ("mul{q} %3"							\
    509  1.1  mrg 	   : "=a" ((UDItype) (w0)),					\
    510  1.1  mrg 	     "=d" ((UDItype) (w1))					\
    511  1.1  mrg 	   : "%0" ((UDItype) (u)),					\
    512  1.1  mrg 	     "rm" ((UDItype) (v)))
    513  1.1  mrg #define udiv_qrnnd(q, r, n1, n0, dv) \
    514  1.1  mrg   __asm__ ("div{q} %4"							\
    515  1.1  mrg 	   : "=a" ((UDItype) (q)),					\
    516  1.1  mrg 	     "=d" ((UDItype) (r))					\
    517  1.1  mrg 	   : "0" ((UDItype) (n0)),					\
    518  1.1  mrg 	     "1" ((UDItype) (n1)),					\
    519  1.1  mrg 	     "rm" ((UDItype) (dv)))
    520  1.1  mrg #define count_leading_zeros(count, x)	((count) = __builtin_clzll (x))
    521  1.1  mrg #define count_trailing_zeros(count, x)	((count) = __builtin_ctzll (x))
    522  1.1  mrg #define UMUL_TIME 40
    523  1.1  mrg #define UDIV_TIME 40
    524  1.1  mrg #endif /* x86_64 */
    525  1.1  mrg 
    526  1.1  mrg #if defined (__i960__) && W_TYPE_SIZE == 32
    527  1.1  mrg #define umul_ppmm(w1, w0, u, v) \
    528  1.1  mrg   ({union {UDItype __ll;						\
    529  1.1  mrg 	   struct {USItype __l, __h;} __i;				\
    530  1.1  mrg 	  } __xx;							\
    531  1.1  mrg   __asm__ ("emul	%2,%1,%0"					\
    532  1.1  mrg 	   : "=d" (__xx.__ll)						\
    533  1.1  mrg 	   : "%dI" ((USItype) (u)),					\
    534  1.1  mrg 	     "dI" ((USItype) (v)));					\
    535  1.1  mrg   (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
    536  1.1  mrg #define __umulsidi3(u, v) \
    537  1.1  mrg   ({UDItype __w;							\
    538  1.1  mrg     __asm__ ("emul	%2,%1,%0"					\
    539  1.1  mrg 	     : "=d" (__w)						\
    540  1.1  mrg 	     : "%dI" ((USItype) (u)),					\
    541  1.1  mrg 	       "dI" ((USItype) (v)));					\
    542  1.1  mrg     __w; })
    543  1.1  mrg #endif /* __i960__ */
    544  1.1  mrg 
    545  1.1  mrg #if defined (__ia64) && W_TYPE_SIZE == 64
    546  1.1  mrg /* This form encourages gcc (pre-release 3.4 at least) to emit predicated
    547  1.1  mrg    "sub r=r,r" and "sub r=r,r,1", giving a 2 cycle latency.  The generic
    548  1.1  mrg    code using "al<bl" arithmetically comes out making an actual 0 or 1 in a
    549  1.1  mrg    register, which takes an extra cycle.  */
    550  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl)				\
    551  1.1  mrg   do {									\
    552  1.1  mrg     UWtype __x;								\
    553  1.1  mrg     __x = (al) - (bl);							\
    554  1.1  mrg     if ((al) < (bl))							\
    555  1.1  mrg       (sh) = (ah) - (bh) - 1;						\
    556  1.1  mrg     else								\
    557  1.1  mrg       (sh) = (ah) - (bh);						\
    558  1.1  mrg     (sl) = __x;								\
    559  1.1  mrg   } while (0)
    560  1.1  mrg 
    561  1.1  mrg /* Do both product parts in assembly, since that gives better code with
    562  1.1  mrg    all gcc versions.  Some callers will just use the upper part, and in
    563  1.1  mrg    that situation we waste an instruction, but not any cycles.  */
    564  1.1  mrg #define umul_ppmm(ph, pl, m0, m1)					\
    565  1.1  mrg   __asm__ ("xma.hu %0 = %2, %3, f0\n\txma.l %1 = %2, %3, f0"		\
    566  1.1  mrg 	   : "=&f" (ph), "=f" (pl)					\
    567  1.1  mrg 	   : "f" (m0), "f" (m1))
    568  1.1  mrg #define count_leading_zeros(count, x)					\
    569  1.1  mrg   do {									\
    570  1.1  mrg     UWtype _x = (x), _y, _a, _c;					\
    571  1.1  mrg     __asm__ ("mux1 %0 = %1, @rev" : "=r" (_y) : "r" (_x));		\
    572  1.1  mrg     __asm__ ("czx1.l %0 = %1" : "=r" (_a) : "r" (-_y | _y));		\
    573  1.1  mrg     _c = (_a - 1) << 3;							\
    574  1.1  mrg     _x >>= _c;								\
    575  1.1  mrg     if (_x >= 1 << 4)							\
    576  1.1  mrg       _x >>= 4, _c += 4;						\
    577  1.1  mrg     if (_x >= 1 << 2)							\
    578  1.1  mrg       _x >>= 2, _c += 2;						\
    579  1.1  mrg     _c += _x >> 1;							\
    580  1.1  mrg     (count) =  W_TYPE_SIZE - 1 - _c;					\
    581  1.1  mrg   } while (0)
    582  1.1  mrg /* similar to what gcc does for __builtin_ffs, but 0 based rather than 1
    583  1.1  mrg    based, and we don't need a special case for x==0 here */
    584  1.1  mrg #define count_trailing_zeros(count, x)					\
    585  1.1  mrg   do {									\
    586  1.1  mrg     UWtype __ctz_x = (x);						\
    587  1.1  mrg     __asm__ ("popcnt %0 = %1"						\
    588  1.1  mrg 	     : "=r" (count)						\
    589  1.1  mrg 	     : "r" ((__ctz_x-1) & ~__ctz_x));				\
    590  1.1  mrg   } while (0)
    591  1.1  mrg #define UMUL_TIME 14
    592  1.1  mrg #endif
    593  1.1  mrg 
    594  1.1  mrg #if defined (__M32R__) && W_TYPE_SIZE == 32
    595  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
    596  1.1  mrg   /* The cmp clears the condition bit.  */ \
    597  1.1  mrg   __asm__ ("cmp %0,%0\n\taddx %1,%5\n\taddx %0,%3"			\
    598  1.1  mrg 	   : "=r" ((USItype) (sh)),					\
    599  1.1  mrg 	     "=&r" ((USItype) (sl))					\
    600  1.1  mrg 	   : "0" ((USItype) (ah)),					\
    601  1.1  mrg 	     "r" ((USItype) (bh)),					\
    602  1.1  mrg 	     "1" ((USItype) (al)),					\
    603  1.1  mrg 	     "r" ((USItype) (bl))					\
    604  1.1  mrg 	   : "cbit")
    605  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    606  1.1  mrg   /* The cmp clears the condition bit.  */ \
    607  1.1  mrg   __asm__ ("cmp %0,%0\n\tsubx %1,%5\n\tsubx %0,%3"			\
    608  1.1  mrg 	   : "=r" ((USItype) (sh)),					\
    609  1.1  mrg 	     "=&r" ((USItype) (sl))					\
    610  1.1  mrg 	   : "0" ((USItype) (ah)),					\
    611  1.1  mrg 	     "r" ((USItype) (bh)),					\
    612  1.1  mrg 	     "1" ((USItype) (al)),					\
    613  1.1  mrg 	     "r" ((USItype) (bl))					\
    614  1.1  mrg 	   : "cbit")
    615  1.1  mrg #endif /* __M32R__ */
    616  1.1  mrg 
    617  1.1  mrg #if defined (__mc68000__) && W_TYPE_SIZE == 32
    618  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
    619  1.1  mrg   __asm__ ("add%.l %5,%1\n\taddx%.l %3,%0"				\
    620  1.1  mrg 	   : "=d" ((USItype) (sh)),					\
    621  1.1  mrg 	     "=&d" ((USItype) (sl))					\
    622  1.1  mrg 	   : "%0" ((USItype) (ah)),					\
    623  1.1  mrg 	     "d" ((USItype) (bh)),					\
    624  1.1  mrg 	     "%1" ((USItype) (al)),					\
    625  1.1  mrg 	     "g" ((USItype) (bl)))
    626  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    627  1.1  mrg   __asm__ ("sub%.l %5,%1\n\tsubx%.l %3,%0"				\
    628  1.1  mrg 	   : "=d" ((USItype) (sh)),					\
    629  1.1  mrg 	     "=&d" ((USItype) (sl))					\
    630  1.1  mrg 	   : "0" ((USItype) (ah)),					\
    631  1.1  mrg 	     "d" ((USItype) (bh)),					\
    632  1.1  mrg 	     "1" ((USItype) (al)),					\
    633  1.1  mrg 	     "g" ((USItype) (bl)))
    634  1.1  mrg 
    635  1.1  mrg /* The '020, '030, '040, '060 and CPU32 have 32x32->64 and 64/32->32q-32r.  */
    636  1.1  mrg #if (defined (__mc68020__) && !defined (__mc68060__))
    637  1.1  mrg #define umul_ppmm(w1, w0, u, v) \
    638  1.1  mrg   __asm__ ("mulu%.l %3,%1:%0"						\
    639  1.1  mrg 	   : "=d" ((USItype) (w0)),					\
    640  1.1  mrg 	     "=d" ((USItype) (w1))					\
    641  1.1  mrg 	   : "%0" ((USItype) (u)),					\
    642  1.1  mrg 	     "dmi" ((USItype) (v)))
    643  1.1  mrg #define UMUL_TIME 45
    644  1.1  mrg #define udiv_qrnnd(q, r, n1, n0, d) \
    645  1.1  mrg   __asm__ ("divu%.l %4,%1:%0"						\
    646  1.1  mrg 	   : "=d" ((USItype) (q)),					\
    647  1.1  mrg 	     "=d" ((USItype) (r))					\
    648  1.1  mrg 	   : "0" ((USItype) (n0)),					\
    649  1.1  mrg 	     "1" ((USItype) (n1)),					\
    650  1.1  mrg 	     "dmi" ((USItype) (d)))
    651  1.1  mrg #define UDIV_TIME 90
    652  1.1  mrg #define sdiv_qrnnd(q, r, n1, n0, d) \
    653  1.1  mrg   __asm__ ("divs%.l %4,%1:%0"						\
    654  1.1  mrg 	   : "=d" ((USItype) (q)),					\
    655  1.1  mrg 	     "=d" ((USItype) (r))					\
    656  1.1  mrg 	   : "0" ((USItype) (n0)),					\
    657  1.1  mrg 	     "1" ((USItype) (n1)),					\
    658  1.1  mrg 	     "dmi" ((USItype) (d)))
    659  1.1  mrg 
    660  1.1  mrg #elif defined (__mcoldfire__) /* not mc68020 */
    661  1.1  mrg 
    662  1.1  mrg #define umul_ppmm(xh, xl, a, b) \
    663  1.1  mrg   __asm__ ("| Inlined umul_ppmm\n"					\
    664  1.1  mrg 	   "	move%.l	%2,%/d0\n"					\
    665  1.1  mrg 	   "	move%.l	%3,%/d1\n"					\
    666  1.1  mrg 	   "	move%.l	%/d0,%/d2\n"					\
    667  1.1  mrg 	   "	swap	%/d0\n"						\
    668  1.1  mrg 	   "	move%.l	%/d1,%/d3\n"					\
    669  1.1  mrg 	   "	swap	%/d1\n"						\
    670  1.1  mrg 	   "	move%.w	%/d2,%/d4\n"					\
    671  1.1  mrg 	   "	mulu	%/d3,%/d4\n"					\
    672  1.1  mrg 	   "	mulu	%/d1,%/d2\n"					\
    673  1.1  mrg 	   "	mulu	%/d0,%/d3\n"					\
    674  1.1  mrg 	   "	mulu	%/d0,%/d1\n"					\
    675  1.1  mrg 	   "	move%.l	%/d4,%/d0\n"					\
    676  1.1  mrg 	   "	clr%.w	%/d0\n"						\
    677  1.1  mrg 	   "	swap	%/d0\n"						\
    678  1.1  mrg 	   "	add%.l	%/d0,%/d2\n"					\
    679  1.1  mrg 	   "	add%.l	%/d3,%/d2\n"					\
    680  1.1  mrg 	   "	jcc	1f\n"						\
    681  1.1  mrg 	   "	add%.l	%#65536,%/d1\n"					\
    682  1.1  mrg 	   "1:	swap	%/d2\n"						\
    683  1.1  mrg 	   "	moveq	%#0,%/d0\n"					\
    684  1.1  mrg 	   "	move%.w	%/d2,%/d0\n"					\
    685  1.1  mrg 	   "	move%.w	%/d4,%/d2\n"					\
    686  1.1  mrg 	   "	move%.l	%/d2,%1\n"					\
    687  1.1  mrg 	   "	add%.l	%/d1,%/d0\n"					\
    688  1.1  mrg 	   "	move%.l	%/d0,%0"					\
    689  1.1  mrg 	   : "=g" ((USItype) (xh)),					\
    690  1.1  mrg 	     "=g" ((USItype) (xl))					\
    691  1.1  mrg 	   : "g" ((USItype) (a)),					\
    692  1.1  mrg 	     "g" ((USItype) (b))					\
    693  1.1  mrg 	   : "d0", "d1", "d2", "d3", "d4")
    694  1.1  mrg #define UMUL_TIME 100
    695  1.1  mrg #define UDIV_TIME 400
    696  1.1  mrg #else /* not ColdFire */
    697  1.1  mrg /* %/ inserts REGISTER_PREFIX, %# inserts IMMEDIATE_PREFIX.  */
    698  1.1  mrg #define umul_ppmm(xh, xl, a, b) \
    699  1.1  mrg   __asm__ ("| Inlined umul_ppmm\n"					\
    700  1.1  mrg 	   "	move%.l	%2,%/d0\n"					\
    701  1.1  mrg 	   "	move%.l	%3,%/d1\n"					\
    702  1.1  mrg 	   "	move%.l	%/d0,%/d2\n"					\
    703  1.1  mrg 	   "	swap	%/d0\n"						\
    704  1.1  mrg 	   "	move%.l	%/d1,%/d3\n"					\
    705  1.1  mrg 	   "	swap	%/d1\n"						\
    706  1.1  mrg 	   "	move%.w	%/d2,%/d4\n"					\
    707  1.1  mrg 	   "	mulu	%/d3,%/d4\n"					\
    708  1.1  mrg 	   "	mulu	%/d1,%/d2\n"					\
    709  1.1  mrg 	   "	mulu	%/d0,%/d3\n"					\
    710  1.1  mrg 	   "	mulu	%/d0,%/d1\n"					\
    711  1.1  mrg 	   "	move%.l	%/d4,%/d0\n"					\
    712  1.1  mrg 	   "	eor%.w	%/d0,%/d0\n"					\
    713  1.1  mrg 	   "	swap	%/d0\n"						\
    714  1.1  mrg 	   "	add%.l	%/d0,%/d2\n"					\
    715  1.1  mrg 	   "	add%.l	%/d3,%/d2\n"					\
    716  1.1  mrg 	   "	jcc	1f\n"						\
    717  1.1  mrg 	   "	add%.l	%#65536,%/d1\n"					\
    718  1.1  mrg 	   "1:	swap	%/d2\n"						\
    719  1.1  mrg 	   "	moveq	%#0,%/d0\n"					\
    720  1.1  mrg 	   "	move%.w	%/d2,%/d0\n"					\
    721  1.1  mrg 	   "	move%.w	%/d4,%/d2\n"					\
    722  1.1  mrg 	   "	move%.l	%/d2,%1\n"					\
    723  1.1  mrg 	   "	add%.l	%/d1,%/d0\n"					\
    724  1.1  mrg 	   "	move%.l	%/d0,%0"					\
    725  1.1  mrg 	   : "=g" ((USItype) (xh)),					\
    726  1.1  mrg 	     "=g" ((USItype) (xl))					\
    727  1.1  mrg 	   : "g" ((USItype) (a)),					\
    728  1.1  mrg 	     "g" ((USItype) (b))					\
    729  1.1  mrg 	   : "d0", "d1", "d2", "d3", "d4")
    730  1.1  mrg #define UMUL_TIME 100
    731  1.1  mrg #define UDIV_TIME 400
    732  1.1  mrg 
    733  1.1  mrg #endif /* not mc68020 */
    734  1.1  mrg 
    735  1.1  mrg /* The '020, '030, '040 and '060 have bitfield insns.
    736  1.1  mrg    cpu32 disguises as a 68020, but lacks them.  */
    737  1.1  mrg #if defined (__mc68020__) && !defined (__mcpu32__)
    738  1.1  mrg #define count_leading_zeros(count, x) \
    739  1.1  mrg   __asm__ ("bfffo %1{%b2:%b2},%0"					\
    740  1.1  mrg 	   : "=d" ((USItype) (count))					\
    741  1.1  mrg 	   : "od" ((USItype) (x)), "n" (0))
    742  1.1  mrg /* Some ColdFire architectures have a ff1 instruction supported via
    743  1.1  mrg    __builtin_clz. */
    744  1.1  mrg #elif defined (__mcfisaaplus__) || defined (__mcfisac__)
    745  1.1  mrg #define count_leading_zeros(count,x) ((count) = __builtin_clz (x))
    746  1.1  mrg #define COUNT_LEADING_ZEROS_0 32
    747  1.1  mrg #endif
    748  1.1  mrg #endif /* mc68000 */
    749  1.1  mrg 
    750  1.1  mrg #if defined (__m88000__) && W_TYPE_SIZE == 32
    751  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
    752  1.1  mrg   __asm__ ("addu.co %1,%r4,%r5\n\taddu.ci %0,%r2,%r3"			\
    753  1.1  mrg 	   : "=r" ((USItype) (sh)),					\
    754  1.1  mrg 	     "=&r" ((USItype) (sl))					\
    755  1.1  mrg 	   : "%rJ" ((USItype) (ah)),					\
    756  1.1  mrg 	     "rJ" ((USItype) (bh)),					\
    757  1.1  mrg 	     "%rJ" ((USItype) (al)),					\
    758  1.1  mrg 	     "rJ" ((USItype) (bl)))
    759  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    760  1.1  mrg   __asm__ ("subu.co %1,%r4,%r5\n\tsubu.ci %0,%r2,%r3"			\
    761  1.1  mrg 	   : "=r" ((USItype) (sh)),					\
    762  1.1  mrg 	     "=&r" ((USItype) (sl))					\
    763  1.1  mrg 	   : "rJ" ((USItype) (ah)),					\
    764  1.1  mrg 	     "rJ" ((USItype) (bh)),					\
    765  1.1  mrg 	     "rJ" ((USItype) (al)),					\
    766  1.1  mrg 	     "rJ" ((USItype) (bl)))
    767  1.1  mrg #define count_leading_zeros(count, x) \
    768  1.1  mrg   do {									\
    769  1.1  mrg     USItype __cbtmp;							\
    770  1.1  mrg     __asm__ ("ff1 %0,%1"						\
    771  1.1  mrg 	     : "=r" (__cbtmp)						\
    772  1.1  mrg 	     : "r" ((USItype) (x)));					\
    773  1.1  mrg     (count) = __cbtmp ^ 31;						\
    774  1.1  mrg   } while (0)
    775  1.1  mrg #define COUNT_LEADING_ZEROS_0 63 /* sic */
    776  1.1  mrg #if defined (__mc88110__)
    777  1.1  mrg #define umul_ppmm(wh, wl, u, v) \
    778  1.1  mrg   do {									\
    779  1.1  mrg     union {UDItype __ll;						\
    780  1.1  mrg 	   struct {USItype __h, __l;} __i;				\
    781  1.1  mrg 	  } __xx;							\
    782  1.1  mrg     __asm__ ("mulu.d	%0,%1,%2"					\
    783  1.1  mrg 	     : "=r" (__xx.__ll)						\
    784  1.1  mrg 	     : "r" ((USItype) (u)),					\
    785  1.1  mrg 	       "r" ((USItype) (v)));					\
    786  1.1  mrg     (wh) = __xx.__i.__h;						\
    787  1.1  mrg     (wl) = __xx.__i.__l;						\
    788  1.1  mrg   } while (0)
    789  1.1  mrg #define udiv_qrnnd(q, r, n1, n0, d) \
    790  1.1  mrg   ({union {UDItype __ll;						\
    791  1.1  mrg 	   struct {USItype __h, __l;} __i;				\
    792  1.1  mrg 	  } __xx;							\
    793  1.1  mrg   USItype __q;								\
    794  1.1  mrg   __xx.__i.__h = (n1); __xx.__i.__l = (n0);				\
    795  1.1  mrg   __asm__ ("divu.d %0,%1,%2"						\
    796  1.1  mrg 	   : "=r" (__q)							\
    797  1.1  mrg 	   : "r" (__xx.__ll),						\
    798  1.1  mrg 	     "r" ((USItype) (d)));					\
    799  1.1  mrg   (r) = (n0) - __q * (d); (q) = __q; })
    800  1.1  mrg #define UMUL_TIME 5
    801  1.1  mrg #define UDIV_TIME 25
    802  1.1  mrg #else
    803  1.1  mrg #define UMUL_TIME 17
    804  1.1  mrg #define UDIV_TIME 150
    805  1.1  mrg #endif /* __mc88110__ */
    806  1.1  mrg #endif /* __m88000__ */
    807  1.1  mrg 
    808  1.1  mrg #if defined (__mn10300__)
    809  1.1  mrg # if defined (__AM33__)
    810  1.1  mrg #  define count_leading_zeros(COUNT,X)	((COUNT) = __builtin_clz (X))
    811  1.1  mrg #  define umul_ppmm(w1, w0, u, v)		\
    812  1.1  mrg     asm("mulu %3,%2,%1,%0" : "=r"(w0), "=r"(w1) : "r"(u), "r"(v))
    813  1.1  mrg #  define smul_ppmm(w1, w0, u, v)		\
    814  1.1  mrg     asm("mul %3,%2,%1,%0" : "=r"(w0), "=r"(w1) : "r"(u), "r"(v))
    815  1.1  mrg # else
    816  1.1  mrg #  define umul_ppmm(w1, w0, u, v)		\
    817  1.1  mrg     asm("nop; nop; mulu %3,%0" : "=d"(w0), "=z"(w1) : "%0"(u), "d"(v))
    818  1.1  mrg #  define smul_ppmm(w1, w0, u, v)		\
    819  1.1  mrg     asm("nop; nop; mul %3,%0" : "=d"(w0), "=z"(w1) : "%0"(u), "d"(v))
    820  1.1  mrg # endif
    821  1.1  mrg # define add_ssaaaa(sh, sl, ah, al, bh, bl)	\
    822  1.1  mrg   do {						\
    823  1.1  mrg     DWunion __s, __a, __b;			\
    824  1.1  mrg     __a.s.low = (al); __a.s.high = (ah);	\
    825  1.1  mrg     __b.s.low = (bl); __b.s.high = (bh);	\
    826  1.1  mrg     __s.ll = __a.ll + __b.ll;			\
    827  1.1  mrg     (sl) = __s.s.low; (sh) = __s.s.high;	\
    828  1.1  mrg   } while (0)
    829  1.1  mrg # define sub_ddmmss(sh, sl, ah, al, bh, bl)	\
    830  1.1  mrg   do {						\
    831  1.1  mrg     DWunion __s, __a, __b;			\
    832  1.1  mrg     __a.s.low = (al); __a.s.high = (ah);	\
    833  1.1  mrg     __b.s.low = (bl); __b.s.high = (bh);	\
    834  1.1  mrg     __s.ll = __a.ll - __b.ll;			\
    835  1.1  mrg     (sl) = __s.s.low; (sh) = __s.s.high;	\
    836  1.1  mrg   } while (0)
    837  1.1  mrg # define udiv_qrnnd(q, r, nh, nl, d)		\
    838  1.1  mrg   asm("divu %2,%0" : "=D"(q), "=z"(r) : "D"(d), "0"(nl), "1"(nh))
    839  1.1  mrg # define sdiv_qrnnd(q, r, nh, nl, d)		\
    840  1.1  mrg   asm("div %2,%0" : "=D"(q), "=z"(r) : "D"(d), "0"(nl), "1"(nh))
    841  1.1  mrg # define UMUL_TIME 3
    842  1.1  mrg # define UDIV_TIME 38
    843  1.1  mrg #endif
    844  1.1  mrg 
    845  1.1  mrg #if defined (__mips__) && W_TYPE_SIZE == 32
    846  1.1  mrg #define umul_ppmm(w1, w0, u, v)						\
    847  1.1  mrg   do {									\
    848  1.1  mrg     UDItype __x = (UDItype) (USItype) (u) * (USItype) (v);		\
    849  1.1  mrg     (w1) = (USItype) (__x >> 32);					\
    850  1.1  mrg     (w0) = (USItype) (__x);						\
    851  1.1  mrg   } while (0)
    852  1.1  mrg #define UMUL_TIME 10
    853  1.1  mrg #define UDIV_TIME 100
    854  1.1  mrg 
    855  1.1  mrg #if (__mips == 32 || __mips == 64) && ! defined (__mips16)
    856  1.1  mrg #define count_leading_zeros(COUNT,X)	((COUNT) = __builtin_clz (X))
    857  1.1  mrg #define COUNT_LEADING_ZEROS_0 32
    858  1.1  mrg #endif
    859  1.1  mrg #endif /* __mips__ */
    860  1.1  mrg 
    861  1.1  mrg #if defined (__ns32000__) && W_TYPE_SIZE == 32
    862  1.1  mrg #define umul_ppmm(w1, w0, u, v) \
    863  1.1  mrg   ({union {UDItype __ll;						\
    864  1.1  mrg 	   struct {USItype __l, __h;} __i;				\
    865  1.1  mrg 	  } __xx;							\
    866  1.1  mrg   __asm__ ("meid %2,%0"							\
    867  1.1  mrg 	   : "=g" (__xx.__ll)						\
    868  1.1  mrg 	   : "%0" ((USItype) (u)),					\
    869  1.1  mrg 	     "g" ((USItype) (v)));					\
    870  1.1  mrg   (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
    871  1.1  mrg #define __umulsidi3(u, v) \
    872  1.1  mrg   ({UDItype __w;							\
    873  1.1  mrg     __asm__ ("meid %2,%0"						\
    874  1.1  mrg 	     : "=g" (__w)						\
    875  1.1  mrg 	     : "%0" ((USItype) (u)),					\
    876  1.1  mrg 	       "g" ((USItype) (v)));					\
    877  1.1  mrg     __w; })
    878  1.1  mrg #define udiv_qrnnd(q, r, n1, n0, d) \
    879  1.1  mrg   ({union {UDItype __ll;						\
    880  1.1  mrg 	   struct {USItype __l, __h;} __i;				\
    881  1.1  mrg 	  } __xx;							\
    882  1.1  mrg   __xx.__i.__h = (n1); __xx.__i.__l = (n0);				\
    883  1.1  mrg   __asm__ ("deid %2,%0"							\
    884  1.1  mrg 	   : "=g" (__xx.__ll)						\
    885  1.1  mrg 	   : "0" (__xx.__ll),						\
    886  1.1  mrg 	     "g" ((USItype) (d)));					\
    887  1.1  mrg   (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
    888  1.1  mrg #define count_trailing_zeros(count,x) \
    889  1.1  mrg   do {									\
    890  1.1  mrg     __asm__ ("ffsd     %2,%0"						\
    891  1.1  mrg 	    : "=r" ((USItype) (count))					\
    892  1.1  mrg 	    : "0" ((USItype) 0),					\
    893  1.1  mrg 	      "r" ((USItype) (x)));					\
    894  1.1  mrg   } while (0)
    895  1.1  mrg #endif /* __ns32000__ */
    896  1.1  mrg 
    897  1.1  mrg /* FIXME: We should test _IBMR2 here when we add assembly support for the
    898  1.1  mrg    system vendor compilers.
    899  1.1  mrg    FIXME: What's needed for gcc PowerPC VxWorks?  __vxworks__ is not good
    900  1.1  mrg    enough, since that hits ARM and m68k too.  */
    901  1.1  mrg #if (defined (_ARCH_PPC)	/* AIX */				\
    902  1.1  mrg      || defined (__powerpc__)	/* gcc */				\
    903  1.1  mrg      || defined (__POWERPC__)	/* BEOS */				\
    904  1.1  mrg      || defined (__ppc__)	/* Darwin */				\
    905  1.1  mrg      || (defined (PPC) && ! defined (CPU_FAMILY)) /* gcc 2.7.x GNU&SysV */    \
    906  1.1  mrg      || (defined (PPC) && defined (CPU_FAMILY)    /* VxWorks */               \
    907  1.1  mrg 	 && CPU_FAMILY == PPC)                                                \
    908  1.1  mrg      ) && W_TYPE_SIZE == 32
    909  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
    910  1.1  mrg   do {									\
    911  1.1  mrg     if (__builtin_constant_p (bh) && (bh) == 0)				\
    912  1.1  mrg       __asm__ ("add%I4c %1,%3,%4\n\taddze %0,%2"		\
    913  1.1  mrg 	     : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
    914  1.1  mrg     else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0)		\
    915  1.1  mrg       __asm__ ("add%I4c %1,%3,%4\n\taddme %0,%2"		\
    916  1.1  mrg 	     : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
    917  1.1  mrg     else								\
    918  1.1  mrg       __asm__ ("add%I5c %1,%4,%5\n\tadde %0,%2,%3"		\
    919  1.1  mrg 	     : "=r" (sh), "=&r" (sl)					\
    920  1.1  mrg 	     : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl));		\
    921  1.1  mrg   } while (0)
    922  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    923  1.1  mrg   do {									\
    924  1.1  mrg     if (__builtin_constant_p (ah) && (ah) == 0)				\
    925  1.1  mrg       __asm__ ("subf%I3c %1,%4,%3\n\tsubfze %0,%2"	\
    926  1.1  mrg 	       : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
    927  1.1  mrg     else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0)		\
    928  1.1  mrg       __asm__ ("subf%I3c %1,%4,%3\n\tsubfme %0,%2"	\
    929  1.1  mrg 	       : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
    930  1.1  mrg     else if (__builtin_constant_p (bh) && (bh) == 0)			\
    931  1.1  mrg       __asm__ ("subf%I3c %1,%4,%3\n\taddme %0,%2"		\
    932  1.1  mrg 	       : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
    933  1.1  mrg     else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0)		\
    934  1.1  mrg       __asm__ ("subf%I3c %1,%4,%3\n\taddze %0,%2"		\
    935  1.1  mrg 	       : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
    936  1.1  mrg     else								\
    937  1.1  mrg       __asm__ ("subf%I4c %1,%5,%4\n\tsubfe %0,%3,%2"	\
    938  1.1  mrg 	       : "=r" (sh), "=&r" (sl)					\
    939  1.1  mrg 	       : "r" (ah), "r" (bh), "rI" (al), "r" (bl));		\
    940  1.1  mrg   } while (0)
    941  1.1  mrg #define count_leading_zeros(count, x) \
    942  1.1  mrg   __asm__ ("cntlzw %0,%1" : "=r" (count) : "r" (x))
    943  1.1  mrg #define COUNT_LEADING_ZEROS_0 32
    944  1.1  mrg #if defined (_ARCH_PPC) || defined (__powerpc__) || defined (__POWERPC__) \
    945  1.1  mrg   || defined (__ppc__)                                                    \
    946  1.1  mrg   || (defined (PPC) && ! defined (CPU_FAMILY)) /* gcc 2.7.x GNU&SysV */       \
    947  1.1  mrg   || (defined (PPC) && defined (CPU_FAMILY)    /* VxWorks */                  \
    948  1.1  mrg 	 && CPU_FAMILY == PPC)
    949  1.1  mrg #define umul_ppmm(ph, pl, m0, m1) \
    950  1.1  mrg   do {									\
    951  1.1  mrg     USItype __m0 = (m0), __m1 = (m1);					\
    952  1.1  mrg     __asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1));	\
    953  1.1  mrg     (pl) = __m0 * __m1;							\
    954  1.1  mrg   } while (0)
    955  1.1  mrg #define UMUL_TIME 15
    956  1.1  mrg #define smul_ppmm(ph, pl, m0, m1) \
    957  1.1  mrg   do {									\
    958  1.1  mrg     SItype __m0 = (m0), __m1 = (m1);					\
    959  1.1  mrg     __asm__ ("mulhw %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1));	\
    960  1.1  mrg     (pl) = __m0 * __m1;							\
    961  1.1  mrg   } while (0)
    962  1.1  mrg #define SMUL_TIME 14
    963  1.1  mrg #define UDIV_TIME 120
    964  1.1  mrg #endif
    965  1.1  mrg #endif /* 32-bit POWER architecture variants.  */
    966  1.1  mrg 
    967  1.1  mrg /* We should test _IBMR2 here when we add assembly support for the system
    968  1.1  mrg    vendor compilers.  */
    969  1.1  mrg #if (defined (_ARCH_PPC64) || defined (__powerpc64__)) && W_TYPE_SIZE == 64
    970  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
    971  1.1  mrg   do {									\
    972  1.1  mrg     if (__builtin_constant_p (bh) && (bh) == 0)				\
    973  1.1  mrg       __asm__ ("add%I4c %1,%3,%4\n\taddze %0,%2"		\
    974  1.1  mrg 	     : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
    975  1.1  mrg     else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0)		\
    976  1.1  mrg       __asm__ ("add%I4c %1,%3,%4\n\taddme %0,%2"		\
    977  1.1  mrg 	     : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
    978  1.1  mrg     else								\
    979  1.1  mrg       __asm__ ("add%I5c %1,%4,%5\n\tadde %0,%2,%3"		\
    980  1.1  mrg 	     : "=r" (sh), "=&r" (sl)					\
    981  1.1  mrg 	     : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl));		\
    982  1.1  mrg   } while (0)
    983  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    984  1.1  mrg   do {									\
    985  1.1  mrg     if (__builtin_constant_p (ah) && (ah) == 0)				\
    986  1.1  mrg       __asm__ ("subf%I3c %1,%4,%3\n\tsubfze %0,%2"	\
    987  1.1  mrg 	       : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
    988  1.1  mrg     else if (__builtin_constant_p (ah) && (ah) == ~(UDItype) 0)		\
    989  1.1  mrg       __asm__ ("subf%I3c %1,%4,%3\n\tsubfme %0,%2"	\
    990  1.1  mrg 	       : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
    991  1.1  mrg     else if (__builtin_constant_p (bh) && (bh) == 0)			\
    992  1.1  mrg       __asm__ ("subf%I3c %1,%4,%3\n\taddme %0,%2"		\
    993  1.1  mrg 	       : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
    994  1.1  mrg     else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0)		\
    995  1.1  mrg       __asm__ ("subf%I3c %1,%4,%3\n\taddze %0,%2"		\
    996  1.1  mrg 	       : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
    997  1.1  mrg     else								\
    998  1.1  mrg       __asm__ ("subf%I4c %1,%5,%4\n\tsubfe %0,%3,%2"	\
    999  1.1  mrg 	       : "=r" (sh), "=&r" (sl)					\
   1000  1.1  mrg 	       : "r" (ah), "r" (bh), "rI" (al), "r" (bl));		\
   1001  1.1  mrg   } while (0)
   1002  1.1  mrg #define count_leading_zeros(count, x) \
   1003  1.1  mrg   __asm__ ("cntlzd %0,%1" : "=r" (count) : "r" (x))
   1004  1.1  mrg #define COUNT_LEADING_ZEROS_0 64
   1005  1.1  mrg #define umul_ppmm(ph, pl, m0, m1) \
   1006  1.1  mrg   do {									\
   1007  1.1  mrg     UDItype __m0 = (m0), __m1 = (m1);					\
   1008  1.1  mrg     __asm__ ("mulhdu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1));	\
   1009  1.1  mrg     (pl) = __m0 * __m1;							\
   1010  1.1  mrg   } while (0)
   1011  1.1  mrg #define UMUL_TIME 15
   1012  1.1  mrg #define smul_ppmm(ph, pl, m0, m1) \
   1013  1.1  mrg   do {									\
   1014  1.1  mrg     DItype __m0 = (m0), __m1 = (m1);					\
   1015  1.1  mrg     __asm__ ("mulhd %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1));	\
   1016  1.1  mrg     (pl) = __m0 * __m1;							\
   1017  1.1  mrg   } while (0)
   1018  1.1  mrg #define SMUL_TIME 14  /* ??? */
   1019  1.1  mrg #define UDIV_TIME 120 /* ??? */
   1020  1.1  mrg #endif /* 64-bit PowerPC.  */
   1021  1.1  mrg 
   1022  1.1  mrg #if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
   1023  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   1024  1.1  mrg   __asm__ ("a %1,%5\n\tae %0,%3"					\
   1025  1.1  mrg 	   : "=r" ((USItype) (sh)),					\
   1026  1.1  mrg 	     "=&r" ((USItype) (sl))					\
   1027  1.1  mrg 	   : "%0" ((USItype) (ah)),					\
   1028  1.1  mrg 	     "r" ((USItype) (bh)),					\
   1029  1.1  mrg 	     "%1" ((USItype) (al)),					\
   1030  1.1  mrg 	     "r" ((USItype) (bl)))
   1031  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   1032  1.1  mrg   __asm__ ("s %1,%5\n\tse %0,%3"					\
   1033  1.1  mrg 	   : "=r" ((USItype) (sh)),					\
   1034  1.1  mrg 	     "=&r" ((USItype) (sl))					\
   1035  1.1  mrg 	   : "0" ((USItype) (ah)),					\
   1036  1.1  mrg 	     "r" ((USItype) (bh)),					\
   1037  1.1  mrg 	     "1" ((USItype) (al)),					\
   1038  1.1  mrg 	     "r" ((USItype) (bl)))
   1039  1.1  mrg #define umul_ppmm(ph, pl, m0, m1) \
   1040  1.1  mrg   do {									\
   1041  1.1  mrg     USItype __m0 = (m0), __m1 = (m1);					\
   1042  1.1  mrg     __asm__ (								\
   1043  1.1  mrg        "s	r2,r2\n"						\
   1044  1.1  mrg "	mts	r10,%2\n"						\
   1045  1.1  mrg "	m	r2,%3\n"						\
   1046  1.1  mrg "	m	r2,%3\n"						\
   1047  1.1  mrg "	m	r2,%3\n"						\
   1048  1.1  mrg "	m	r2,%3\n"						\
   1049  1.1  mrg "	m	r2,%3\n"						\
   1050  1.1  mrg "	m	r2,%3\n"						\
   1051  1.1  mrg "	m	r2,%3\n"						\
   1052  1.1  mrg "	m	r2,%3\n"						\
   1053  1.1  mrg "	m	r2,%3\n"						\
   1054  1.1  mrg "	m	r2,%3\n"						\
   1055  1.1  mrg "	m	r2,%3\n"						\
   1056  1.1  mrg "	m	r2,%3\n"						\
   1057  1.1  mrg "	m	r2,%3\n"						\
   1058  1.1  mrg "	m	r2,%3\n"						\
   1059  1.1  mrg "	m	r2,%3\n"						\
   1060  1.1  mrg "	m	r2,%3\n"						\
   1061  1.1  mrg "	cas	%0,r2,r0\n"						\
   1062  1.1  mrg "	mfs	r10,%1"							\
   1063  1.1  mrg 	     : "=r" ((USItype) (ph)),					\
   1064  1.1  mrg 	       "=r" ((USItype) (pl))					\
   1065  1.1  mrg 	     : "%r" (__m0),						\
   1066  1.1  mrg 		"r" (__m1)						\
   1067  1.1  mrg 	     : "r2");							\
   1068  1.1  mrg     (ph) += ((((SItype) __m0 >> 31) & __m1)				\
   1069  1.1  mrg 	     + (((SItype) __m1 >> 31) & __m0));				\
   1070  1.1  mrg   } while (0)
   1071  1.1  mrg #define UMUL_TIME 20
   1072  1.1  mrg #define UDIV_TIME 200
   1073  1.1  mrg #define count_leading_zeros(count, x) \
   1074  1.1  mrg   do {									\
   1075  1.1  mrg     if ((x) >= 0x10000)							\
   1076  1.1  mrg       __asm__ ("clz	%0,%1"						\
   1077  1.1  mrg 	       : "=r" ((USItype) (count))				\
   1078  1.1  mrg 	       : "r" ((USItype) (x) >> 16));				\
   1079  1.1  mrg     else								\
   1080  1.1  mrg       {									\
   1081  1.1  mrg 	__asm__ ("clz	%0,%1"						\
   1082  1.1  mrg 		 : "=r" ((USItype) (count))				\
   1083  1.1  mrg 		 : "r" ((USItype) (x)));					\
   1084  1.1  mrg 	(count) += 16;							\
   1085  1.1  mrg       }									\
   1086  1.1  mrg   } while (0)
   1087  1.1  mrg #endif
   1088  1.1  mrg 
   1089  1.1  mrg #if defined(__sh__) && !__SHMEDIA__ && W_TYPE_SIZE == 32
   1090  1.1  mrg #ifndef __sh1__
   1091  1.1  mrg #define umul_ppmm(w1, w0, u, v) \
   1092  1.1  mrg   __asm__ (								\
   1093  1.1  mrg        "dmulu.l	%2,%3\n\tsts%M1	macl,%1\n\tsts%M0	mach,%0"	\
   1094  1.1  mrg 	   : "=r<" ((USItype)(w1)),					\
   1095  1.1  mrg 	     "=r<" ((USItype)(w0))					\
   1096  1.1  mrg 	   : "r" ((USItype)(u)),					\
   1097  1.1  mrg 	     "r" ((USItype)(v))						\
   1098  1.1  mrg 	   : "macl", "mach")
   1099  1.1  mrg #define UMUL_TIME 5
   1100  1.1  mrg #endif
   1101  1.1  mrg 
   1102  1.1  mrg /* This is the same algorithm as __udiv_qrnnd_c.  */
   1103  1.1  mrg #define UDIV_NEEDS_NORMALIZATION 1
   1104  1.1  mrg 
   1105  1.1  mrg #define udiv_qrnnd(q, r, n1, n0, d) \
   1106  1.1  mrg   do {									\
   1107  1.1  mrg     extern UWtype __udiv_qrnnd_16 (UWtype, UWtype)			\
   1108  1.1  mrg 			__attribute__ ((visibility ("hidden")));	\
   1109  1.1  mrg     /* r0: rn r1: qn */ /* r0: n1 r4: n0 r5: d r6: d1 */ /* r2: __m */	\
   1110  1.1  mrg     __asm__ (								\
   1111  1.1  mrg 	"mov%M4 %4,r5\n"						\
   1112  1.1  mrg "	swap.w %3,r4\n"							\
   1113  1.1  mrg "	swap.w r5,r6\n"							\
   1114  1.1  mrg "	jsr @%5\n"							\
   1115  1.1  mrg "	shll16 r6\n"							\
   1116  1.1  mrg "	swap.w r4,r4\n"							\
   1117  1.1  mrg "	jsr @%5\n"							\
   1118  1.1  mrg "	swap.w r1,%0\n"							\
   1119  1.1  mrg "	or r1,%0"							\
   1120  1.1  mrg 	: "=r" (q), "=&z" (r)						\
   1121  1.1  mrg 	: "1" (n1), "r" (n0), "rm" (d), "r" (&__udiv_qrnnd_16)		\
   1122  1.1  mrg 	: "r1", "r2", "r4", "r5", "r6", "pr", "t");			\
   1123  1.1  mrg   } while (0)
   1124  1.1  mrg 
   1125  1.1  mrg #define UDIV_TIME 80
   1126  1.1  mrg 
   1127  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl)				\
   1128  1.1  mrg   __asm__ ("clrt;subc %5,%1; subc %4,%0"				\
   1129  1.1  mrg 	   : "=r" (sh), "=r" (sl)					\
   1130  1.1  mrg 	   : "0" (ah), "1" (al), "r" (bh), "r" (bl) : "t")
   1131  1.1  mrg 
   1132  1.1  mrg #endif /* __sh__ */
   1133  1.1  mrg 
   1134  1.1  mrg #if defined (__SH5__) && __SHMEDIA__ && W_TYPE_SIZE == 32
   1135  1.1  mrg #define __umulsidi3(u,v) ((UDItype)(USItype)u*(USItype)v)
   1136  1.1  mrg #define count_leading_zeros(count, x) \
   1137  1.1  mrg   do									\
   1138  1.1  mrg     {									\
   1139  1.1  mrg       UDItype x_ = (USItype)(x);					\
   1140  1.1  mrg       SItype c_;							\
   1141  1.1  mrg 									\
   1142  1.1  mrg       __asm__ ("nsb %1, %0" : "=r" (c_) : "r" (x_));			\
   1143  1.1  mrg       (count) = c_ - 31;						\
   1144  1.1  mrg     }									\
   1145  1.1  mrg   while (0)
   1146  1.1  mrg #define COUNT_LEADING_ZEROS_0 32
   1147  1.1  mrg #endif
   1148  1.1  mrg 
   1149  1.1  mrg #if defined (__sparc__) && !defined (__arch64__) && !defined (__sparcv9) \
   1150  1.1  mrg     && W_TYPE_SIZE == 32
   1151  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   1152  1.1  mrg   __asm__ ("addcc %r4,%5,%1\n\taddx %r2,%3,%0"				\
   1153  1.1  mrg 	   : "=r" ((USItype) (sh)),					\
   1154  1.1  mrg 	     "=&r" ((USItype) (sl))					\
   1155  1.1  mrg 	   : "%rJ" ((USItype) (ah)),					\
   1156  1.1  mrg 	     "rI" ((USItype) (bh)),					\
   1157  1.1  mrg 	     "%rJ" ((USItype) (al)),					\
   1158  1.1  mrg 	     "rI" ((USItype) (bl))					\
   1159  1.1  mrg 	   __CLOBBER_CC)
   1160  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   1161  1.1  mrg   __asm__ ("subcc %r4,%5,%1\n\tsubx %r2,%3,%0"				\
   1162  1.1  mrg 	   : "=r" ((USItype) (sh)),					\
   1163  1.1  mrg 	     "=&r" ((USItype) (sl))					\
   1164  1.1  mrg 	   : "rJ" ((USItype) (ah)),					\
   1165  1.1  mrg 	     "rI" ((USItype) (bh)),					\
   1166  1.1  mrg 	     "rJ" ((USItype) (al)),					\
   1167  1.1  mrg 	     "rI" ((USItype) (bl))					\
   1168  1.1  mrg 	   __CLOBBER_CC)
   1169  1.1  mrg #if defined (__sparc_v9__)
   1170  1.1  mrg #define umul_ppmm(w1, w0, u, v) \
   1171  1.1  mrg   do {									\
   1172  1.1  mrg     register USItype __g1 asm ("g1");					\
   1173  1.1  mrg     __asm__ ("umul\t%2,%3,%1\n\t"					\
   1174  1.1  mrg 	     "srlx\t%1, 32, %0"						\
   1175  1.1  mrg 	     : "=r" ((USItype) (w1)),					\
   1176  1.1  mrg 	       "=r" (__g1)						\
   1177  1.1  mrg 	     : "r" ((USItype) (u)),					\
   1178  1.1  mrg 	       "r" ((USItype) (v)));					\
   1179  1.1  mrg     (w0) = __g1;							\
   1180  1.1  mrg   } while (0)
   1181  1.1  mrg #define udiv_qrnnd(__q, __r, __n1, __n0, __d) \
   1182  1.1  mrg   __asm__ ("mov\t%2,%%y\n\t"						\
   1183  1.1  mrg 	   "udiv\t%3,%4,%0\n\t"						\
   1184  1.1  mrg 	   "umul\t%0,%4,%1\n\t"						\
   1185  1.1  mrg 	   "sub\t%3,%1,%1"						\
   1186  1.1  mrg 	   : "=&r" ((USItype) (__q)),					\
   1187  1.1  mrg 	     "=&r" ((USItype) (__r))					\
   1188  1.1  mrg 	   : "r" ((USItype) (__n1)),					\
   1189  1.1  mrg 	     "r" ((USItype) (__n0)),					\
   1190  1.1  mrg 	     "r" ((USItype) (__d)))
   1191  1.1  mrg #else
   1192  1.1  mrg #if defined (__sparc_v8__)
   1193  1.1  mrg #define umul_ppmm(w1, w0, u, v) \
   1194  1.1  mrg   __asm__ ("umul %2,%3,%1;rd %%y,%0"					\
   1195  1.1  mrg 	   : "=r" ((USItype) (w1)),					\
   1196  1.1  mrg 	     "=r" ((USItype) (w0))					\
   1197  1.1  mrg 	   : "r" ((USItype) (u)),					\
   1198  1.1  mrg 	     "r" ((USItype) (v)))
   1199  1.1  mrg #define udiv_qrnnd(__q, __r, __n1, __n0, __d) \
   1200  1.1  mrg   __asm__ ("mov %2,%%y;nop;nop;nop;udiv %3,%4,%0;umul %0,%4,%1;sub %3,%1,%1"\
   1201  1.1  mrg 	   : "=&r" ((USItype) (__q)),					\
   1202  1.1  mrg 	     "=&r" ((USItype) (__r))					\
   1203  1.1  mrg 	   : "r" ((USItype) (__n1)),					\
   1204  1.1  mrg 	     "r" ((USItype) (__n0)),					\
   1205  1.1  mrg 	     "r" ((USItype) (__d)))
   1206  1.1  mrg #else
   1207  1.1  mrg #if defined (__sparclite__)
   1208  1.1  mrg /* This has hardware multiply but not divide.  It also has two additional
   1209  1.1  mrg    instructions scan (ffs from high bit) and divscc.  */
   1210  1.1  mrg #define umul_ppmm(w1, w0, u, v) \
   1211  1.1  mrg   __asm__ ("umul %2,%3,%1;rd %%y,%0"					\
   1212  1.1  mrg 	   : "=r" ((USItype) (w1)),					\
   1213  1.1  mrg 	     "=r" ((USItype) (w0))					\
   1214  1.1  mrg 	   : "r" ((USItype) (u)),					\
   1215  1.1  mrg 	     "r" ((USItype) (v)))
   1216  1.1  mrg #define udiv_qrnnd(q, r, n1, n0, d) \
   1217  1.1  mrg   __asm__ ("! Inlined udiv_qrnnd\n"					\
   1218  1.1  mrg "	wr	%%g0,%2,%%y	! Not a delayed write for sparclite\n"	\
   1219  1.1  mrg "	tst	%%g0\n"							\
   1220  1.1  mrg "	divscc	%3,%4,%%g1\n"						\
   1221  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1222  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1223  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1224  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1225  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1226  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1227  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1228  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1229  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1230  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1231  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1232  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1233  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1234  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1235  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1236  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1237  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1238  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1239  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1240  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1241  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1242  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1243  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1244  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1245  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1246  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1247  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1248  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1249  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1250  1.1  mrg "	divscc	%%g1,%4,%%g1\n"						\
   1251  1.1  mrg "	divscc	%%g1,%4,%0\n"						\
   1252  1.1  mrg "	rd	%%y,%1\n"						\
   1253  1.1  mrg "	bl,a 1f\n"							\
   1254  1.1  mrg "	add	%1,%4,%1\n"						\
   1255  1.1  mrg "1:	! End of inline udiv_qrnnd"					\
   1256  1.1  mrg 	   : "=r" ((USItype) (q)),					\
   1257  1.1  mrg 	     "=r" ((USItype) (r))					\
   1258  1.1  mrg 	   : "r" ((USItype) (n1)),					\
   1259  1.1  mrg 	     "r" ((USItype) (n0)),					\
   1260  1.1  mrg 	     "rI" ((USItype) (d))					\
   1261  1.1  mrg 	   : "g1" __AND_CLOBBER_CC)
   1262  1.1  mrg #define UDIV_TIME 37
   1263  1.1  mrg #define count_leading_zeros(count, x) \
   1264  1.1  mrg   do {                                                                  \
   1265  1.1  mrg   __asm__ ("scan %1,1,%0"                                               \
   1266  1.1  mrg 	   : "=r" ((USItype) (count))                                   \
   1267  1.1  mrg 	   : "r" ((USItype) (x)));					\
   1268  1.1  mrg   } while (0)
   1269  1.1  mrg /* Early sparclites return 63 for an argument of 0, but they warn that future
   1270  1.1  mrg    implementations might change this.  Therefore, leave COUNT_LEADING_ZEROS_0
   1271  1.1  mrg    undefined.  */
   1272  1.1  mrg #else
   1273  1.1  mrg /* SPARC without integer multiplication and divide instructions.
   1274  1.1  mrg    (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
   1275  1.1  mrg #define umul_ppmm(w1, w0, u, v) \
   1276  1.1  mrg   __asm__ ("! Inlined umul_ppmm\n"					\
   1277  1.1  mrg "	wr	%%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr\n"\
   1278  1.1  mrg "	sra	%3,31,%%o5	! Don't move this insn\n"		\
   1279  1.1  mrg "	and	%2,%%o5,%%o5	! Don't move this insn\n"		\
   1280  1.1  mrg "	andcc	%%g0,0,%%g1	! Don't move this insn\n"		\
   1281  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1282  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1283  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1284  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1285  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1286  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1287  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1288  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1289  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1290  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1291  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1292  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1293  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1294  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1295  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1296  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1297  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1298  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1299  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1300  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1301  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1302  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1303  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1304  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1305  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1306  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1307  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1308  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1309  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1310  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1311  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1312  1.1  mrg "	mulscc	%%g1,%3,%%g1\n"						\
   1313  1.1  mrg "	mulscc	%%g1,0,%%g1\n"						\
   1314  1.1  mrg "	add	%%g1,%%o5,%0\n"						\
   1315  1.1  mrg "	rd	%%y,%1"							\
   1316  1.1  mrg 	   : "=r" ((USItype) (w1)),					\
   1317  1.1  mrg 	     "=r" ((USItype) (w0))					\
   1318  1.1  mrg 	   : "%rI" ((USItype) (u)),					\
   1319  1.1  mrg 	     "r" ((USItype) (v))						\
   1320  1.1  mrg 	   : "g1", "o5" __AND_CLOBBER_CC)
   1321  1.1  mrg #define UMUL_TIME 39		/* 39 instructions */
   1322  1.1  mrg /* It's quite necessary to add this much assembler for the sparc.
   1323  1.1  mrg    The default udiv_qrnnd (in C) is more than 10 times slower!  */
   1324  1.1  mrg #define udiv_qrnnd(__q, __r, __n1, __n0, __d) \
   1325  1.1  mrg   __asm__ ("! Inlined udiv_qrnnd\n"					\
   1326  1.1  mrg "	mov	32,%%g1\n"						\
   1327  1.1  mrg "	subcc	%1,%2,%%g0\n"						\
   1328  1.1  mrg "1:	bcs	5f\n"							\
   1329  1.1  mrg "	 addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb\n"	\
   1330  1.1  mrg "	sub	%1,%2,%1	! this kills msb of n\n"		\
   1331  1.1  mrg "	addx	%1,%1,%1	! so this can't give carry\n"		\
   1332  1.1  mrg "	subcc	%%g1,1,%%g1\n"						\
   1333  1.1  mrg "2:	bne	1b\n"							\
   1334  1.1  mrg "	 subcc	%1,%2,%%g0\n"						\
   1335  1.1  mrg "	bcs	3f\n"							\
   1336  1.1  mrg "	 addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb\n"	\
   1337  1.1  mrg "	b	3f\n"							\
   1338  1.1  mrg "	 sub	%1,%2,%1	! this kills msb of n\n"		\
   1339  1.1  mrg "4:	sub	%1,%2,%1\n"						\
   1340  1.1  mrg "5:	addxcc	%1,%1,%1\n"						\
   1341  1.1  mrg "	bcc	2b\n"							\
   1342  1.1  mrg "	 subcc	%%g1,1,%%g1\n"						\
   1343  1.1  mrg "! Got carry from n.  Subtract next step to cancel this carry.\n"	\
   1344  1.1  mrg "	bne	4b\n"							\
   1345  1.1  mrg "	 addcc	%0,%0,%0	! shift n1n0 and a 0-bit in lsb\n"	\
   1346  1.1  mrg "	sub	%1,%2,%1\n"						\
   1347  1.1  mrg "3:	xnor	%0,0,%0\n"						\
   1348  1.1  mrg "	! End of inline udiv_qrnnd"					\
   1349  1.1  mrg 	   : "=&r" ((USItype) (__q)),					\
   1350  1.1  mrg 	     "=&r" ((USItype) (__r))					\
   1351  1.1  mrg 	   : "r" ((USItype) (__d)),					\
   1352  1.1  mrg 	     "1" ((USItype) (__n1)),					\
   1353  1.1  mrg 	     "0" ((USItype) (__n0)) : "g1" __AND_CLOBBER_CC)
   1354  1.1  mrg #define UDIV_TIME (3+7*32)	/* 7 instructions/iteration. 32 iterations.  */
   1355  1.1  mrg #endif /* __sparclite__ */
   1356  1.1  mrg #endif /* __sparc_v8__ */
   1357  1.1  mrg #endif /* __sparc_v9__ */
   1358  1.1  mrg #endif /* sparc32 */
   1359  1.1  mrg 
   1360  1.1  mrg #if ((defined (__sparc__) && defined (__arch64__)) || defined (__sparcv9)) \
   1361  1.1  mrg     && W_TYPE_SIZE == 64
   1362  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl)				\
   1363  1.1  mrg   do {									\
   1364  1.1  mrg     UDItype __carry = 0;						\
   1365  1.1  mrg     __asm__ ("addcc\t%r5,%6,%1\n\t"					\
   1366  1.1  mrg 	     "add\t%r3,%4,%0\n\t"					\
   1367  1.1  mrg 	     "movcs\t%%xcc, 1, %2\n\t"					\
   1368  1.1  mrg 	     "add\t%0, %2, %0"						\
   1369  1.1  mrg 	     : "=r" ((UDItype)(sh)),				      	\
   1370  1.1  mrg 	       "=&r" ((UDItype)(sl)),				      	\
   1371  1.1  mrg 	       "+r" (__carry)				      		\
   1372  1.1  mrg 	     : "%rJ" ((UDItype)(ah)),				     	\
   1373  1.1  mrg 	       "rI" ((UDItype)(bh)),				      	\
   1374  1.1  mrg 	       "%rJ" ((UDItype)(al)),				     	\
   1375  1.1  mrg 	       "rI" ((UDItype)(bl))				       	\
   1376  1.1  mrg 	     __CLOBBER_CC);						\
   1377  1.1  mrg   } while (0)
   1378  1.1  mrg 
   1379  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl)				\
   1380  1.1  mrg   do {									\
   1381  1.1  mrg     UDItype __carry = 0;						\
   1382  1.1  mrg     __asm__ ("subcc\t%r5,%6,%1\n\t"					\
   1383  1.1  mrg 	     "sub\t%r3,%4,%0\n\t"					\
   1384  1.1  mrg 	     "movcs\t%%xcc, 1, %2\n\t"					\
   1385  1.1  mrg 	     "sub\t%0, %2, %0"						\
   1386  1.1  mrg 	     : "=r" ((UDItype)(sh)),				      	\
   1387  1.1  mrg 	       "=&r" ((UDItype)(sl)),				      	\
   1388  1.1  mrg 	       "+r" (__carry)				      		\
   1389  1.1  mrg 	     : "%rJ" ((UDItype)(ah)),				     	\
   1390  1.1  mrg 	       "rI" ((UDItype)(bh)),				      	\
   1391  1.1  mrg 	       "%rJ" ((UDItype)(al)),				     	\
   1392  1.1  mrg 	       "rI" ((UDItype)(bl))				       	\
   1393  1.1  mrg 	     __CLOBBER_CC);						\
   1394  1.1  mrg   } while (0)
   1395  1.1  mrg 
   1396  1.1  mrg #define umul_ppmm(wh, wl, u, v)						\
   1397  1.1  mrg   do {									\
   1398  1.1  mrg 	  UDItype tmp1, tmp2, tmp3, tmp4;				\
   1399  1.1  mrg 	  __asm__ __volatile__ (					\
   1400  1.1  mrg 		   "srl %7,0,%3\n\t"					\
   1401  1.1  mrg 		   "mulx %3,%6,%1\n\t"					\
   1402  1.1  mrg 		   "srlx %6,32,%2\n\t"					\
   1403  1.1  mrg 		   "mulx %2,%3,%4\n\t"					\
   1404  1.1  mrg 		   "sllx %4,32,%5\n\t"					\
   1405  1.1  mrg 		   "srl %6,0,%3\n\t"					\
   1406  1.1  mrg 		   "sub %1,%5,%5\n\t"					\
   1407  1.1  mrg 		   "srlx %5,32,%5\n\t"					\
   1408  1.1  mrg 		   "addcc %4,%5,%4\n\t"					\
   1409  1.1  mrg 		   "srlx %7,32,%5\n\t"					\
   1410  1.1  mrg 		   "mulx %3,%5,%3\n\t"					\
   1411  1.1  mrg 		   "mulx %2,%5,%5\n\t"					\
   1412  1.1  mrg 		   "sethi %%hi(0x80000000),%2\n\t"			\
   1413  1.1  mrg 		   "addcc %4,%3,%4\n\t"					\
   1414  1.1  mrg 		   "srlx %4,32,%4\n\t"					\
   1415  1.1  mrg 		   "add %2,%2,%2\n\t"					\
   1416  1.1  mrg 		   "movcc %%xcc,%%g0,%2\n\t"				\
   1417  1.1  mrg 		   "addcc %5,%4,%5\n\t"					\
   1418  1.1  mrg 		   "sllx %3,32,%3\n\t"					\
   1419  1.1  mrg 		   "add %1,%3,%1\n\t"					\
   1420  1.1  mrg 		   "add %5,%2,%0"					\
   1421  1.1  mrg 	   : "=r" ((UDItype)(wh)),					\
   1422  1.1  mrg 	     "=&r" ((UDItype)(wl)),					\
   1423  1.1  mrg 	     "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4)	\
   1424  1.1  mrg 	   : "r" ((UDItype)(u)),					\
   1425  1.1  mrg 	     "r" ((UDItype)(v))						\
   1426  1.1  mrg 	   __CLOBBER_CC);						\
   1427  1.1  mrg   } while (0)
   1428  1.1  mrg #define UMUL_TIME 96
   1429  1.1  mrg #define UDIV_TIME 230
   1430  1.1  mrg #endif /* sparc64 */
   1431  1.1  mrg 
   1432  1.1  mrg #if defined (__vax__) && W_TYPE_SIZE == 32
   1433  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   1434  1.1  mrg   __asm__ ("addl2 %5,%1\n\tadwc %3,%0"					\
   1435  1.1  mrg 	   : "=g" ((USItype) (sh)),					\
   1436  1.1  mrg 	     "=&g" ((USItype) (sl))					\
   1437  1.1  mrg 	   : "%0" ((USItype) (ah)),					\
   1438  1.1  mrg 	     "g" ((USItype) (bh)),					\
   1439  1.1  mrg 	     "%1" ((USItype) (al)),					\
   1440  1.1  mrg 	     "g" ((USItype) (bl)))
   1441  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   1442  1.1  mrg   __asm__ ("subl2 %5,%1\n\tsbwc %3,%0"					\
   1443  1.1  mrg 	   : "=g" ((USItype) (sh)),					\
   1444  1.1  mrg 	     "=&g" ((USItype) (sl))					\
   1445  1.1  mrg 	   : "0" ((USItype) (ah)),					\
   1446  1.1  mrg 	     "g" ((USItype) (bh)),					\
   1447  1.1  mrg 	     "1" ((USItype) (al)),					\
   1448  1.1  mrg 	     "g" ((USItype) (bl)))
   1449  1.1  mrg #define umul_ppmm(xh, xl, m0, m1) \
   1450  1.1  mrg   do {									\
   1451  1.1  mrg     union {								\
   1452  1.1  mrg 	UDItype __ll;							\
   1453  1.1  mrg 	struct {USItype __l, __h;} __i;					\
   1454  1.1  mrg       } __xx;								\
   1455  1.1  mrg     USItype __m0 = (m0), __m1 = (m1);					\
   1456  1.1  mrg     __asm__ ("emul %1,%2,$0,%0"						\
   1457  1.1  mrg 	     : "=r" (__xx.__ll)						\
   1458  1.1  mrg 	     : "g" (__m0),						\
   1459  1.1  mrg 	       "g" (__m1));						\
   1460  1.1  mrg     (xh) = __xx.__i.__h;						\
   1461  1.1  mrg     (xl) = __xx.__i.__l;						\
   1462  1.1  mrg     (xh) += ((((SItype) __m0 >> 31) & __m1)				\
   1463  1.1  mrg 	     + (((SItype) __m1 >> 31) & __m0));				\
   1464  1.1  mrg   } while (0)
   1465  1.1  mrg #define sdiv_qrnnd(q, r, n1, n0, d) \
   1466  1.1  mrg   do {									\
   1467  1.1  mrg     union {DItype __ll;							\
   1468  1.1  mrg 	   struct {SItype __l, __h;} __i;				\
   1469  1.1  mrg 	  } __xx;							\
   1470  1.1  mrg     __xx.__i.__h = n1; __xx.__i.__l = n0;				\
   1471  1.1  mrg     __asm__ ("ediv %3,%2,%0,%1"						\
   1472  1.1  mrg 	     : "=g" (q), "=g" (r)					\
   1473  1.1  mrg 	     : "g" (__xx.__ll), "g" (d));				\
   1474  1.1  mrg   } while (0)
   1475  1.1  mrg #endif /* __vax__ */
   1476  1.1  mrg 
   1477  1.1  mrg #ifdef _TMS320C6X
   1478  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   1479  1.1  mrg   do									\
   1480  1.1  mrg     {									\
   1481  1.1  mrg       UDItype __ll;							\
   1482  1.1  mrg       __asm__ ("addu .l1 %1, %2, %0"					\
   1483  1.1  mrg 	       : "=a" (__ll) : "a" (al), "a" (bl));			\
   1484  1.1  mrg       (sl) = (USItype)__ll;						\
   1485  1.1  mrg       (sh) = ((USItype)(__ll >> 32)) + (ah) + (bh);			\
   1486  1.1  mrg     }									\
   1487  1.1  mrg   while (0)
   1488  1.1  mrg 
   1489  1.1  mrg #ifdef _TMS320C6400_PLUS
   1490  1.1  mrg #define __umulsidi3(u,v) ((UDItype)(USItype)u*(USItype)v)
   1491  1.1  mrg #define umul_ppmm(w1, w0, u, v)						\
   1492  1.1  mrg   do {									\
   1493  1.1  mrg     UDItype __x = (UDItype) (USItype) (u) * (USItype) (v);		\
   1494  1.1  mrg     (w1) = (USItype) (__x >> 32);					\
   1495  1.1  mrg     (w0) = (USItype) (__x);						\
   1496  1.1  mrg   } while (0)
   1497  1.1  mrg #endif  /* _TMS320C6400_PLUS */
   1498  1.1  mrg 
   1499  1.1  mrg #define count_leading_zeros(count, x)	((count) = __builtin_clz (x))
   1500  1.1  mrg #ifdef _TMS320C6400
   1501  1.1  mrg #define count_trailing_zeros(count, x)	((count) = __builtin_ctz (x))
   1502  1.1  mrg #endif
   1503  1.1  mrg #define UMUL_TIME 4
   1504  1.1  mrg #define UDIV_TIME 40
   1505  1.1  mrg #endif /* _TMS320C6X */
   1506  1.1  mrg 
   1507  1.1  mrg #if defined (__xtensa__) && W_TYPE_SIZE == 32
   1508  1.1  mrg /* This code is not Xtensa-configuration-specific, so rely on the compiler
   1509  1.1  mrg    to expand builtin functions depending on what configuration features
   1510  1.1  mrg    are available.  This avoids library calls when the operation can be
   1511  1.1  mrg    performed in-line.  */
   1512  1.1  mrg #define umul_ppmm(w1, w0, u, v)						\
   1513  1.1  mrg   do {									\
   1514  1.1  mrg     DWunion __w;							\
   1515  1.1  mrg     __w.ll = __builtin_umulsidi3 (u, v);				\
   1516  1.1  mrg     w1 = __w.s.high;							\
   1517  1.1  mrg     w0 = __w.s.low;							\
   1518  1.1  mrg   } while (0)
   1519  1.1  mrg #define __umulsidi3(u, v)		__builtin_umulsidi3 (u, v)
   1520  1.1  mrg #define count_leading_zeros(COUNT, X)	((COUNT) = __builtin_clz (X))
   1521  1.1  mrg #define count_trailing_zeros(COUNT, X)	((COUNT) = __builtin_ctz (X))
   1522  1.1  mrg #endif /* __xtensa__ */
   1523  1.1  mrg 
   1524  1.1  mrg #if defined xstormy16
   1525  1.1  mrg extern UHItype __stormy16_count_leading_zeros (UHItype);
   1526  1.1  mrg #define count_leading_zeros(count, x)					\
   1527  1.1  mrg   do									\
   1528  1.1  mrg     {									\
   1529  1.1  mrg       UHItype size;							\
   1530  1.1  mrg 									\
   1531  1.1  mrg       /* We assume that W_TYPE_SIZE is a multiple of 16...  */		\
   1532  1.1  mrg       for ((count) = 0, size = W_TYPE_SIZE; size; size -= 16)		\
   1533  1.1  mrg 	{								\
   1534  1.1  mrg 	  UHItype c;							\
   1535  1.1  mrg 									\
   1536  1.1  mrg 	  c = __clzhi2 ((x) >> (size - 16));				\
   1537  1.1  mrg 	  (count) += c;							\
   1538  1.1  mrg 	  if (c != 16)							\
   1539  1.1  mrg 	    break;							\
   1540  1.1  mrg 	}								\
   1541  1.1  mrg     }									\
   1542  1.1  mrg   while (0)
   1543  1.1  mrg #define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE
   1544  1.1  mrg #endif
   1545  1.1  mrg 
   1546  1.1  mrg #if defined (__z8000__) && W_TYPE_SIZE == 16
   1547  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   1548  1.1  mrg   __asm__ ("add	%H1,%H5\n\tadc	%H0,%H3"				\
   1549  1.1  mrg 	   : "=r" ((unsigned int)(sh)),					\
   1550  1.1  mrg 	     "=&r" ((unsigned int)(sl))					\
   1551  1.1  mrg 	   : "%0" ((unsigned int)(ah)),					\
   1552  1.1  mrg 	     "r" ((unsigned int)(bh)),					\
   1553  1.1  mrg 	     "%1" ((unsigned int)(al)),					\
   1554  1.1  mrg 	     "rQR" ((unsigned int)(bl)))
   1555  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   1556  1.1  mrg   __asm__ ("sub	%H1,%H5\n\tsbc	%H0,%H3"				\
   1557  1.1  mrg 	   : "=r" ((unsigned int)(sh)),					\
   1558  1.1  mrg 	     "=&r" ((unsigned int)(sl))					\
   1559  1.1  mrg 	   : "0" ((unsigned int)(ah)),					\
   1560  1.1  mrg 	     "r" ((unsigned int)(bh)),					\
   1561  1.1  mrg 	     "1" ((unsigned int)(al)),					\
   1562  1.1  mrg 	     "rQR" ((unsigned int)(bl)))
   1563  1.1  mrg #define umul_ppmm(xh, xl, m0, m1) \
   1564  1.1  mrg   do {									\
   1565  1.1  mrg     union {long int __ll;						\
   1566  1.1  mrg 	   struct {unsigned int __h, __l;} __i;				\
   1567  1.1  mrg 	  } __xx;							\
   1568  1.1  mrg     unsigned int __m0 = (m0), __m1 = (m1);				\
   1569  1.1  mrg     __asm__ ("mult	%S0,%H3"					\
   1570  1.1  mrg 	     : "=r" (__xx.__i.__h),					\
   1571  1.1  mrg 	       "=r" (__xx.__i.__l)					\
   1572  1.1  mrg 	     : "%1" (__m0),						\
   1573  1.1  mrg 	       "rQR" (__m1));						\
   1574  1.1  mrg     (xh) = __xx.__i.__h; (xl) = __xx.__i.__l;				\
   1575  1.1  mrg     (xh) += ((((signed int) __m0 >> 15) & __m1)				\
   1576  1.1  mrg 	     + (((signed int) __m1 >> 15) & __m0));			\
   1577  1.1  mrg   } while (0)
   1578  1.1  mrg #endif /* __z8000__ */
   1579  1.1  mrg 
   1580  1.1  mrg #endif /* __GNUC__ */
   1581  1.1  mrg 
   1582  1.1  mrg /* If this machine has no inline assembler, use C macros.  */
   1583  1.1  mrg 
   1584  1.1  mrg #if !defined (add_ssaaaa)
   1585  1.1  mrg #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   1586  1.1  mrg   do {									\
   1587  1.1  mrg     UWtype __x;								\
   1588  1.1  mrg     __x = (al) + (bl);							\
   1589  1.1  mrg     (sh) = (ah) + (bh) + (__x < (al));					\
   1590  1.1  mrg     (sl) = __x;								\
   1591  1.1  mrg   } while (0)
   1592  1.1  mrg #endif
   1593  1.1  mrg 
   1594  1.1  mrg #if !defined (sub_ddmmss)
   1595  1.1  mrg #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   1596  1.1  mrg   do {									\
   1597  1.1  mrg     UWtype __x;								\
   1598  1.1  mrg     __x = (al) - (bl);							\
   1599  1.1  mrg     (sh) = (ah) - (bh) - (__x > (al));					\
   1600  1.1  mrg     (sl) = __x;								\
   1601  1.1  mrg   } while (0)
   1602  1.1  mrg #endif
   1603  1.1  mrg 
   1604  1.1  mrg /* If we lack umul_ppmm but have smul_ppmm, define umul_ppmm in terms of
   1605  1.1  mrg    smul_ppmm.  */
   1606  1.1  mrg #if !defined (umul_ppmm) && defined (smul_ppmm)
   1607  1.1  mrg #define umul_ppmm(w1, w0, u, v)						\
   1608  1.1  mrg   do {									\
   1609  1.1  mrg     UWtype __w1;							\
   1610  1.1  mrg     UWtype __xm0 = (u), __xm1 = (v);					\
   1611  1.1  mrg     smul_ppmm (__w1, w0, __xm0, __xm1);					\
   1612  1.1  mrg     (w1) = __w1 + (-(__xm0 >> (W_TYPE_SIZE - 1)) & __xm1)		\
   1613  1.1  mrg 		+ (-(__xm1 >> (W_TYPE_SIZE - 1)) & __xm0);		\
   1614  1.1  mrg   } while (0)
   1615  1.1  mrg #endif
   1616  1.1  mrg 
   1617  1.1  mrg /* If we still don't have umul_ppmm, define it using plain C.  */
   1618  1.1  mrg #if !defined (umul_ppmm)
   1619  1.1  mrg #define umul_ppmm(w1, w0, u, v)						\
   1620  1.1  mrg   do {									\
   1621  1.1  mrg     UWtype __x0, __x1, __x2, __x3;					\
   1622  1.1  mrg     UHWtype __ul, __vl, __uh, __vh;					\
   1623  1.1  mrg 									\
   1624  1.1  mrg     __ul = __ll_lowpart (u);						\
   1625  1.1  mrg     __uh = __ll_highpart (u);						\
   1626  1.1  mrg     __vl = __ll_lowpart (v);						\
   1627  1.1  mrg     __vh = __ll_highpart (v);						\
   1628  1.1  mrg 									\
   1629  1.1  mrg     __x0 = (UWtype) __ul * __vl;					\
   1630  1.1  mrg     __x1 = (UWtype) __ul * __vh;					\
   1631  1.1  mrg     __x2 = (UWtype) __uh * __vl;					\
   1632  1.1  mrg     __x3 = (UWtype) __uh * __vh;					\
   1633  1.1  mrg 									\
   1634  1.1  mrg     __x1 += __ll_highpart (__x0);/* this can't give carry */		\
   1635  1.1  mrg     __x1 += __x2;		/* but this indeed can */		\
   1636  1.1  mrg     if (__x1 < __x2)		/* did we get it? */			\
   1637  1.1  mrg       __x3 += __ll_B;		/* yes, add it in the proper pos.  */	\
   1638  1.1  mrg 									\
   1639  1.1  mrg     (w1) = __x3 + __ll_highpart (__x1);					\
   1640  1.1  mrg     (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0);		\
   1641  1.1  mrg   } while (0)
   1642  1.1  mrg #endif
   1643  1.1  mrg 
   1644  1.1  mrg #if !defined (__umulsidi3)
   1645  1.1  mrg #define __umulsidi3(u, v) \
   1646  1.1  mrg   ({DWunion __w;							\
   1647  1.1  mrg     umul_ppmm (__w.s.high, __w.s.low, u, v);				\
   1648  1.1  mrg     __w.ll; })
   1649  1.1  mrg #endif
   1650  1.1  mrg 
   1651  1.1  mrg /* Define this unconditionally, so it can be used for debugging.  */
   1652  1.1  mrg #define __udiv_qrnnd_c(q, r, n1, n0, d) \
   1653  1.1  mrg   do {									\
   1654  1.1  mrg     UWtype __d1, __d0, __q1, __q0;					\
   1655  1.1  mrg     UWtype __r1, __r0, __m;						\
   1656  1.1  mrg     __d1 = __ll_highpart (d);						\
   1657  1.1  mrg     __d0 = __ll_lowpart (d);						\
   1658  1.1  mrg 									\
   1659  1.1  mrg     __r1 = (n1) % __d1;							\
   1660  1.1  mrg     __q1 = (n1) / __d1;							\
   1661  1.1  mrg     __m = (UWtype) __q1 * __d0;						\
   1662  1.1  mrg     __r1 = __r1 * __ll_B | __ll_highpart (n0);				\
   1663  1.1  mrg     if (__r1 < __m)							\
   1664  1.1  mrg       {									\
   1665  1.1  mrg 	__q1--, __r1 += (d);						\
   1666  1.1  mrg 	if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
   1667  1.1  mrg 	  if (__r1 < __m)						\
   1668  1.1  mrg 	    __q1--, __r1 += (d);					\
   1669  1.1  mrg       }									\
   1670  1.1  mrg     __r1 -= __m;							\
   1671  1.1  mrg 									\
   1672  1.1  mrg     __r0 = __r1 % __d1;							\
   1673  1.1  mrg     __q0 = __r1 / __d1;							\
   1674  1.1  mrg     __m = (UWtype) __q0 * __d0;						\
   1675  1.1  mrg     __r0 = __r0 * __ll_B | __ll_lowpart (n0);				\
   1676  1.1  mrg     if (__r0 < __m)							\
   1677  1.1  mrg       {									\
   1678  1.1  mrg 	__q0--, __r0 += (d);						\
   1679  1.1  mrg 	if (__r0 >= (d))						\
   1680  1.1  mrg 	  if (__r0 < __m)						\
   1681  1.1  mrg 	    __q0--, __r0 += (d);					\
   1682  1.1  mrg       }									\
   1683  1.1  mrg     __r0 -= __m;							\
   1684  1.1  mrg 									\
   1685  1.1  mrg     (q) = (UWtype) __q1 * __ll_B | __q0;				\
   1686  1.1  mrg     (r) = __r0;								\
   1687  1.1  mrg   } while (0)
   1688  1.1  mrg 
   1689  1.1  mrg /* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
   1690  1.1  mrg    __udiv_w_sdiv (defined in libgcc or elsewhere).  */
   1691  1.1  mrg #if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
   1692  1.1  mrg #define udiv_qrnnd(q, r, nh, nl, d) \
   1693  1.1  mrg   do {									\
   1694  1.1  mrg     extern UWtype __udiv_w_sdiv (UWtype *, UWtype, UWtype, UWtype);	\
   1695  1.1  mrg     UWtype __r;								\
   1696  1.1  mrg     (q) = __udiv_w_sdiv (&__r, nh, nl, d);				\
   1697  1.1  mrg     (r) = __r;								\
   1698  1.1  mrg   } while (0)
   1699  1.1  mrg #endif
   1700  1.1  mrg 
   1701  1.1  mrg /* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c.  */
   1702  1.1  mrg #if !defined (udiv_qrnnd)
   1703  1.1  mrg #define UDIV_NEEDS_NORMALIZATION 1
   1704  1.1  mrg #define udiv_qrnnd __udiv_qrnnd_c
   1705  1.1  mrg #endif
   1706  1.1  mrg 
   1707  1.1  mrg #if !defined (count_leading_zeros)
   1708  1.1  mrg #define count_leading_zeros(count, x) \
   1709  1.1  mrg   do {									\
   1710  1.1  mrg     UWtype __xr = (x);							\
   1711  1.1  mrg     UWtype __a;								\
   1712  1.1  mrg 									\
   1713  1.1  mrg     if (W_TYPE_SIZE <= 32)						\
   1714  1.1  mrg       {									\
   1715  1.1  mrg 	__a = __xr < ((UWtype)1<<2*__BITS4)				\
   1716  1.1  mrg 	  ? (__xr < ((UWtype)1<<__BITS4) ? 0 : __BITS4)			\
   1717  1.1  mrg 	  : (__xr < ((UWtype)1<<3*__BITS4) ?  2*__BITS4 : 3*__BITS4);	\
   1718  1.1  mrg       }									\
   1719  1.1  mrg     else								\
   1720  1.1  mrg       {									\
   1721  1.1  mrg 	for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8)			\
   1722  1.1  mrg 	  if (((__xr >> __a) & 0xff) != 0)				\
   1723  1.1  mrg 	    break;							\
   1724  1.1  mrg       }									\
   1725  1.1  mrg 									\
   1726  1.1  mrg     (count) = W_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a);		\
   1727  1.1  mrg   } while (0)
   1728  1.1  mrg #define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE
   1729  1.1  mrg #endif
   1730  1.1  mrg 
   1731  1.1  mrg #if !defined (count_trailing_zeros)
   1732  1.1  mrg /* Define count_trailing_zeros using count_leading_zeros.  The latter might be
   1733  1.1  mrg    defined in asm, but if it is not, the C version above is good enough.  */
   1734  1.1  mrg #define count_trailing_zeros(count, x) \
   1735  1.1  mrg   do {									\
   1736  1.1  mrg     UWtype __ctz_x = (x);						\
   1737  1.1  mrg     UWtype __ctz_c;							\
   1738  1.1  mrg     count_leading_zeros (__ctz_c, __ctz_x & -__ctz_x);			\
   1739  1.1  mrg     (count) = W_TYPE_SIZE - 1 - __ctz_c;				\
   1740  1.1  mrg   } while (0)
   1741  1.1  mrg #endif
   1742  1.1  mrg 
   1743  1.1  mrg #ifndef UDIV_NEEDS_NORMALIZATION
   1744  1.1  mrg #define UDIV_NEEDS_NORMALIZATION 0
   1745  1.1  mrg #endif
   1746